/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <math.h>

#include "test_perf_common.h"

#define NB_CRYPTODEV_DESCRIPTORS 1024
#define DATA_SIZE		512
struct modex_test_data {
	enum rte_crypto_asym_xform_type xform_type;
	struct {
		uint8_t data[DATA_SIZE];
		uint16_t len;
	} base;
	struct {
		uint8_t data[DATA_SIZE];
		uint16_t len;
	} exponent;
	struct {
		uint8_t data[DATA_SIZE];
		uint16_t len;
	} modulus;
	struct {
		uint8_t data[DATA_SIZE];
		uint16_t len;
	} remainder;
	uint16_t result_len;
};

static struct modex_test_data modex_test_case = {
	.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
	.base = {
		.data = {
			0xF8, 0xBA, 0x1A, 0x55, 0xD0, 0x2F, 0x85,
			0xAE, 0x96, 0x7B, 0xB6, 0x2F, 0xB6, 0xCD,
			0xA8, 0xEB, 0x7E, 0x78, 0xA0, 0x50
		},
		.len = 20,
	},
	.exponent = {
		.data = {
			0x01, 0x00, 0x01
		},
		.len = 3,
	},
	.remainder = {
		.data = {
			0x2C, 0x60, 0x75, 0x45, 0x98, 0x9D, 0xE0, 0x72,
			0xA0, 0x9D, 0x3A, 0x9E, 0x03, 0x38, 0x73, 0x3C,
			0x31, 0x83, 0x04, 0xFE, 0x75, 0x43, 0xE6, 0x17,
			0x5C, 0x01, 0x29, 0x51, 0x69, 0x33, 0x62, 0x2D,
			0x78, 0xBE, 0xAE, 0xC4, 0xBC, 0xDE, 0x7E, 0x2C,
			0x77, 0x84, 0xF2, 0xC5, 0x14, 0xB5, 0x2F, 0xF7,
			0xC5, 0x94, 0xEF, 0x86, 0x75, 0x75, 0xB5, 0x11,
			0xE5, 0x0E, 0x0A, 0x29, 0x76, 0xE2, 0xEA, 0x32,
			0x0E, 0x43, 0x77, 0x7E, 0x2C, 0x27, 0xAC, 0x3B,
			0x86, 0xA5, 0xDB, 0xC9, 0x48, 0x40, 0xE8, 0x99,
			0x9A, 0x0A, 0x3D, 0xD6, 0x74, 0xFA, 0x2E, 0x2E,
			0x5B, 0xAF, 0x8C, 0x99, 0x44, 0x2A, 0x67, 0x38,
			0x27, 0x41, 0x59, 0x9D, 0xB8, 0x51, 0xC9, 0xF7,
			0x43, 0x61, 0x31, 0x6E, 0xF1, 0x25, 0x38, 0x7F,
			0xAE, 0xC6, 0xD0, 0xBB, 0x29, 0x76, 0x3F, 0x46,
			0x2E, 0x1B, 0xE4, 0x67, 0x71, 0xE3, 0x87, 0x5A
		},
		.len = 128,
	},
	.modulus = {
		.data = {
			0xb3, 0xa1, 0xaf, 0xb7, 0x13, 0x08, 0x00, 0x0a,
			0x35, 0xdc, 0x2b, 0x20, 0x8d, 0xa1, 0xb5, 0xce,
			0x47, 0x8a, 0xc3, 0x80, 0xf4, 0x7d, 0x4a, 0xa2,
			0x62, 0xfd, 0x61, 0x7f, 0xb5, 0xa8, 0xde, 0x0a,
			0x17, 0x97, 0xa0, 0xbf, 0xdf, 0x56, 0x5a, 0x3d,
			0x51, 0x56, 0x4f, 0x70, 0x70, 0x3f, 0x63, 0x6a,
			0x44, 0x5b, 0xad, 0x84, 0x0d, 0x3f, 0x27, 0x6e,
			0x3b, 0x34, 0x91, 0x60, 0x14, 0xb9, 0xaa, 0x72,
			0xfd, 0xa3, 0x64, 0xd2, 0x03, 0xa7, 0x53, 0x87,
			0x9e, 0x88, 0x0b, 0xc1, 0x14, 0x93, 0x1a, 0x62,
			0xff, 0xb1, 0x5d, 0x74, 0xcd, 0x59, 0x63, 0x18,
			0x11, 0x3d, 0x4f, 0xba, 0x75, 0xd4, 0x33, 0x4e,
			0x23, 0x6b, 0x7b, 0x57, 0x44, 0xe1, 0xd3, 0x03,
			0x13, 0xa6, 0xf0, 0x8b, 0x60, 0xb0, 0x9e, 0xee,
			0x75, 0x08, 0x9d, 0x71, 0x63, 0x13, 0xcb, 0xa6,
			0x81, 0x92, 0x14, 0x03, 0x22, 0x2d, 0xde, 0x55
		},
		.len = 128,
	},
	.result_len = 128,
};

int
perf_test_result(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	int i;
	uint64_t total = 0;
	struct test_perf *t = evt_test_priv(test);

	printf("Packet distribution across worker cores:\n");
	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].processed_pkts;
	for (i = 0; i < t->nb_workers; i++)
		printf("Worker %d packets: "CLGRN"%"PRIu64" "CLNRM"percentage:"
				CLGRN" %3.2f"CLNRM"\n", i,
				t->worker[i].processed_pkts,
				(((double)t->worker[i].processed_pkts)/total)
				* 100);

	return t->result;
}

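/*
 * Synthetic producer: allocates perf_elt objects from the mempool and
 * enqueues them one event at a time, spreading events across nb_flows
 * flow ids and timestamping each element for latency measurement.
 */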
static inline int
perf_producer(void *arg)
{
	int i;
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	struct evt_options *opt = t->opt;
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	struct rte_mempool *pool = t->pool;
	const uint64_t nb_pkts = t->nb_pkts;
	const uint32_t nb_flows = t->nb_flows;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
	struct rte_event ev;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d dev_id %d port=%d queue %d\n", __func__,
				rte_lcore_id(), dev_id, port, p->queue_id);

	ev.event = 0;
	ev.op = RTE_EVENT_OP_NEW;
	ev.queue_id = p->queue_id;
	ev.sched_type = t->opt->sched_type_list[0];
	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	ev.event_type = RTE_EVENT_TYPE_CPU;
	ev.sub_event_type = 0; /* stage 0 */

	while (count < nb_pkts && t->done == false) {
		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
			continue;
		for (i = 0; i < BURST_SIZE; i++) {
			ev.flow_id = flow_counter++ % nb_flows;
			ev.event_ptr = m[i];
			m[i]->timestamp = rte_get_timer_cycles();
			while (rte_event_enqueue_burst(dev_id,
						       port, &ev, 1) != 1) {
				if (t->done)
					break;
				rte_pause();
				m[i]->timestamp = rte_get_timer_cycles();
			}
		}
		count += BURST_SIZE;
	}

	return 0;
}

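/*
 * Synthetic producer, burst variant: enqueues up to prod_enq_burst_sz
 * events per iteration, capped at the device's maximum enqueue depth,
 * and retries (refreshing timestamps) until the whole burst is accepted.
 */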
static inline int
perf_producer_burst(void *arg)
{
	uint32_t i;
	uint64_t timestamp;
	struct rte_event_dev_info dev_info;
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	struct evt_options *opt = t->opt;
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	struct rte_mempool *pool = t->pool;
	const uint64_t nb_pkts = t->nb_pkts;
	const uint32_t nb_flows = t->nb_flows;
	uint32_t flow_counter = 0;
	uint16_t enq = 0;
	uint64_t count = 0;
	struct perf_elt *m[MAX_PROD_ENQ_BURST_SIZE + 1];
	struct rte_event ev[MAX_PROD_ENQ_BURST_SIZE + 1];
	uint32_t burst_size = opt->prod_enq_burst_sz;

	memset(m, 0, sizeof(*m) * (MAX_PROD_ENQ_BURST_SIZE + 1));
	rte_event_dev_info_get(dev_id, &dev_info);
	if (dev_info.max_event_port_enqueue_depth < burst_size)
		burst_size = dev_info.max_event_port_enqueue_depth;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d dev_id %d port=%d queue %d\n", __func__,
				rte_lcore_id(), dev_id, port, p->queue_id);

	for (i = 0; i < burst_size; i++) {
		ev[i].op = RTE_EVENT_OP_NEW;
		ev[i].queue_id = p->queue_id;
		ev[i].sched_type = t->opt->sched_type_list[0];
		ev[i].priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
		ev[i].event_type = RTE_EVENT_TYPE_CPU;
		ev[i].sub_event_type = 0; /* stage 0 */
	}

	while (count < nb_pkts && t->done == false) {
		if (rte_mempool_get_bulk(pool, (void **)m, burst_size) < 0)
			continue;
		timestamp = rte_get_timer_cycles();
		for (i = 0; i < burst_size; i++) {
			ev[i].flow_id = flow_counter++ % nb_flows;
			ev[i].event_ptr = m[i];
			m[i]->timestamp = timestamp;
		}
		enq = rte_event_enqueue_burst(dev_id, port, ev, burst_size);
		while (enq < burst_size) {
			enq += rte_event_enqueue_burst(dev_id, port,
							ev + enq,
							burst_size - enq);
			if (t->done)
				break;
			rte_pause();
			timestamp = rte_get_timer_cycles();
			for (i = enq; i < burst_size; i++)
				m[i]->timestamp = timestamp;
		}
		count += burst_size;
	}
	return 0;
}

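/*
 * Event timer producer: arms one event timer per perf_elt, one at a
 * time, distributing timers across the configured timer adapters and
 * accumulating the measured arm latency.
 */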
static inline int
perf_event_timer_producer(void *arg)
{
	int i;
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	struct evt_options *opt = t->opt;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	uint64_t arm_latency = 0;
	const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_timers = opt->nb_timers;
	struct rte_mempool *pool = t->pool;
	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
	struct rte_event_timer_adapter **adptr = t->timer_adptr;
	struct rte_event_timer tim;
	uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;

	memset(&tim, 0, sizeof(struct rte_event_timer));
	timeout_ticks =
		opt->optm_timer_tick_nsec
			? ceil((double)(timeout_ticks * opt->timer_tick_nsec) /
			       opt->optm_timer_tick_nsec)
			: timeout_ticks;
	timeout_ticks += timeout_ticks ? 0 : 1;
	tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
	tim.ev.op = RTE_EVENT_OP_NEW;
	tim.ev.sched_type = t->opt->sched_type_list[0];
	tim.ev.queue_id = p->queue_id;
	tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	tim.state = RTE_EVENT_TIMER_NOT_ARMED;
	tim.timeout_ticks = timeout_ticks;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d\n", __func__, rte_lcore_id());

	while (count < nb_timers && t->done == false) {
		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
			continue;
		for (i = 0; i < BURST_SIZE; i++) {
			rte_prefetch0(m[i + 1]);
			m[i]->tim = tim;
			m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
			m[i]->tim.ev.event_ptr = m[i];
			m[i]->timestamp = rte_get_timer_cycles();
			while (rte_event_timer_arm_burst(
			       adptr[flow_counter % nb_timer_adptrs],
			       (struct rte_event_timer **)&m[i], 1) != 1) {
				if (t->done)
					break;
				m[i]->timestamp = rte_get_timer_cycles();
			}
			arm_latency += rte_get_timer_cycles() - m[i]->timestamp;
		}
		count += BURST_SIZE;
	}
	fflush(stdout);
	rte_delay_ms(1000);
	printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
			__func__, rte_lcore_id(),
			count ? (float)(arm_latency / count) /
			(rte_get_timer_hz() / 1000000) : 0);
	return 0;
}

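/*
 * Event timer producer, burst variant: arms BURST_SIZE timers at once
 * with a common timeout via rte_event_timer_arm_tmo_tick_burst().
 */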
static inline int
perf_event_timer_producer_burst(void *arg)
{
	int i;
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	struct evt_options *opt = t->opt;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	uint64_t arm_latency = 0;
	const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_timers = opt->nb_timers;
	struct rte_mempool *pool = t->pool;
	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
	struct rte_event_timer_adapter **adptr = t->timer_adptr;
	struct rte_event_timer tim;
	uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;

	memset(&tim, 0, sizeof(struct rte_event_timer));
	timeout_ticks =
		opt->optm_timer_tick_nsec
			? ceil((double)(timeout_ticks * opt->timer_tick_nsec) /
			       opt->optm_timer_tick_nsec)
			: timeout_ticks;
	timeout_ticks += timeout_ticks ? 0 : 1;
	tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
	tim.ev.op = RTE_EVENT_OP_NEW;
	tim.ev.sched_type = t->opt->sched_type_list[0];
	tim.ev.queue_id = p->queue_id;
	tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	tim.state = RTE_EVENT_TIMER_NOT_ARMED;
	tim.timeout_ticks = timeout_ticks;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d\n", __func__, rte_lcore_id());

	while (count < nb_timers && t->done == false) {
		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
			continue;
		for (i = 0; i < BURST_SIZE; i++) {
			rte_prefetch0(m[i + 1]);
			m[i]->tim = tim;
			m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
			m[i]->tim.ev.event_ptr = m[i];
			m[i]->timestamp = rte_get_timer_cycles();
		}
		rte_event_timer_arm_tmo_tick_burst(
				adptr[flow_counter % nb_timer_adptrs],
				(struct rte_event_timer **)m,
				tim.timeout_ticks,
				BURST_SIZE);
		arm_latency += rte_get_timer_cycles() - m[i - 1]->timestamp;
		count += BURST_SIZE;
	}
	fflush(stdout);
	rte_delay_ms(1000);
	printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
			__func__, rte_lcore_id(),
			count ? (float)(arm_latency / count) /
			(rte_get_timer_hz() / 1000000) : 0);
	return 0;
}

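/*
 * Crypto producer, OP_NEW mode: builds symmetric or asymmetric (modex)
 * crypto ops and enqueues them directly to the cryptodev queue pair;
 * the adapter injects the completions into the event device.
 */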
static inline void
crypto_adapter_enq_op_new(struct prod_data *p)
{
	struct test_perf *t = p->t;
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_pkts = t->nb_pkts;
	struct rte_mempool *pool = t->pool;
	struct evt_options *opt = t->opt;
	uint16_t qp_id = p->ca.cdev_qp_id;
	uint8_t cdev_id = p->ca.cdev_id;
	uint64_t alloc_failures = 0;
	uint32_t flow_counter = 0;
	struct rte_crypto_op *op;
	uint16_t len, offset;
	struct rte_mbuf *m;
	uint64_t count = 0;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d queue %d cdev_id %u cdev_qp_id %u\n",
		       __func__, rte_lcore_id(), p->queue_id, p->ca.cdev_id,
		       p->ca.cdev_qp_id);

	offset = sizeof(struct perf_elt);
	len = RTE_MAX(RTE_ETHER_MIN_LEN + offset, opt->mbuf_sz);

	while (count < nb_pkts && t->done == false) {
		if (opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
			struct rte_crypto_sym_op *sym_op;

			op = rte_crypto_op_alloc(t->ca_op_pool,
					 RTE_CRYPTO_OP_TYPE_SYMMETRIC);
			if (unlikely(op == NULL)) {
				alloc_failures++;
				continue;
			}

			m = rte_pktmbuf_alloc(pool);
			if (unlikely(m == NULL)) {
				alloc_failures++;
				rte_crypto_op_free(op);
				continue;
			}

			rte_pktmbuf_append(m, len);
			sym_op = op->sym;
			sym_op->m_src = m;
			sym_op->cipher.data.offset = offset;
			sym_op->cipher.data.length = len - offset;
			rte_crypto_op_attach_sym_session(
				op, p->ca.crypto_sess[flow_counter++ % nb_flows]);
		} else {
			struct rte_crypto_asym_op *asym_op;
			uint8_t *result;

			if (rte_mempool_get(pool, (void **)&result)) {
				alloc_failures++;
				continue;
			}

			op = rte_crypto_op_alloc(t->ca_op_pool,
					 RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
			if (unlikely(op == NULL)) {
				alloc_failures++;
				rte_mempool_put(pool, result);
				continue;
			}

			asym_op = op->asym;
			asym_op->modex.base.data = modex_test_case.base.data;
			asym_op->modex.base.length = modex_test_case.base.len;
			asym_op->modex.result.data = result;
			asym_op->modex.result.length = modex_test_case.result_len;
			rte_crypto_op_attach_asym_session(
				op, p->ca.crypto_sess[flow_counter++ % nb_flows]);
		}
		while (rte_cryptodev_enqueue_burst(cdev_id, qp_id, &op, 1) != 1 &&
				t->done == false)
			rte_pause();

		count++;
	}

	if (opt->verbose_level > 1 && alloc_failures)
		printf("%s(): lcore %d allocation failures: %"PRIu64"\n",
		       __func__, rte_lcore_id(), alloc_failures);
}

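/*
 * Crypto producer, OP_FORWARD mode: wraps each crypto op in an event
 * and enqueues it through rte_event_crypto_adapter_enqueue(), letting
 * the adapter forward it to the cryptodev.
 */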
static inline void
crypto_adapter_enq_op_fwd(struct prod_data *p)
{
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	struct test_perf *t = p->t;
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_pkts = t->nb_pkts;
	struct rte_mempool *pool = t->pool;
	struct evt_options *opt = t->opt;
	uint64_t alloc_failures = 0;
	uint32_t flow_counter = 0;
	struct rte_crypto_op *op;
	uint16_t len, offset;
	struct rte_event ev;
	struct rte_mbuf *m;
	uint64_t count = 0;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d port %d queue %d cdev_id %u cdev_qp_id %u\n",
		       __func__, rte_lcore_id(), port, p->queue_id,
		       p->ca.cdev_id, p->ca.cdev_qp_id);

	ev.event = 0;
	ev.op = RTE_EVENT_OP_NEW;
	ev.queue_id = p->queue_id;
	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	ev.event_type = RTE_EVENT_TYPE_CPU;

	offset = sizeof(struct perf_elt);
	len = RTE_MAX(RTE_ETHER_MIN_LEN + offset, opt->mbuf_sz);

	while (count < nb_pkts && t->done == false) {
		if (opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
			struct rte_crypto_sym_op *sym_op;

			op = rte_crypto_op_alloc(t->ca_op_pool,
					 RTE_CRYPTO_OP_TYPE_SYMMETRIC);
			if (unlikely(op == NULL)) {
				alloc_failures++;
				continue;
			}

			m = rte_pktmbuf_alloc(pool);
			if (unlikely(m == NULL)) {
				alloc_failures++;
				rte_crypto_op_free(op);
				continue;
			}

			rte_pktmbuf_append(m, len);
			sym_op = op->sym;
			sym_op->m_src = m;
			sym_op->cipher.data.offset = offset;
			sym_op->cipher.data.length = len - offset;
			rte_crypto_op_attach_sym_session(
				op, p->ca.crypto_sess[flow_counter++ % nb_flows]);
		} else {
			struct rte_crypto_asym_op *asym_op;
			uint8_t *result;

			if (rte_mempool_get(pool, (void **)&result)) {
				alloc_failures++;
				continue;
			}

			op = rte_crypto_op_alloc(t->ca_op_pool,
					 RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
			if (unlikely(op == NULL)) {
				alloc_failures++;
				rte_mempool_put(pool, result);
				continue;
			}

			asym_op = op->asym;
			asym_op->modex.base.data = modex_test_case.base.data;
			asym_op->modex.base.length = modex_test_case.base.len;
			asym_op->modex.result.data = result;
			asym_op->modex.result.length = modex_test_case.result_len;
			rte_crypto_op_attach_asym_session(
				op, p->ca.crypto_sess[flow_counter++ % nb_flows]);
		}
		ev.event_ptr = op;

		while (rte_event_crypto_adapter_enqueue(dev_id, port, &ev, 1) != 1 &&
		       t->done == false)
			rte_pause();

		count++;
	}

	if (opt->verbose_level > 1 && alloc_failures)
		printf("%s(): lcore %d allocation failures: %"PRIu64"\n",
		       __func__, rte_lcore_id(), alloc_failures);
}

static inline int
perf_event_crypto_producer(void *arg)
{
	struct prod_data *p = arg;
	struct evt_options *opt = p->t->opt;

	if (opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
		crypto_adapter_enq_op_new(p);
	else
		crypto_adapter_enq_op_fwd(p);

	return 0;
}

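/*
 * Burst variant of the OP_NEW crypto producer: allocates crypto ops,
 * mbufs and modex result buffers in bulk and enqueues them to the
 * cryptodev queue pair burst_size at a time.
 */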
static void
crypto_adapter_enq_op_new_burst(struct prod_data *p)
{
	const struct test_perf *t = p->t;
	const struct evt_options *opt = t->opt;

	struct rte_mbuf *m, *pkts_burst[MAX_PROD_ENQ_BURST_SIZE];
	struct rte_crypto_op *ops_burst[MAX_PROD_ENQ_BURST_SIZE];
	const uint32_t burst_size = opt->prod_enq_burst_sz;
	uint8_t *result[MAX_PROD_ENQ_BURST_SIZE];
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_pkts = t->nb_pkts;
	uint16_t len, enq, nb_alloc, offset;
	struct rte_mempool *pool = t->pool;
	uint16_t qp_id = p->ca.cdev_qp_id;
	uint8_t cdev_id = p->ca.cdev_id;
	uint64_t alloc_failures = 0;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	uint32_t i;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d queue %d cdev_id %u cdev_qp_id %u\n",
		       __func__, rte_lcore_id(), p->queue_id, p->ca.cdev_id,
		       p->ca.cdev_qp_id);

	offset = sizeof(struct perf_elt);
	len = RTE_MAX(RTE_ETHER_MIN_LEN + offset, opt->mbuf_sz);

	while (count < nb_pkts && t->done == false) {
		if (opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
			struct rte_crypto_sym_op *sym_op;
			int ret;

			nb_alloc = rte_crypto_op_bulk_alloc(t->ca_op_pool,
					RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops_burst, burst_size);
			if (unlikely(nb_alloc != burst_size)) {
				alloc_failures++;
				continue;
			}

			ret = rte_pktmbuf_alloc_bulk(pool, pkts_burst, burst_size);
			if (unlikely(ret != 0)) {
				alloc_failures++;
				rte_mempool_put_bulk(t->ca_op_pool, (void **)ops_burst, burst_size);
				continue;
			}

			for (i = 0; i < burst_size; i++) {
				m = pkts_burst[i];
				rte_pktmbuf_append(m, len);
				sym_op = ops_burst[i]->sym;
				sym_op->m_src = m;
				sym_op->cipher.data.offset = offset;
				sym_op->cipher.data.length = len - offset;
				rte_crypto_op_attach_sym_session(ops_burst[i],
						p->ca.crypto_sess[flow_counter++ % nb_flows]);
			}
		} else {
			struct rte_crypto_asym_op *asym_op;

			nb_alloc = rte_crypto_op_bulk_alloc(t->ca_op_pool,
					RTE_CRYPTO_OP_TYPE_ASYMMETRIC, ops_burst, burst_size);
			if (unlikely(nb_alloc != burst_size)) {
				alloc_failures++;
				continue;
			}

			if (rte_mempool_get_bulk(pool, (void **)result, burst_size)) {
				alloc_failures++;
				rte_mempool_put_bulk(t->ca_op_pool, (void **)ops_burst, burst_size);
				continue;
			}

			for (i = 0; i < burst_size; i++) {
				asym_op = ops_burst[i]->asym;
				asym_op->modex.base.data = modex_test_case.base.data;
				asym_op->modex.base.length = modex_test_case.base.len;
				asym_op->modex.result.data = result[i];
				asym_op->modex.result.length = modex_test_case.result_len;
				rte_crypto_op_attach_asym_session(ops_burst[i],
						p->ca.crypto_sess[flow_counter++ % nb_flows]);
			}
		}

		enq = 0;
		while (!t->done) {
			enq += rte_cryptodev_enqueue_burst(cdev_id, qp_id, ops_burst + enq,
					burst_size - enq);
			if (enq == burst_size)
				break;
		}

		count += burst_size;
	}

	if (opt->verbose_level > 1 && alloc_failures)
		printf("%s(): lcore %d allocation failures: %"PRIu64"\n",
		       __func__, rte_lcore_id(), alloc_failures);
}

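/*
 * Burst variant of the OP_FORWARD crypto producer: same bulk
 * allocation scheme, but the ops are wrapped in events and pushed
 * through rte_event_crypto_adapter_enqueue().
 */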
static void
crypto_adapter_enq_op_fwd_burst(struct prod_data *p)
{
	const struct test_perf *t = p->t;
	const struct evt_options *opt = t->opt;

	struct rte_mbuf *m, *pkts_burst[MAX_PROD_ENQ_BURST_SIZE];
	struct rte_crypto_op *ops_burst[MAX_PROD_ENQ_BURST_SIZE];
	const uint32_t burst_size = opt->prod_enq_burst_sz;
	struct rte_event ev[MAX_PROD_ENQ_BURST_SIZE];
	uint8_t *result[MAX_PROD_ENQ_BURST_SIZE];
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_pkts = t->nb_pkts;
	uint16_t len, enq, nb_alloc, offset;
	struct rte_mempool *pool = t->pool;
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	uint64_t alloc_failures = 0;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	uint32_t i;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d port %d queue %d cdev_id %u cdev_qp_id %u\n",
		       __func__, rte_lcore_id(), port, p->queue_id,
		       p->ca.cdev_id, p->ca.cdev_qp_id);

	offset = sizeof(struct perf_elt);
	len = RTE_MAX(RTE_ETHER_MIN_LEN + offset, opt->mbuf_sz);

	for (i = 0; i < burst_size; i++) {
		ev[i].event = 0;
		ev[i].op = RTE_EVENT_OP_NEW;
		ev[i].queue_id = p->queue_id;
		ev[i].sched_type = RTE_SCHED_TYPE_ATOMIC;
		ev[i].event_type = RTE_EVENT_TYPE_CPU;
	}

	while (count < nb_pkts && t->done == false) {
		if (opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
			struct rte_crypto_sym_op *sym_op;
			int ret;

			nb_alloc = rte_crypto_op_bulk_alloc(t->ca_op_pool,
					RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops_burst, burst_size);
			if (unlikely(nb_alloc != burst_size)) {
				alloc_failures++;
				continue;
			}

			ret = rte_pktmbuf_alloc_bulk(pool, pkts_burst, burst_size);
			if (unlikely(ret != 0)) {
				alloc_failures++;
				rte_mempool_put_bulk(t->ca_op_pool, (void **)ops_burst, burst_size);
				continue;
			}

			for (i = 0; i < burst_size; i++) {
				m = pkts_burst[i];
				rte_pktmbuf_append(m, len);
				sym_op = ops_burst[i]->sym;
				sym_op->m_src = m;
				sym_op->cipher.data.offset = offset;
				sym_op->cipher.data.length = len - offset;
				rte_crypto_op_attach_sym_session(ops_burst[i],
						p->ca.crypto_sess[flow_counter++ % nb_flows]);
				ev[i].event_ptr = ops_burst[i];
			}
		} else {
			struct rte_crypto_asym_op *asym_op;

			nb_alloc = rte_crypto_op_bulk_alloc(t->ca_op_pool,
					RTE_CRYPTO_OP_TYPE_ASYMMETRIC, ops_burst, burst_size);
			if (unlikely(nb_alloc != burst_size)) {
				alloc_failures++;
				continue;
			}

			if (rte_mempool_get_bulk(pool, (void **)result, burst_size)) {
				alloc_failures++;
				rte_mempool_put_bulk(t->ca_op_pool, (void **)ops_burst, burst_size);
				continue;
			}

			for (i = 0; i < burst_size; i++) {
				asym_op = ops_burst[i]->asym;
				asym_op->modex.base.data = modex_test_case.base.data;
				asym_op->modex.base.length = modex_test_case.base.len;
				asym_op->modex.result.data = result[i];
				asym_op->modex.result.length = modex_test_case.result_len;
				rte_crypto_op_attach_asym_session(ops_burst[i],
						p->ca.crypto_sess[flow_counter++ % nb_flows]);
				ev[i].event_ptr = ops_burst[i];
			}
		}

		enq = 0;
		while (!t->done) {
			enq += rte_event_crypto_adapter_enqueue(dev_id, port, ev + enq,
					burst_size - enq);
			if (enq == burst_size)
				break;
		}

		count += burst_size;
	}

	if (opt->verbose_level > 1 && alloc_failures)
		printf("%s(): lcore %d allocation failures: %"PRIu64"\n",
		       __func__, rte_lcore_id(), alloc_failures);
}

static inline int
perf_event_crypto_producer_burst(void *arg)
{
	struct prod_data *p = arg;
	struct evt_options *opt = p->t->opt;

	if (opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
		crypto_adapter_enq_op_new_burst(p);
	else
		crypto_adapter_enq_op_fwd_burst(p);

	return 0;
}

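/*
 * Producer dispatcher: picks the producer loop matching the configured
 * producer type and enqueue burst size. For example, an invocation
 * along the lines of (illustrative only; exact flags depend on the
 * dpdk-test-eventdev build and documentation):
 *
 *   dpdk-test-eventdev -l 0-3 -- --test=perf_queue \
 *       --plcores=1 --wlcores=2,3 --prod_enq_burst_sz=32
 *
 * would select perf_producer_burst() below.
 */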
static int
perf_producer_wrapper(void *arg)
{
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	bool burst = evt_has_burst_mode(p->dev_id);

	/* In case of synthetic producer, launch perf_producer or
	 * perf_producer_burst depending on producer enqueue burst size.
	 */
	if (t->opt->prod_type == EVT_PROD_TYPE_SYNT &&
			t->opt->prod_enq_burst_sz == 1)
		return perf_producer(arg);
	else if (t->opt->prod_type == EVT_PROD_TYPE_SYNT &&
			t->opt->prod_enq_burst_sz > 1) {
		if (!burst)
			evt_err("This event device does not support burst mode");
		else
			return perf_producer_burst(arg);
	} else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
			!t->opt->timdev_use_burst)
		return perf_event_timer_producer(arg);
	else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
			t->opt->timdev_use_burst)
		return perf_event_timer_producer_burst(arg);
	else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
		if (t->opt->prod_enq_burst_sz > 1)
			return perf_event_crypto_producer_burst(arg);
		else
			return perf_event_crypto_producer(arg);
	}
	return 0;
}

static inline uint64_t
processed_pkts(struct test_perf *t)
{
	uint8_t i;
	uint64_t total = 0;

	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].processed_pkts;

	return total;
}

static inline uint64_t
total_latency(struct test_perf *t)
{
	uint8_t i;
	uint64_t total = 0;

	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].latency;

	return total;
}

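/*
 * Launch workers and producers on their lcores, then poll from the
 * main lcore: print per-second throughput (and forward latency when
 * enabled) and flag a deadlock when no progress is made within the
 * 5 second sampling window.
 */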
int
perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *))
{
	int ret, lcore_id;
	struct test_perf *t = evt_test_priv(test);

	int port_idx = 0;
	/* launch workers */
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (!(opt->wlcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(worker,
				 &t->worker[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch worker %d", lcore_id);
			return ret;
		}
		port_idx++;
	}

	/* launch producers */
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (!(opt->plcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(perf_producer_wrapper,
				&t->prod[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch perf_producer %d", lcore_id);
			return ret;
		}
		port_idx++;
	}

	const uint64_t total_pkts = t->outstand_pkts;

	uint64_t dead_lock_cycles = rte_get_timer_cycles();
	int64_t dead_lock_remaining = total_pkts;
	const uint64_t dead_lock_sample = rte_get_timer_hz() * 5;

	uint64_t perf_cycles = rte_get_timer_cycles();
	int64_t perf_remaining = total_pkts;
	const uint64_t perf_sample = rte_get_timer_hz();

	static float total_mpps;
	static uint64_t samples;

	const uint64_t freq_mhz = rte_get_timer_hz() / 1000000;
	int64_t remaining = t->outstand_pkts - processed_pkts(t);

	while (t->done == false) {
		const uint64_t new_cycles = rte_get_timer_cycles();

		if ((new_cycles - perf_cycles) > perf_sample) {
			const uint64_t latency = total_latency(t);
			const uint64_t pkts = processed_pkts(t);

			remaining = t->outstand_pkts - pkts;
			float mpps = (float)(perf_remaining - remaining) / 1000000;

			perf_remaining = remaining;
			perf_cycles = new_cycles;
			total_mpps += mpps;
			++samples;
			if (opt->fwd_latency && pkts > 0) {
				printf(CLGRN"\r%.3f mpps avg %.3f mpps [avg fwd latency %.3f us] "CLNRM,
					mpps, total_mpps/samples,
					(float)(latency/pkts)/freq_mhz);
			} else {
				printf(CLGRN"\r%.3f mpps avg %.3f mpps"CLNRM,
					mpps, total_mpps/samples);
			}
			fflush(stdout);

			if (remaining <= 0) {
				t->result = EVT_TEST_SUCCESS;
				if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
				    opt->prod_type ==
					    EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
				    opt->prod_type ==
					    EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
					t->done = true;
					break;
				}
			}
		}

		if (new_cycles - dead_lock_cycles > dead_lock_sample &&
		    (opt->prod_type == EVT_PROD_TYPE_SYNT ||
		     opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
		     opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)) {
			remaining = t->outstand_pkts - processed_pkts(t);
			if (dead_lock_remaining == remaining) {
				rte_event_dev_dump(opt->dev_id, stdout);
				evt_err("No schedules for 5 seconds, deadlock");
				t->done = true;
				break;
			}
			dead_lock_remaining = remaining;
			dead_lock_cycles = new_cycles;
		}
	}
	printf("\n");
	return 0;
}

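/*
 * Create one Rx adapter per ethernet device and connect its queues to
 * the event device, registering a service core when the adapter has
 * no internal port capability.
 */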
static int
perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
		struct rte_event_port_conf prod_conf)
{
	int ret = 0;
	uint16_t prod;
	struct rte_event_eth_rx_adapter_queue_conf queue_conf;

	memset(&queue_conf, 0,
			sizeof(struct rte_event_eth_rx_adapter_queue_conf));
	queue_conf.ev.sched_type = opt->sched_type_list[0];
	RTE_ETH_FOREACH_DEV(prod) {
		uint32_t cap;

		ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
				prod, &cap);
		if (ret) {
			evt_err("failed to get event rx adapter[%d]"
					" capabilities",
					opt->dev_id);
			return ret;
		}
		queue_conf.ev.queue_id = prod * stride;
		ret = rte_event_eth_rx_adapter_create(prod, opt->dev_id,
				&prod_conf);
		if (ret) {
			evt_err("failed to create rx adapter[%d]", prod);
			return ret;
		}
		ret = rte_event_eth_rx_adapter_queue_add(prod, prod, -1,
				&queue_conf);
		if (ret) {
			evt_err("failed to add rx queues to adapter[%d]", prod);
			return ret;
		}

		if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
			uint32_t service_id;

			rte_event_eth_rx_adapter_service_id_get(prod,
					&service_id);
			ret = evt_service_setup(service_id);
			if (ret) {
				evt_err("Failed to setup service core"
						" for Rx adapter\n");
				return ret;
			}
		}
	}

	return ret;
}

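/*
 * Create and configure the requested number of event timer adapters,
 * enabling single-producer puts when only one producer lcore is used.
 */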
static int
perf_event_timer_adapter_setup(struct test_perf *t)
{
	int i;
	int ret;
	struct rte_event_timer_adapter_info adapter_info;
	struct rte_event_timer_adapter *wl;
	uint8_t nb_producers = evt_nr_active_lcores(t->opt->plcores);
	uint8_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;

	if (nb_producers == 1)
		flags |= RTE_EVENT_TIMER_ADAPTER_F_SP_PUT;

	for (i = 0; i < t->opt->nb_timer_adptrs; i++) {
		struct rte_event_timer_adapter_conf config = {
			.event_dev_id = t->opt->dev_id,
			.timer_adapter_id = i,
			.timer_tick_ns = t->opt->timer_tick_nsec,
			.max_tmo_ns = t->opt->max_tmo_nsec,
			.nb_timers = t->opt->pool_sz,
			.flags = flags,
		};

		wl = rte_event_timer_adapter_create(&config);
		if (wl == NULL) {
			evt_err("failed to create event timer ring %d", i);
			return rte_errno;
		}

		memset(&adapter_info, 0,
				sizeof(struct rte_event_timer_adapter_info));
		rte_event_timer_adapter_get_info(wl, &adapter_info);
		t->opt->optm_timer_tick_nsec = adapter_info.min_resolution_ns;

		if (!(adapter_info.caps &
				RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
			uint32_t service_id = -1U;

			rte_event_timer_adapter_service_id_get(wl,
					&service_id);
			ret = evt_service_setup(service_id);
			if (ret) {
				evt_err("Failed to setup service core"
						" for timer adapter\n");
				return ret;
			}
			rte_service_runstate_set(service_id, 1);
		}
		t->timer_adptr[i] = wl;
	}
	return 0;
}

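/*
 * Validate the crypto adapter capabilities against the requested mode
 * and vector settings, then add the producer's cryptodev queue pair to
 * the adapter.
 */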
static int
perf_event_crypto_adapter_setup(struct test_perf *t, struct prod_data *p)
{
	struct rte_event_crypto_adapter_queue_conf conf;
	struct evt_options *opt = t->opt;
	uint32_t cap;
	int ret;

	memset(&conf, 0, sizeof(conf));

	ret = rte_event_crypto_adapter_caps_get(p->dev_id, p->ca.cdev_id, &cap);
	if (ret) {
		evt_err("Failed to get crypto adapter capabilities");
		return ret;
	}

	if (((opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) &&
	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW)) ||
	    ((opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) &&
	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))) {
		evt_err("crypto adapter %s mode unsupported\n",
			opt->crypto_adptr_mode ? "OP_FORWARD" : "OP_NEW");
		return -ENOTSUP;
	} else if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA)) {
		evt_err("Storing crypto session not supported");
		return -ENOTSUP;
	}

	if (opt->ena_vector) {
		struct rte_event_crypto_adapter_vector_limits limits;

		if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR)) {
			evt_err("Crypto adapter doesn't support event vector");
			return -EINVAL;
		}

		ret = rte_event_crypto_adapter_vector_limits_get(p->dev_id, p->ca.cdev_id, &limits);
		if (ret) {
			evt_err("Failed to get crypto adapter's vector limits");
			return ret;
		}

		if (opt->vector_size < limits.min_sz || opt->vector_size > limits.max_sz) {
			evt_err("Vector size [%d] not within limits max[%d] min[%d]",
				opt->vector_size, limits.max_sz, limits.min_sz);
			return -EINVAL;
		}

		if (limits.log2_sz && !rte_is_power_of_2(opt->vector_size)) {
			evt_err("Vector size [%d] not power of 2", opt->vector_size);
			return -EINVAL;
		}

		if (opt->vector_tmo_nsec > limits.max_timeout_ns ||
			opt->vector_tmo_nsec < limits.min_timeout_ns) {
			evt_err("Vector timeout [%" PRIu64 "] not within limits "
				"max[%" PRIu64 "] min[%" PRIu64 "]",
				opt->vector_tmo_nsec, limits.max_timeout_ns, limits.min_timeout_ns);
			return -EINVAL;
		}

		conf.vector_mp = t->ca_vector_pool;
		conf.vector_sz = opt->vector_size;
		conf.vector_timeout_ns = opt->vector_tmo_nsec;
		conf.flags |= RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR;
	}

	if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
		conf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
		conf.ev.queue_id = p->queue_id;
	}

	ret = rte_event_crypto_adapter_queue_pair_add(
		TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id, &conf);

	return ret;
}

static void *
cryptodev_sym_sess_create(struct prod_data *p, struct test_perf *t)
{
	struct rte_crypto_sym_xform cipher_xform;
	void *sess;

	cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_NULL;
	cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	cipher_xform.next = NULL;

	sess = rte_cryptodev_sym_session_create(p->ca.cdev_id, &cipher_xform,
			t->ca_sess_pool);
	if (sess == NULL) {
		evt_err("Failed to create sym session");
		return NULL;
	}

	return sess;
}

static void *
cryptodev_asym_sess_create(struct prod_data *p, struct test_perf *t)
{
	const struct rte_cryptodev_asymmetric_xform_capability *capability;
	struct rte_cryptodev_asym_capability_idx cap_idx;
	struct rte_crypto_asym_xform xform;
	void *sess;

	xform.next = NULL;
	xform.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX;
	cap_idx.type = xform.xform_type;
	capability = rte_cryptodev_asym_capability_get(p->ca.cdev_id, &cap_idx);
	if (capability == NULL) {
		evt_err("Device doesn't support MODEX. Test skipped\n");
		return NULL;
	}

	xform.modex.modulus.data = modex_test_case.modulus.data;
	xform.modex.modulus.length = modex_test_case.modulus.len;
	xform.modex.exponent.data = modex_test_case.exponent.data;
	xform.modex.exponent.length = modex_test_case.exponent.len;

	if (rte_cryptodev_asym_session_create(p->ca.cdev_id, &xform,
			t->ca_asym_sess_pool, &sess)) {
		evt_err("Failed to create asym session");
		return NULL;
	}

	return sess;
}

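/*
 * Set up event ports: one port per worker linked to all queues, plus
 * producer-side ports (or Rx/timer/crypto adapters) depending on the
 * producer type.
 */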
int
perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
				uint8_t stride, uint8_t nb_queues,
				const struct rte_event_port_conf *port_conf)
{
	struct test_perf *t = evt_test_priv(test);
	uint16_t port, prod;
	int ret = -1;

	/* setup one port per worker, linking to all queues */
	for (port = 0; port < evt_nr_active_lcores(opt->wlcores);
				port++) {
		struct worker_data *w = &t->worker[port];

		w->dev_id = opt->dev_id;
		w->port_id = port;
		w->t = t;
		w->processed_pkts = 0;
		w->latency = 0;

		struct rte_event_port_conf conf = *port_conf;
		conf.event_port_cfg |= RTE_EVENT_PORT_CFG_HINT_WORKER;

		ret = rte_event_port_setup(opt->dev_id, port, &conf);
		if (ret) {
			evt_err("failed to setup port %d", port);
			return ret;
		}

		ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
		if (ret != nb_queues) {
			evt_err("failed to link all queues to port %d", port);
			return -EINVAL;
		}
	}

	/* port for producers, no links */
	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		for ( ; port < perf_nb_event_ports(opt); port++) {
			struct prod_data *p = &t->prod[port];
			p->t = t;
		}

		struct rte_event_port_conf conf = *port_conf;
		conf.event_port_cfg |= RTE_EVENT_PORT_CFG_HINT_PRODUCER;

		ret = perf_event_rx_adapter_setup(opt, stride, conf);
		if (ret)
			return ret;
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		prod = 0;
		for ( ; port < perf_nb_event_ports(opt); port++) {
			struct prod_data *p = &t->prod[port];
			p->queue_id = prod * stride;
			p->t = t;
			prod++;
		}

		ret = perf_event_timer_adapter_setup(t);
		if (ret)
			return ret;
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
		struct rte_event_port_conf conf = *port_conf;
		uint8_t cdev_id = 0;
		uint16_t qp_id = 0;

		ret = rte_event_crypto_adapter_create(TEST_PERF_CA_ID,
						      opt->dev_id, &conf, 0);
		if (ret) {
			evt_err("Failed to create crypto adapter");
			return ret;
		}

		prod = 0;
		for (; port < perf_nb_event_ports(opt); port++) {
			union rte_event_crypto_metadata m_data;
			struct prod_data *p = &t->prod[port];
			uint32_t flow_id;

			if (qp_id == rte_cryptodev_queue_pair_count(cdev_id)) {
				cdev_id++;
				qp_id = 0;
			}

			p->dev_id = opt->dev_id;
			p->port_id = port;
			p->queue_id = prod * stride;
			p->ca.cdev_id = cdev_id;
			p->ca.cdev_qp_id = qp_id;
			p->ca.crypto_sess = rte_zmalloc_socket(
				NULL, sizeof(void *) * t->nb_flows,
				RTE_CACHE_LINE_SIZE, opt->socket_id);
			p->t = t;

			ret = perf_event_crypto_adapter_setup(t, p);
			if (ret)
				return ret;

			m_data.request_info.cdev_id = p->ca.cdev_id;
			m_data.request_info.queue_pair_id = p->ca.cdev_qp_id;
			m_data.response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
			m_data.response_info.queue_id = p->queue_id;

			for (flow_id = 0; flow_id < t->nb_flows; flow_id++) {
				m_data.response_info.flow_id = flow_id;
				if (opt->crypto_op_type ==
						RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
					void *sess;

					sess = cryptodev_sym_sess_create(p, t);
					if (sess == NULL)
						return -ENOMEM;

					ret = rte_cryptodev_session_event_mdata_set(
						cdev_id,
						sess,
						RTE_CRYPTO_OP_TYPE_SYMMETRIC,
						RTE_CRYPTO_OP_WITH_SESSION,
						&m_data, sizeof(m_data));
					if (ret)
						return ret;
					p->ca.crypto_sess[flow_id] = sess;
				} else {
					void *sess;

					sess = cryptodev_asym_sess_create(p, t);
					if (sess == NULL)
						return -ENOMEM;
					ret = rte_cryptodev_session_event_mdata_set(
						cdev_id,
						sess,
						RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
						RTE_CRYPTO_OP_WITH_SESSION,
						&m_data, sizeof(m_data));
					if (ret)
						return ret;
					p->ca.crypto_sess[flow_id] = sess;
				}
			}

			conf.event_port_cfg |=
				RTE_EVENT_PORT_CFG_HINT_PRODUCER |
				RTE_EVENT_PORT_CFG_HINT_CONSUMER;

			ret = rte_event_port_setup(opt->dev_id, port, &conf);
			if (ret) {
				evt_err("failed to setup port %d", port);
				return ret;
			}

			qp_id++;
			prod++;
		}
	} else {
		prod = 0;
		for ( ; port < perf_nb_event_ports(opt); port++) {
			struct prod_data *p = &t->prod[port];

			p->dev_id = opt->dev_id;
			p->port_id = port;
			p->queue_id = prod * stride;
			p->t = t;

			struct rte_event_port_conf conf = *port_conf;
			conf.event_port_cfg |=
				RTE_EVENT_PORT_CFG_HINT_PRODUCER |
				RTE_EVENT_PORT_CFG_HINT_CONSUMER;

			ret = rte_event_port_setup(opt->dev_id, port, &conf);
			if (ret) {
				evt_err("failed to setup port %d", port);
				return ret;
			}
			prod++;
		}
	}

	return ret;
}

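/*
 * Sanity-check the option set: lcore assignments must not overlap,
 * queue/port counts must fit the limits, and a few option combinations
 * are fixed up (forward latency, queue priority, default packet count).
 */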
int
perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
{
	unsigned int lcores;

	/* N producer + N worker + main when producer cores are used
	 * Else N worker + main when Rx adapter is used
	 */
	lcores = opt->prod_type == EVT_PROD_TYPE_SYNT ? 3 : 2;

	if (rte_lcore_count() < lcores) {
		evt_err("test needs minimum %d lcores", lcores);
		return -1;
	}

	/* Validate worker lcores */
	if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) {
		evt_err("worker lcores overlap with main lcore");
		return -1;
	}
	if (evt_lcores_has_overlap_multi(opt->wlcores, opt->plcores)) {
		evt_err("worker lcores overlap producer lcores");
		return -1;
	}
	if (evt_has_disabled_lcore(opt->wlcores)) {
		evt_err("one or more worker lcores are not enabled");
		return -1;
	}
	if (!evt_has_active_lcore(opt->wlcores)) {
		evt_err("minimum one worker is required");
		return -1;
	}

	if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
	    opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
	    opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
		/* Validate producer lcores */
		if (evt_lcores_has_overlap(opt->plcores,
					rte_get_main_lcore())) {
			evt_err("producer lcores overlap with main lcore");
			return -1;
		}
		if (evt_has_disabled_lcore(opt->plcores)) {
			evt_err("one or more producer lcores are not enabled");
			return -1;
		}
		if (!evt_has_active_lcore(opt->plcores)) {
			evt_err("minimum one producer is required");
			return -1;
		}
	}

	if (evt_has_invalid_stage(opt))
		return -1;

	if (evt_has_invalid_sched_type(opt))
		return -1;

	if (nb_queues > EVT_MAX_QUEUES) {
		evt_err("number of queues exceeds %d", EVT_MAX_QUEUES);
		return -1;
	}
	if (perf_nb_event_ports(opt) > EVT_MAX_PORTS) {
		evt_err("number of ports exceeds %d", EVT_MAX_PORTS);
		return -1;
	}

	/* Fixups */
	if ((opt->nb_stages == 1 &&
			opt->prod_type != EVT_PROD_TYPE_EVENT_TIMER_ADPTR) &&
			opt->fwd_latency) {
		evt_info("fwd_latency is valid when nb_stages > 1, disabling");
		opt->fwd_latency = 0;
	}

	if (opt->fwd_latency && !opt->q_priority) {
		evt_info("enabled queue priority for latency measurement");
		opt->q_priority = 1;
	}
	if (opt->nb_pkts == 0)
		opt->nb_pkts = INT64_MAX/evt_nr_active_lcores(opt->plcores);

	return 0;
}

void
perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
{
	evt_dump("nb_prod_lcores", "%d", evt_nr_active_lcores(opt->plcores));
	evt_dump_producer_lcores(opt);
	evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
	evt_dump_worker_lcores(opt);
	evt_dump_nb_stages(opt);
	evt_dump("nb_evdev_ports", "%d", perf_nb_event_ports(opt));
	evt_dump("nb_evdev_queues", "%d", nb_queues);
	evt_dump_queue_priority(opt);
	evt_dump_sched_type_list(opt);
	evt_dump_producer_type(opt);
	evt_dump("prod_enq_burst_sz", "%d", opt->prod_enq_burst_sz);
}

static void
perf_event_port_flush(uint8_t dev_id __rte_unused, struct rte_event ev,
		      void *args)
{
	rte_mempool_put(args, ev.event_ptr);
}

void
perf_worker_cleanup(struct rte_mempool *const pool, uint8_t dev_id,
		    uint8_t port_id, struct rte_event events[], uint16_t nb_enq,
		    uint16_t nb_deq)
{
	int i;

	if (nb_deq) {
		for (i = nb_enq; i < nb_deq; i++)
			rte_mempool_put(pool, events[i].event_ptr);

		for (i = 0; i < nb_deq; i++)
			events[i].op = RTE_EVENT_OP_RELEASE;
		rte_event_enqueue_burst(dev_id, port_id, events, nb_deq);
	}
	rte_event_port_quiesce(dev_id, port_id, perf_event_port_flush, pool);
}

void
perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	int i;
	struct test_perf *t = evt_test_priv(test);

	if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		for (i = 0; i < opt->nb_timer_adptrs; i++)
			rte_event_timer_adapter_stop(t->timer_adptr[i]);
	}
	rte_event_dev_stop(opt->dev_id);
	rte_event_dev_close(opt->dev_id);
}

static inline void
perf_elt_init(struct rte_mempool *mp, void *arg __rte_unused,
	    void *obj, unsigned i __rte_unused)
{
	memset(obj, 0, mp->elt_size);
}

#define NB_RX_DESC			128
#define NB_TX_DESC			512
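/*
 * Configure one Rx and one Tx queue on every available ethernet port
 * with RSS enabled and promiscuous mode on; only used when the
 * producer type is the ethdev Rx adapter.
 */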
int
perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i;
	int ret;
	struct test_perf *t = evt_test_priv(test);
	struct rte_eth_conf port_conf = {
		.rxmode = {
			.mq_mode = RTE_ETH_MQ_RX_RSS,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL,
				.rss_hf = RTE_ETH_RSS_IP,
			},
		},
	};

	if (opt->prod_type != EVT_PROD_TYPE_ETH_RX_ADPTR)
		return 0;

	if (!rte_eth_dev_count_avail()) {
		evt_err("No ethernet ports found.");
		return -ENODEV;
	}

	RTE_ETH_FOREACH_DEV(i) {
		struct rte_eth_dev_info dev_info;
		struct rte_eth_conf local_port_conf = port_conf;

		ret = rte_eth_dev_info_get(i, &dev_info);
		if (ret != 0) {
			evt_err("Failed to get device (port %u) info: %s\n",
					i, strerror(-ret));
			return ret;
		}

		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
			dev_info.flow_type_rss_offloads;
		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
				port_conf.rx_adv_conf.rss_conf.rss_hf) {
			evt_info("Port %u modified RSS hash function based on hardware support,"
				"requested:%#"PRIx64" configured:%#"PRIx64"\n",
				i,
				port_conf.rx_adv_conf.rss_conf.rss_hf,
				local_port_conf.rx_adv_conf.rss_conf.rss_hf);
		}

		if (rte_eth_dev_configure(i, 1, 1, &local_port_conf) < 0) {
			evt_err("Failed to configure eth port [%d]", i);
			return -EINVAL;
		}

		if (rte_eth_rx_queue_setup(i, 0, NB_RX_DESC,
				rte_socket_id(), NULL, t->pool) < 0) {
			evt_err("Failed to setup eth port [%d] rx_queue: %d.",
					i, 0);
			return -EINVAL;
		}

		if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC,
					rte_socket_id(), NULL) < 0) {
			evt_err("Failed to setup eth port [%d] tx_queue: %d.",
					i, 0);
			return -EINVAL;
		}

		ret = rte_eth_promiscuous_enable(i);
		if (ret != 0) {
			evt_err("Failed to enable promiscuous mode for eth port [%d]: %s",
				i, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

void
perf_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i;
	RTE_SET_USED(test);

	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		RTE_ETH_FOREACH_DEV(i) {
			rte_event_eth_rx_adapter_stop(i);
			rte_event_eth_rx_adapter_queue_del(i, i, -1);
			rte_eth_dev_rx_queue_stop(i, 0);
		}
	}
}

void
perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i;
	RTE_SET_USED(test);

	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		RTE_ETH_FOREACH_DEV(i) {
			rte_event_eth_tx_adapter_stop(i);
			rte_event_eth_tx_adapter_queue_del(i, i, -1);
			rte_eth_dev_tx_queue_stop(i, 0);
			rte_eth_dev_stop(i);
		}
	}
}

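/*
 * Create the crypto op and session pools, then configure every
 * cryptodev with enough queue pairs to serve all producer lcores.
 */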
int
perf_cryptodev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint8_t cdev_count, cdev_id, nb_plcores, nb_qps;
	struct test_perf *t = evt_test_priv(test);
	unsigned int max_session_size;
	uint32_t nb_sessions;
	int ret;

	if (opt->prod_type != EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)
		return 0;

	cdev_count = rte_cryptodev_count();
	if (cdev_count == 0) {
		evt_err("No crypto devices available\n");
		return -ENODEV;
	}

	t->ca_op_pool = rte_crypto_op_pool_create(
		"crypto_op_pool", opt->crypto_op_type, opt->pool_sz,
		128, sizeof(union rte_event_crypto_metadata),
		rte_socket_id());
	if (t->ca_op_pool == NULL) {
		evt_err("Failed to create crypto op pool");
		return -ENOMEM;
	}

	nb_sessions = evt_nr_active_lcores(opt->plcores) * t->nb_flows;
	t->ca_asym_sess_pool = rte_cryptodev_asym_session_pool_create(
		"ca_asym_sess_pool", nb_sessions, 0,
		sizeof(union rte_event_crypto_metadata), SOCKET_ID_ANY);
	if (t->ca_asym_sess_pool == NULL) {
		evt_err("Failed to create asym session pool");
		ret = -ENOMEM;
		goto err;
	}

	max_session_size = 0;
	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
		unsigned int session_size;

		session_size =
			rte_cryptodev_sym_get_private_session_size(cdev_id);
		if (session_size > max_session_size)
			max_session_size = session_size;
	}

	t->ca_sess_pool = rte_cryptodev_sym_session_pool_create(
		"ca_sess_pool", nb_sessions, max_session_size, 0,
		sizeof(union rte_event_crypto_metadata), SOCKET_ID_ANY);
	if (t->ca_sess_pool == NULL) {
		evt_err("Failed to create sym session pool");
		ret = -ENOMEM;
		goto err;
	}

	if (opt->ena_vector) {
		unsigned int nb_elem = (opt->pool_sz / opt->vector_size) * 2;
		nb_elem = RTE_MAX(512U, nb_elem);
		nb_elem += evt_nr_active_lcores(opt->wlcores) * 32;
		t->ca_vector_pool = rte_event_vector_pool_create("vector_pool", nb_elem, 32,
				opt->vector_size, opt->socket_id);
		if (t->ca_vector_pool == NULL) {
			evt_err("Failed to create event vector pool");
			ret = -ENOMEM;
			goto err;
		}
	}

	/*
	 * Calculate the number of queue pairs needed, based on the number
	 * of available producer lcores and crypto devices. For instance,
	 * if there are 4 producer cores and 2 crypto devices, 2 queue pairs
	 * will be set up per device.
	 */
	nb_plcores = evt_nr_active_lcores(opt->plcores);
	nb_qps = (nb_plcores % cdev_count) ? (nb_plcores / cdev_count) + 1 :
					     nb_plcores / cdev_count;
	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
		struct rte_cryptodev_qp_conf qp_conf;
		struct rte_cryptodev_config conf;
		struct rte_cryptodev_info info;
		int qp_id;

		rte_cryptodev_info_get(cdev_id, &info);
		if (nb_qps > info.max_nb_queue_pairs) {
			evt_err("Not enough queue pairs per cryptodev (%u)",
				nb_qps);
			ret = -EINVAL;
			goto err;
		}

		conf.nb_queue_pairs = nb_qps;
		conf.socket_id = SOCKET_ID_ANY;
		conf.ff_disable = RTE_CRYPTODEV_FF_SECURITY;

		ret = rte_cryptodev_configure(cdev_id, &conf);
		if (ret) {
			evt_err("Failed to configure cryptodev (%u)", cdev_id);
			goto err;
		}

		qp_conf.nb_descriptors = NB_CRYPTODEV_DESCRIPTORS;
		qp_conf.mp_session = t->ca_sess_pool;

		for (qp_id = 0; qp_id < conf.nb_queue_pairs; qp_id++) {
			ret = rte_cryptodev_queue_pair_setup(
				cdev_id, qp_id, &qp_conf,
				rte_cryptodev_socket_id(cdev_id));
			if (ret) {
				evt_err("Failed to setup queue pairs on cryptodev %u\n",
					cdev_id);
				goto err;
			}
		}
	}

	return 0;
err:
	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++)
		rte_cryptodev_close(cdev_id);

	rte_mempool_free(t->ca_op_pool);
	rte_mempool_free(t->ca_sess_pool);
	rte_mempool_free(t->ca_asym_sess_pool);
	rte_mempool_free(t->ca_vector_pool);

	return ret;
}

void
perf_cryptodev_destroy(struct evt_test *test, struct evt_options *opt)
{
	uint8_t cdev_id, cdev_count = rte_cryptodev_count();
	struct test_perf *t = evt_test_priv(test);
	uint16_t port;

	if (opt->prod_type != EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)
		return;

	for (port = t->nb_workers; port < perf_nb_event_ports(opt); port++) {
		void *sess;
		struct prod_data *p = &t->prod[port];
		uint32_t flow_id;
		uint8_t cdev_id;

		for (flow_id = 0; flow_id < t->nb_flows; flow_id++) {
			sess = p->ca.crypto_sess[flow_id];
			cdev_id = p->ca.cdev_id;
			rte_cryptodev_sym_session_free(cdev_id, sess);
		}

		rte_event_crypto_adapter_queue_pair_del(
			TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id);
	}

	rte_event_crypto_adapter_free(TEST_PERF_CA_ID);

	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
		rte_cryptodev_stop(cdev_id);
		rte_cryptodev_close(cdev_id);
	}

	rte_mempool_free(t->ca_op_pool);
	rte_mempool_free(t->ca_sess_pool);
	rte_mempool_free(t->ca_asym_sess_pool);
	rte_mempool_free(t->ca_vector_pool);
}

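/*
 * Create the element pool used by the producers: raw perf_elt objects
 * for synthetic and timer producers, perf_elt plus modex result space
 * for asymmetric crypto, and a pktmbuf pool otherwise.
 */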
int
perf_mempool_setup(struct evt_test *test, struct evt_options *opt)
{
	struct test_perf *t = evt_test_priv(test);

	if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
			opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		t->pool = rte_mempool_create(test->name, /* mempool name */
				opt->pool_sz, /* number of elements */
				sizeof(struct perf_elt), /* element size */
				512, /* cache size */
				0, NULL, NULL,
				perf_elt_init, /* obj constructor */
				NULL, opt->socket_id, 0); /* flags */
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR &&
			opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		t->pool = rte_mempool_create(test->name, /* mempool name */
				opt->pool_sz, /* number of elements */
				sizeof(struct perf_elt) + modex_test_case.result_len,
				/* element size */
				512, /* cache size */
				0, NULL, NULL,
				NULL, /* obj constructor */
				NULL, opt->socket_id, 0); /* flags */
	} else {
		t->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */
				opt->pool_sz, /* number of elements */
				512, /* cache size */
				0,
				RTE_MBUF_DEFAULT_BUF_SIZE,
				opt->socket_id); /* socket id */
	}

	if (t->pool == NULL) {
		evt_err("failed to create mempool");
		return -ENOMEM;
	}

	return 0;
}

void
perf_mempool_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_perf *t = evt_test_priv(test);

	rte_mempool_free(t->pool);
}

int
perf_test_setup(struct evt_test *test, struct evt_options *opt)
{
	void *test_perf;

	test_perf = rte_zmalloc_socket(test->name, sizeof(struct test_perf),
				RTE_CACHE_LINE_SIZE, opt->socket_id);
	if (test_perf == NULL) {
		evt_err("failed to allocate test_perf memory");
		goto nomem;
	}
	test->test_priv = test_perf;

	struct test_perf *t = evt_test_priv(test);

	if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		t->outstand_pkts = opt->nb_timers *
			evt_nr_active_lcores(opt->plcores);
		t->nb_pkts = opt->nb_timers;
	} else {
		t->outstand_pkts = opt->nb_pkts *
			evt_nr_active_lcores(opt->plcores);
		t->nb_pkts = opt->nb_pkts;
	}

	t->nb_workers = evt_nr_active_lcores(opt->wlcores);
	t->done = false;
	t->nb_flows = opt->nb_flows;
	t->result = EVT_TEST_FAILED;
	t->opt = opt;
	memcpy(t->sched_type_list, opt->sched_type_list,
			sizeof(opt->sched_type_list));
	return 0;
nomem:
	return -ENOMEM;
}

void
perf_test_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);

	rte_free(test->test_priv);
}