xref: /dpdk/app/test-eventdev/test_perf_common.c (revision f12c41bf4074efb438fc21ab7db13f011f5a1e84)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <math.h>

#include "test_perf_common.h"

#define NB_CRYPTODEV_DESCRIPTORS 128

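/*
 * Print the per-worker packet distribution and return the overall result
 * recorded for the test.
 */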
int
perf_test_result(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	int i;
	uint64_t total = 0;
	struct test_perf *t = evt_test_priv(test);

	printf("Packet distribution across worker cores :\n");
	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].processed_pkts;
	for (i = 0; i < t->nb_workers; i++)
		printf("Worker %d packets: "CLGRN"%"PRIx64" "CLNRM"percentage:"
				CLGRN" %3.2f"CLNRM"\n", i,
				t->worker[i].processed_pkts,
				(((double)t->worker[i].processed_pkts)/total)
				* 100);

	return t->result;
}

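/*
 * Single-event synthetic producer: allocates perf elements from the mempool,
 * timestamps them and enqueues them one at a time as NEW events on stage 0.
 */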
static inline int
perf_producer(void *arg)
{
	int i;
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	struct evt_options *opt = t->opt;
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	struct rte_mempool *pool = t->pool;
	const uint64_t nb_pkts = t->nb_pkts;
	const uint32_t nb_flows = t->nb_flows;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
	struct rte_event ev;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d dev_id %d port=%d queue %d\n", __func__,
				rte_lcore_id(), dev_id, port, p->queue_id);

	ev.event = 0;
	ev.op = RTE_EVENT_OP_NEW;
	ev.queue_id = p->queue_id;
	ev.sched_type = t->opt->sched_type_list[0];
	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	ev.event_type = RTE_EVENT_TYPE_CPU;
	ev.sub_event_type = 0; /* stage 0 */

	while (count < nb_pkts && t->done == false) {
		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
			continue;
		for (i = 0; i < BURST_SIZE; i++) {
			ev.flow_id = flow_counter++ % nb_flows;
			ev.event_ptr = m[i];
			m[i]->timestamp = rte_get_timer_cycles();
			while (rte_event_enqueue_burst(dev_id,
						       port, &ev, 1) != 1) {
				if (t->done)
					break;
				rte_pause();
				m[i]->timestamp = rte_get_timer_cycles();
			}
		}
		count += BURST_SIZE;
	}

	return 0;
}

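/*
 * Burst-mode synthetic producer: same as perf_producer() but enqueues
 * prod_enq_burst_sz events at a time, capped at the device's maximum
 * enqueue depth.
 */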
static inline int
perf_producer_burst(void *arg)
{
	uint32_t i;
	uint64_t timestamp;
	struct rte_event_dev_info dev_info;
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	struct evt_options *opt = t->opt;
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	struct rte_mempool *pool = t->pool;
	const uint64_t nb_pkts = t->nb_pkts;
	const uint32_t nb_flows = t->nb_flows;
	uint32_t flow_counter = 0;
	uint16_t enq = 0;
	uint64_t count = 0;
	struct perf_elt *m[MAX_PROD_ENQ_BURST_SIZE + 1];
	struct rte_event ev[MAX_PROD_ENQ_BURST_SIZE + 1];
	uint32_t burst_size = opt->prod_enq_burst_sz;

	memset(m, 0, sizeof(*m) * (MAX_PROD_ENQ_BURST_SIZE + 1));
	rte_event_dev_info_get(dev_id, &dev_info);
	if (dev_info.max_event_port_enqueue_depth < burst_size)
		burst_size = dev_info.max_event_port_enqueue_depth;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d dev_id %d port=%d queue %d\n", __func__,
				rte_lcore_id(), dev_id, port, p->queue_id);

	for (i = 0; i < burst_size; i++) {
		ev[i].op = RTE_EVENT_OP_NEW;
		ev[i].queue_id = p->queue_id;
		ev[i].sched_type = t->opt->sched_type_list[0];
		ev[i].priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
		ev[i].event_type = RTE_EVENT_TYPE_CPU;
		ev[i].sub_event_type = 0; /* stage 0 */
	}

	while (count < nb_pkts && t->done == false) {
		if (rte_mempool_get_bulk(pool, (void **)m, burst_size) < 0)
			continue;
		timestamp = rte_get_timer_cycles();
		for (i = 0; i < burst_size; i++) {
			ev[i].flow_id = flow_counter++ % nb_flows;
			ev[i].event_ptr = m[i];
			m[i]->timestamp = timestamp;
		}
		enq = rte_event_enqueue_burst(dev_id, port, ev, burst_size);
		while (enq < burst_size) {
			enq += rte_event_enqueue_burst(dev_id, port,
							ev + enq,
							burst_size - enq);
			if (t->done)
				break;
			rte_pause();
			timestamp = rte_get_timer_cycles();
			for (i = enq; i < burst_size; i++)
				m[i]->timestamp = timestamp;
		}
		count += burst_size;
	}
	return 0;
}

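/*
 * Timer-based producer: arms one event timer per perf element so that the
 * events are injected by the event timer adapter on expiry, and reports the
 * average arm latency observed on this lcore.
 */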
static inline int
perf_event_timer_producer(void *arg)
{
	int i;
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	struct evt_options *opt = t->opt;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	uint64_t arm_latency = 0;
	const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_timers = opt->nb_timers;
	struct rte_mempool *pool = t->pool;
	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
	struct rte_event_timer_adapter **adptr = t->timer_adptr;
	struct rte_event_timer tim;
	uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;

	memset(&tim, 0, sizeof(struct rte_event_timer));
	timeout_ticks =
		opt->optm_timer_tick_nsec
			? ceil((double)(timeout_ticks * opt->timer_tick_nsec) /
			       opt->optm_timer_tick_nsec)
			: timeout_ticks;
	timeout_ticks += timeout_ticks ? 0 : 1;
	tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
	tim.ev.op = RTE_EVENT_OP_NEW;
	tim.ev.sched_type = t->opt->sched_type_list[0];
	tim.ev.queue_id = p->queue_id;
	tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	tim.state = RTE_EVENT_TIMER_NOT_ARMED;
	tim.timeout_ticks = timeout_ticks;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d\n", __func__, rte_lcore_id());

	while (count < nb_timers && t->done == false) {
		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
			continue;
		for (i = 0; i < BURST_SIZE; i++) {
			rte_prefetch0(m[i + 1]);
			m[i]->tim = tim;
			m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
			m[i]->tim.ev.event_ptr = m[i];
			m[i]->timestamp = rte_get_timer_cycles();
			while (rte_event_timer_arm_burst(
			       adptr[flow_counter % nb_timer_adptrs],
			       (struct rte_event_timer **)&m[i], 1) != 1) {
				if (t->done)
					break;
				m[i]->timestamp = rte_get_timer_cycles();
			}
			arm_latency += rte_get_timer_cycles() - m[i]->timestamp;
		}
		count += BURST_SIZE;
	}
	fflush(stdout);
	rte_delay_ms(1000);
	printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
			__func__, rte_lcore_id(),
			count ? (float)(arm_latency / count) /
			(rte_get_timer_hz() / 1000000) : 0);
	return 0;
}

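/*
 * Burst variant of the timer producer: arms BURST_SIZE timers per call with
 * rte_event_timer_arm_tmo_tick_burst(), all sharing the same timeout.
 */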
static inline int
perf_event_timer_producer_burst(void *arg)
{
	int i;
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	struct evt_options *opt = t->opt;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	uint64_t arm_latency = 0;
	const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_timers = opt->nb_timers;
	struct rte_mempool *pool = t->pool;
	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
	struct rte_event_timer_adapter **adptr = t->timer_adptr;
	struct rte_event_timer tim;
	uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;

	memset(&tim, 0, sizeof(struct rte_event_timer));
	timeout_ticks =
		opt->optm_timer_tick_nsec
			? ceil((double)(timeout_ticks * opt->timer_tick_nsec) /
			       opt->optm_timer_tick_nsec)
			: timeout_ticks;
	timeout_ticks += timeout_ticks ? 0 : 1;
	tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
	tim.ev.op = RTE_EVENT_OP_NEW;
	tim.ev.sched_type = t->opt->sched_type_list[0];
	tim.ev.queue_id = p->queue_id;
	tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	tim.state = RTE_EVENT_TIMER_NOT_ARMED;
	tim.timeout_ticks = timeout_ticks;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d\n", __func__, rte_lcore_id());

	while (count < nb_timers && t->done == false) {
		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
			continue;
		for (i = 0; i < BURST_SIZE; i++) {
			rte_prefetch0(m[i + 1]);
			m[i]->tim = tim;
			m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
			m[i]->tim.ev.event_ptr = m[i];
			m[i]->timestamp = rte_get_timer_cycles();
		}
		rte_event_timer_arm_tmo_tick_burst(
				adptr[flow_counter % nb_timer_adptrs],
				(struct rte_event_timer **)m,
				tim.timeout_ticks,
				BURST_SIZE);
		arm_latency += rte_get_timer_cycles() - m[i - 1]->timestamp;
		count += BURST_SIZE;
	}
	fflush(stdout);
	rte_delay_ms(1000);
	printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
			__func__, rte_lcore_id(),
			count ? (float)(arm_latency / count) /
			(rte_get_timer_hz() / 1000000) : 0);
	return 0;
}

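/*
 * OP_NEW mode crypto producer: builds NULL-cipher symmetric crypto ops and
 * enqueues them directly to the cryptodev queue pair; the crypto adapter
 * injects the completions into the event device.
 */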
static inline void
crypto_adapter_enq_op_new(struct prod_data *p)
{
	struct rte_cryptodev_sym_session **crypto_sess = p->ca.crypto_sess;
	struct test_perf *t = p->t;
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_pkts = t->nb_pkts;
	struct rte_mempool *pool = t->pool;
	struct rte_crypto_sym_op *sym_op;
	struct evt_options *opt = t->opt;
	uint16_t qp_id = p->ca.cdev_qp_id;
	uint8_t cdev_id = p->ca.cdev_id;
	uint32_t flow_counter = 0;
	struct rte_crypto_op *op;
	struct rte_mbuf *m;
	uint64_t count = 0;
	uint16_t len;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d queue %d cdev_id %u cdev_qp_id %u\n",
		       __func__, rte_lcore_id(), p->queue_id, p->ca.cdev_id,
		       p->ca.cdev_qp_id);

	len = opt->mbuf_sz ? opt->mbuf_sz : RTE_ETHER_MIN_LEN;

	while (count < nb_pkts && t->done == false) {
		m = rte_pktmbuf_alloc(pool);
		if (m == NULL)
			continue;

		rte_pktmbuf_append(m, len);
		op = rte_crypto_op_alloc(t->ca_op_pool,
					 RTE_CRYPTO_OP_TYPE_SYMMETRIC);
		sym_op = op->sym;
		sym_op->m_src = m;
		sym_op->cipher.data.offset = 0;
		sym_op->cipher.data.length = len;
		rte_crypto_op_attach_sym_session(
			op, crypto_sess[flow_counter++ % nb_flows]);

		while (rte_cryptodev_enqueue_burst(cdev_id, qp_id, &op, 1) != 1 &&
		       t->done == false)
			rte_pause();

		count++;
	}
}

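/*
 * OP_FORWARD mode crypto producer: wraps each crypto op in an event and
 * enqueues it through rte_event_crypto_adapter_enqueue(), letting the
 * adapter forward it to the cryptodev.
 */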
static inline void
crypto_adapter_enq_op_fwd(struct prod_data *p)
{
	struct rte_cryptodev_sym_session **crypto_sess = p->ca.crypto_sess;
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	struct test_perf *t = p->t;
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_pkts = t->nb_pkts;
	struct rte_mempool *pool = t->pool;
	struct evt_options *opt = t->opt;
	struct rte_crypto_sym_op *sym_op;
	uint32_t flow_counter = 0;
	struct rte_crypto_op *op;
	struct rte_event ev;
	struct rte_mbuf *m;
	uint64_t count = 0;
	uint16_t len;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d port %d queue %d cdev_id %u cdev_qp_id %u\n",
		       __func__, rte_lcore_id(), port, p->queue_id,
		       p->ca.cdev_id, p->ca.cdev_qp_id);

	ev.event = 0;
	ev.op = RTE_EVENT_OP_NEW;
	ev.queue_id = p->queue_id;
	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	ev.event_type = RTE_EVENT_TYPE_CPU;
	len = opt->mbuf_sz ? opt->mbuf_sz : RTE_ETHER_MIN_LEN;

	while (count < nb_pkts && t->done == false) {
		m = rte_pktmbuf_alloc(pool);
		if (m == NULL)
			continue;

		rte_pktmbuf_append(m, len);
		op = rte_crypto_op_alloc(t->ca_op_pool,
					 RTE_CRYPTO_OP_TYPE_SYMMETRIC);
		sym_op = op->sym;
		sym_op->m_src = m;
		sym_op->cipher.data.offset = 0;
		sym_op->cipher.data.length = len;
		rte_crypto_op_attach_sym_session(
			op, crypto_sess[flow_counter++ % nb_flows]);
		ev.event_ptr = op;

		while (rte_event_crypto_adapter_enqueue(dev_id, port, &ev, 1) != 1 &&
		       t->done == false)
			rte_pause();

		count++;
	}
}

static inline int
perf_event_crypto_producer(void *arg)
{
	struct prod_data *p = arg;
	struct evt_options *opt = p->t->opt;

	if (opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
		crypto_adapter_enq_op_new(p);
	else
		crypto_adapter_enq_op_fwd(p);

	return 0;
}

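/*
 * Producer dispatcher launched on each producer lcore: selects the concrete
 * producer routine based on the producer type (synthetic, timer adapter or
 * crypto adapter) and the configured enqueue burst size.
 */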
static int
perf_producer_wrapper(void *arg)
{
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	bool burst = evt_has_burst_mode(p->dev_id);

	/* In case of synthetic producer, launch perf_producer or
	 * perf_producer_burst depending on producer enqueue burst size
	 */
	if (t->opt->prod_type == EVT_PROD_TYPE_SYNT &&
			t->opt->prod_enq_burst_sz == 1)
		return perf_producer(arg);
	else if (t->opt->prod_type == EVT_PROD_TYPE_SYNT &&
			t->opt->prod_enq_burst_sz > 1) {
		if (!burst)
			evt_err("This event device does not support burst mode");
		else
			return perf_producer_burst(arg);
	} else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
			!t->opt->timdev_use_burst)
		return perf_event_timer_producer(arg);
	else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
			t->opt->timdev_use_burst)
		return perf_event_timer_producer_burst(arg);
	else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)
		return perf_event_crypto_producer(arg);
	return 0;
}

static inline uint64_t
processed_pkts(struct test_perf *t)
{
	uint8_t i;
	uint64_t total = 0;

	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].processed_pkts;

	return total;
}

static inline uint64_t
total_latency(struct test_perf *t)
{
	uint8_t i;
	uint64_t total = 0;

	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].latency;

	return total;
}

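/*
 * Launch worker and producer lcores, then poll until the requested number of
 * packets has been processed, printing throughput (and forward latency when
 * enabled) once per second and detecting stalls after ~5 seconds without
 * progress.
 */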
int
perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *))
{
	int ret, lcore_id;
	struct test_perf *t = evt_test_priv(test);

	int port_idx = 0;
	/* launch workers */
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (!(opt->wlcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(worker,
				 &t->worker[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch worker %d", lcore_id);
			return ret;
		}
		port_idx++;
	}

	/* launch producers */
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (!(opt->plcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(perf_producer_wrapper,
				&t->prod[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch perf_producer %d", lcore_id);
			return ret;
		}
		port_idx++;
	}

	const uint64_t total_pkts = t->outstand_pkts;

	uint64_t dead_lock_cycles = rte_get_timer_cycles();
	int64_t dead_lock_remaining = total_pkts;
	const uint64_t dead_lock_sample = rte_get_timer_hz() * 5;

	uint64_t perf_cycles = rte_get_timer_cycles();
	int64_t perf_remaining = total_pkts;
	const uint64_t perf_sample = rte_get_timer_hz();

	static float total_mpps;
	static uint64_t samples;

	const uint64_t freq_mhz = rte_get_timer_hz() / 1000000;
	int64_t remaining = t->outstand_pkts - processed_pkts(t);

	while (t->done == false) {
		const uint64_t new_cycles = rte_get_timer_cycles();

		if ((new_cycles - perf_cycles) > perf_sample) {
			const uint64_t latency = total_latency(t);
			const uint64_t pkts = processed_pkts(t);

			remaining = t->outstand_pkts - pkts;
			float mpps = (float)(perf_remaining-remaining)/1000000;

			perf_remaining = remaining;
			perf_cycles = new_cycles;
			total_mpps += mpps;
			++samples;
			if (opt->fwd_latency && pkts > 0) {
				printf(CLGRN"\r%.3f mpps avg %.3f mpps [avg fwd latency %.3f us] "CLNRM,
					mpps, total_mpps/samples,
					(float)(latency/pkts)/freq_mhz);
			} else {
				printf(CLGRN"\r%.3f mpps avg %.3f mpps"CLNRM,
					mpps, total_mpps/samples);
			}
			fflush(stdout);

			if (remaining <= 0) {
				t->result = EVT_TEST_SUCCESS;
				if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
				    opt->prod_type ==
					    EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
				    opt->prod_type ==
					    EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
					t->done = true;
					break;
				}
			}
		}

		if (new_cycles - dead_lock_cycles > dead_lock_sample &&
		    (opt->prod_type == EVT_PROD_TYPE_SYNT ||
		     opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
		     opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)) {
			remaining = t->outstand_pkts - processed_pkts(t);
			if (dead_lock_remaining == remaining) {
				rte_event_dev_dump(opt->dev_id, stdout);
				evt_err("No schedule events for 5 seconds, deadlock suspected");
				t->done = true;
				break;
			}
			dead_lock_remaining = remaining;
			dead_lock_cycles = new_cycles;
		}
	}
	printf("\n");
	return 0;
}

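/*
 * Create one ethernet Rx adapter per ethdev, add all of its Rx queues to the
 * event device and set up a service core when the adapter has no internal
 * port capability.
 */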
static int
perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
		struct rte_event_port_conf prod_conf)
{
	int ret = 0;
	uint16_t prod;
	struct rte_event_eth_rx_adapter_queue_conf queue_conf;

	memset(&queue_conf, 0,
			sizeof(struct rte_event_eth_rx_adapter_queue_conf));
	queue_conf.ev.sched_type = opt->sched_type_list[0];
	RTE_ETH_FOREACH_DEV(prod) {
		uint32_t cap;

		ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
				prod, &cap);
		if (ret) {
			evt_err("failed to get event rx adapter[%d]"
					" capabilities",
					opt->dev_id);
			return ret;
		}
		queue_conf.ev.queue_id = prod * stride;
		ret = rte_event_eth_rx_adapter_create(prod, opt->dev_id,
				&prod_conf);
		if (ret) {
			evt_err("failed to create rx adapter[%d]", prod);
			return ret;
		}
		ret = rte_event_eth_rx_adapter_queue_add(prod, prod, -1,
				&queue_conf);
		if (ret) {
			evt_err("failed to add rx queues to adapter[%d]", prod);
			return ret;
		}

		if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
			uint32_t service_id;

			rte_event_eth_rx_adapter_service_id_get(prod,
					&service_id);
			ret = evt_service_setup(service_id);
			if (ret) {
				evt_err("Failed to setup service core"
						" for Rx adapter\n");
				return ret;
			}
		}
	}

	return ret;
}

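/*
 * Create and configure the requested number of event timer adapters, record
 * the achievable tick resolution and set up a service core for adapters that
 * lack an internal port.
 */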
static int
perf_event_timer_adapter_setup(struct test_perf *t)
{
	int i;
	int ret;
	struct rte_event_timer_adapter_info adapter_info;
	struct rte_event_timer_adapter *wl;
	uint8_t nb_producers = evt_nr_active_lcores(t->opt->plcores);
	uint8_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;

	if (nb_producers == 1)
		flags |= RTE_EVENT_TIMER_ADAPTER_F_SP_PUT;

	for (i = 0; i < t->opt->nb_timer_adptrs; i++) {
		struct rte_event_timer_adapter_conf config = {
			.event_dev_id = t->opt->dev_id,
			.timer_adapter_id = i,
			.timer_tick_ns = t->opt->timer_tick_nsec,
			.max_tmo_ns = t->opt->max_tmo_nsec,
			.nb_timers = t->opt->pool_sz,
			.flags = flags,
		};

		wl = rte_event_timer_adapter_create(&config);
		if (wl == NULL) {
			evt_err("failed to create event timer ring %d", i);
			return rte_errno;
		}

		memset(&adapter_info, 0,
				sizeof(struct rte_event_timer_adapter_info));
		rte_event_timer_adapter_get_info(wl, &adapter_info);
		t->opt->optm_timer_tick_nsec = adapter_info.min_resolution_ns;

		if (!(adapter_info.caps &
				RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
			uint32_t service_id = -1U;

			rte_event_timer_adapter_service_id_get(wl,
					&service_id);
			ret = evt_service_setup(service_id);
			if (ret) {
				evt_err("Failed to setup service core"
						" for timer adapter\n");
				return ret;
			}
			rte_service_runstate_set(service_id, 1);
		}
		t->timer_adptr[i] = wl;
	}
	return 0;
}

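/*
 * Validate the crypto adapter capabilities against the selected mode and add
 * the producer's cryptodev queue pair to the adapter, supplying response
 * event info when the device requires queue-pair/event binding.
 */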
static int
perf_event_crypto_adapter_setup(struct test_perf *t, struct prod_data *p)
{
	struct evt_options *opt = t->opt;
	uint32_t cap;
	int ret;

	ret = rte_event_crypto_adapter_caps_get(p->dev_id, p->ca.cdev_id, &cap);
	if (ret) {
		evt_err("Failed to get crypto adapter capabilities");
		return ret;
	}

	if (((opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) &&
	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW)) ||
	    ((opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) &&
	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))) {
		evt_err("crypto adapter %s mode unsupported\n",
			opt->crypto_adptr_mode ? "OP_FORWARD" : "OP_NEW");
		return -ENOTSUP;
	} else if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA)) {
		evt_err("Storing crypto session not supported");
		return -ENOTSUP;
	}

	if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
		struct rte_event response_info;

		response_info.event = 0;
		response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
		response_info.queue_id = p->queue_id;
		ret = rte_event_crypto_adapter_queue_pair_add(
			TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id,
			&response_info);
	} else {
		ret = rte_event_crypto_adapter_queue_pair_add(
			TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id, NULL);
	}

	return ret;
}

static struct rte_cryptodev_sym_session *
cryptodev_sym_sess_create(struct prod_data *p, struct test_perf *t)
{
	struct rte_crypto_sym_xform cipher_xform;
	struct rte_cryptodev_sym_session *sess;

	cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_NULL;
	cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	cipher_xform.next = NULL;

	sess = rte_cryptodev_sym_session_create(t->ca_sess_pool);
	if (sess == NULL) {
		evt_err("Failed to create sym session");
		return NULL;
	}

	if (rte_cryptodev_sym_session_init(p->ca.cdev_id, sess, &cipher_xform,
					   t->ca_sess_priv_pool)) {
		evt_err("Failed to init session");
		return NULL;
	}

	return sess;
}

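/*
 * Set up all event ports: one fully linked port per worker lcore, plus
 * unlinked producer ports whose configuration depends on the producer type
 * (Rx adapter, timer adapter, crypto adapter or synthetic CPU producer).
 */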
int
perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
				uint8_t stride, uint8_t nb_queues,
				const struct rte_event_port_conf *port_conf)
{
	struct test_perf *t = evt_test_priv(test);
	uint16_t port, prod;
	int ret = -1;

	/* setup one port per worker, linking to all queues */
	for (port = 0; port < evt_nr_active_lcores(opt->wlcores);
				port++) {
		struct worker_data *w = &t->worker[port];

		w->dev_id = opt->dev_id;
		w->port_id = port;
		w->t = t;
		w->processed_pkts = 0;
		w->latency = 0;

		struct rte_event_port_conf conf = *port_conf;
		conf.event_port_cfg |= RTE_EVENT_PORT_CFG_HINT_WORKER;

		ret = rte_event_port_setup(opt->dev_id, port, &conf);
		if (ret) {
			evt_err("failed to setup port %d", port);
			return ret;
		}

		ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
		if (ret != nb_queues) {
			evt_err("failed to link all queues to port %d", port);
			return -EINVAL;
		}
	}

	/* port for producers, no links */
	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		for ( ; port < perf_nb_event_ports(opt); port++) {
			struct prod_data *p = &t->prod[port];
			p->t = t;
		}

		struct rte_event_port_conf conf = *port_conf;
		conf.event_port_cfg |= RTE_EVENT_PORT_CFG_HINT_PRODUCER;

		ret = perf_event_rx_adapter_setup(opt, stride, conf);
		if (ret)
			return ret;
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		prod = 0;
		for ( ; port < perf_nb_event_ports(opt); port++) {
			struct prod_data *p = &t->prod[port];
			p->queue_id = prod * stride;
			p->t = t;
			prod++;
		}

		ret = perf_event_timer_adapter_setup(t);
		if (ret)
			return ret;
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
		struct rte_event_port_conf conf = *port_conf;
		uint8_t cdev_id = 0;
		uint16_t qp_id = 0;

		ret = rte_event_crypto_adapter_create(TEST_PERF_CA_ID,
						      opt->dev_id, &conf, 0);
		if (ret) {
			evt_err("Failed to create crypto adapter");
			return ret;
		}

		prod = 0;
		for (; port < perf_nb_event_ports(opt); port++) {
			struct rte_cryptodev_sym_session *crypto_sess;
			union rte_event_crypto_metadata m_data;
			struct prod_data *p = &t->prod[port];
			uint32_t flow_id;

			if (qp_id == rte_cryptodev_queue_pair_count(cdev_id)) {
				cdev_id++;
				qp_id = 0;
			}

			p->dev_id = opt->dev_id;
			p->port_id = port;
			p->queue_id = prod * stride;
			p->ca.cdev_id = cdev_id;
			p->ca.cdev_qp_id = qp_id;
			p->ca.crypto_sess = rte_zmalloc_socket(
				NULL, sizeof(crypto_sess) * t->nb_flows,
				RTE_CACHE_LINE_SIZE, opt->socket_id);
			p->t = t;

			m_data.request_info.cdev_id = p->ca.cdev_id;
			m_data.request_info.queue_pair_id = p->ca.cdev_qp_id;
			m_data.response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
			m_data.response_info.queue_id = p->queue_id;

			for (flow_id = 0; flow_id < t->nb_flows; flow_id++) {
				crypto_sess = cryptodev_sym_sess_create(p, t);
				if (crypto_sess == NULL)
					return -ENOMEM;

				m_data.response_info.flow_id = flow_id;
				rte_cryptodev_sym_session_set_user_data(
					crypto_sess, &m_data, sizeof(m_data));
				p->ca.crypto_sess[flow_id] = crypto_sess;
			}

			conf.event_port_cfg |=
				RTE_EVENT_PORT_CFG_HINT_PRODUCER |
				RTE_EVENT_PORT_CFG_HINT_CONSUMER;

			ret = rte_event_port_setup(opt->dev_id, port, &conf);
			if (ret) {
				evt_err("failed to setup port %d", port);
				return ret;
			}

			ret = perf_event_crypto_adapter_setup(t, p);
			if (ret)
				return ret;

			qp_id++;
			prod++;
		}
	} else {
		prod = 0;
		for ( ; port < perf_nb_event_ports(opt); port++) {
			struct prod_data *p = &t->prod[port];

			p->dev_id = opt->dev_id;
			p->port_id = port;
			p->queue_id = prod * stride;
			p->t = t;

			struct rte_event_port_conf conf = *port_conf;
			conf.event_port_cfg |=
				RTE_EVENT_PORT_CFG_HINT_PRODUCER |
				RTE_EVENT_PORT_CFG_HINT_CONSUMER;

			ret = rte_event_port_setup(opt->dev_id, port, &conf);
			if (ret) {
				evt_err("failed to setup port %d", port);
				return ret;
			}
			prod++;
		}
	}

	return ret;
}

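/*
 * Sanity-check the command line options: lcore assignments, stage and
 * scheduling type lists, queue/port limits, then apply a few fixups such as
 * disabling forward latency measurement when it cannot be meaningful.
 */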
int
perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
{
	unsigned int lcores;

	/* N producer + N worker + main when producer cores are used
	 * Else N worker + main when Rx adapter is used
	 */
	lcores = opt->prod_type == EVT_PROD_TYPE_SYNT ? 3 : 2;

	if (rte_lcore_count() < lcores) {
		evt_err("test needs minimum %d lcores", lcores);
		return -1;
	}

	/* Validate worker lcores */
	if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) {
		evt_err("worker lcores overlap with main lcore");
		return -1;
	}
	if (evt_lcores_has_overlap_multi(opt->wlcores, opt->plcores)) {
		evt_err("worker lcores overlap with producer lcores");
		return -1;
	}
	if (evt_has_disabled_lcore(opt->wlcores)) {
		evt_err("one or more worker lcores are not enabled");
		return -1;
	}
	if (!evt_has_active_lcore(opt->wlcores)) {
		evt_err("minimum one worker is required");
		return -1;
	}

	if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
	    opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
	    opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
		/* Validate producer lcores */
		if (evt_lcores_has_overlap(opt->plcores,
					rte_get_main_lcore())) {
			evt_err("producer lcores overlap with main lcore");
			return -1;
		}
		if (evt_has_disabled_lcore(opt->plcores)) {
			evt_err("one or more producer lcores are not enabled");
			return -1;
		}
		if (!evt_has_active_lcore(opt->plcores)) {
			evt_err("minimum one producer is required");
			return -1;
		}
	}

	if (evt_has_invalid_stage(opt))
		return -1;

	if (evt_has_invalid_sched_type(opt))
		return -1;

	if (nb_queues > EVT_MAX_QUEUES) {
		evt_err("number of queues exceeds %d", EVT_MAX_QUEUES);
		return -1;
	}
	if (perf_nb_event_ports(opt) > EVT_MAX_PORTS) {
		evt_err("number of ports exceeds %d", EVT_MAX_PORTS);
		return -1;
	}

	/* Fixups */
	if ((opt->nb_stages == 1 &&
			opt->prod_type != EVT_PROD_TYPE_EVENT_TIMER_ADPTR) &&
			opt->fwd_latency) {
		evt_info("fwd_latency is only valid when nb_stages > 1, disabling");
		opt->fwd_latency = 0;
	}

	if (opt->fwd_latency && !opt->q_priority) {
		evt_info("enabled queue priority for latency measurement");
		opt->q_priority = 1;
	}
	if (opt->nb_pkts == 0)
		opt->nb_pkts = INT64_MAX/evt_nr_active_lcores(opt->plcores);

	return 0;
}

void
perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
{
	evt_dump("nb_prod_lcores", "%d", evt_nr_active_lcores(opt->plcores));
	evt_dump_producer_lcores(opt);
	evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
	evt_dump_worker_lcores(opt);
	evt_dump_nb_stages(opt);
	evt_dump("nb_evdev_ports", "%d", perf_nb_event_ports(opt));
	evt_dump("nb_evdev_queues", "%d", nb_queues);
	evt_dump_queue_priority(opt);
	evt_dump_sched_type_list(opt);
	evt_dump_producer_type(opt);
	evt_dump("prod_enq_burst_sz", "%d", opt->prod_enq_burst_sz);
}

static void
perf_event_port_flush(uint8_t dev_id __rte_unused, struct rte_event ev,
		      void *args)
{
	rte_mempool_put(args, ev.event_ptr);
}

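/*
 * Drain a worker port on exit: return not-yet-forwarded events to the
 * mempool, release the events still held by the port and quiesce it so that
 * any leftover events are flushed back to the pool.
 */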
void
perf_worker_cleanup(struct rte_mempool *const pool, uint8_t dev_id,
		    uint8_t port_id, struct rte_event events[], uint16_t nb_enq,
		    uint16_t nb_deq)
{
	int i;

	if (nb_deq) {
		for (i = nb_enq; i < nb_deq; i++)
			rte_mempool_put(pool, events[i].event_ptr);

		for (i = 0; i < nb_deq; i++)
			events[i].op = RTE_EVENT_OP_RELEASE;
		rte_event_enqueue_burst(dev_id, port_id, events, nb_deq);
	}
	rte_event_port_quiesce(dev_id, port_id, perf_event_port_flush, pool);
}

void
perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	int i;
	struct test_perf *t = evt_test_priv(test);

	if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		for (i = 0; i < opt->nb_timer_adptrs; i++)
			rte_event_timer_adapter_stop(t->timer_adptr[i]);
	}
	rte_event_dev_stop(opt->dev_id);
	rte_event_dev_close(opt->dev_id);
}

static inline void
perf_elt_init(struct rte_mempool *mp, void *arg __rte_unused,
	    void *obj, unsigned i __rte_unused)
{
	memset(obj, 0, mp->elt_size);
}

#define NB_RX_DESC			128
#define NB_TX_DESC			512
int
perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i;
	int ret;
	struct test_perf *t = evt_test_priv(test);
	struct rte_eth_conf port_conf = {
		.rxmode = {
			.mq_mode = RTE_ETH_MQ_RX_RSS,
			.split_hdr_size = 0,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL,
				.rss_hf = RTE_ETH_RSS_IP,
			},
		},
	};

	if (opt->prod_type != EVT_PROD_TYPE_ETH_RX_ADPTR)
		return 0;

	if (!rte_eth_dev_count_avail()) {
		evt_err("No ethernet ports found.");
		return -ENODEV;
	}

	RTE_ETH_FOREACH_DEV(i) {
		struct rte_eth_dev_info dev_info;
		struct rte_eth_conf local_port_conf = port_conf;

		ret = rte_eth_dev_info_get(i, &dev_info);
		if (ret != 0) {
			evt_err("Error during getting device (port %u) info: %s\n",
					i, strerror(-ret));
			return ret;
		}

		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
			dev_info.flow_type_rss_offloads;
		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
				port_conf.rx_adv_conf.rss_conf.rss_hf) {
			evt_info("Port %u modified RSS hash function based on hardware support,"
				"requested:%#"PRIx64" configured:%#"PRIx64"\n",
				i,
				port_conf.rx_adv_conf.rss_conf.rss_hf,
				local_port_conf.rx_adv_conf.rss_conf.rss_hf);
		}

		if (rte_eth_dev_configure(i, 1, 1, &local_port_conf) < 0) {
			evt_err("Failed to configure eth port [%d]", i);
			return -EINVAL;
		}

		if (rte_eth_rx_queue_setup(i, 0, NB_RX_DESC,
				rte_socket_id(), NULL, t->pool) < 0) {
			evt_err("Failed to setup eth port [%d] rx_queue: %d.",
					i, 0);
			return -EINVAL;
		}

		if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC,
					rte_socket_id(), NULL) < 0) {
			evt_err("Failed to setup eth port [%d] tx_queue: %d.",
					i, 0);
			return -EINVAL;
		}

		ret = rte_eth_promiscuous_enable(i);
		if (ret != 0) {
			evt_err("Failed to enable promiscuous mode for eth port [%d]: %s",
				i, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

void
perf_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i;
	RTE_SET_USED(test);

	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		RTE_ETH_FOREACH_DEV(i) {
			rte_event_eth_rx_adapter_stop(i);
			rte_event_eth_rx_adapter_queue_del(i, i, -1);
			rte_eth_dev_rx_queue_stop(i, 0);
		}
	}
}

void
perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i;
	RTE_SET_USED(test);

	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		RTE_ETH_FOREACH_DEV(i) {
			rte_event_eth_tx_adapter_stop(i);
			rte_event_eth_tx_adapter_queue_del(i, i, -1);
			rte_eth_dev_tx_queue_stop(i, 0);
			rte_eth_dev_stop(i);
		}
	}
}

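/*
 * Create the crypto op and session pools and configure every available
 * cryptodev with enough queue pairs to serve all producer lcores.
 */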
int
perf_cryptodev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint8_t cdev_count, cdev_id, nb_plcores, nb_qps;
	struct test_perf *t = evt_test_priv(test);
	unsigned int max_session_size;
	uint32_t nb_sessions;
	int ret;

	if (opt->prod_type != EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)
		return 0;

	cdev_count = rte_cryptodev_count();
	if (cdev_count == 0) {
		evt_err("No crypto devices available\n");
		return -ENODEV;
	}

	t->ca_op_pool = rte_crypto_op_pool_create(
		"crypto_op_pool", RTE_CRYPTO_OP_TYPE_SYMMETRIC, opt->pool_sz,
		128, 0, rte_socket_id());
	if (t->ca_op_pool == NULL) {
		evt_err("Failed to create crypto op pool");
		return -ENOMEM;
	}

	nb_sessions = evt_nr_active_lcores(opt->plcores) * t->nb_flows;
	t->ca_sess_pool = rte_cryptodev_sym_session_pool_create(
		"ca_sess_pool", nb_sessions, 0, 0,
		sizeof(union rte_event_crypto_metadata), SOCKET_ID_ANY);
	if (t->ca_sess_pool == NULL) {
		evt_err("Failed to create sym session pool");
		ret = -ENOMEM;
		goto err;
	}

	max_session_size = 0;
	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
		unsigned int session_size;

		session_size =
			rte_cryptodev_sym_get_private_session_size(cdev_id);
		if (session_size > max_session_size)
			max_session_size = session_size;
	}

	max_session_size += sizeof(union rte_event_crypto_metadata);
	t->ca_sess_priv_pool = rte_mempool_create(
		"ca_sess_priv_pool", nb_sessions, max_session_size, 0, 0, NULL,
		NULL, NULL, NULL, SOCKET_ID_ANY, 0);
	if (t->ca_sess_priv_pool == NULL) {
		evt_err("failed to create sym session private pool");
		ret = -ENOMEM;
		goto err;
	}

	/*
	 * Calculate the number of queue pairs needed, based on the number of
	 * available logical cores and crypto devices. For instance, if there
	 * are 4 cores and 2 crypto devices, 2 queue pairs will be set up per
	 * device.
	 */
	nb_plcores = evt_nr_active_lcores(opt->plcores);
	nb_qps = (nb_plcores % cdev_count) ? (nb_plcores / cdev_count) + 1 :
					     nb_plcores / cdev_count;
	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
		struct rte_cryptodev_qp_conf qp_conf;
		struct rte_cryptodev_config conf;
		struct rte_cryptodev_info info;
		int qp_id;

		rte_cryptodev_info_get(cdev_id, &info);
		if (nb_qps > info.max_nb_queue_pairs) {
			evt_err("Not enough queue pairs per cryptodev (%u)",
				nb_qps);
			ret = -EINVAL;
			goto err;
		}

		conf.nb_queue_pairs = nb_qps;
		conf.socket_id = SOCKET_ID_ANY;
		conf.ff_disable = RTE_CRYPTODEV_FF_SECURITY;

		ret = rte_cryptodev_configure(cdev_id, &conf);
		if (ret) {
			evt_err("Failed to configure cryptodev (%u)", cdev_id);
			goto err;
		}

		qp_conf.nb_descriptors = NB_CRYPTODEV_DESCRIPTORS;
		qp_conf.mp_session = t->ca_sess_pool;
		qp_conf.mp_session_private = t->ca_sess_priv_pool;

		for (qp_id = 0; qp_id < conf.nb_queue_pairs; qp_id++) {
			ret = rte_cryptodev_queue_pair_setup(
				cdev_id, qp_id, &qp_conf,
				rte_cryptodev_socket_id(cdev_id));
			if (ret) {
				evt_err("Failed to setup queue pairs on cryptodev %u\n",
					cdev_id);
				goto err;
			}
		}
	}

	return 0;
err:
	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++)
		rte_cryptodev_close(cdev_id);

	rte_mempool_free(t->ca_op_pool);
	rte_mempool_free(t->ca_sess_pool);
	rte_mempool_free(t->ca_sess_priv_pool);

	return ret;
}

void
perf_cryptodev_destroy(struct evt_test *test, struct evt_options *opt)
{
	uint8_t cdev_id, cdev_count = rte_cryptodev_count();
	struct test_perf *t = evt_test_priv(test);
	uint16_t port;

	if (opt->prod_type != EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)
		return;

	for (port = t->nb_workers; port < perf_nb_event_ports(opt); port++) {
		struct rte_cryptodev_sym_session *sess;
		struct prod_data *p = &t->prod[port];
		uint32_t flow_id;
		uint8_t cdev_id;

		for (flow_id = 0; flow_id < t->nb_flows; flow_id++) {
			sess = p->ca.crypto_sess[flow_id];
			cdev_id = p->ca.cdev_id;
			rte_cryptodev_sym_session_clear(cdev_id, sess);
			rte_cryptodev_sym_session_free(sess);
		}

		rte_event_crypto_adapter_queue_pair_del(
			TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id);
	}

	rte_event_crypto_adapter_free(TEST_PERF_CA_ID);

	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
		rte_cryptodev_stop(cdev_id);
		rte_cryptodev_close(cdev_id);
	}

	rte_mempool_free(t->ca_op_pool);
	rte_mempool_free(t->ca_sess_pool);
	rte_mempool_free(t->ca_sess_priv_pool);
}

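/*
 * Create the element pool used by the producers: a plain mempool of
 * struct perf_elt for the synthetic and timer producers, or a pktmbuf pool
 * for the Rx adapter and crypto adapter producers.
 */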
int
perf_mempool_setup(struct evt_test *test, struct evt_options *opt)
{
	struct test_perf *t = evt_test_priv(test);

	if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
			opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		t->pool = rte_mempool_create(test->name, /* mempool name */
				opt->pool_sz, /* number of elements */
				sizeof(struct perf_elt), /* element size */
				512, /* cache size */
				0, NULL, NULL,
				perf_elt_init, /* obj constructor */
				NULL, opt->socket_id, 0); /* flags */
	} else {
		t->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */
				opt->pool_sz, /* number of elements */
				512, /* cache size */
				0, /* private data size */
				RTE_MBUF_DEFAULT_BUF_SIZE,
				opt->socket_id); /* socket id */
	}

	if (t->pool == NULL) {
		evt_err("failed to create mempool");
		return -ENOMEM;
	}

	return 0;
}

void
perf_mempool_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_perf *t = evt_test_priv(test);

	rte_mempool_free(t->pool);
}

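/*
 * Allocate the per-test private state and derive the expected packet counts
 * from the producer configuration.
 */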
int
perf_test_setup(struct evt_test *test, struct evt_options *opt)
{
	void *test_perf;

	test_perf = rte_zmalloc_socket(test->name, sizeof(struct test_perf),
				RTE_CACHE_LINE_SIZE, opt->socket_id);
	if (test_perf == NULL) {
		evt_err("failed to allocate test_perf memory");
		goto nomem;
	}
	test->test_priv = test_perf;

	struct test_perf *t = evt_test_priv(test);

	if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		t->outstand_pkts = opt->nb_timers *
			evt_nr_active_lcores(opt->plcores);
		t->nb_pkts = opt->nb_timers;
	} else {
		t->outstand_pkts = opt->nb_pkts *
			evt_nr_active_lcores(opt->plcores);
		t->nb_pkts = opt->nb_pkts;
	}

	t->nb_workers = evt_nr_active_lcores(opt->wlcores);
	t->done = false;
	t->nb_flows = opt->nb_flows;
	t->result = EVT_TEST_FAILED;
	t->opt = opt;
	memcpy(t->sched_type_list, opt->sched_type_list,
			sizeof(opt->sched_type_list));
	return 0;
nomem:
	return -ENOMEM;
}

void
perf_test_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);

	rte_free(test->test_priv);
}
1387