xref: /dpdk/app/test-eventdev/test_perf_atq.c (revision bca734c27e345af500d0d951421584e2567cd107)
153a3b7e8SJerin Jacob /* SPDX-License-Identifier: BSD-3-Clause
253a3b7e8SJerin Jacob  * Copyright(c) 2017 Cavium, Inc
3e6050243SJerin Jacob  */
4e6050243SJerin Jacob 
5e6050243SJerin Jacob #include "test_perf_common.h"
6e6050243SJerin Jacob 
743d162bcSThomas Monjalon /* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */
8e6050243SJerin Jacob 
9e6050243SJerin Jacob static inline int
10e6050243SJerin Jacob atq_nb_event_queues(struct evt_options *opt)
11e6050243SJerin Jacob {
12e6050243SJerin Jacob 	/* nb_queues = number of producers */
13452cd797SPavan Nikhilesh 	return opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
14d9a42a69SThomas Monjalon 		rte_eth_dev_count_avail() : evt_nr_active_lcores(opt->plcores);
15e6050243SJerin Jacob }
16e6050243SJerin Jacob 
1733011cb3SThomas Monjalon static __rte_always_inline void
181eb10ad8SJerin Jacob atq_fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
191eb10ad8SJerin Jacob 		const uint8_t nb_stages)
201eb10ad8SJerin Jacob {
211eb10ad8SJerin Jacob 	ev->sub_event_type++;
221eb10ad8SJerin Jacob 	ev->sched_type = sched_type_list[ev->sub_event_type % nb_stages];
231eb10ad8SJerin Jacob 	ev->op = RTE_EVENT_OP_FORWARD;
241eb10ad8SJerin Jacob 	ev->event_type = RTE_EVENT_TYPE_CPU;
251eb10ad8SJerin Jacob }
261eb10ad8SJerin Jacob 
2769e807dfSVolodymyr Fialko static __rte_always_inline void
2869e807dfSVolodymyr Fialko atq_fwd_event_vector(struct rte_event *const ev, uint8_t *const sched_type_list,
2969e807dfSVolodymyr Fialko 		const uint8_t nb_stages)
3069e807dfSVolodymyr Fialko {
3169e807dfSVolodymyr Fialko 	ev->sub_event_type++;
3269e807dfSVolodymyr Fialko 	ev->sched_type = sched_type_list[ev->sub_event_type % nb_stages];
3369e807dfSVolodymyr Fialko 	ev->op = RTE_EVENT_OP_FORWARD;
3469e807dfSVolodymyr Fialko 	ev->event_type = RTE_EVENT_TYPE_CPU_VECTOR;
3569e807dfSVolodymyr Fialko }
3669e807dfSVolodymyr Fialko 
/* Single-event (non-burst) worker loop for the ATQ perf test.
 * Dequeues one event at a time, forwards it through nb_stages pipeline
 * stages and accounts the processed packet (and optionally its forward
 * latency) at the last stage. Runs until t->done is set.
 * Locals t, dev, port, w, bufs, sz, cnt, stage, laststage, prod_type,
 * prod_timer_type, pool, pe, sched_type_list and nb_stages are declared
 * by the PERF_WORKER_INIT macro from test_perf_common.h.
 */
static int
perf_atq_worker(void *arg, const int enable_fwd_latency)
{
	uint16_t enq = 0, deq = 0;
	struct rte_event ev;
	PERF_WORKER_INIT;

	/* pe (perf element) is only used by the vector worker variant */
	RTE_SET_USED(pe);
	while (t->done == false) {
		deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!deq) {
			rte_pause();
			continue;
		}

		/* crypto adapter completions need their result extracted
		 * before the event can flow through the pipeline
		 */
		if ((prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) &&
		    (ev.event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
			if (perf_handle_crypto_ev(&ev))
				continue;
		}

		/* sub_event_type carries the current stage index */
		stage = ev.sub_event_type % nb_stages;
		if (enable_fwd_latency && !prod_timer_type && stage == 0)
			/* first stage in pipeline, mark ts to compute fwd latency */
			perf_mark_fwd_latency(prod_type, &ev);

		/* last stage in pipeline */
		if (unlikely(stage == laststage)) {
			if (enable_fwd_latency)
				cnt = perf_process_last_stage_latency(pool, prod_type,
					&ev, w, bufs, sz, cnt);
			else
				cnt = perf_process_last_stage(pool, prod_type, &ev, w,
					bufs, sz, cnt);
		} else {
			atq_fwd_event(&ev, sched_type_list, nb_stages);
			/* retry enqueue until accepted or the test ends */
			do {
				enq = rte_event_enqueue_burst(dev, port, &ev,
							      1);
			} while (!enq && !t->done);
		}
	}

	/* release any event still held by this port before exiting */
	perf_worker_cleanup(pool, dev, port, &ev, enq, deq);

	return 0;
}
851eb10ad8SJerin Jacob 
/* Burst-mode worker loop for the ATQ perf test.
 * Same pipeline semantics as perf_atq_worker(), but dequeues and
 * enqueues up to BURST_SIZE events per call to amortize driver cost.
 * Runs until t->done is set.
 */
static int
perf_atq_worker_burst(void *arg, const int enable_fwd_latency)
{
	/* +1 to avoid prefetch out of array check */
	struct rte_event ev[BURST_SIZE + 1];
	uint16_t enq = 0, nb_rx = 0;
	PERF_WORKER_INIT;
	uint16_t i;

	/* pe (perf element) is only used by the vector worker variant */
	RTE_SET_USED(pe);
	while (t->done == false) {
		nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			/* unwrap crypto completions before pipelining */
			if ((prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) &&
			    (ev[i].event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
				if (perf_handle_crypto_ev(&ev[i]))
					continue;
			}

			stage = ev[i].sub_event_type % nb_stages;
			if (enable_fwd_latency && !prod_timer_type && stage == 0) {
				/* safe: ev[] is sized BURST_SIZE + 1 */
				rte_prefetch0(ev[i+1].event_ptr);
				/* first stage in pipeline.
				 * mark time stamp to compute fwd latency
				 */
				perf_mark_fwd_latency(prod_type, &ev[i]);
			}
			/* last stage in pipeline */
			if (unlikely(stage == laststage)) {
				if (enable_fwd_latency)
					cnt = perf_process_last_stage_latency(pool,
						prod_type, &ev[i], w, bufs, sz, cnt);
				else
					cnt = perf_process_last_stage(pool, prod_type,
						&ev[i], w, bufs, sz, cnt);

				/* finished events are released, not forwarded */
				ev[i].op = RTE_EVENT_OP_RELEASE;
			} else {
				atq_fwd_event(&ev[i], sched_type_list,
						nb_stages);
			}
		}

		/* flush whole burst; retry the unaccepted tail until done */
		enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
		while ((enq < nb_rx) && !t->done) {
			enq += rte_event_enqueue_burst(dev, port,
							ev + enq, nb_rx - enq);
		}
	}

	/* release any events still held by this port before exiting */
	perf_worker_cleanup(pool, dev, port, ev, enq, nb_rx);

	return 0;
}
1461eb10ad8SJerin Jacob 
1471eb10ad8SJerin Jacob static int
14869e807dfSVolodymyr Fialko perf_atq_worker_vector(void *arg, const int enable_fwd_latency)
14969e807dfSVolodymyr Fialko {
15069e807dfSVolodymyr Fialko 	uint16_t enq = 0, deq = 0;
15169e807dfSVolodymyr Fialko 	struct rte_event ev;
15269e807dfSVolodymyr Fialko 	PERF_WORKER_INIT;
15369e807dfSVolodymyr Fialko 
15469e807dfSVolodymyr Fialko 	RTE_SET_USED(sz);
15569e807dfSVolodymyr Fialko 	RTE_SET_USED(cnt);
156b25a66c4SAmit Prakash Shukla 	RTE_SET_USED(prod_type);
15769e807dfSVolodymyr Fialko 
15869e807dfSVolodymyr Fialko 	while (t->done == false) {
15969e807dfSVolodymyr Fialko 		deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
16069e807dfSVolodymyr Fialko 
16169e807dfSVolodymyr Fialko 		if (!deq)
16269e807dfSVolodymyr Fialko 			continue;
16369e807dfSVolodymyr Fialko 
16469e807dfSVolodymyr Fialko 		if (ev.event_type == RTE_EVENT_TYPE_CRYPTODEV_VECTOR) {
16569e807dfSVolodymyr Fialko 			if (perf_handle_crypto_vector_ev(&ev, &pe, enable_fwd_latency))
16669e807dfSVolodymyr Fialko 				continue;
16769e807dfSVolodymyr Fialko 		}
16869e807dfSVolodymyr Fialko 
16969e807dfSVolodymyr Fialko 		stage = ev.sub_event_type % nb_stages;
17069e807dfSVolodymyr Fialko 		/* First q in pipeline, mark timestamp to compute fwd latency */
17169e807dfSVolodymyr Fialko 		if (enable_fwd_latency && !prod_timer_type && stage == 0)
172*bca734c2SPavan Nikhilesh 			pe->timestamp = rte_get_timer_cycles();
17369e807dfSVolodymyr Fialko 
17469e807dfSVolodymyr Fialko 		/* Last stage in pipeline */
17569e807dfSVolodymyr Fialko 		if (unlikely(stage == laststage)) {
17669e807dfSVolodymyr Fialko 			perf_process_vector_last_stage(pool, t->ca_op_pool, &ev, w,
17769e807dfSVolodymyr Fialko 							enable_fwd_latency);
17869e807dfSVolodymyr Fialko 		} else {
17969e807dfSVolodymyr Fialko 			atq_fwd_event_vector(&ev, sched_type_list, nb_stages);
18069e807dfSVolodymyr Fialko 			do {
18169e807dfSVolodymyr Fialko 				enq = rte_event_enqueue_burst(dev, port, &ev, 1);
18269e807dfSVolodymyr Fialko 			} while (!enq && !t->done);
18369e807dfSVolodymyr Fialko 		}
18469e807dfSVolodymyr Fialko 	}
18569e807dfSVolodymyr Fialko 
18669e807dfSVolodymyr Fialko 	perf_worker_cleanup(pool, dev, port, &ev, enq, deq);
18769e807dfSVolodymyr Fialko 
18869e807dfSVolodymyr Fialko 	return 0;
18969e807dfSVolodymyr Fialko }
19069e807dfSVolodymyr Fialko 
19169e807dfSVolodymyr Fialko static int
1921eb10ad8SJerin Jacob worker_wrapper(void *arg)
1931eb10ad8SJerin Jacob {
1941eb10ad8SJerin Jacob 	struct worker_data *w  = arg;
1951eb10ad8SJerin Jacob 	struct evt_options *opt = w->t->opt;
1961eb10ad8SJerin Jacob 
1971eb10ad8SJerin Jacob 	const bool burst = evt_has_burst_mode(w->dev_id);
1981eb10ad8SJerin Jacob 	const int fwd_latency = opt->fwd_latency;
1991eb10ad8SJerin Jacob 
2001eb10ad8SJerin Jacob 	/* allow compiler to optimize */
20169e807dfSVolodymyr Fialko 	if (opt->ena_vector && opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)
20269e807dfSVolodymyr Fialko 		return perf_atq_worker_vector(arg, fwd_latency);
20369e807dfSVolodymyr Fialko 	else if (!burst && !fwd_latency)
2041eb10ad8SJerin Jacob 		return perf_atq_worker(arg, 0);
2051eb10ad8SJerin Jacob 	else if (!burst && fwd_latency)
2061eb10ad8SJerin Jacob 		return perf_atq_worker(arg, 1);
2071eb10ad8SJerin Jacob 	else if (burst && !fwd_latency)
2081eb10ad8SJerin Jacob 		return perf_atq_worker_burst(arg, 0);
2091eb10ad8SJerin Jacob 	else if (burst && fwd_latency)
2101eb10ad8SJerin Jacob 		return perf_atq_worker_burst(arg, 1);
2111eb10ad8SJerin Jacob 
2121eb10ad8SJerin Jacob 	rte_panic("invalid worker\n");
2131eb10ad8SJerin Jacob }
2141eb10ad8SJerin Jacob 
2151eb10ad8SJerin Jacob static int
2161eb10ad8SJerin Jacob perf_atq_launch_lcores(struct evt_test *test, struct evt_options *opt)
2171eb10ad8SJerin Jacob {
2181eb10ad8SJerin Jacob 	return perf_launch_lcores(test, opt, worker_wrapper);
2191eb10ad8SJerin Jacob }
2201eb10ad8SJerin Jacob 
/* Configure and start the eventdev (and its producers) for the ATQ
 * perf test: size queues/ports from the options, create all-types
 * queues, set up worker/producer ports, arrange a service lcore for
 * non-distributed schedulers, then start the device followed by the
 * configured producer side (eth Rx adapter, timer adapter, cryptodev
 * or dmadev). Returns 0 on success, negative errno otherwise.
 */
static int
perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
	int ret;
	uint8_t queue;
	uint8_t nb_queues;
	uint8_t nb_ports;
	uint16_t prod;
	struct rte_event_dev_info dev_info;
	struct test_perf *t = evt_test_priv(test);

	/* one port per worker; producers need ports too, except when the
	 * Rx/timer adapters inject events on their own ports
	 */
	nb_ports = evt_nr_active_lcores(opt->wlcores);
	nb_ports += (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ||
			opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) ? 0 :
		evt_nr_active_lcores(opt->plcores);

	nb_queues = atq_nb_event_queues(opt);

	ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (ret) {
		evt_err("failed to get eventdev info %d", opt->dev_id);
		return ret;
	}

	ret = evt_configure_eventdev(opt, nb_queues, nb_ports);
	if (ret) {
		evt_err("failed to configure eventdev %d", opt->dev_id);
		return ret;
	}

	/* every queue accepts all schedule types (the ATQ test's point) */
	struct rte_event_queue_conf q_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	/* queue configurations */
	for (queue = 0; queue < nb_queues; queue++) {
		ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
		if (ret) {
			evt_err("failed to setup queue=%d", queue);
			return ret;
		}
	}

	/* clamp the requested dequeue depth to what the device allows */
	if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
		opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;

	/* port configuration */
	const struct rte_event_port_conf p_conf = {
			.dequeue_depth = opt->wkr_deq_dep,
			.enqueue_depth = dev_info.max_event_port_dequeue_depth,
			.new_event_threshold = dev_info.max_num_events,
	};

	ret = perf_event_dev_port_setup(test, opt, 1 /* stride */, nb_queues,
			&p_conf);
	if (ret)
		return ret;

	/* software schedulers need a service lcore to run the scheduler */
	if (!evt_has_distributed_sched(opt->dev_id)) {
		uint32_t service_id;
		rte_event_dev_service_id_get(opt->dev_id, &service_id);
		ret = evt_service_setup(service_id);
		if (ret) {
			evt_err("No service lcore found to run event dev.");
			return ret;
		}
	}

	ret = rte_event_dev_start(opt->dev_id);
	if (ret) {
		evt_err("failed to start eventdev %d", opt->dev_id);
		return ret;
	}

	/* start the producer side matching the selected producer type */
	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		RTE_ETH_FOREACH_DEV(prod) {
			ret = rte_eth_dev_start(prod);
			if (ret) {
				evt_err("Ethernet dev [%d] failed to start. Using synthetic producer",
						prod);
				return ret;
			}

			ret = rte_event_eth_rx_adapter_start(prod);
			if (ret) {
				evt_err("Rx adapter[%d] start failed", prod);
				return ret;
			}
			printf("%s: Port[%d] using Rx adapter[%d] started\n",
					__func__, prod, prod);
		}
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		for (prod = 0; prod < opt->nb_timer_adptrs; prod++) {
			ret = rte_event_timer_adapter_start(
					t->timer_adptr[prod]);
			if (ret) {
				evt_err("failed to Start event timer adapter %d"
						, prod);
				return ret;
			}
		}
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
		uint8_t cdev_id, cdev_count;

		cdev_count = rte_cryptodev_count();
		for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
			ret = rte_cryptodev_start(cdev_id);
			if (ret) {
				evt_err("Failed to start cryptodev %u",
					cdev_id);
				return ret;
			}
		}
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_DMA_ADPTR) {
		/* only the first dmadev is used by the perf test */
		uint8_t dma_dev_id = 0, dma_dev_count;

		dma_dev_count = rte_dma_count_avail();
		if (dma_dev_count == 0) {
			evt_err("No dma devices available\n");
			return -ENODEV;
		}

		ret = rte_dma_start(dma_dev_id);
		if (ret) {
			evt_err("Failed to start dmadev %u", dma_dev_id);
			return ret;
		}
	}

	return 0;
}
354e6050243SJerin Jacob 
/* Dump the test options together with the derived ATQ queue count. */
static void
perf_atq_opt_dump(struct evt_options *opt)
{
	const int nb_queues = atq_nb_event_queues(opt);

	perf_opt_dump(opt, nb_queues);
}
360e6050243SJerin Jacob 
/* Validate the test options against the derived ATQ queue count. */
static int
perf_atq_opt_check(struct evt_options *opt)
{
	const int nb_queues = atq_nb_event_queues(opt);

	return perf_opt_check(opt, nb_queues);
}
366e6050243SJerin Jacob 
367e6050243SJerin Jacob static bool
368e6050243SJerin Jacob perf_atq_capability_check(struct evt_options *opt)
369e6050243SJerin Jacob {
370e6050243SJerin Jacob 	struct rte_event_dev_info dev_info;
371e6050243SJerin Jacob 
372e6050243SJerin Jacob 	rte_event_dev_info_get(opt->dev_id, &dev_info);
373e6050243SJerin Jacob 	if (dev_info.max_event_queues < atq_nb_event_queues(opt) ||
374e6050243SJerin Jacob 			dev_info.max_event_ports < perf_nb_event_ports(opt)) {
375e6050243SJerin Jacob 		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
376e6050243SJerin Jacob 			atq_nb_event_queues(opt), dev_info.max_event_queues,
377e6050243SJerin Jacob 			perf_nb_event_ports(opt), dev_info.max_event_ports);
378e6050243SJerin Jacob 	}
379e6050243SJerin Jacob 	if (!evt_has_all_types_queue(opt->dev_id))
380e6050243SJerin Jacob 		return false;
381e6050243SJerin Jacob 
382e6050243SJerin Jacob 	return true;
383e6050243SJerin Jacob }
384e6050243SJerin Jacob 
/* Test-ops table binding the generic perf harness callbacks to the
 * ATQ-specific setup/check/launch functions; everything else reuses the
 * common perf_* implementations from test_perf_common.c.
 */
static const struct evt_test_ops perf_atq =  {
	.cap_check          = perf_atq_capability_check,
	.opt_check          = perf_atq_opt_check,
	.opt_dump           = perf_atq_opt_dump,
	.test_setup         = perf_test_setup,
	.ethdev_setup       = perf_ethdev_setup,
	.cryptodev_setup    = perf_cryptodev_setup,
	.dmadev_setup       = perf_dmadev_setup,
	.ethdev_rx_stop     = perf_ethdev_rx_stop,
	.mempool_setup      = perf_mempool_setup,
	.eventdev_setup     = perf_atq_eventdev_setup,
	.launch_lcores      = perf_atq_launch_lcores,
	.eventdev_destroy   = perf_eventdev_destroy,
	.mempool_destroy    = perf_mempool_destroy,
	.ethdev_destroy     = perf_ethdev_destroy,
	.cryptodev_destroy  = perf_cryptodev_destroy,
	.dmadev_destroy     = perf_dmadev_destroy,
	.test_result        = perf_test_result,
	.test_destroy       = perf_test_destroy,
};

/* Register "perf_atq" with the test-eventdev application's test list. */
EVT_TEST_REGISTER(perf_atq);
407