xref: /dpdk/app/test-eventdev/test_perf_queue.c (revision 09442498ef736d0a96632cf8b8c15d8ca78a6468)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "test_perf_common.h"

/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */
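
/* Example invocation (core masks and option values are illustrative; see
 * the guide above for the complete option list):
 *
 *   dpdk-test-eventdev -l 0-3 -s 0x1 --vdev=event_sw0 -- \
 *        --test=perf_queue --plcores=2 --wlcores=3 --stlist=a --nb_pkts=0
 */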

static inline int
perf_queue_nb_event_queues(struct evt_options *opt)
{
	/* nb_queues = number of producers * number of stages */
	uint8_t nb_prod = opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
		rte_eth_dev_count_avail() : evt_nr_active_lcores(opt->plcores);
	return nb_prod * opt->nb_stages;
}
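
/* For example, with the synthetic producer, two producer lcores and
 * --nb_stages=3 give nb_queues = 2 * 3 = 6: each producer owns its own
 * chain of three queues, one per pipeline stage.
 */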

static __rte_always_inline void
fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
		const uint8_t nb_stages)
{
	ev->queue_id++;
	ev->sched_type = sched_type_list[ev->queue_id % nb_stages];
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->event_type = RTE_EVENT_TYPE_CPU;
}
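
/* With nb_stages = 3, an event injected at queue 0 is forwarded as
 * queue_id 0 -> 1 -> 2, picking up sched_type_list[1] and then
 * sched_type_list[2]; the final stage releases the event instead of
 * calling fwd_event().
 */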

static int
perf_queue_worker(void *arg, const int enable_fwd_latency)
{
	struct perf_elt *pe = NULL;
	uint16_t enq = 0, deq = 0;
	struct rte_event ev;
	PERF_WORKER_INIT;
	uint8_t stage;
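	/* PERF_WORKER_INIT (from test_perf_common.h) declares the remaining
	 * worker-loop locals used below: t, w, dev, port, pool, bufs, sz,
	 * cnt, nb_stages, laststage, sched_type_list and the producer-type
	 * flags.
	 */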

	while (t->done == false) {
		deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!deq) {
			rte_pause();
			continue;
		}

		if (prod_crypto_type && (ev.event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
			if (perf_handle_crypto_ev(&ev, &pe, enable_fwd_latency))
				continue;
		} else {
			pe = ev.event_ptr;
		}

		stage = ev.queue_id % nb_stages;
		if (enable_fwd_latency && !prod_timer_type && stage == 0)
			/* first queue in pipeline, mark timestamp to compute fwd latency */
			perf_mark_fwd_latency(pe);

		/* last stage in pipeline */
		if (unlikely(stage == laststage)) {
			if (enable_fwd_latency)
				cnt = perf_process_last_stage_latency(pool, prod_crypto_type,
					&ev, w, bufs, sz, cnt);
			else
				cnt = perf_process_last_stage(pool, prod_crypto_type,
					&ev, w, bufs, sz, cnt);
		} else {
			fwd_event(&ev, sched_type_list, nb_stages);
			do {
				enq = rte_event_enqueue_burst(dev, port, &ev, 1);
			} while (!enq && !t->done);
		}
	}

	perf_worker_cleanup(pool, dev, port, &ev, enq, deq);

	return 0;
}

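/* Burst variant of the worker: dequeue up to BURST_SIZE events, process
 * them in place, then hand the whole batch back with one enqueue call,
 * retrying any partial enqueue (rte_event_enqueue_burst() may accept
 * fewer than nb_rx events).
 */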
static int
perf_queue_worker_burst(void *arg, const int enable_fwd_latency)
{
	/* +1 to avoid prefetch out of array check */
	struct rte_event ev[BURST_SIZE + 1];
	uint16_t enq = 0, nb_rx = 0;
	struct perf_elt *pe = NULL;
	PERF_WORKER_INIT;
	uint8_t stage;
	uint16_t i;

	while (t->done == false) {
		nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			if (prod_crypto_type && (ev[i].event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
				if (perf_handle_crypto_ev(&ev[i], &pe, enable_fwd_latency))
					continue;
			}

			stage = ev[i].queue_id % nb_stages;
			if (enable_fwd_latency && !prod_timer_type && stage == 0) {
				rte_prefetch0(ev[i+1].event_ptr);
				/* first queue in pipeline.
				 * mark time stamp to compute fwd latency
				 */
				perf_mark_fwd_latency(ev[i].event_ptr);
			}
			/* last stage in pipeline */
			if (unlikely(stage == laststage)) {
				if (enable_fwd_latency)
					cnt = perf_process_last_stage_latency(pool,
						prod_crypto_type, &ev[i], w, bufs, sz, cnt);
				else
					cnt = perf_process_last_stage(pool, prod_crypto_type,
						&ev[i], w, bufs, sz, cnt);

				ev[i].op = RTE_EVENT_OP_RELEASE;
			} else {
				fwd_event(&ev[i], sched_type_list, nb_stages);
			}
		}

		enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
		while (enq < nb_rx && !t->done) {
			enq += rte_event_enqueue_burst(dev, port,
							ev + enq, nb_rx - enq);
		}
	}

	perf_worker_cleanup(pool, dev, port, ev, enq, nb_rx);

	return 0;
}

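/* Dispatch on burst capability and forward-latency mode once, so each
 * worker body is compiled with enable_fwd_latency as a constant and the
 * latency branches vanish from the hot loop.
 */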
static int
worker_wrapper(void *arg)
{
	struct worker_data *w = arg;
	struct evt_options *opt = w->t->opt;

	const bool burst = evt_has_burst_mode(w->dev_id);
	const int fwd_latency = opt->fwd_latency;

	/* allow compiler to optimize */
	if (!burst && !fwd_latency)
		return perf_queue_worker(arg, 0);
	else if (!burst && fwd_latency)
		return perf_queue_worker(arg, 1);
	else if (burst && !fwd_latency)
		return perf_queue_worker_burst(arg, 0);
	else if (burst && fwd_latency)
		return perf_queue_worker_burst(arg, 1);

	rte_panic("invalid worker\n");
}

static int
perf_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
	return perf_launch_lcores(test, opt, worker_wrapper);
}

static int
perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint8_t queue;
	int nb_stages = opt->nb_stages;
	int ret;
	int nb_ports;
	int nb_queues;
	uint16_t prod;
	struct rte_event_dev_info dev_info;
	struct test_perf *t = evt_test_priv(test);

	nb_ports = evt_nr_active_lcores(opt->wlcores);
	nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ||
		opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ? 0 :
		evt_nr_active_lcores(opt->plcores);

	nb_queues = perf_queue_nb_event_queues(opt);

	ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (ret) {
		evt_err("failed to get eventdev info %d", opt->dev_id);
		return ret;
	}

	ret = evt_configure_eventdev(opt, nb_queues, nb_ports);
	if (ret) {
		evt_err("failed to configure eventdev %d", opt->dev_id);
		return ret;
	}

	struct rte_event_queue_conf q_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	/* queue configurations */
	for (queue = 0; queue < nb_queues; queue++) {
		q_conf.schedule_type =
			(opt->sched_type_list[queue % nb_stages]);

		if (opt->q_priority) {
			uint8_t stage_pos = queue % nb_stages;
			/* Configure event queues (stage 0 to stage n) with
			 * RTE_EVENT_DEV_PRIORITY_LOWEST to
			 * RTE_EVENT_DEV_PRIORITY_HIGHEST.
			 */
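			/* e.g. with nb_stages = 3: step = 255 / 2 = 127,
			 * so stages 0, 1 and 2 get priorities 255, 128
			 * and 1 (LOWEST is 255, HIGHEST is 0).
			 */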
			uint8_t step = RTE_EVENT_DEV_PRIORITY_LOWEST /
					(nb_stages - 1);
			/* Higher prio for the queues closer to last stage */
			q_conf.priority = RTE_EVENT_DEV_PRIORITY_LOWEST -
					(step * stage_pos);
		}
		ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
		if (ret) {
			evt_err("failed to setup queue=%d", queue);
			return ret;
		}
	}

	if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
		opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;

	/* port configuration */
	const struct rte_event_port_conf p_conf = {
			.dequeue_depth = opt->wkr_deq_dep,
			.enqueue_depth = dev_info.max_event_port_dequeue_depth,
			.new_event_threshold = dev_info.max_num_events,
	};

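	/* The stride of nb_stages below means producer port N injects into
	 * queue N * nb_stages, the head of its own stage chain (see
	 * perf_event_dev_port_setup()).
	 */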
	ret = perf_event_dev_port_setup(test, opt, nb_stages /* stride */,
					nb_queues, &p_conf);
	if (ret)
		return ret;

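	/* Eventdevs without distributed scheduling (e.g. the event_sw PMD)
	 * expose their scheduler as a service, which must be mapped to a
	 * service lcore before the device is started.
	 */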
	if (!evt_has_distributed_sched(opt->dev_id)) {
		uint32_t service_id;

		rte_event_dev_service_id_get(opt->dev_id, &service_id);
		ret = evt_service_setup(service_id);
		if (ret) {
			evt_err("No service lcore found to run event dev.");
			return ret;
		}
	}

	ret = rte_event_dev_start(opt->dev_id);
	if (ret) {
		evt_err("failed to start eventdev %d", opt->dev_id);
		return ret;
	}

	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		RTE_ETH_FOREACH_DEV(prod) {
			ret = rte_eth_dev_start(prod);
			if (ret) {
				evt_err("Ethernet dev [%d] failed to start",
						prod);
				return ret;
			}

			ret = rte_event_eth_rx_adapter_start(prod);
			if (ret) {
				evt_err("Rx adapter[%d] start failed", prod);
				return ret;
			}
			printf("%s: Port[%d] using Rx adapter[%d] started\n",
					__func__, prod, prod);
		}
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		for (prod = 0; prod < opt->nb_timer_adptrs; prod++) {
			ret = rte_event_timer_adapter_start(
					t->timer_adptr[prod]);
			if (ret) {
				evt_err("failed to start event timer adapter %d",
						prod);
				return ret;
			}
		}
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
		uint8_t cdev_id, cdev_count;

		cdev_count = rte_cryptodev_count();
		for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
			ret = rte_cryptodev_start(cdev_id);
			if (ret) {
				evt_err("Failed to start cryptodev %u",
					cdev_id);
				return ret;
			}
		}
	}

	return 0;
}

static void
perf_queue_opt_dump(struct evt_options *opt)
{
	evt_dump_fwd_latency(opt);
	perf_opt_dump(opt, perf_queue_nb_event_queues(opt));
}

static int
perf_queue_opt_check(struct evt_options *opt)
{
	return perf_opt_check(opt, perf_queue_nb_event_queues(opt));
}

static bool
perf_queue_capability_check(struct evt_options *opt)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (dev_info.max_event_queues < perf_queue_nb_event_queues(opt) ||
			dev_info.max_event_ports < perf_nb_event_ports(opt)) {
		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
			perf_queue_nb_event_queues(opt),
			dev_info.max_event_queues,
			perf_nb_event_ports(opt), dev_info.max_event_ports);
		return false;
	}

	return true;
}

static const struct evt_test_ops perf_queue = {
	.cap_check          = perf_queue_capability_check,
	.opt_check          = perf_queue_opt_check,
	.opt_dump           = perf_queue_opt_dump,
	.test_setup         = perf_test_setup,
	.mempool_setup      = perf_mempool_setup,
	.ethdev_setup       = perf_ethdev_setup,
	.cryptodev_setup    = perf_cryptodev_setup,
	.ethdev_rx_stop     = perf_ethdev_rx_stop,
	.eventdev_setup     = perf_queue_eventdev_setup,
	.launch_lcores      = perf_queue_launch_lcores,
	.eventdev_destroy   = perf_eventdev_destroy,
	.mempool_destroy    = perf_mempool_destroy,
	.ethdev_destroy     = perf_ethdev_destroy,
	.cryptodev_destroy  = perf_cryptodev_destroy,
	.test_result        = perf_test_result,
	.test_destroy       = perf_test_destroy,
};

EVT_TEST_REGISTER(perf_queue);