xref: /dpdk/app/test-eventdev/test_perf_queue.c (revision 8f1d23ece06adff5eae9f1b4365bdbbd3abee2b2)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "test_perf_common.h"

/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */

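/*
 * One possible invocation (see the guide linked above for the complete and
 * authoritative option list; core IDs below are only an example):
 *
 *   dpdk-test-eventdev -l 0-3 -- --test=perf_queue --plcores=1 \
 *       --wlcores=2-3 --stlist=a --nb_flows=64
 */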
static inline int
perf_queue_nb_event_queues(struct evt_options *opt)
{
	/* nb_queues = number of producers * number of stages */
	uint8_t nb_prod = opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
		rte_eth_dev_count_avail() : evt_nr_active_lcores(opt->plcores);
	return nb_prod * opt->nb_stages;
}

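/*
 * Stamp only events sitting in a stage-0 queue (queue_id % nb_stages == 0),
 * i.e. events that have just entered the pipeline. The timestamp is read
 * back by perf_process_last_stage_latency() to compute forward latency.
 */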
static __rte_always_inline void
mark_fwd_latency(struct rte_event *const ev,
		const uint8_t nb_stages)
{
	if (unlikely((ev->queue_id % nb_stages) == 0)) {
		struct perf_elt *const m = ev->event_ptr;

		m->timestamp = rte_get_timer_cycles();
	}
}

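/*
 * Advance an event to the next queue (stage) of its chain: pick the
 * scheduling type configured for that stage and mark the event as a
 * FORWARD so the scheduler moves it instead of treating it as new.
 */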
static __rte_always_inline void
fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
		const uint8_t nb_stages)
{
	ev->queue_id++;
	ev->sched_type = sched_type_list[ev->queue_id % nb_stages];
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->event_type = RTE_EVENT_TYPE_CPU;
}

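/*
 * Single-event worker loop, used when the event device lacks burst mode:
 * dequeue one event at a time, unwrap successful symmetric crypto
 * completions back to their mbufs, then either account the event at the
 * last stage or forward it to the next queue.
 */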
static int
perf_queue_worker(void *arg, const int enable_fwd_latency)
{
	uint16_t enq = 0, deq = 0;
	struct rte_event ev;
	PERF_WORKER_INIT;

	while (t->done == false) {
		deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!deq) {
			rte_pause();
			continue;
		}

		if (prod_crypto_type &&
		    (ev.event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
			struct rte_crypto_op *op = ev.event_ptr;

			if (op->status == RTE_CRYPTO_OP_STATUS_SUCCESS) {
				if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
					if (op->sym->m_dst == NULL)
						ev.event_ptr = op->sym->m_src;
					else
						ev.event_ptr = op->sym->m_dst;
					rte_crypto_op_free(op);
				}
			} else {
				rte_crypto_op_free(op);
				continue;
			}
		}

		/* first q in pipeline, mark timestamp to compute fwd latency */
		if (enable_fwd_latency && !prod_timer_type)
			mark_fwd_latency(&ev, nb_stages);

		/* last stage in pipeline */
		if (unlikely((ev.queue_id % nb_stages) == laststage)) {
			if (enable_fwd_latency)
				cnt = perf_process_last_stage_latency(pool,
					&ev, w, bufs, sz, cnt);
			else
				cnt = perf_process_last_stage(pool,
					&ev, w, bufs, sz, cnt);
		} else {
			fwd_event(&ev, sched_type_list, nb_stages);
			do {
				enq = rte_event_enqueue_burst(dev, port, &ev,
							      1);
			} while (!enq && !t->done);
		}
	}

	perf_worker_cleanup(pool, dev, port, &ev, enq, deq);

	return 0;
}

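/*
 * Burst worker loop, used when the event device supports burst mode:
 * dequeue up to BURST_SIZE events per call. Events finishing at the last
 * stage are flagged RTE_EVENT_OP_RELEASE so the whole array can be handed
 * back to the device in a single enqueue burst.
 */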
static int
perf_queue_worker_burst(void *arg, const int enable_fwd_latency)
{
	/* +1 to avoid prefetch out of array check */
	struct rte_event ev[BURST_SIZE + 1];
	uint16_t enq = 0, nb_rx = 0;
	PERF_WORKER_INIT;
	uint16_t i;

	while (t->done == false) {
		nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			if (prod_crypto_type &&
			    (ev[i].event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
				struct rte_crypto_op *op = ev[i].event_ptr;

				if (op->status ==
				    RTE_CRYPTO_OP_STATUS_SUCCESS) {
					if (op->sym->m_dst == NULL)
						ev[i].event_ptr =
							op->sym->m_src;
					else
						ev[i].event_ptr =
							op->sym->m_dst;
					rte_crypto_op_free(op);
				} else {
					rte_crypto_op_free(op);
					continue;
				}
			}

			if (enable_fwd_latency && !prod_timer_type) {
				rte_prefetch0(ev[i+1].event_ptr);
				/* first queue in pipeline.
				 * mark time stamp to compute fwd latency
				 */
				mark_fwd_latency(&ev[i], nb_stages);
			}
			/* last stage in pipeline */
			if (unlikely((ev[i].queue_id % nb_stages) ==
						 laststage)) {
				if (enable_fwd_latency)
					cnt = perf_process_last_stage_latency(
						pool, &ev[i], w, bufs, sz, cnt);
				else
					cnt = perf_process_last_stage(pool,
						&ev[i], w, bufs, sz, cnt);

				ev[i].op = RTE_EVENT_OP_RELEASE;
			} else {
				fwd_event(&ev[i], sched_type_list, nb_stages);
			}
		}

		enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
		while (enq < nb_rx && !t->done) {
			enq += rte_event_enqueue_burst(dev, port,
							ev + enq, nb_rx - enq);
		}
	}

	perf_worker_cleanup(pool, dev, port, ev, enq, nb_rx);

	return 0;
}

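/*
 * Resolve the burst capability and the fwd_latency option once, outside the
 * hot loop, so each call into perf_queue_worker*() passes compile-time
 * constant flags and the per-event branches can be optimized out.
 */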
static int
worker_wrapper(void *arg)
{
	struct worker_data *w = arg;
	struct evt_options *opt = w->t->opt;

	const bool burst = evt_has_burst_mode(w->dev_id);
	const int fwd_latency = opt->fwd_latency;

	/* allow compiler to optimize */
	if (!burst && !fwd_latency)
		return perf_queue_worker(arg, 0);
	else if (!burst && fwd_latency)
		return perf_queue_worker(arg, 1);
	else if (burst && !fwd_latency)
		return perf_queue_worker_burst(arg, 0);
	else if (burst && fwd_latency)
		return perf_queue_worker_burst(arg, 1);

	rte_panic("invalid worker\n");
}

static int
perf_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
	return perf_launch_lcores(test, opt, worker_wrapper);
}

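/*
 * Device setup for the queue-based perf test: configure the event device,
 * create one chain of nb_stages queues per producer (optionally with higher
 * priority toward the last stage), set up worker and producer ports, then
 * start the device together with any Rx/timer/crypto adapters in use.
 */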
static int
perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint8_t queue;
	int nb_stages = opt->nb_stages;
	int ret;
	int nb_ports;
	int nb_queues;
	uint16_t prod;
	struct rte_event_dev_info dev_info;
	struct test_perf *t = evt_test_priv(test);

	nb_ports = evt_nr_active_lcores(opt->wlcores);
	nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ||
		opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ? 0 :
		evt_nr_active_lcores(opt->plcores);

	nb_queues = perf_queue_nb_event_queues(opt);

	memset(&dev_info, 0, sizeof(struct rte_event_dev_info));
	ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (ret) {
		evt_err("failed to get eventdev info %d", opt->dev_id);
		return ret;
	}

	ret = evt_configure_eventdev(opt, nb_queues, nb_ports);
	if (ret) {
		evt_err("failed to configure eventdev %d", opt->dev_id);
		return ret;
	}

	struct rte_event_queue_conf q_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	/* queue configurations */
	for (queue = 0; queue < nb_queues; queue++) {
		q_conf.schedule_type =
			(opt->sched_type_list[queue % nb_stages]);

		if (opt->q_priority) {
			uint8_t stage_pos = queue % nb_stages;
			/* Configure event queues(stage 0 to stage n) with
			 * RTE_EVENT_DEV_PRIORITY_LOWEST to
			 * RTE_EVENT_DEV_PRIORITY_HIGHEST.
			 */
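			/* Example, assuming the default priority range
			 * (LOWEST = 255, HIGHEST = 0): with nb_stages = 3,
			 * step = 255 / 2 = 127 and the stage priorities
			 * become 255, 128 and 1, i.e. numerically lower
			 * (higher) priority toward the last stage.
			 */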
			uint8_t step = RTE_EVENT_DEV_PRIORITY_LOWEST /
					(nb_stages - 1);
			/* Higher prio for the queues closer to last stage */
			q_conf.priority = RTE_EVENT_DEV_PRIORITY_LOWEST -
					(step * stage_pos);
		}
		ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
		if (ret) {
			evt_err("failed to setup queue=%d", queue);
			return ret;
		}
	}

	if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
		opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;

	/* port configuration */
	const struct rte_event_port_conf p_conf = {
			.dequeue_depth = opt->wkr_deq_dep,
			.enqueue_depth = dev_info.max_event_port_dequeue_depth,
			.new_event_threshold = dev_info.max_num_events,
	};

	ret = perf_event_dev_port_setup(test, opt, nb_stages /* stride */,
					nb_queues, &p_conf);
	if (ret)
		return ret;

	if (!evt_has_distributed_sched(opt->dev_id)) {
		uint32_t service_id;
		rte_event_dev_service_id_get(opt->dev_id, &service_id);
		ret = evt_service_setup(service_id);
		if (ret) {
			evt_err("No service lcore found to run event dev.");
			return ret;
		}
	}

	ret = rte_event_dev_start(opt->dev_id);
	if (ret) {
		evt_err("failed to start eventdev %d", opt->dev_id);
		return ret;
	}

	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		RTE_ETH_FOREACH_DEV(prod) {
			ret = rte_eth_dev_start(prod);
			if (ret) {
295 				evt_err("Ethernet dev [%d] failed to start. Using synthetic producer",
296 						prod);
				return ret;
			}

			ret = rte_event_eth_rx_adapter_start(prod);
			if (ret) {
				evt_err("Rx adapter[%d] start failed", prod);
				return ret;
			}
			printf("%s: Port[%d] using Rx adapter[%d] started\n",
					__func__, prod, prod);
		}
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		for (prod = 0; prod < opt->nb_timer_adptrs; prod++) {
			ret = rte_event_timer_adapter_start(
					t->timer_adptr[prod]);
			if (ret) {
				evt_err("failed to start event timer adapter %d",
						prod);
				return ret;
			}
		}
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
		uint8_t cdev_id, cdev_count;

		cdev_count = rte_cryptodev_count();
		for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
			ret = rte_cryptodev_start(cdev_id);
			if (ret) {
				evt_err("Failed to start cryptodev %u",
					cdev_id);
				return ret;
			}
		}
	}

	return 0;
}

334 
335 static void
336 perf_queue_opt_dump(struct evt_options *opt)
337 {
338 	evt_dump_fwd_latency(opt);
339 	perf_opt_dump(opt, perf_queue_nb_event_queues(opt));
340 }
341 
342 static int
343 perf_queue_opt_check(struct evt_options *opt)
344 {
345 	return perf_opt_check(opt, perf_queue_nb_event_queues(opt));
346 }
347 
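/*
 * Capability pre-check: compare the number of event queues and ports this
 * test needs against what the device advertises, and reject the
 * configuration before any setup work is done if the device cannot host it.
 */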
static bool
perf_queue_capability_check(struct evt_options *opt)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (dev_info.max_event_queues < perf_queue_nb_event_queues(opt) ||
			dev_info.max_event_ports < perf_nb_event_ports(opt)) {
		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
			perf_queue_nb_event_queues(opt),
			dev_info.max_event_queues,
			perf_nb_event_ports(opt), dev_info.max_event_ports);
		return false;
	}

	return true;
}

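/*
 * Test ops table registered below via EVT_TEST_REGISTER(). Most callbacks
 * are the shared perf_* helpers declared in test_perf_common.h; only the
 * hooks that depend on the queue count (cap_check, opt_check, opt_dump,
 * eventdev_setup, launch_lcores) are specific to the queue-based test.
 */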
static const struct evt_test_ops perf_queue = {
	.cap_check          = perf_queue_capability_check,
	.opt_check          = perf_queue_opt_check,
	.opt_dump           = perf_queue_opt_dump,
	.test_setup         = perf_test_setup,
	.mempool_setup      = perf_mempool_setup,
	.ethdev_setup       = perf_ethdev_setup,
	.cryptodev_setup    = perf_cryptodev_setup,
	.ethdev_rx_stop     = perf_ethdev_rx_stop,
	.eventdev_setup     = perf_queue_eventdev_setup,
	.launch_lcores      = perf_queue_launch_lcores,
	.eventdev_destroy   = perf_eventdev_destroy,
	.mempool_destroy    = perf_mempool_destroy,
	.ethdev_destroy     = perf_ethdev_destroy,
	.cryptodev_destroy  = perf_cryptodev_destroy,
	.test_result        = perf_test_result,
	.test_destroy       = perf_test_destroy,
};

EVT_TEST_REGISTER(perf_queue);