/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "test_perf_common.h"

/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */

static inline int
perf_queue_nb_event_queues(struct evt_options *opt)
{
	/* nb_queues = number of producers * number of stages */
	uint8_t nb_prod = opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
		rte_eth_dev_count_avail() : evt_nr_active_lcores(opt->plcores);
	return nb_prod * opt->nb_stages;
}

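/* Stamp events entering the first stage of the pipeline (queue_id %
 * nb_stages == 0) so the last stage can compute forward latency.
 */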
static __rte_always_inline void
mark_fwd_latency(struct rte_event *const ev,
		const uint8_t nb_stages)
{
	if (unlikely((ev->queue_id % nb_stages) == 0)) {
		struct perf_elt *const m = ev->event_ptr;

		m->timestamp = rte_get_timer_cycles();
	}
}

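/* Advance the event to the next queue and apply that stage's schedule type. */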
static __rte_always_inline void
fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
		const uint8_t nb_stages)
{
	ev->queue_id++;
	ev->sched_type = sched_type_list[ev->queue_id % nb_stages];
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->event_type = RTE_EVENT_TYPE_CPU;
}

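/* Non-burst worker: dequeue one event at a time and run it through the
 * pipeline stages.
 */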
static int
perf_queue_worker(void *arg, const int enable_fwd_latency)
{
	PERF_WORKER_INIT;
	struct rte_event ev;

	while (t->done == false) {
		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!event) {
			rte_pause();
			continue;
		}

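		/* For crypto producers, unwrap the completed crypto op:
		 * forward its mbuf on success, drop the event on failure.
		 */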
		if (prod_crypto_type &&
		    (ev.event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
			struct rte_crypto_op *op = ev.event_ptr;

			if (op->status == RTE_CRYPTO_OP_STATUS_SUCCESS) {
				if (op->sym->m_dst == NULL)
					ev.event_ptr = op->sym->m_src;
				else
					ev.event_ptr = op->sym->m_dst;
				rte_crypto_op_free(op);
			} else {
				rte_crypto_op_free(op);
				continue;
			}
		}

		/* first queue in pipeline, mark timestamp to compute fwd latency */
		if (enable_fwd_latency && !prod_timer_type)
			mark_fwd_latency(&ev, nb_stages);

		/* last stage in pipeline */
		if (unlikely((ev.queue_id % nb_stages) == laststage)) {
			if (enable_fwd_latency)
				cnt = perf_process_last_stage_latency(pool,
					&ev, w, bufs, sz, cnt);
			else
				cnt = perf_process_last_stage(pool,
					&ev, w, bufs, sz, cnt);
		} else {
			fwd_event(&ev, sched_type_list, nb_stages);
			while (rte_event_enqueue_burst(dev, port, &ev, 1) != 1)
				rte_pause();
		}
	}
	return 0;
}

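/* Burst worker: dequeue up to BURST_SIZE events, process them as a batch
 * and re-enqueue forwarded events in one burst.
 */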
static int
perf_queue_worker_burst(void *arg, const int enable_fwd_latency)
{
	PERF_WORKER_INIT;
	uint16_t i;
	/* +1 to avoid prefetch out of array check */
	struct rte_event ev[BURST_SIZE + 1];

	while (t->done == false) {
		uint16_t const nb_rx = rte_event_dequeue_burst(dev, port, ev,
				BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
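			/* As in the non-burst worker, swap a completed
			 * crypto op for its mbuf before the event moves on.
			 */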
			if (prod_crypto_type &&
			    (ev[i].event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
				struct rte_crypto_op *op = ev[i].event_ptr;

				if (op->status ==
				    RTE_CRYPTO_OP_STATUS_SUCCESS) {
					if (op->sym->m_dst == NULL)
						ev[i].event_ptr =
							op->sym->m_src;
					else
						ev[i].event_ptr =
							op->sym->m_dst;
					rte_crypto_op_free(op);
				} else {
					rte_crypto_op_free(op);
					continue;
				}
			}

			if (enable_fwd_latency && !prod_timer_type) {
				rte_prefetch0(ev[i+1].event_ptr);
				/* first queue in pipeline,
				 * mark timestamp to compute fwd latency
				 */
				mark_fwd_latency(&ev[i], nb_stages);
			}
			/* last stage in pipeline */
			if (unlikely((ev[i].queue_id % nb_stages) ==
						 laststage)) {
				if (enable_fwd_latency)
					cnt = perf_process_last_stage_latency(
						pool, &ev[i], w, bufs, sz, cnt);
				else
					cnt = perf_process_last_stage(pool,
						&ev[i], w, bufs, sz, cnt);

				ev[i].op = RTE_EVENT_OP_RELEASE;
			} else {
				fwd_event(&ev[i], sched_type_list, nb_stages);
			}
		}

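		/* Enqueue the processed burst, retrying until the device
		 * accepts every event.
		 */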
		uint16_t enq;

		enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
		while (enq < nb_rx) {
			enq += rte_event_enqueue_burst(dev, port,
							ev + enq, nb_rx - enq);
		}
	}
	return 0;
}

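/* Dispatch to a specialized worker so burst mode and forward-latency
 * measurement are compile-time constants on the fast path.
 */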
static int
worker_wrapper(void *arg)
{
	struct worker_data *w = arg;
	struct evt_options *opt = w->t->opt;

	const bool burst = evt_has_burst_mode(w->dev_id);
	const int fwd_latency = opt->fwd_latency;

	/* allow compiler to optimize */
	if (!burst && !fwd_latency)
		return perf_queue_worker(arg, 0);
	else if (!burst && fwd_latency)
		return perf_queue_worker(arg, 1);
	else if (burst && !fwd_latency)
		return perf_queue_worker_burst(arg, 0);
	else if (burst && fwd_latency)
		return perf_queue_worker_burst(arg, 1);

	rte_panic("invalid worker\n");
}

static int
perf_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
	return perf_launch_lcores(test, opt, worker_wrapper);
}

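/* Configure the event device: one queue per producer per stage, one port
 * per worker (plus producer ports when needed), then start the device and
 * any producer-side adapters.
 */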
static int
perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint8_t queue;
	int nb_stages = opt->nb_stages;
	int ret;
	int nb_ports;
	int nb_queues;
	uint16_t prod;
	struct rte_event_dev_info dev_info;
	struct test_perf *t = evt_test_priv(test);

	nb_ports = evt_nr_active_lcores(opt->wlcores);
	nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ||
		opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ? 0 :
		evt_nr_active_lcores(opt->plcores);

	nb_queues = perf_queue_nb_event_queues(opt);

	memset(&dev_info, 0, sizeof(struct rte_event_dev_info));
	ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (ret) {
		evt_err("failed to get eventdev info %d", opt->dev_id);
		return ret;
	}

	ret = evt_configure_eventdev(opt, nb_queues, nb_ports);
	if (ret) {
		evt_err("failed to configure eventdev %d", opt->dev_id);
		return ret;
	}

	struct rte_event_queue_conf q_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	/* queue configurations */
	for (queue = 0; queue < nb_queues; queue++) {
		q_conf.schedule_type =
			(opt->sched_type_list[queue % nb_stages]);

		if (opt->q_priority) {
			uint8_t stage_pos = queue % nb_stages;
			/* Configure event queues (stage 0 to stage n) with
			 * RTE_EVENT_DEV_PRIORITY_LOWEST to
			 * RTE_EVENT_DEV_PRIORITY_HIGHEST.
			 */
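			/* Note: assumes nb_stages > 1 when q_priority is
			 * set; with a single stage the division below would
			 * be by zero.
			 */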
			uint8_t step = RTE_EVENT_DEV_PRIORITY_LOWEST /
					(nb_stages - 1);
			/* Higher prio for the queues closer to last stage */
			q_conf.priority = RTE_EVENT_DEV_PRIORITY_LOWEST -
					(step * stage_pos);
		}
		ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
		if (ret) {
			evt_err("failed to setup queue=%d", queue);
			return ret;
		}
	}

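	/* Clamp the worker dequeue depth to the device limit. */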
	if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
		opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;

	/* port configuration */
	const struct rte_event_port_conf p_conf = {
			.dequeue_depth = opt->wkr_deq_dep,
			.enqueue_depth = dev_info.max_event_port_dequeue_depth,
			.new_event_threshold = dev_info.max_num_events,
	};

	ret = perf_event_dev_port_setup(test, opt, nb_stages /* stride */,
					nb_queues, &p_conf);
	if (ret)
		return ret;

	if (!evt_has_distributed_sched(opt->dev_id)) {
		uint32_t service_id;

		rte_event_dev_service_id_get(opt->dev_id, &service_id);
		ret = evt_service_setup(service_id);
		if (ret) {
			evt_err("No service lcore found to run event dev.");
			return ret;
		}
	}

	ret = rte_event_dev_start(opt->dev_id);
	if (ret) {
		evt_err("failed to start eventdev %d", opt->dev_id);
		return ret;
	}

	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		RTE_ETH_FOREACH_DEV(prod) {
			ret = rte_eth_dev_start(prod);
			if (ret) {
				evt_err("failed to start ethernet dev %d",
						prod);
				return ret;
			}

			ret = rte_event_eth_rx_adapter_start(prod);
			if (ret) {
				evt_err("Rx adapter[%d] start failed", prod);
				return ret;
			}
			printf("%s: Port[%d] using Rx adapter[%d] started\n",
					__func__, prod, prod);
		}
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		for (prod = 0; prod < opt->nb_timer_adptrs; prod++) {
			ret = rte_event_timer_adapter_start(
					t->timer_adptr[prod]);
			if (ret) {
				evt_err("failed to start event timer adapter %d",
						prod);
				return ret;
			}
		}
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
		uint8_t cdev_id, cdev_count;

		cdev_count = rte_cryptodev_count();
		for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
			ret = rte_cryptodev_start(cdev_id);
			if (ret) {
				evt_err("Failed to start cryptodev %u",
					cdev_id);
				return ret;
			}
		}
	}

	return 0;
}

static void
perf_queue_opt_dump(struct evt_options *opt)
{
	evt_dump_fwd_latency(opt);
	perf_opt_dump(opt, perf_queue_nb_event_queues(opt));
}

static int
perf_queue_opt_check(struct evt_options *opt)
{
	return perf_opt_check(opt, perf_queue_nb_event_queues(opt));
}

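/* Verify the device exposes enough event queues and ports for this test. */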
static bool
perf_queue_capability_check(struct evt_options *opt)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (dev_info.max_event_queues < perf_queue_nb_event_queues(opt) ||
			dev_info.max_event_ports < perf_nb_event_ports(opt)) {
		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
			perf_queue_nb_event_queues(opt),
			dev_info.max_event_queues,
			perf_nb_event_ports(opt), dev_info.max_event_ports);
		/* Fail the check instead of reporting an error and passing */
		return false;
	}

	return true;
}

354 
355 static const struct evt_test_ops perf_queue =  {
356 	.cap_check          = perf_queue_capability_check,
357 	.opt_check          = perf_queue_opt_check,
358 	.opt_dump           = perf_queue_opt_dump,
359 	.test_setup         = perf_test_setup,
360 	.mempool_setup      = perf_mempool_setup,
361 	.ethdev_setup	    = perf_ethdev_setup,
362 	.cryptodev_setup    = perf_cryptodev_setup,
363 	.eventdev_setup     = perf_queue_eventdev_setup,
364 	.launch_lcores      = perf_queue_launch_lcores,
365 	.eventdev_destroy   = perf_eventdev_destroy,
366 	.mempool_destroy    = perf_mempool_destroy,
367 	.ethdev_destroy	    = perf_ethdev_destroy,
368 	.cryptodev_destroy  = perf_cryptodev_destroy,
369 	.test_result        = perf_test_result,
370 	.test_destroy       = perf_test_destroy,
371 };
372 
373 EVT_TEST_REGISTER(perf_queue);
374