/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "test_perf_common.h"

/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */

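/*
 * Example invocation (a sketch: flag names follow the guide linked above,
 * but the lcore ids, core list and event_sw0 vdev are illustrative
 * assumptions, not the only supported setup):
 *   dpdk-test-eventdev -l 0-3 --vdev=event_sw0 -- --test=perf_queue \
 *       --plcores=1 --wlcores=2,3 --stlist=a,o --nb_flows=64 --fwd_latency
 */

/*
 * The device needs one chain of nb_stages queues per producer: with the
 * ethdev Rx adapter every available ethernet device produces events,
 * otherwise each producer lcore does.
 */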
static inline int
perf_queue_nb_event_queues(struct evt_options *opt)
{
	/* nb_queues = number of producers * number of stages */
	uint8_t nb_prod = opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
		rte_eth_dev_count_avail() : evt_nr_active_lcores(opt->plcores);
	return nb_prod * opt->nb_stages;
}

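/*
 * Stamp the current timer cycles into the event's perf_elt when the event
 * sits in the first queue of its pipeline (queue_id % nb_stages == 0); the
 * last stage reads the timestamp back to compute forward latency.
 */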
static inline __attribute__((always_inline)) void
mark_fwd_latency(struct rte_event *const ev,
		const uint8_t nb_stages)
{
	if (unlikely((ev->queue_id % nb_stages) == 0)) {
		struct perf_elt *const m = ev->event_ptr;

		m->timestamp = rte_get_timer_cycles();
	}
}

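/*
 * Advance the event to the next queue in the pipeline and re-submit it as
 * a FORWARD op, using the scheduling type configured for the next stage.
 */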
static inline __attribute__((always_inline)) void
fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
		const uint8_t nb_stages)
{
	ev->queue_id++;
	ev->sched_type = sched_type_list[ev->queue_id % nb_stages];
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->event_type = RTE_EVENT_TYPE_CPU;
}

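/*
 * Single-event worker: dequeue one event at a time, forward it through the
 * stage queues, and account it (optionally with latency) at the last stage.
 */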
static int
perf_queue_worker(void *arg, const int enable_fwd_latency)
{
	PERF_WORKER_INIT;
	struct rte_event ev;

	while (t->done == false) {
		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!event) {
			rte_pause();
			continue;
		}
		/* first q in pipeline, mark timestamp to compute fwd latency */
		if (enable_fwd_latency && !prod_timer_type)
			mark_fwd_latency(&ev, nb_stages);

		/* last stage in pipeline */
		if (unlikely((ev.queue_id % nb_stages) == laststage)) {
			if (enable_fwd_latency)
				cnt = perf_process_last_stage_latency(pool,
					&ev, w, bufs, sz, cnt);
			else
				cnt = perf_process_last_stage(pool,
					&ev, w, bufs, sz, cnt);
		} else {
			fwd_event(&ev, sched_type_list, nb_stages);
			while (rte_event_enqueue_burst(dev, port, &ev, 1) != 1)
				rte_pause();
		}
	}
	return 0;
}

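/*
 * Burst worker: the extra array slot lets the loop prefetch ev[i + 1]'s
 * event_ptr without reading past the array on the final iteration. Events
 * that finish the last stage are flipped to RTE_EVENT_OP_RELEASE, so the
 * trailing enqueue releases their scheduler contexts instead of forwarding.
 */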
static int
perf_queue_worker_burst(void *arg, const int enable_fwd_latency)
{
	PERF_WORKER_INIT;
	uint16_t i;
	/* +1 to avoid prefetch out of array check */
	struct rte_event ev[BURST_SIZE + 1];

	while (t->done == false) {
		uint16_t const nb_rx = rte_event_dequeue_burst(dev, port, ev,
				BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			if (enable_fwd_latency && !prod_timer_type) {
				rte_prefetch0(ev[i+1].event_ptr);
				/* first queue in pipeline.
				 * mark time stamp to compute fwd latency
				 */
				mark_fwd_latency(&ev[i], nb_stages);
			}
			/* last stage in pipeline */
			if (unlikely((ev[i].queue_id % nb_stages) ==
						 laststage)) {
				if (enable_fwd_latency)
					cnt = perf_process_last_stage_latency(
						pool, &ev[i], w, bufs, sz, cnt);
				else
					cnt = perf_process_last_stage(pool,
						&ev[i], w, bufs, sz, cnt);

				ev[i].op = RTE_EVENT_OP_RELEASE;
			} else {
				fwd_event(&ev[i], sched_type_list, nb_stages);
			}
		}

		uint16_t enq;

		enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
		while (enq < nb_rx) {
			enq += rte_event_enqueue_burst(dev, port,
							ev + enq, nb_rx - enq);
		}
	}
	return 0;
}

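/*
 * Pick one of the four specialized loops up front so that burst mode and
 * latency measurement become compile-time constants inside each worker.
 */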
static int
worker_wrapper(void *arg)
{
	struct worker_data *w = arg;
	struct evt_options *opt = w->t->opt;

	const bool burst = evt_has_burst_mode(w->dev_id);
	const int fwd_latency = opt->fwd_latency;

	/* allow compiler to optimize */
	if (!burst && !fwd_latency)
		return perf_queue_worker(arg, 0);
	else if (!burst && fwd_latency)
		return perf_queue_worker(arg, 1);
	else if (burst && !fwd_latency)
		return perf_queue_worker_burst(arg, 0);
	else if (burst && fwd_latency)
		return perf_queue_worker_burst(arg, 1);

	rte_panic("invalid worker\n");
}

static int
perf_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
	return perf_launch_lcores(test, opt, worker_wrapper);
}

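/*
 * Configure the event device: device-level config sized from the device
 * capabilities, one queue per (producer, stage) pair, worker/producer
 * ports, an optional scheduling service, then start the device.
 */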
static int
perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint8_t queue;
	int nb_stages = opt->nb_stages;
	int ret;
	int nb_ports;
	int nb_queues;
	struct rte_event_dev_info dev_info;

	nb_ports = evt_nr_active_lcores(opt->wlcores);
	nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ||
		 opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ? 0 :
		evt_nr_active_lcores(opt->plcores);

	nb_queues = perf_queue_nb_event_queues(opt);

	memset(&dev_info, 0, sizeof(struct rte_event_dev_info));
	ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (ret) {
		evt_err("failed to get eventdev info %d", opt->dev_id);
		return ret;
	}

	const struct rte_event_dev_config config = {
			.nb_event_queues = nb_queues,
			.nb_event_ports = nb_ports,
			.nb_events_limit = dev_info.max_num_events,
			.nb_event_queue_flows = opt->nb_flows,
			.nb_event_port_dequeue_depth =
				dev_info.max_event_port_dequeue_depth,
			.nb_event_port_enqueue_depth =
				dev_info.max_event_port_enqueue_depth,
	};

	ret = rte_event_dev_configure(opt->dev_id, &config);
	if (ret) {
		evt_err("failed to configure eventdev %d", opt->dev_id);
		return ret;
	}

	struct rte_event_queue_conf q_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	/* queue configurations */
	for (queue = 0; queue < nb_queues; queue++) {
		q_conf.schedule_type =
			(opt->sched_type_list[queue % nb_stages]);

		if (opt->q_priority) {
			uint8_t stage_pos = queue % nb_stages;
			/* Configure event queues (stage 0 to stage n) with
			 * RTE_EVENT_DEV_PRIORITY_LOWEST to
			 * RTE_EVENT_DEV_PRIORITY_HIGHEST.
			 */
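			/* For example (illustrative numbers): with
			 * nb_stages = 3, step = 255 / 2 = 127, so stages
			 * 0, 1 and 2 get priorities 255, 128 and 1, and
			 * numerically lower values schedule first.
			 */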
			uint8_t step = RTE_EVENT_DEV_PRIORITY_LOWEST /
					(nb_stages - 1);
			/* Higher prio for the queues closer to last stage */
			q_conf.priority = RTE_EVENT_DEV_PRIORITY_LOWEST -
					(step * stage_pos);
		}
		ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
		if (ret) {
			evt_err("failed to setup queue=%d", queue);
			return ret;
		}
	}

	if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
		opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;

	/* port configuration */
	const struct rte_event_port_conf p_conf = {
			.dequeue_depth = opt->wkr_deq_dep,
			.enqueue_depth = dev_info.max_event_port_dequeue_depth,
			.new_event_threshold = dev_info.max_num_events,
	};

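	/* stride = nb_stages so that each producer port is linked to the
	 * start of its own chain of stage queues.
	 */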
	ret = perf_event_dev_port_setup(test, opt, nb_stages /* stride */,
					nb_queues, &p_conf);
	if (ret)
		return ret;

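	/* Devices without distributed (internal) scheduling expose the
	 * scheduler as a service; bind it to a service lcore so it runs.
	 */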
	if (!evt_has_distributed_sched(opt->dev_id)) {
		uint32_t service_id;

		rte_event_dev_service_id_get(opt->dev_id, &service_id);
		ret = evt_service_setup(service_id);
		if (ret) {
			evt_err("No service lcore found to run event dev.");
			return ret;
		}
	}

	ret = rte_event_dev_start(opt->dev_id);
	if (ret) {
		evt_err("failed to start eventdev %d", opt->dev_id);
		return ret;
	}

	return 0;
}

static void
perf_queue_opt_dump(struct evt_options *opt)
{
	evt_dump_fwd_latency(opt);
	perf_opt_dump(opt, perf_queue_nb_event_queues(opt));
}

static int
perf_queue_opt_check(struct evt_options *opt)
{
	return perf_opt_check(opt, perf_queue_nb_event_queues(opt));
}

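/*
 * Check that the device exposes enough event queues and ports for the
 * requested producers and stages before setup begins.
 */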
static bool
perf_queue_capability_check(struct evt_options *opt)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (dev_info.max_event_queues < perf_queue_nb_event_queues(opt) ||
			dev_info.max_event_ports < perf_nb_event_ports(opt)) {
		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
			perf_queue_nb_event_queues(opt),
			dev_info.max_event_queues,
			perf_nb_event_ports(opt), dev_info.max_event_ports);
		return false;
	}

	return true;
}

static const struct evt_test_ops perf_queue = {
	.cap_check          = perf_queue_capability_check,
	.opt_check          = perf_queue_opt_check,
	.opt_dump           = perf_queue_opt_dump,
	.test_setup         = perf_test_setup,
	.mempool_setup      = perf_mempool_setup,
	.ethdev_setup       = perf_ethdev_setup,
	.eventdev_setup     = perf_queue_eventdev_setup,
	.launch_lcores      = perf_queue_launch_lcores,
	.eventdev_destroy   = perf_eventdev_destroy,
	.mempool_destroy    = perf_mempool_destroy,
	.ethdev_destroy     = perf_ethdev_destroy,
	.test_result        = perf_test_result,
	.test_destroy       = perf_test_destroy,
};

EVT_TEST_REGISTER(perf_queue);