/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "test_perf_common.h"

/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */

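/*
 * perf_queue: the perf test in queue-based pipeline mode. Each producer
 * feeds its own chain of nb_stages event queues; workers forward events
 * from stage to stage and account throughput (and, when enabled, per-event
 * forwarding latency) at the last stage.
 */
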
static inline int
perf_queue_nb_event_queues(struct evt_options *opt)
{
	/* nb_queues = number of producers * number of stages */
	return evt_nr_active_lcores(opt->plcores) * opt->nb_stages;
}

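/* Stamp events entering the first stage of a chain so that the last stage
 * can compute the forwarding latency across the whole pipeline.
 */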
static __rte_always_inline void
mark_fwd_latency(struct rte_event *const ev,
		const uint8_t nb_stages)
{
	if (unlikely((ev->queue_id % nb_stages) == 0)) {
		struct perf_elt *const m = ev->event_ptr;

		m->timestamp = rte_get_timer_cycles();
	}
}

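/* Move an event to the next queue in its chain and refresh the scheduling
 * type for that stage.
 */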
static __rte_always_inline void
fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
		const uint8_t nb_stages)
{
	ev->queue_id++;
	ev->sched_type = sched_type_list[ev->queue_id % nb_stages];
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->event_type = RTE_EVENT_TYPE_CPU;
}

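/* Non-burst worker: dequeues a single event at a time. Used when the
 * event device does not report burst mode capability.
 */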
static int
perf_queue_worker(void *arg, const int enable_fwd_latency)
{
	PERF_WORKER_INIT;
	struct rte_event ev;

	while (t->done == false) {
		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!event) {
			rte_pause();
			continue;
		}
		/* first q in pipeline, mark timestamp to compute fwd latency */
		if (enable_fwd_latency)
			mark_fwd_latency(&ev, nb_stages);

		/* last stage in pipeline */
		if (unlikely((ev.queue_id % nb_stages) == laststage)) {
			if (enable_fwd_latency)
				cnt = perf_process_last_stage_latency(pool,
					&ev, w, bufs, sz, cnt);
			else
				cnt = perf_process_last_stage(pool,
					&ev, w, bufs, sz, cnt);
		} else {
			fwd_event(&ev, sched_type_list, nb_stages);
			while (rte_event_enqueue_burst(dev, port, &ev, 1) != 1)
				rte_pause();
		}
	}
	return 0;
}

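/* Burst worker: dequeues up to BURST_SIZE events per call and enqueues
 * them back as a burst. Events finishing the last stage are flagged
 * RTE_EVENT_OP_RELEASE so the enqueue hands them back to the scheduler.
 */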
static int
perf_queue_worker_burst(void *arg, const int enable_fwd_latency)
{
	PERF_WORKER_INIT;
	uint16_t i;
	/* +1 to avoid prefetch out of array check */
	struct rte_event ev[BURST_SIZE + 1];

	while (t->done == false) {
		uint16_t const nb_rx = rte_event_dequeue_burst(dev, port, ev,
				BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			if (enable_fwd_latency) {
				rte_prefetch0(ev[i+1].event_ptr);
				/* first queue in pipeline.
				 * mark timestamp to compute fwd latency
				 */
				mark_fwd_latency(&ev[i], nb_stages);
			}
			/* last stage in pipeline */
			if (unlikely((ev[i].queue_id % nb_stages) ==
						 laststage)) {
				if (enable_fwd_latency)
					cnt = perf_process_last_stage_latency(
						pool, &ev[i], w, bufs, sz, cnt);
				else
					cnt = perf_process_last_stage(pool,
						&ev[i], w, bufs, sz, cnt);

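				/* all stages done; release the event back
				 * to the scheduler via the enqueue below
				 */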
				ev[i].op = RTE_EVENT_OP_RELEASE;
			} else {
				fwd_event(&ev[i], sched_type_list, nb_stages);
			}
		}

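		/* retry until the entire burst has been enqueued back */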
		uint16_t enq;

		enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
		while (enq < nb_rx) {
			enq += rte_event_enqueue_burst(dev, port,
							ev + enq, nb_rx - enq);
		}
	}
	return 0;
}

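/* Dispatch to a specialized worker so that the burst and fwd_latency
 * checks become compile-time constants inside the hot loop.
 */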
static int
worker_wrapper(void *arg)
{
	struct worker_data *w = arg;
	struct evt_options *opt = w->t->opt;

	const bool burst = evt_has_burst_mode(w->dev_id);
	const int fwd_latency = opt->fwd_latency;

	/* allow compiler to optimize */
	if (!burst && !fwd_latency)
		return perf_queue_worker(arg, 0);
	else if (!burst && fwd_latency)
		return perf_queue_worker(arg, 1);
	else if (burst && !fwd_latency)
		return perf_queue_worker_burst(arg, 0);
	else if (burst && fwd_latency)
		return perf_queue_worker_burst(arg, 1);

	rte_panic("invalid worker\n");
}

static int
perf_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
	return perf_launch_lcores(test, opt, worker_wrapper);
}

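/* Configure the event device, set up the stage queues (optionally with
 * priority rising towards the last stage), the worker/producer ports and
 * the service core, then start the device.
 */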
static int
perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint8_t queue;
	int nb_stages = opt->nb_stages;
	int ret;
	int nb_ports;
	int nb_queues;

	nb_ports = evt_nr_active_lcores(opt->wlcores);
	nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ? 0 :
		evt_nr_active_lcores(opt->plcores);

	nb_queues = opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
		rte_eth_dev_count() * nb_stages :
		perf_queue_nb_event_queues(opt);

	const struct rte_event_dev_config config = {
			.nb_event_queues = nb_queues,
			.nb_event_ports = nb_ports,
			.nb_events_limit  = 4096,
			.nb_event_queue_flows = opt->nb_flows,
			.nb_event_port_dequeue_depth = 128,
			.nb_event_port_enqueue_depth = 128,
	};

	ret = rte_event_dev_configure(opt->dev_id, &config);
	if (ret) {
		evt_err("failed to configure eventdev %d", opt->dev_id);
		return ret;
	}

	struct rte_event_queue_conf q_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	/* queue configurations */
	for (queue = 0; queue < nb_queues; queue++) {
		q_conf.schedule_type = opt->sched_type_list[queue % nb_stages];

		if (opt->q_priority) {
			uint8_t stage_pos = queue % nb_stages;
			/* Configure event queues(stage 0 to stage n) with
			 * RTE_EVENT_DEV_PRIORITY_LOWEST to
			 * RTE_EVENT_DEV_PRIORITY_HIGHEST.
			 */
			uint8_t step = RTE_EVENT_DEV_PRIORITY_LOWEST /
					(nb_stages - 1);
			/* Higher prio for the queues closer to last stage */
			q_conf.priority = RTE_EVENT_DEV_PRIORITY_LOWEST -
					(step * stage_pos);
		}
		ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
		if (ret) {
			evt_err("failed to setup queue=%d", queue);
			return ret;
		}
	}

	ret = perf_event_dev_port_setup(test, opt, nb_stages /* stride */,
					nb_queues);
	if (ret)
		return ret;

	ret = evt_service_setup(opt->dev_id);
	if (ret) {
		evt_err("No service lcore found to run event dev.");
		return ret;
	}

	ret = rte_event_dev_start(opt->dev_id);
	if (ret) {
		evt_err("failed to start eventdev %d", opt->dev_id);
		return ret;
	}

	return 0;
}

static void
perf_queue_opt_dump(struct evt_options *opt)
{
	evt_dump_fwd_latency(opt);
	perf_opt_dump(opt, perf_queue_nb_event_queues(opt));
}

static int
perf_queue_opt_check(struct evt_options *opt)
{
	return perf_opt_check(opt, perf_queue_nb_event_queues(opt));
}

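/* Verify that the event device exposes enough queues and ports for the
 * requested option set.
 */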
static bool
perf_queue_capability_check(struct evt_options *opt)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (dev_info.max_event_queues < perf_queue_nb_event_queues(opt) ||
			dev_info.max_event_ports < perf_nb_event_ports(opt)) {
		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
			perf_queue_nb_event_queues(opt),
			dev_info.max_event_queues,
			perf_nb_event_ports(opt), dev_info.max_event_ports);
		return false;
	}

	return true;
}

static const struct evt_test_ops perf_queue = {
	.cap_check          = perf_queue_capability_check,
	.opt_check          = perf_queue_opt_check,
	.opt_dump           = perf_queue_opt_dump,
	.test_setup         = perf_test_setup,
	.mempool_setup      = perf_mempool_setup,
	.ethdev_setup       = perf_ethdev_setup,
	.eventdev_setup     = perf_queue_eventdev_setup,
	.launch_lcores      = perf_queue_launch_lcores,
	.eventdev_destroy   = perf_eventdev_destroy,
	.mempool_destroy    = perf_mempool_destroy,
	.ethdev_destroy     = perf_ethdev_destroy,
	.test_result        = perf_test_result,
	.test_destroy       = perf_test_destroy,
};

EVT_TEST_REGISTER(perf_queue);