/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <stdalign.h>

#include "test_order_common.h"

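/* Return the verdict recorded for this run: EVT_TEST_SUCCESS once all
 * packets have been processed, EVT_TEST_FAILED otherwise.
 */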
int
order_test_result(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_order *t = evt_test_priv(test);

	return t->result;
}

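/* Synthetic producer loop: running on a dedicated lcore, it allocates one
 * mbuf per event, derives the flow id from the mbuf address, stamps a
 * per-flow sequence number and enqueues the event as RTE_EVENT_OP_NEW on
 * an ORDERED queue, until nb_pkts events are produced or an error is
 * flagged.
 */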
static inline int
order_producer(void *arg)
{
	struct prod_data *p = arg;
	struct test_order *t = p->t;
	struct evt_options *opt = t->opt;
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	struct rte_mempool *pool = t->pool;
	const uint64_t nb_pkts = t->nb_pkts;
	uint32_t *producer_flow_seq = t->producer_flow_seq;
	const uint32_t nb_flows = t->nb_flows;
	uint64_t count = 0;
	struct rte_mbuf *m;
	struct rte_event ev;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d dev_id %d port=%d queue=%d\n",
			 __func__, rte_lcore_id(), dev_id, port, p->queue_id);

	ev.event = 0;
	ev.op = RTE_EVENT_OP_NEW;
	ev.queue_id = p->queue_id;
	ev.sched_type = RTE_SCHED_TYPE_ORDERED;
	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	ev.event_type = RTE_EVENT_TYPE_CPU;
	ev.sub_event_type = 0; /* stage 0 */

	while (count < nb_pkts && t->err == false) {
		m = rte_pktmbuf_alloc(pool);
		if (m == NULL)
			continue; /* pool exhausted; retry until workers free mbufs */

		/* Derive the flow from the mbuf address and maintain a
		 * sequence number per flow.
		 */
		const flow_id_t flow = (uintptr_t)m % nb_flows;
		*order_mbuf_seqn(t, m) = producer_flow_seq[flow]++;
		order_flow_id_save(t, flow, m, &ev);

		while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
			if (t->err)
				break;
			rte_pause();
		}

		count++;
	}
	return 0;
}

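/* Validate the command-line options for the order_* tests: a synthetic
 * producer is mandatory, and the lcore layout must provide exactly one
 * producer lcore plus at least one worker lcore, all distinct from the
 * main lcore.
 */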
int
order_opt_check(struct evt_options *opt)
{
	if (opt->prod_type != EVT_PROD_TYPE_SYNT) {
		evt_err("invalid producer type '%s', expected '%s'",
			evt_prod_id_to_name(opt->prod_type),
			evt_prod_id_to_name(EVT_PROD_TYPE_SYNT));
		return -1;
	}

	/* 1 producer + N workers + main */
	if (rte_lcore_count() < 3) {
		evt_err("test needs a minimum of 3 lcores");
		return -1;
	}

	/* Validate worker lcores */
	if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) {
		evt_err("worker lcores overlap with the main lcore");
		return -1;
	}

	if (evt_nr_active_lcores(opt->plcores) == 0) {
		evt_err("missing the producer lcore");
		return -1;
	}

	if (evt_nr_active_lcores(opt->plcores) != 1) {
		evt_err("exactly one producer lcore must be selected");
		return -1;
	}

	int plcore = evt_get_first_active_lcore(opt->plcores);

	if (plcore < 0) {
		evt_err("failed to find an active producer lcore");
		return plcore;
	}

	if (evt_lcores_has_overlap(opt->wlcores, plcore)) {
		evt_err("worker lcores overlap with the producer lcore");
		return -1;
	}
	if (evt_has_disabled_lcore(opt->wlcores)) {
		evt_err("one or more worker lcores are not enabled");
		return -1;
	}
	if (!evt_has_active_lcore(opt->wlcores)) {
		evt_err("at least one worker lcore is required");
		return -1;
	}

	/* Validate producer lcore */
	if (plcore == (int)rte_get_main_lcore()) {
		evt_err("producer lcore and main lcore must be different");
		return -1;
	}
	if (!rte_lcore_is_enabled(plcore)) {
		evt_err("producer lcore is not enabled");
		return -1;
	}

	/* Fixups */
	if (opt->nb_pkts == 0)
		opt->nb_pkts = INT64_MAX;

	return 0;
}

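/* Allocate the per-test state and register two dynamic mbuf fields: one
 * carrying the flow id and one carrying the per-flow sequence number that
 * the workers check against the expected sequence to detect ordering
 * violations.
 */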
int
order_test_setup(struct evt_test *test, struct evt_options *opt)
{
	void *test_order;
	struct test_order *t;
	static const struct rte_mbuf_dynfield flow_id_dynfield_desc = {
		.name = "test_event_dynfield_flow_id",
		.size = sizeof(flow_id_t),
		.align = alignof(flow_id_t),
	};
	static const struct rte_mbuf_dynfield seqn_dynfield_desc = {
		.name = "test_event_dynfield_seqn",
		.size = sizeof(seqn_t),
		.align = alignof(seqn_t),
	};

	test_order = rte_zmalloc_socket(test->name, sizeof(struct test_order),
				RTE_CACHE_LINE_SIZE, opt->socket_id);
	if (test_order == NULL) {
		evt_err("failed to allocate test_order memory");
		goto nomem;
	}
	test->test_priv = test_order;
	t = evt_test_priv(test);

	t->flow_id_dynfield_offset =
		rte_mbuf_dynfield_register(&flow_id_dynfield_desc);
	if (t->flow_id_dynfield_offset < 0) {
		evt_err("failed to register flow_id mbuf field");
		rte_free(test->test_priv);
		return -rte_errno;
	}

	t->seqn_dynfield_offset =
		rte_mbuf_dynfield_register(&seqn_dynfield_desc);
	if (t->seqn_dynfield_offset < 0) {
		evt_err("failed to register seqn mbuf field");
		rte_free(test->test_priv);
		return -rte_errno;
	}

	t->producer_flow_seq = rte_zmalloc_socket("test_producer_flow_seq",
				sizeof(*t->producer_flow_seq) * opt->nb_flows,
				RTE_CACHE_LINE_SIZE, opt->socket_id);
	if (t->producer_flow_seq == NULL) {
		evt_err("failed to allocate t->producer_flow_seq memory");
		goto prod_nomem;
	}

	t->expected_flow_seq = rte_zmalloc_socket("test_expected_flow_seq",
				sizeof(*t->expected_flow_seq) * opt->nb_flows,
				RTE_CACHE_LINE_SIZE, opt->socket_id);
	if (t->expected_flow_seq == NULL) {
		evt_err("failed to allocate t->expected_flow_seq memory");
		goto exp_nomem;
	}
	rte_atomic_store_explicit(&t->outstand_pkts, opt->nb_pkts,
				rte_memory_order_relaxed);
	t->err = false;
	t->nb_pkts = opt->nb_pkts;
	t->nb_flows = opt->nb_flows;
	t->result = EVT_TEST_FAILED;
	t->opt = opt;
	return 0;

exp_nomem:
	rte_free(t->producer_flow_seq);
prod_nomem:
	rte_free(test->test_priv);
nomem:
	return -ENOMEM;
}

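/* Release everything allocated by order_test_setup(). */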
void
order_test_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_order *t = evt_test_priv(test);

	rte_free(t->expected_flow_seq);
	rte_free(t->producer_flow_seq);
	rte_free(test->test_priv);
}

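/* The test never touches the packet payload, only the dynamic fields in
 * the mbuf, so a small 512-byte data room is sufficient.
 */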
int
order_mempool_setup(struct evt_test *test, struct evt_options *opt)
{
	struct test_order *t = evt_test_priv(test);

	t->pool = rte_pktmbuf_pool_create(test->name, opt->pool_sz,
					256 /* cache size */, 0,
					512, /* use very small mbufs */
					opt->socket_id);
	if (t->pool == NULL) {
		evt_err("failed to create mempool");
		return -ENOMEM;
	}

	return 0;
}

void
order_mempool_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_order *t = evt_test_priv(test);

	rte_mempool_free(t->pool);
}

void
order_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(test);

	rte_event_dev_stop(opt->dev_id);
	rte_event_dev_close(opt->dev_id);
}

void
order_opt_dump(struct evt_options *opt)
{
	evt_dump_producer_lcores(opt);
	evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
	evt_dump_worker_lcores(opt);
	evt_dump("nb_evdev_ports", "%d", order_nb_event_ports(opt));
}

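/* Launch one worker per selected lcore and the single producer, then poll
 * the outstanding-packet counter from the main lcore. If the counter stops
 * moving for roughly a second, dump the eventdev state and flag the run as
 * deadlocked.
 */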
int
order_launch_lcores(struct evt_test *test, struct evt_options *opt,
			int (*worker)(void *))
{
	int ret, lcore_id;
	struct test_order *t = evt_test_priv(test);

	int wkr_idx = 0;
	/* launch workers */
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (!(opt->wlcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(worker, &t->worker[wkr_idx],
					lcore_id);
		if (ret) {
			evt_err("failed to launch worker %d", lcore_id);
			return ret;
		}
		wkr_idx++;
	}

	/* launch producer */
	int plcore = evt_get_first_active_lcore(opt->plcores);

	ret = rte_eal_remote_launch(order_producer, &t->prod, plcore);
	if (ret) {
		evt_err("failed to launch order_producer %d", plcore);
		return ret;
	}

	uint64_t cycles = rte_get_timer_cycles();
	int64_t old_remaining = -1;

	while (t->err == false) {
		uint64_t new_cycles = rte_get_timer_cycles();
		int64_t remaining = rte_atomic_load_explicit(&t->outstand_pkts,
				rte_memory_order_relaxed);

		if (remaining <= 0) {
			t->result = EVT_TEST_SUCCESS;
			break;
		}

		/* Print progress once per second; if no event has been
		 * scheduled since the last check, assume a deadlock.
		 */
		if (new_cycles - cycles > rte_get_timer_hz()) {
			printf(CLGRN"\r%"PRId64""CLNRM, remaining);
			fflush(stdout);
			if (old_remaining == remaining) {
				rte_event_dev_dump(opt->dev_id, stdout);
				evt_err("no events scheduled for a second, deadlock");
				t->err = true;
				break;
			}
			old_remaining = remaining;
			cycles = new_cycles;
		}
	}
	printf("\r");

	return 0;
}

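/* Create nb_workers + 1 event ports: ports [0, nb_workers) are worker
 * ports linked to every queue, and the last port is the producer port,
 * which is left unlinked since the producer only enqueues NEW events.
 */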
int
order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
				uint8_t nb_workers, uint8_t nb_queues)
{
	int ret;
	uint8_t port;
	struct test_order *t = evt_test_priv(test);
	struct rte_event_dev_info dev_info;

	ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (ret) {
		evt_err("failed to get eventdev info %d", opt->dev_id);
		return ret;
	}

	/* Clamp the worker dequeue depth to what the device supports */
	if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
		opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;

	/* port configuration */
	const struct rte_event_port_conf p_conf = {
			.dequeue_depth = opt->wkr_deq_dep,
			.enqueue_depth = dev_info.max_event_port_dequeue_depth,
			.new_event_threshold = dev_info.max_num_events,
	};

	/* setup one port per worker, linking to all queues */
	for (port = 0; port < nb_workers; port++) {
		struct worker_data *w = &t->worker[port];

		w->dev_id = opt->dev_id;
		w->port_id = port;
		w->t = t;

		ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
		if (ret) {
			evt_err("failed to setup port %d", port);
			return ret;
		}

		/* NULL queue list with zero count links all queues */
		ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
		if (ret != nb_queues) {
			evt_err("failed to link all queues to port %d", port);
			return -EINVAL;
		}
	}
	struct prod_data *p = &t->prod;

	p->dev_id = opt->dev_id;
	p->port_id = port; /* last port */
	p->queue_id = 0;
	p->t = t;

	ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
	if (ret) {
		evt_err("failed to setup producer port %d", port);
		return ret;
	}

	return ret;
}