/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "test_order_common.h"

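/* Return the test result recorded by the launcher thread. */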
int
order_test_result(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_order *t = evt_test_priv(test);

	return t->result;
}

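/*
 * Producer loop: allocate an mbuf per event, derive the flow id from the
 * mbuf address, stamp a per-flow sequence number in mbuf->seqn and enqueue
 * the event as a NEW, ORDERED event on stage 0.
 */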
static inline int
order_producer(void *arg)
{
	struct prod_data *p = arg;
	struct test_order *t = p->t;
	struct evt_options *opt = t->opt;
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	struct rte_mempool *pool = t->pool;
	const uint64_t nb_pkts = t->nb_pkts;
	uint32_t *producer_flow_seq = t->producer_flow_seq;
	const uint32_t nb_flows = t->nb_flows;
	uint64_t count = 0;
	struct rte_mbuf *m;
	struct rte_event ev;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d dev_id %d port=%d queue=%d\n",
			 __func__, rte_lcore_id(), dev_id, port, p->queue_id);

	ev.event = 0;
	ev.op = RTE_EVENT_OP_NEW;
	ev.queue_id = p->queue_id;
	ev.sched_type = RTE_SCHED_TYPE_ORDERED;
	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	ev.event_type = RTE_EVENT_TYPE_CPU;
	ev.sub_event_type = 0; /* stage 0 */

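	/*
	 * Produce nb_pkts events unless a worker flags an error; retry the
	 * enqueue with rte_pause() on backpressure.
	 */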
	while (count < nb_pkts && t->err == false) {
		m = rte_pktmbuf_alloc(pool);
		if (m == NULL)
			continue;

		const uint32_t flow = (uintptr_t)m % nb_flows;
		/* Maintain seq number per flow */
		m->seqn = producer_flow_seq[flow]++;

		ev.flow_id = flow;
		ev.mbuf = m;

		while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
			if (t->err)
				break;
			rte_pause();
		}

		count++;
	}
	return 0;
}

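/*
 * Validate the lcore layout: exactly one producer lcore plus at least one
 * worker lcore, none of them overlapping each other or the master lcore.
 */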
int
order_opt_check(struct evt_options *opt)
{
	/* 1 producer + N workers + 1 master */
	if (rte_lcore_count() < 3) {
		evt_err("test needs a minimum of 3 lcores");
		return -1;
	}

	/* Validate worker lcores */
	if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
		evt_err("worker lcores overlap with master lcore");
		return -1;
	}

	if (evt_nr_active_lcores(opt->plcores) == 0) {
		evt_err("missing the producer lcore");
		return -1;
	}

	if (evt_nr_active_lcores(opt->plcores) != 1) {
		evt_err("exactly one producer lcore must be selected");
		return -1;
	}

	int plcore = evt_get_first_active_lcore(opt->plcores);

	if (plcore < 0) {
		evt_err("failed to find an active producer lcore");
		return plcore;
	}

	if (evt_lcores_has_overlap(opt->wlcores, plcore)) {
		evt_err("worker lcores overlap with producer lcore");
		return -1;
	}
	if (evt_has_disabled_lcore(opt->wlcores)) {
		evt_err("one or more worker lcores are not enabled");
		return -1;
	}
	if (!evt_has_active_lcore(opt->wlcores)) {
		evt_err("at least one worker lcore is required");
		return -1;
	}

	/* Validate producer lcore */
	if (plcore == (int)rte_get_master_lcore()) {
		evt_err("producer lcore and master lcore should be different");
		return -1;
	}
	if (!rte_lcore_is_enabled(plcore)) {
		evt_err("producer lcore is not enabled");
		return -1;
	}

	/* Fixups */
	if (opt->nb_pkts == 0)
		opt->nb_pkts = INT64_MAX;

	return 0;
}

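/*
 * Allocate the test state and the per-flow sequence arrays used by the
 * producer (producer_flow_seq) and the workers (expected_flow_seq).
 */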
int
order_test_setup(struct evt_test *test, struct evt_options *opt)
{
	void *test_order;

	test_order = rte_zmalloc_socket(test->name, sizeof(struct test_order),
				RTE_CACHE_LINE_SIZE, opt->socket_id);
	if (test_order == NULL) {
		evt_err("failed to allocate test_order memory");
		goto nomem;
	}
	test->test_priv = test_order;

	struct test_order *t = evt_test_priv(test);

	t->producer_flow_seq = rte_zmalloc_socket("test_producer_flow_seq",
				 sizeof(*t->producer_flow_seq) * opt->nb_flows,
				RTE_CACHE_LINE_SIZE, opt->socket_id);

	if (t->producer_flow_seq == NULL) {
		evt_err("failed to allocate t->producer_flow_seq memory");
		goto prod_nomem;
	}

	t->expected_flow_seq = rte_zmalloc_socket("test_expected_flow_seq",
				 sizeof(*t->expected_flow_seq) * opt->nb_flows,
				RTE_CACHE_LINE_SIZE, opt->socket_id);

	if (t->expected_flow_seq == NULL) {
		evt_err("failed to allocate t->expected_flow_seq memory");
		goto exp_nomem;
	}
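	/* Initialize the test state; outstand_pkts counts packets in flight. */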
	rte_atomic64_set(&t->outstand_pkts, opt->nb_pkts);
	t->err = false;
	t->nb_pkts = opt->nb_pkts;
	t->nb_flows = opt->nb_flows;
	t->result = EVT_TEST_FAILED;
	t->opt = opt;
	return 0;

exp_nomem:
	rte_free(t->producer_flow_seq);
prod_nomem:
	rte_free(test->test_priv);
nomem:
	return -ENOMEM;
}

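/* Release the memory allocated by order_test_setup(). */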
void
order_test_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_order *t = evt_test_priv(test);

	rte_free(t->expected_flow_seq);
	rte_free(t->producer_flow_seq);
	rte_free(test->test_priv);
}

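/* Create the packet mbuf pool shared by the producer and the workers. */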
int
order_mempool_setup(struct evt_test *test, struct evt_options *opt)
{
	struct test_order *t = evt_test_priv(test);

	t->pool = rte_pktmbuf_pool_create(test->name, opt->pool_sz,
					256 /* Cache */, 0,
					512, /* Use very small mbufs */
					opt->socket_id);
	if (t->pool == NULL) {
		evt_err("failed to create mempool");
		return -ENOMEM;
	}

	return 0;
}

void
order_mempool_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_order *t = evt_test_priv(test);

	rte_mempool_free(t->pool);
}

void
order_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(test);

	rte_event_dev_stop(opt->dev_id);
	rte_event_dev_close(opt->dev_id);
}

void
order_opt_dump(struct evt_options *opt)
{
	evt_dump_producer_lcores(opt);
	evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
	evt_dump_worker_lcores(opt);
	evt_dump("nb_evdev_ports", "%d", order_nb_event_ports(opt));
}

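/*
 * Launch one worker per active worker lcore and the producer on its lcore,
 * then monitor the outstanding packet count until completion or deadlock.
 */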
int
order_launch_lcores(struct evt_test *test, struct evt_options *opt,
			int (*worker)(void *))
{
	int ret, lcore_id;
	struct test_order *t = evt_test_priv(test);

	int wkr_idx = 0;
	/* launch workers */
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (!(opt->wlcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(worker, &t->worker[wkr_idx],
					 lcore_id);
		if (ret) {
			evt_err("failed to launch worker %d", lcore_id);
			return ret;
		}
		wkr_idx++;
	}

	/* launch producer */
	int plcore = evt_get_first_active_lcore(opt->plcores);

	ret = rte_eal_remote_launch(order_producer, &t->prod, plcore);
	if (ret) {
		evt_err("failed to launch order_producer %d", plcore);
		return ret;
	}
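
	/*
	 * Progress monitor: the test succeeds once all packets are accounted
	 * for; if the outstanding count does not change for a second, assume
	 * the scheduler is deadlocked and abort.
	 */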
	uint64_t cycles = rte_get_timer_cycles();
	int64_t old_remaining = -1;

	while (t->err == false) {
		uint64_t new_cycles = rte_get_timer_cycles();
		int64_t remaining = rte_atomic64_read(&t->outstand_pkts);

		if (remaining <= 0) {
			t->result = EVT_TEST_SUCCESS;
			break;
		}

		if (new_cycles - cycles > rte_get_timer_hz() * 1) {
			printf(CLGRN"\r%"PRId64""CLNRM, remaining);
			fflush(stdout);
			if (old_remaining == remaining) {
				rte_event_dev_dump(opt->dev_id, stdout);
				evt_err("no schedules for one second, deadlock");
				t->err = true;
				rte_smp_wmb();
				break;
			}
			old_remaining = remaining;
			cycles = new_cycles;
		}
	}
	printf("\r");

	return 0;
}

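/*
 * Set up one event port per worker, each linked to all queues, plus one
 * unlinked port (the last port) for the producer.
 */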
int
order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
				uint8_t nb_workers, uint8_t nb_queues)
{
	int ret;
	uint8_t port;
	struct test_order *t = evt_test_priv(test);

	/* port configuration */
	const struct rte_event_port_conf wkr_p_conf = {
			.dequeue_depth = opt->wkr_deq_dep,
			.enqueue_depth = 64,
			.new_event_threshold = 4096,
	};

	/* setup one port per worker, linking to all queues */
	for (port = 0; port < nb_workers; port++) {
		struct worker_data *w = &t->worker[port];

		w->dev_id = opt->dev_id;
		w->port_id = port;
		w->t = t;

		ret = rte_event_port_setup(opt->dev_id, port, &wkr_p_conf);
		if (ret) {
			evt_err("failed to setup port %d", port);
			return ret;
		}

		ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
		if (ret != nb_queues) {
			evt_err("failed to link all queues to port %d", port);
			return -EINVAL;
		}
	}
	/* port for producer, no links */
	const struct rte_event_port_conf prod_conf = {
			.dequeue_depth = 8,
			.enqueue_depth = 32,
			.new_event_threshold = 1200,
	};
	struct prod_data *p = &t->prod;

	p->dev_id = opt->dev_id;
	p->port_id = port; /* last port */
	p->queue_id = 0;
	p->t = t;

	ret = rte_event_port_setup(opt->dev_id, port, &prod_conf);
	if (ret) {
		evt_err("failed to setup producer port %d", port);
		return ret;
	}

	return ret;
}