/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium, Inc 2017.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "test_order_common.h"

int
order_test_result(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_order *t = evt_test_priv(test);

	return t->result;
}

static inline int
order_producer(void *arg)
{
	struct prod_data *p = arg;
	struct test_order *t = p->t;
	struct evt_options *opt = t->opt;
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	struct rte_mempool *pool = t->pool;
	const uint64_t nb_pkts = t->nb_pkts;
	uint32_t *producer_flow_seq = t->producer_flow_seq;
	const uint32_t nb_flows = t->nb_flows;
	uint64_t count = 0;
	struct rte_mbuf *m;
	struct rte_event ev;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d dev_id %d port=%d queue=%d\n",
			__func__, rte_lcore_id(), dev_id, port, p->queue_id);

	ev.event = 0;
	ev.op = RTE_EVENT_OP_NEW;
	ev.queue_id = p->queue_id;
	ev.sched_type = RTE_SCHED_TYPE_ORDERED;
	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	ev.event_type = RTE_EVENT_TYPE_CPU;
	ev.sub_event_type = 0; /* stage 0 */

	while (count < nb_pkts && t->err == false) {
		m = rte_pktmbuf_alloc(pool);
		if (m == NULL)
			continue;

		const uint32_t flow = (uintptr_t)m % nb_flows;
		/* Maintain seq number per flow */
		m->seqn = producer_flow_seq[flow]++;

		ev.flow_id = flow;
		ev.mbuf = m;

		while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
			if (t->err)
				break;
			rte_pause();
		}

		count++;
	}
	return 0;
}

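/*
 * Consumer-side counterpart of the seqn stamping above, as a rough
 * sketch (the real per-stage checks live with the workers in the
 * individual order_* tests; the helper name here is hypothetical):
 *
 *	static inline void
 *	order_check_flow_seq(struct test_order *t, struct rte_event *ev)
 *	{
 *		// events within one flow must arrive in producer order
 *		if (ev->mbuf->seqn != t->expected_flow_seq[ev->flow_id]) {
 *			evt_err("flow %d: expected seqn %d, got %d",
 *				ev->flow_id,
 *				t->expected_flow_seq[ev->flow_id],
 *				ev->mbuf->seqn);
 *			t->err = true;
 *		}
 *		t->expected_flow_seq[ev->flow_id]++;
 *		rte_atomic64_sub(&t->outstand_pkts, 1);
 *	}
 */
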
int
order_opt_check(struct evt_options *opt)
{
	/* 1 producer + N workers + 1 master */
	if (rte_lcore_count() < 3) {
		evt_err("test needs a minimum of 3 lcores");
		return -1;
	}

	/* Validate worker lcores */
	if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
		evt_err("worker lcores overlap with master lcore");
		return -1;
	}

	if (evt_nr_active_lcores(opt->plcores) == 0) {
		evt_err("missing the producer lcore");
		return -1;
	}

	if (evt_nr_active_lcores(opt->plcores) != 1) {
		evt_err("only one producer lcore can be selected");
		return -1;
	}

	int plcore = evt_get_first_active_lcore(opt->plcores);

	if (plcore < 0) {
		evt_err("failed to find active producer");
		return plcore;
	}

	if (evt_lcores_has_overlap(opt->wlcores, plcore)) {
		evt_err("worker lcores overlap with producer lcore");
		return -1;
	}
	if (evt_has_disabled_lcore(opt->wlcores)) {
		evt_err("one or more worker lcores are not enabled");
		return -1;
	}
	if (!evt_has_active_lcore(opt->wlcores)) {
		evt_err("at least one worker lcore is required");
		return -1;
	}

	/* Validate producer lcore */
	if (plcore == (int)rte_get_master_lcore()) {
		evt_err("producer lcore and master lcore should be different");
		return -1;
	}
	if (!rte_lcore_is_enabled(plcore)) {
		evt_err("producer lcore is not enabled");
		return -1;
	}

	/* Fixups */
	if (opt->nb_pkts == 0)
		opt->nb_pkts = INT64_MAX;

	return 0;
}

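/*
 * Example lcore layout that satisfies the checks above (illustrative;
 * flag spellings assume the test-eventdev command line of this era):
 *
 *	./dpdk-test-eventdev -l 0-4 -- --test=order_queue \
 *			--plcores=1 --wlcores=2-4
 *
 * lcore 0 acts as the master, lcore 1 as the single producer and
 * lcores 2-4 as workers; the three roles must not overlap.
 */
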
int
order_test_setup(struct evt_test *test, struct evt_options *opt)
{
	void *test_order;

	test_order = rte_zmalloc_socket(test->name, sizeof(struct test_order),
				RTE_CACHE_LINE_SIZE, opt->socket_id);
	if (test_order == NULL) {
		evt_err("failed to allocate test_order memory");
		goto nomem;
	}
	test->test_priv = test_order;

	struct test_order *t = evt_test_priv(test);

	t->producer_flow_seq = rte_zmalloc_socket("test_producer_flow_seq",
				sizeof(*t->producer_flow_seq) * opt->nb_flows,
				RTE_CACHE_LINE_SIZE, opt->socket_id);
	if (t->producer_flow_seq == NULL) {
		evt_err("failed to allocate t->producer_flow_seq memory");
		goto prod_nomem;
	}

	t->expected_flow_seq = rte_zmalloc_socket("test_expected_flow_seq",
				sizeof(*t->expected_flow_seq) * opt->nb_flows,
				RTE_CACHE_LINE_SIZE, opt->socket_id);
	if (t->expected_flow_seq == NULL) {
		evt_err("failed to allocate t->expected_flow_seq memory");
		goto exp_nomem;
	}
	rte_atomic64_set(&t->outstand_pkts, opt->nb_pkts);
	t->err = false;
	t->nb_pkts = opt->nb_pkts;
	t->nb_flows = opt->nb_flows;
	t->result = EVT_TEST_FAILED;
	t->opt = opt;
	return 0;

exp_nomem:
	rte_free(t->producer_flow_seq);
prod_nomem:
	rte_free(test->test_priv);
nomem:
	return -ENOMEM;
}

void
order_test_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_order *t = evt_test_priv(test);

	rte_free(t->expected_flow_seq);
	rte_free(t->producer_flow_seq);
	rte_free(test->test_priv);
}

int
order_mempool_setup(struct evt_test *test, struct evt_options *opt)
{
	struct test_order *t = evt_test_priv(test);

	t->pool = rte_pktmbuf_pool_create(test->name, opt->pool_sz,
					256 /* cache size */, 0,
					512 /* very small mbufs */,
					opt->socket_id);
	if (t->pool == NULL) {
		evt_err("failed to create mempool");
		return -ENOMEM;
	}

	return 0;
}

void
order_mempool_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_order *t = evt_test_priv(test);

	rte_mempool_free(t->pool);
}

void
order_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(test);

	rte_event_dev_stop(opt->dev_id);
	rte_event_dev_close(opt->dev_id);
}

void
order_opt_dump(struct evt_options *opt)
{
	evt_dump_producer_lcores(opt);
	evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
	evt_dump_worker_lcores(opt);
	evt_dump("nb_evdev_ports", "%d", order_nb_event_ports(opt));
}

int
order_launch_lcores(struct evt_test *test, struct evt_options *opt,
			int (*worker)(void *))
{
	int ret, lcore_id;
	struct test_order *t = evt_test_priv(test);

	int wkr_idx = 0;
	/* launch workers */
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (!(opt->wlcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(worker, &t->worker[wkr_idx],
					lcore_id);
		if (ret) {
			evt_err("failed to launch worker %d", lcore_id);
			return ret;
		}
		wkr_idx++;
	}

	/* launch producer */
	int plcore = evt_get_first_active_lcore(opt->plcores);

	ret = rte_eal_remote_launch(order_producer, &t->prod, plcore);
	if (ret) {
		evt_err("failed to launch order_producer %d", plcore);
		return ret;
	}

	uint64_t cycles = rte_get_timer_cycles();
	int64_t old_remaining = -1;

	while (t->err == false) {
		uint64_t new_cycles = rte_get_timer_cycles();
		int64_t remaining = rte_atomic64_read(&t->outstand_pkts);

		if (remaining <= 0) {
			t->result = EVT_TEST_SUCCESS;
			break;
		}

		if (new_cycles - cycles > rte_get_timer_hz() * 1) {
			printf(CLGRN"\r%"PRId64""CLNRM, remaining);
			fflush(stdout);
			if (old_remaining == remaining) {
				rte_event_dev_dump(opt->dev_id, stdout);
				evt_err("no events scheduled during the last second, possible deadlock");
				t->err = true;
				rte_smp_wmb();
				break;
			}
			old_remaining = remaining;
			cycles = new_cycles;
		}
	}
	printf("\r");

	return 0;
}

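/*
 * Shape of the worker callback handed to this launcher, as a minimal
 * sketch (assumes single-event dequeue; the real workers live in the
 * individual order_* tests):
 *
 *	static int
 *	worker(void *arg)
 *	{
 *		struct worker_data *w = arg;
 *		struct test_order *t = w->t;
 *		struct rte_event ev;
 *
 *		while (t->err == false &&
 *		       rte_atomic64_read(&t->outstand_pkts) > 0) {
 *			if (!rte_event_dequeue_burst(w->dev_id, w->port_id,
 *						&ev, 1, 0)) {
 *				rte_pause();
 *				continue;
 *			}
 *			// validate ev.mbuf->seqn, then forward the event to
 *			// the next stage or free the mbuf at the last stage
 *		}
 *		return 0;
 *	}
 */
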
int
order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
				uint8_t nb_workers, uint8_t nb_queues)
{
	int ret;
	uint8_t port;
	struct test_order *t = evt_test_priv(test);

	/* port configuration */
	const struct rte_event_port_conf wkr_p_conf = {
			.dequeue_depth = opt->wkr_deq_dep,
			.enqueue_depth = 64,
			.new_event_threshold = 4096,
	};

	/* setup one port per worker, linking to all queues */
	for (port = 0; port < nb_workers; port++) {
		struct worker_data *w = &t->worker[port];

		w->dev_id = opt->dev_id;
		w->port_id = port;
		w->t = t;

		ret = rte_event_port_setup(opt->dev_id, port, &wkr_p_conf);
		if (ret) {
			evt_err("failed to setup port %d", port);
			return ret;
		}

		ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
		if (ret != nb_queues) {
			evt_err("failed to link all queues to port %d", port);
			return -EINVAL;
		}
	}

	/* port for producer, no links */
	const struct rte_event_port_conf prod_conf = {
			.dequeue_depth = 8,
			.enqueue_depth = 32,
			.new_event_threshold = 1200,
	};
	struct prod_data *p = &t->prod;

	p->dev_id = opt->dev_id;
	p->port_id = port; /* last port */
	p->queue_id = 0;
	p->t = t;

	ret = rte_event_port_setup(opt->dev_id, port, &prod_conf);
	if (ret) {
		evt_err("failed to setup producer port %d", port);
		return ret;
	}

	return ret;
}

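/*
 * Resulting port layout for N = nb_workers:
 *
 *   ports 0 .. N-1 : worker ports, each linked to all nb_queues queues
 *   port N         : producer port, enqueue-only, no queue links
 */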