xref: /dpdk/app/test-eventdev/test_perf_atq.c (revision 1eb10ad8db8c2cb68972e1c3a94bf9a341adb8cc)
1e6050243SJerin Jacob /*
2e6050243SJerin Jacob  *   BSD LICENSE
3e6050243SJerin Jacob  *
4e6050243SJerin Jacob  *   Copyright (C) Cavium 2017.
5e6050243SJerin Jacob  *
6e6050243SJerin Jacob  *   Redistribution and use in source and binary forms, with or without
7e6050243SJerin Jacob  *   modification, are permitted provided that the following conditions
8e6050243SJerin Jacob  *   are met:
9e6050243SJerin Jacob  *
10e6050243SJerin Jacob  *     * Redistributions of source code must retain the above copyright
11e6050243SJerin Jacob  *       notice, this list of conditions and the following disclaimer.
12e6050243SJerin Jacob  *     * Redistributions in binary form must reproduce the above copyright
13e6050243SJerin Jacob  *       notice, this list of conditions and the following disclaimer in
14e6050243SJerin Jacob  *       the documentation and/or other materials provided with the
15e6050243SJerin Jacob  *       distribution.
16e6050243SJerin Jacob  *     * Neither the name of Cavium nor the names of its
17e6050243SJerin Jacob  *       contributors may be used to endorse or promote products derived
18e6050243SJerin Jacob  *       from this software without specific prior written permission.
19e6050243SJerin Jacob  *
20e6050243SJerin Jacob  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21e6050243SJerin Jacob  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22e6050243SJerin Jacob  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23e6050243SJerin Jacob  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24e6050243SJerin Jacob  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25e6050243SJerin Jacob  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26e6050243SJerin Jacob  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27e6050243SJerin Jacob  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28e6050243SJerin Jacob  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29e6050243SJerin Jacob  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30e6050243SJerin Jacob  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31e6050243SJerin Jacob  */
32e6050243SJerin Jacob 
33e6050243SJerin Jacob #include "test_perf_common.h"
34e6050243SJerin Jacob 
35e6050243SJerin Jacob /* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */
36e6050243SJerin Jacob 
37e6050243SJerin Jacob static inline int
38e6050243SJerin Jacob atq_nb_event_queues(struct evt_options *opt)
39e6050243SJerin Jacob {
40e6050243SJerin Jacob 	/* nb_queues = number of producers */
41e6050243SJerin Jacob 	return evt_nr_active_lcores(opt->plcores);
42e6050243SJerin Jacob }
43e6050243SJerin Jacob 
44*1eb10ad8SJerin Jacob static inline __attribute__((always_inline)) void
45*1eb10ad8SJerin Jacob atq_mark_fwd_latency(struct rte_event *const ev)
46*1eb10ad8SJerin Jacob {
47*1eb10ad8SJerin Jacob 	if (unlikely(ev->sub_event_type == 0)) {
48*1eb10ad8SJerin Jacob 		struct perf_elt *const m = ev->event_ptr;
49*1eb10ad8SJerin Jacob 
50*1eb10ad8SJerin Jacob 		m->timestamp = rte_get_timer_cycles();
51*1eb10ad8SJerin Jacob 	}
52*1eb10ad8SJerin Jacob }
53*1eb10ad8SJerin Jacob 
54*1eb10ad8SJerin Jacob static inline __attribute__((always_inline)) void
55*1eb10ad8SJerin Jacob atq_fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
56*1eb10ad8SJerin Jacob 		const uint8_t nb_stages)
57*1eb10ad8SJerin Jacob {
58*1eb10ad8SJerin Jacob 	ev->sub_event_type++;
59*1eb10ad8SJerin Jacob 	ev->sched_type = sched_type_list[ev->sub_event_type % nb_stages];
60*1eb10ad8SJerin Jacob 	ev->op = RTE_EVENT_OP_FORWARD;
61*1eb10ad8SJerin Jacob 	ev->event_type = RTE_EVENT_TYPE_CPU;
62*1eb10ad8SJerin Jacob }
63*1eb10ad8SJerin Jacob 
64*1eb10ad8SJerin Jacob static int
65*1eb10ad8SJerin Jacob perf_atq_worker(void *arg, const int enable_fwd_latency)
66*1eb10ad8SJerin Jacob {
67*1eb10ad8SJerin Jacob 	PERF_WORKER_INIT;
68*1eb10ad8SJerin Jacob 	struct rte_event ev;
69*1eb10ad8SJerin Jacob 
70*1eb10ad8SJerin Jacob 	while (t->done == false) {
71*1eb10ad8SJerin Jacob 		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
72*1eb10ad8SJerin Jacob 
73*1eb10ad8SJerin Jacob 		if (enable_fwd_latency)
74*1eb10ad8SJerin Jacob 			rte_prefetch0(ev.event_ptr);
75*1eb10ad8SJerin Jacob 
76*1eb10ad8SJerin Jacob 		if (!event) {
77*1eb10ad8SJerin Jacob 			rte_pause();
78*1eb10ad8SJerin Jacob 			continue;
79*1eb10ad8SJerin Jacob 		}
80*1eb10ad8SJerin Jacob 
81*1eb10ad8SJerin Jacob 		if (enable_fwd_latency)
82*1eb10ad8SJerin Jacob 		/* first stage in pipeline, mark ts to compute fwd latency */
83*1eb10ad8SJerin Jacob 			atq_mark_fwd_latency(&ev);
84*1eb10ad8SJerin Jacob 
85*1eb10ad8SJerin Jacob 		/* last stage in pipeline */
86*1eb10ad8SJerin Jacob 		if (unlikely((ev.sub_event_type % nb_stages) == laststage)) {
87*1eb10ad8SJerin Jacob 			if (enable_fwd_latency)
88*1eb10ad8SJerin Jacob 				cnt = perf_process_last_stage_latency(pool,
89*1eb10ad8SJerin Jacob 					&ev, w, bufs, sz, cnt);
90*1eb10ad8SJerin Jacob 			else
91*1eb10ad8SJerin Jacob 				cnt = perf_process_last_stage(pool, &ev, w,
92*1eb10ad8SJerin Jacob 					 bufs, sz, cnt);
93*1eb10ad8SJerin Jacob 		} else {
94*1eb10ad8SJerin Jacob 			atq_fwd_event(&ev, sched_type_list, nb_stages);
95*1eb10ad8SJerin Jacob 			while (rte_event_enqueue_burst(dev, port, &ev, 1) != 1)
96*1eb10ad8SJerin Jacob 				rte_pause();
97*1eb10ad8SJerin Jacob 		}
98*1eb10ad8SJerin Jacob 	}
99*1eb10ad8SJerin Jacob 	return 0;
100*1eb10ad8SJerin Jacob }
101*1eb10ad8SJerin Jacob 
/*
 * Burst worker loop for eventdevs with burst dequeue/enqueue capability.
 * Dequeues up to BURST_SIZE events, processes each (timestamp on first
 * stage, account on last stage, forward otherwise) and re-enqueues the
 * whole burst, retrying until fully accepted. Runs until t->done is set.
 * Hidden locals (t, dev, port, w, bufs, sz, cnt, pool, laststage,
 * nb_stages, sched_type_list) come from PERF_WORKER_INIT.
 */
static int
perf_atq_worker_burst(void *arg, const int enable_fwd_latency)
{
	PERF_WORKER_INIT;
	uint16_t i;
	/* +1 to avoid prefetch out of array check */
	struct rte_event ev[BURST_SIZE + 1];

	while (t->done == false) {
		uint16_t const nb_rx = rte_event_dequeue_burst(dev, port, ev,
				BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			if (enable_fwd_latency) {
				/* prefetch the NEXT event's user area; for the
				 * final event this touches the spare
				 * ev[BURST_SIZE] slot (see +1 sizing above)
				 */
				rte_prefetch0(ev[i+1].event_ptr);
				/* first stage in pipeline.
				 * mark time stamp to compute fwd latency
				 */
				atq_mark_fwd_latency(&ev[i]);
			}
			/* last stage in pipeline */
			if (unlikely((ev[i].sub_event_type % nb_stages)
						== laststage)) {
				if (enable_fwd_latency)
					cnt = perf_process_last_stage_latency(
						pool, &ev[i], w, bufs, sz, cnt);
				else
					cnt = perf_process_last_stage(pool,
						&ev[i], w, bufs, sz, cnt);

				/* consumed: release instead of forwarding */
				ev[i].op = RTE_EVENT_OP_RELEASE;
			} else {
				atq_fwd_event(&ev[i], sched_type_list,
						nb_stages);
			}
		}

		uint16_t enq;

		/* retry until the whole burst (forwards + releases) is
		 * accepted by the device
		 */
		enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
		while (enq < nb_rx) {
			enq += rte_event_enqueue_burst(dev, port,
							ev + enq, nb_rx - enq);
		}
	}
	return 0;
}
154*1eb10ad8SJerin Jacob 
155*1eb10ad8SJerin Jacob static int
156*1eb10ad8SJerin Jacob worker_wrapper(void *arg)
157*1eb10ad8SJerin Jacob {
158*1eb10ad8SJerin Jacob 	struct worker_data *w  = arg;
159*1eb10ad8SJerin Jacob 	struct evt_options *opt = w->t->opt;
160*1eb10ad8SJerin Jacob 
161*1eb10ad8SJerin Jacob 	const bool burst = evt_has_burst_mode(w->dev_id);
162*1eb10ad8SJerin Jacob 	const int fwd_latency = opt->fwd_latency;
163*1eb10ad8SJerin Jacob 
164*1eb10ad8SJerin Jacob 	/* allow compiler to optimize */
165*1eb10ad8SJerin Jacob 	if (!burst && !fwd_latency)
166*1eb10ad8SJerin Jacob 		return perf_atq_worker(arg, 0);
167*1eb10ad8SJerin Jacob 	else if (!burst && fwd_latency)
168*1eb10ad8SJerin Jacob 		return perf_atq_worker(arg, 1);
169*1eb10ad8SJerin Jacob 	else if (burst && !fwd_latency)
170*1eb10ad8SJerin Jacob 		return perf_atq_worker_burst(arg, 0);
171*1eb10ad8SJerin Jacob 	else if (burst && fwd_latency)
172*1eb10ad8SJerin Jacob 		return perf_atq_worker_burst(arg, 1);
173*1eb10ad8SJerin Jacob 
174*1eb10ad8SJerin Jacob 	rte_panic("invalid worker\n");
175*1eb10ad8SJerin Jacob }
176*1eb10ad8SJerin Jacob 
/* Launch one worker_wrapper per worker lcore via the common perf harness. */
static int
perf_atq_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
	return perf_launch_lcores(test, opt, worker_wrapper);
}
182*1eb10ad8SJerin Jacob 
183e6050243SJerin Jacob static int
184e6050243SJerin Jacob perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
185e6050243SJerin Jacob {
186e6050243SJerin Jacob 	int ret;
187e6050243SJerin Jacob 	uint8_t queue;
188e6050243SJerin Jacob 
189e6050243SJerin Jacob 	const struct rte_event_dev_config config = {
190e6050243SJerin Jacob 			.nb_event_queues = atq_nb_event_queues(opt),
191e6050243SJerin Jacob 			.nb_event_ports = perf_nb_event_ports(opt),
192e6050243SJerin Jacob 			.nb_events_limit  = 4096,
193e6050243SJerin Jacob 			.nb_event_queue_flows = opt->nb_flows,
194e6050243SJerin Jacob 			.nb_event_port_dequeue_depth = 128,
195e6050243SJerin Jacob 			.nb_event_port_enqueue_depth = 128,
196e6050243SJerin Jacob 	};
197e6050243SJerin Jacob 
198e6050243SJerin Jacob 	ret = rte_event_dev_configure(opt->dev_id, &config);
199e6050243SJerin Jacob 	if (ret) {
200e6050243SJerin Jacob 		evt_err("failed to configure eventdev %d", opt->dev_id);
201e6050243SJerin Jacob 		return ret;
202e6050243SJerin Jacob 	}
203e6050243SJerin Jacob 
204e6050243SJerin Jacob 	struct rte_event_queue_conf q_conf = {
205e6050243SJerin Jacob 			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
206e6050243SJerin Jacob 			.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES,
207e6050243SJerin Jacob 			.nb_atomic_flows = opt->nb_flows,
208e6050243SJerin Jacob 			.nb_atomic_order_sequences = opt->nb_flows,
209e6050243SJerin Jacob 	};
210e6050243SJerin Jacob 	/* queue configurations */
211e6050243SJerin Jacob 	for (queue = 0; queue < atq_nb_event_queues(opt); queue++) {
212e6050243SJerin Jacob 		ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
213e6050243SJerin Jacob 		if (ret) {
214e6050243SJerin Jacob 			evt_err("failed to setup queue=%d", queue);
215e6050243SJerin Jacob 			return ret;
216e6050243SJerin Jacob 		}
217e6050243SJerin Jacob 	}
218e6050243SJerin Jacob 
219e6050243SJerin Jacob 	ret = perf_event_dev_port_setup(test, opt, 1 /* stride */,
220e6050243SJerin Jacob 					atq_nb_event_queues(opt));
221e6050243SJerin Jacob 	if (ret)
222e6050243SJerin Jacob 		return ret;
223e6050243SJerin Jacob 
224e6050243SJerin Jacob 	ret = rte_event_dev_start(opt->dev_id);
225e6050243SJerin Jacob 	if (ret) {
226e6050243SJerin Jacob 		evt_err("failed to start eventdev %d", opt->dev_id);
227e6050243SJerin Jacob 		return ret;
228e6050243SJerin Jacob 	}
229e6050243SJerin Jacob 
230e6050243SJerin Jacob 	return 0;
231e6050243SJerin Jacob }
232e6050243SJerin Jacob 
/* Dump the effective test options, including the derived queue count. */
static void
perf_atq_opt_dump(struct evt_options *opt)
{
	perf_opt_dump(opt, atq_nb_event_queues(opt));
}
238e6050243SJerin Jacob 
/* Validate the options against the derived queue count (common checks). */
static int
perf_atq_opt_check(struct evt_options *opt)
{
	return perf_opt_check(opt, atq_nb_event_queues(opt));
}
244e6050243SJerin Jacob 
245e6050243SJerin Jacob static bool
246e6050243SJerin Jacob perf_atq_capability_check(struct evt_options *opt)
247e6050243SJerin Jacob {
248e6050243SJerin Jacob 	struct rte_event_dev_info dev_info;
249e6050243SJerin Jacob 
250e6050243SJerin Jacob 	rte_event_dev_info_get(opt->dev_id, &dev_info);
251e6050243SJerin Jacob 	if (dev_info.max_event_queues < atq_nb_event_queues(opt) ||
252e6050243SJerin Jacob 			dev_info.max_event_ports < perf_nb_event_ports(opt)) {
253e6050243SJerin Jacob 		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
254e6050243SJerin Jacob 			atq_nb_event_queues(opt), dev_info.max_event_queues,
255e6050243SJerin Jacob 			perf_nb_event_ports(opt), dev_info.max_event_ports);
256e6050243SJerin Jacob 	}
257e6050243SJerin Jacob 	if (!evt_has_all_types_queue(opt->dev_id))
258e6050243SJerin Jacob 		return false;
259e6050243SJerin Jacob 
260e6050243SJerin Jacob 	return true;
261e6050243SJerin Jacob }
262e6050243SJerin Jacob 
/* Test-ops vtable registering the "perf_atq" (all-types queue) perf test
 * with the test-eventdev framework; common perf_* helpers are reused for
 * everything except the atq-specific setup, checks and worker launch.
 */
static const struct evt_test_ops perf_atq =  {
	.cap_check          = perf_atq_capability_check,
	.opt_check          = perf_atq_opt_check,
	.opt_dump           = perf_atq_opt_dump,
	.test_setup         = perf_test_setup,
	.mempool_setup      = perf_mempool_setup,
	.eventdev_setup     = perf_atq_eventdev_setup,
	.launch_lcores      = perf_atq_launch_lcores,
	.eventdev_destroy   = perf_eventdev_destroy,
	.mempool_destroy    = perf_mempool_destroy,
	.test_result        = perf_test_result,
	.test_destroy       = perf_test_destroy,
};

EVT_TEST_REGISTER(perf_atq);
278