xref: /dpdk/app/test-eventdev/test_perf_common.c (revision 84a7513d4387958da134d3817dca812be50afaf9)
/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium 2017.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "test_perf_common.h"

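/*
 * Common helpers shared by the test-eventdev perf tests. As an illustrative
 * sketch, a concrete test would plug these helpers into its evt_test_ops
 * vtable roughly as below; the perf_queue_* symbols are hypothetical and
 * the field names are assumed from evt_test.h:
 *
 *	static const struct evt_test_ops perf_queue = {
 *		.opt_check = perf_queue_opt_check,
 *		.opt_dump = perf_queue_opt_dump,
 *		.test_setup = perf_test_setup,
 *		.mempool_setup = perf_mempool_setup,
 *		.eventdev_destroy = perf_eventdev_destroy,
 *		.mempool_destroy = perf_mempool_destroy,
 *		.test_result = perf_test_result,
 *		.test_destroy = perf_test_destroy,
 *	};
 */

/* Return the result recorded for the completed perf test run. */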
int
perf_test_result(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_perf *t = evt_test_priv(test);

	return t->result;
}

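/*
 * Set up and link the event ports: one port per worker lcore, each linked
 * to all nb_queues queues, followed by one port per producer lcore with no
 * queue links. Producer i injects its events into queue i * stride.
 */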
int
perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
				uint8_t stride, uint8_t nb_queues)
{
	struct test_perf *t = evt_test_priv(test);
	uint8_t port, prod;
	int ret = -1;

	/* worker port configuration */
	const struct rte_event_port_conf wkr_p_conf = {
			.dequeue_depth = opt->wkr_deq_dep,
			.enqueue_depth = 64,
			.new_event_threshold = 4096,
	};

	/* setup one port per worker, linking to all queues */
	for (port = 0; port < evt_nr_active_lcores(opt->wlcores);
				port++) {
		struct worker_data *w = &t->worker[port];

		w->dev_id = opt->dev_id;
		w->port_id = port;
		w->t = t;
		w->processed_pkts = 0;
		w->latency = 0;

		ret = rte_event_port_setup(opt->dev_id, port, &wkr_p_conf);
		if (ret) {
			evt_err("failed to setup port %d", port);
			return ret;
		}

		ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
		if (ret != nb_queues) {
			evt_err("failed to link all queues to port %d", port);
			return -EINVAL;
		}
	}

	/* remaining ports are for producers, with no queue links */
	const struct rte_event_port_conf prod_conf = {
			.dequeue_depth = 8,
			.enqueue_depth = 32,
			.new_event_threshold = 1200,
	};
	prod = 0;
	for ( ; port < perf_nb_event_ports(opt); port++) {
		struct prod_data *p = &t->prod[port];

		p->dev_id = opt->dev_id;
		p->port_id = port;
		p->queue_id = prod * stride;
		p->t = t;

		ret = rte_event_port_setup(opt->dev_id, port, &prod_conf);
		if (ret) {
			evt_err("failed to setup port %d", port);
			return ret;
		}
		prod++;
	}

	return ret;
}

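/*
 * Validate the perf test options against the lcore layout and the
 * capabilities of the event device, and apply the fixups needed for
 * forward latency measurement.
 */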
int
perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
{
	unsigned int lcores;
	bool need_slcore = !evt_has_distributed_sched(opt->dev_id);

	/*
	 * Minimum lcores: 1 producer + 1 worker + 1 master, plus 1 scheduler
	 * lcore when the device lacks a distributed scheduler.
	 */
	lcores = need_slcore ? 4 : 3;

	if (rte_lcore_count() < lcores) {
		evt_err("test needs a minimum of %d lcores", lcores);
		return -1;
	}

	/* Validate worker lcores */
	if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
		evt_err("worker lcores overlap with master lcore");
		return -1;
	}
	if (need_slcore && evt_lcores_has_overlap(opt->wlcores, opt->slcore)) {
		evt_err("worker lcores overlap with scheduler lcore");
		return -1;
	}
	if (evt_lcores_has_overlap_multi(opt->wlcores, opt->plcores)) {
		evt_err("worker lcores overlap with producer lcores");
		return -1;
	}
	if (evt_has_disabled_lcore(opt->wlcores)) {
		evt_err("one or more worker lcores are not enabled");
		return -1;
	}
	if (!evt_has_active_lcore(opt->wlcores)) {
		evt_err("at least one worker lcore is required");
		return -1;
	}

	/* Validate producer lcores */
	if (evt_lcores_has_overlap(opt->plcores, rte_get_master_lcore())) {
		evt_err("producer lcores overlap with master lcore");
		return -1;
	}
	if (need_slcore && evt_lcores_has_overlap(opt->plcores, opt->slcore)) {
		evt_err("producer lcores overlap with scheduler lcore");
		return -1;
	}
	if (evt_has_disabled_lcore(opt->plcores)) {
		evt_err("one or more producer lcores are not enabled");
		return -1;
	}
	if (!evt_has_active_lcore(opt->plcores)) {
		evt_err("at least one producer lcore is required");
		return -1;
	}

	/* Validate scheduler lcore */
	if (need_slcore && opt->slcore == (int)rte_get_master_lcore()) {
		evt_err("scheduler lcore and master lcore must be different");
		return -1;
	}
	if (need_slcore && !rte_lcore_is_enabled(opt->slcore)) {
		evt_err("scheduler lcore is not enabled");
		return -1;
	}

	if (evt_has_invalid_stage(opt))
		return -1;

	if (evt_has_invalid_sched_type(opt))
		return -1;

	if (nb_queues > EVT_MAX_QUEUES) {
		evt_err("number of queues exceeds %d", EVT_MAX_QUEUES);
		return -1;
	}
	if (perf_nb_event_ports(opt) > EVT_MAX_PORTS) {
		evt_err("number of ports exceeds %d", EVT_MAX_PORTS);
		return -1;
	}

	/* Fixups */
	if (opt->nb_stages == 1 && opt->fwd_latency) {
		evt_info("fwd_latency is valid only when nb_stages > 1, disabling");
		opt->fwd_latency = 0;
	}
	if (opt->fwd_latency && !opt->q_priority) {
		evt_info("enabled queue priority for latency measurement");
		opt->q_priority = 1;
	}

	return 0;
}

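/* Dump the effective perf test configuration. */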
void
perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
{
	evt_dump("nb_prod_lcores", "%d", evt_nr_active_lcores(opt->plcores));
	evt_dump_producer_lcores(opt);
	evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
	evt_dump_worker_lcores(opt);
	if (!evt_has_distributed_sched(opt->dev_id))
		evt_dump_scheduler_lcore(opt);
	evt_dump_nb_stages(opt);
	evt_dump("nb_evdev_ports", "%d", perf_nb_event_ports(opt));
	evt_dump("nb_evdev_queues", "%d", nb_queues);
	evt_dump_queue_priority(opt);
	evt_dump_sched_type_list(opt);
}

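/* Stop and close the event device under test. */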
void
perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(test);

	rte_event_dev_stop(opt->dev_id);
	rte_event_dev_close(opt->dev_id);
}

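/* Mempool object constructor: zero-fill each perf_elt on pool creation. */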
static inline void
perf_elt_init(struct rte_mempool *mp, void *arg __rte_unused,
	    void *obj, unsigned i __rte_unused)
{
	memset(obj, 0, mp->elt_size);
}

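/* Create the pool of perf_elt objects circulated by the producer lcores. */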
int
perf_mempool_setup(struct evt_test *test, struct evt_options *opt)
{
	struct test_perf *t = evt_test_priv(test);

	t->pool = rte_mempool_create(test->name, /* mempool name */
				opt->pool_sz, /* number of elements */
				sizeof(struct perf_elt), /* element size */
				512, /* cache size */
				0, /* private data size */
				NULL, NULL, /* pool constructor and its argument */
				perf_elt_init, /* obj constructor */
				NULL, /* obj constructor argument */
				opt->socket_id, 0); /* socket id and flags */
	if (t->pool == NULL) {
		evt_err("failed to create mempool");
		return -ENOMEM;
	}

	return 0;
}

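/* Free the perf_elt mempool. */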
void
perf_mempool_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_perf *t = evt_test_priv(test);

	rte_mempool_free(t->pool);
}

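/*
 * Allocate the per-test private state (struct test_perf) and seed it from
 * the test options.
 */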
int
perf_test_setup(struct evt_test *test, struct evt_options *opt)
{
	void *test_perf;

	test_perf = rte_zmalloc_socket(test->name, sizeof(struct test_perf),
				RTE_CACHE_LINE_SIZE, opt->socket_id);
	if (test_perf == NULL) {
		evt_err("failed to allocate test_perf memory");
		goto nomem;
	}
	test->test_priv = test_perf;

	struct test_perf *t = evt_test_priv(test);

	t->outstand_pkts = opt->nb_pkts * evt_nr_active_lcores(opt->plcores);
	t->nb_workers = evt_nr_active_lcores(opt->wlcores);
	t->done = false;
	t->nb_pkts = opt->nb_pkts;
	t->nb_flows = opt->nb_flows;
	t->result = EVT_TEST_FAILED;
	t->opt = opt;
	memcpy(t->sched_type_list, opt->sched_type_list,
			sizeof(opt->sched_type_list));
	return 0;
nomem:
	return -ENOMEM;
}

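/* Release the per-test private state allocated by perf_test_setup(). */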
void
perf_test_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);

	rte_free(test->test_priv);
}
299