xref: /dpdk/app/test-eventdev/test_perf_common.c (revision cb056611a8ed9ab9024f3b91bf26e97255194514)
153a3b7e8SJerin Jacob /* SPDX-License-Identifier: BSD-3-Clause
253a3b7e8SJerin Jacob  * Copyright(c) 2017 Cavium, Inc
3ffbae86fSJerin Jacob  */
4ffbae86fSJerin Jacob 
5ffbae86fSJerin Jacob #include "test_perf_common.h"
6ffbae86fSJerin Jacob 
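/*
 * Common helpers shared by the test-eventdev perf test cases: result
 * reporting, producer loops for the synthetic and event timer adapter
 * producer types, lcore launch and progress monitoring, and setup/teardown
 * of event ports, ethdevs, adapters, mempools and per-test state.
 */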
741c219e6SJerin Jacob int
841c219e6SJerin Jacob perf_test_result(struct evt_test *test, struct evt_options *opt)
941c219e6SJerin Jacob {
1041c219e6SJerin Jacob 	RTE_SET_USED(opt);
116b1a14a8SPavan Nikhilesh 	int i;
126b1a14a8SPavan Nikhilesh 	uint64_t total = 0;
1341c219e6SJerin Jacob 	struct test_perf *t = evt_test_priv(test);
1441c219e6SJerin Jacob 
156b1a14a8SPavan Nikhilesh 	printf("Packet distribution across worker cores:\n");
166b1a14a8SPavan Nikhilesh 	for (i = 0; i < t->nb_workers; i++)
176b1a14a8SPavan Nikhilesh 		total += t->worker[i].processed_pkts;
186b1a14a8SPavan Nikhilesh 	for (i = 0; i < t->nb_workers; i++)
196b1a14a8SPavan Nikhilesh 		printf("Worker %d packets: "CLGRN"%"PRIx64" "CLNRM"percentage:"
206b1a14a8SPavan Nikhilesh 				CLGRN" %3.2f\n"CLNRM, i,
216b1a14a8SPavan Nikhilesh 				t->worker[i].processed_pkts,
226b1a14a8SPavan Nikhilesh 				(((double)t->worker[i].processed_pkts)/total)
236b1a14a8SPavan Nikhilesh 				* 100);
246b1a14a8SPavan Nikhilesh 
2541c219e6SJerin Jacob 	return t->result;
2641c219e6SJerin Jacob }
2741c219e6SJerin Jacob 
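/*
 * Synthetic producer loop: fetch perf_elt objects from the mempool in
 * bursts of BURST_SIZE, timestamp each element and enqueue it as a single
 * RTE_EVENT_OP_NEW event on stage 0, spreading events across nb_flows
 * flow ids. A rejected enqueue is retried (with rte_pause() and a fresh
 * timestamp) until it succeeds or the test is marked done.
 */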
289d3aeb18SJerin Jacob static inline int
299d3aeb18SJerin Jacob perf_producer(void *arg)
309d3aeb18SJerin Jacob {
319a618803SPavan Nikhilesh 	int i;
329d3aeb18SJerin Jacob 	struct prod_data *p  = arg;
339d3aeb18SJerin Jacob 	struct test_perf *t = p->t;
349d3aeb18SJerin Jacob 	struct evt_options *opt = t->opt;
359d3aeb18SJerin Jacob 	const uint8_t dev_id = p->dev_id;
369d3aeb18SJerin Jacob 	const uint8_t port = p->port_id;
379d3aeb18SJerin Jacob 	struct rte_mempool *pool = t->pool;
389d3aeb18SJerin Jacob 	const uint64_t nb_pkts = t->nb_pkts;
399d3aeb18SJerin Jacob 	const uint32_t nb_flows = t->nb_flows;
409d3aeb18SJerin Jacob 	uint32_t flow_counter = 0;
419d3aeb18SJerin Jacob 	uint64_t count = 0;
429a618803SPavan Nikhilesh 	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
439d3aeb18SJerin Jacob 	struct rte_event ev;
449d3aeb18SJerin Jacob 
459d3aeb18SJerin Jacob 	if (opt->verbose_level > 1)
469d3aeb18SJerin Jacob 		printf("%s(): lcore %d dev_id %d port=%d queue %d\n", __func__,
479d3aeb18SJerin Jacob 				rte_lcore_id(), dev_id, port, p->queue_id);
489d3aeb18SJerin Jacob 
499d3aeb18SJerin Jacob 	ev.event = 0;
509d3aeb18SJerin Jacob 	ev.op = RTE_EVENT_OP_NEW;
519d3aeb18SJerin Jacob 	ev.queue_id = p->queue_id;
529d3aeb18SJerin Jacob 	ev.sched_type = t->opt->sched_type_list[0];
539d3aeb18SJerin Jacob 	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
549d3aeb18SJerin Jacob 	ev.event_type =  RTE_EVENT_TYPE_CPU;
559d3aeb18SJerin Jacob 	ev.sub_event_type = 0; /* stage 0 */
569d3aeb18SJerin Jacob 
579d3aeb18SJerin Jacob 	while (count < nb_pkts && t->done == false) {
589a618803SPavan Nikhilesh 		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
599d3aeb18SJerin Jacob 			continue;
609a618803SPavan Nikhilesh 		for (i = 0; i < BURST_SIZE; i++) {
619d3aeb18SJerin Jacob 			ev.flow_id = flow_counter++ % nb_flows;
629a618803SPavan Nikhilesh 			ev.event_ptr = m[i];
639a618803SPavan Nikhilesh 			m[i]->timestamp = rte_get_timer_cycles();
649a618803SPavan Nikhilesh 			while (rte_event_enqueue_burst(dev_id,
659a618803SPavan Nikhilesh 						       port, &ev, 1) != 1) {
669d3aeb18SJerin Jacob 				if (t->done)
679d3aeb18SJerin Jacob 					break;
689d3aeb18SJerin Jacob 				rte_pause();
699a618803SPavan Nikhilesh 				m[i]->timestamp = rte_get_timer_cycles();
709d3aeb18SJerin Jacob 			}
719a618803SPavan Nikhilesh 		}
729a618803SPavan Nikhilesh 		count += BURST_SIZE;
739d3aeb18SJerin Jacob 	}
749d3aeb18SJerin Jacob 
759d3aeb18SJerin Jacob 	return 0;
769d3aeb18SJerin Jacob }
779d3aeb18SJerin Jacob 
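/*
 * Event timer producer (single arm): convert the expiry time into adapter
 * ticks (rescaled when the adapter adjusted its tick resolution), then arm
 * one event timer per perf_elt via rte_event_timer_arm_burst(), spreading
 * timers across the configured adapters and flow ids and accumulating the
 * observed arm latency.
 */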
78d008f20bSPavan Nikhilesh static inline int
79d008f20bSPavan Nikhilesh perf_event_timer_producer(void *arg)
80d008f20bSPavan Nikhilesh {
819a618803SPavan Nikhilesh 	int i;
82d008f20bSPavan Nikhilesh 	struct prod_data *p  = arg;
83d008f20bSPavan Nikhilesh 	struct test_perf *t = p->t;
84d008f20bSPavan Nikhilesh 	struct evt_options *opt = t->opt;
85d008f20bSPavan Nikhilesh 	uint32_t flow_counter = 0;
86d008f20bSPavan Nikhilesh 	uint64_t count = 0;
87d008f20bSPavan Nikhilesh 	uint64_t arm_latency = 0;
88d008f20bSPavan Nikhilesh 	const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
89d008f20bSPavan Nikhilesh 	const uint32_t nb_flows = t->nb_flows;
90d008f20bSPavan Nikhilesh 	const uint64_t nb_timers = opt->nb_timers;
91d008f20bSPavan Nikhilesh 	struct rte_mempool *pool = t->pool;
929a618803SPavan Nikhilesh 	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
93d008f20bSPavan Nikhilesh 	struct rte_event_timer_adapter **adptr = t->timer_adptr;
9452553263SPavan Nikhilesh 	struct rte_event_timer tim;
95d008f20bSPavan Nikhilesh 	uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;
96d008f20bSPavan Nikhilesh 
9752553263SPavan Nikhilesh 	memset(&tim, 0, sizeof(struct rte_event_timer));
98d008f20bSPavan Nikhilesh 	timeout_ticks = opt->optm_timer_tick_nsec ?
99d008f20bSPavan Nikhilesh 			(timeout_ticks * opt->timer_tick_nsec)
100d008f20bSPavan Nikhilesh 			/ opt->optm_timer_tick_nsec : timeout_ticks;
101d008f20bSPavan Nikhilesh 	timeout_ticks += timeout_ticks ? 0 : 1;
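	/*
	 * Hypothetical example of the conversion above: expiry_nsec = 10000000
	 * and timer_tick_nsec = 1000000 give timeout_ticks = 10; if the
	 * adapter adjusted its resolution to optm_timer_tick_nsec = 2000000,
	 * the value is rescaled to (10 * 1000000) / 2000000 = 5 ticks, and a
	 * zero result is bumped to one tick.
	 */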
10252553263SPavan Nikhilesh 	tim.ev.event_type =  RTE_EVENT_TYPE_TIMER;
10352553263SPavan Nikhilesh 	tim.ev.op = RTE_EVENT_OP_NEW;
10452553263SPavan Nikhilesh 	tim.ev.sched_type = t->opt->sched_type_list[0];
10552553263SPavan Nikhilesh 	tim.ev.queue_id = p->queue_id;
10652553263SPavan Nikhilesh 	tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
10752553263SPavan Nikhilesh 	tim.state = RTE_EVENT_TIMER_NOT_ARMED;
10852553263SPavan Nikhilesh 	tim.timeout_ticks = timeout_ticks;
109d008f20bSPavan Nikhilesh 
110d008f20bSPavan Nikhilesh 	if (opt->verbose_level > 1)
111d008f20bSPavan Nikhilesh 		printf("%s(): lcore %d\n", __func__, rte_lcore_id());
112d008f20bSPavan Nikhilesh 
113d008f20bSPavan Nikhilesh 	while (count < nb_timers && t->done == false) {
1149a618803SPavan Nikhilesh 		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
115d008f20bSPavan Nikhilesh 			continue;
1169a618803SPavan Nikhilesh 		for (i = 0; i < BURST_SIZE; i++) {
1179a618803SPavan Nikhilesh 			rte_prefetch0(m[i + 1]);
1189a618803SPavan Nikhilesh 			m[i]->tim = tim;
1199a618803SPavan Nikhilesh 			m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
1209a618803SPavan Nikhilesh 			m[i]->tim.ev.event_ptr = m[i];
1219a618803SPavan Nikhilesh 			m[i]->timestamp = rte_get_timer_cycles();
122d008f20bSPavan Nikhilesh 			while (rte_event_timer_arm_burst(
123d008f20bSPavan Nikhilesh 			       adptr[flow_counter % nb_timer_adptrs],
1249a618803SPavan Nikhilesh 			       (struct rte_event_timer **)&m[i], 1) != 1) {
125d008f20bSPavan Nikhilesh 				if (t->done)
126d008f20bSPavan Nikhilesh 					break;
1279a618803SPavan Nikhilesh 				m[i]->timestamp = rte_get_timer_cycles();
128d008f20bSPavan Nikhilesh 			}
1299a618803SPavan Nikhilesh 			arm_latency += rte_get_timer_cycles() - m[i]->timestamp;
1309a618803SPavan Nikhilesh 		}
1319a618803SPavan Nikhilesh 		count += BURST_SIZE;
132d008f20bSPavan Nikhilesh 	}
133d008f20bSPavan Nikhilesh 	fflush(stdout);
134d008f20bSPavan Nikhilesh 	rte_delay_ms(1000);
135d008f20bSPavan Nikhilesh 	printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
13693b7794bSPavan Nikhilesh 			__func__, rte_lcore_id(),
13793b7794bSPavan Nikhilesh 			count ? (float)(arm_latency / count) /
13893b7794bSPavan Nikhilesh 			(rte_get_timer_hz() / 1000000) : 0);
139d008f20bSPavan Nikhilesh 	return 0;
140d008f20bSPavan Nikhilesh }
141d008f20bSPavan Nikhilesh 
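/*
 * Burst variant of the event timer producer: prepare BURST_SIZE timers
 * sharing the same timeout and arm them with a single
 * rte_event_timer_arm_tmo_tick_burst() call, measuring arm latency against
 * the timestamp of the last element in the burst.
 */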
14217b22d0bSPavan Nikhilesh static inline int
14317b22d0bSPavan Nikhilesh perf_event_timer_producer_burst(void *arg)
14417b22d0bSPavan Nikhilesh {
14517b22d0bSPavan Nikhilesh 	int i;
14617b22d0bSPavan Nikhilesh 	struct prod_data *p  = arg;
14717b22d0bSPavan Nikhilesh 	struct test_perf *t = p->t;
14817b22d0bSPavan Nikhilesh 	struct evt_options *opt = t->opt;
14917b22d0bSPavan Nikhilesh 	uint32_t flow_counter = 0;
15017b22d0bSPavan Nikhilesh 	uint64_t count = 0;
15117b22d0bSPavan Nikhilesh 	uint64_t arm_latency = 0;
15217b22d0bSPavan Nikhilesh 	const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
15317b22d0bSPavan Nikhilesh 	const uint32_t nb_flows = t->nb_flows;
15417b22d0bSPavan Nikhilesh 	const uint64_t nb_timers = opt->nb_timers;
15517b22d0bSPavan Nikhilesh 	struct rte_mempool *pool = t->pool;
15617b22d0bSPavan Nikhilesh 	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
15717b22d0bSPavan Nikhilesh 	struct rte_event_timer_adapter **adptr = t->timer_adptr;
15852553263SPavan Nikhilesh 	struct rte_event_timer tim;
15917b22d0bSPavan Nikhilesh 	uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;
16017b22d0bSPavan Nikhilesh 
16152553263SPavan Nikhilesh 	memset(&tim, 0, sizeof(struct rte_event_timer));
16217b22d0bSPavan Nikhilesh 	timeout_ticks = opt->optm_timer_tick_nsec ?
16317b22d0bSPavan Nikhilesh 			(timeout_ticks * opt->timer_tick_nsec)
16417b22d0bSPavan Nikhilesh 			/ opt->optm_timer_tick_nsec : timeout_ticks;
16517b22d0bSPavan Nikhilesh 	timeout_ticks += timeout_ticks ? 0 : 1;
16652553263SPavan Nikhilesh 	tim.ev.event_type =  RTE_EVENT_TYPE_TIMER;
16752553263SPavan Nikhilesh 	tim.ev.op = RTE_EVENT_OP_NEW;
16852553263SPavan Nikhilesh 	tim.ev.sched_type = t->opt->sched_type_list[0];
16952553263SPavan Nikhilesh 	tim.ev.queue_id = p->queue_id;
17052553263SPavan Nikhilesh 	tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
17152553263SPavan Nikhilesh 	tim.state = RTE_EVENT_TIMER_NOT_ARMED;
17252553263SPavan Nikhilesh 	tim.timeout_ticks = timeout_ticks;
17317b22d0bSPavan Nikhilesh 
17417b22d0bSPavan Nikhilesh 	if (opt->verbose_level > 1)
17517b22d0bSPavan Nikhilesh 		printf("%s(): lcore %d\n", __func__, rte_lcore_id());
17617b22d0bSPavan Nikhilesh 
17717b22d0bSPavan Nikhilesh 	while (count < nb_timers && t->done == false) {
17817b22d0bSPavan Nikhilesh 		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
17917b22d0bSPavan Nikhilesh 			continue;
18017b22d0bSPavan Nikhilesh 		for (i = 0; i < BURST_SIZE; i++) {
18117b22d0bSPavan Nikhilesh 			rte_prefetch0(m[i + 1]);
18217b22d0bSPavan Nikhilesh 			m[i]->tim = tim;
18317b22d0bSPavan Nikhilesh 			m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
18417b22d0bSPavan Nikhilesh 			m[i]->tim.ev.event_ptr = m[i];
18517b22d0bSPavan Nikhilesh 			m[i]->timestamp = rte_get_timer_cycles();
18617b22d0bSPavan Nikhilesh 		}
18717b22d0bSPavan Nikhilesh 		rte_event_timer_arm_tmo_tick_burst(
18817b22d0bSPavan Nikhilesh 				adptr[flow_counter % nb_timer_adptrs],
18917b22d0bSPavan Nikhilesh 				(struct rte_event_timer **)m,
19017b22d0bSPavan Nikhilesh 				tim.timeout_ticks,
19117b22d0bSPavan Nikhilesh 				BURST_SIZE);
19217b22d0bSPavan Nikhilesh 		arm_latency += rte_get_timer_cycles() - m[i - 1]->timestamp;
19317b22d0bSPavan Nikhilesh 		count += BURST_SIZE;
19417b22d0bSPavan Nikhilesh 	}
19517b22d0bSPavan Nikhilesh 	fflush(stdout);
19617b22d0bSPavan Nikhilesh 	rte_delay_ms(1000);
19717b22d0bSPavan Nikhilesh 	printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
19893b7794bSPavan Nikhilesh 			__func__, rte_lcore_id(),
19993b7794bSPavan Nikhilesh 			count ? (float)(arm_latency / count) /
20093b7794bSPavan Nikhilesh 			(rte_get_timer_hz() / 1000000) : 0);
20117b22d0bSPavan Nikhilesh 	return 0;
20217b22d0bSPavan Nikhilesh }
20317b22d0bSPavan Nikhilesh 
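/*
 * Producer entry point launched on each producer lcore: picks the
 * synthetic, single-arm timer or burst timer producer loop based on
 * opt->prod_type and opt->timdev_use_burst.
 */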
20459f697e3SPavan Nikhilesh static int
20559f697e3SPavan Nikhilesh perf_producer_wrapper(void *arg)
20659f697e3SPavan Nikhilesh {
20759f697e3SPavan Nikhilesh 	struct prod_data *p  = arg;
20859f697e3SPavan Nikhilesh 	struct test_perf *t = p->t;
20959f697e3SPavan Nikhilesh 	/* Launch the producer function only in case of synthetic producer. */
21059f697e3SPavan Nikhilesh 	if (t->opt->prod_type == EVT_PROD_TYPE_SYNT)
21159f697e3SPavan Nikhilesh 		return perf_producer(arg);
21217b22d0bSPavan Nikhilesh 	else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
21317b22d0bSPavan Nikhilesh 			!t->opt->timdev_use_burst)
214d008f20bSPavan Nikhilesh 		return perf_event_timer_producer(arg);
21517b22d0bSPavan Nikhilesh 	else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
21617b22d0bSPavan Nikhilesh 			t->opt->timdev_use_burst)
21717b22d0bSPavan Nikhilesh 		return perf_event_timer_producer_burst(arg);
21859f697e3SPavan Nikhilesh 	return 0;
21959f697e3SPavan Nikhilesh }
22059f697e3SPavan Nikhilesh 
2219d3aeb18SJerin Jacob static inline uint64_t
2229d3aeb18SJerin Jacob processed_pkts(struct test_perf *t)
2239d3aeb18SJerin Jacob {
2249d3aeb18SJerin Jacob 	uint8_t i;
2259d3aeb18SJerin Jacob 	uint64_t total = 0;
2269d3aeb18SJerin Jacob 
2279d3aeb18SJerin Jacob 	rte_smp_rmb();
2289d3aeb18SJerin Jacob 	for (i = 0; i < t->nb_workers; i++)
2299d3aeb18SJerin Jacob 		total += t->worker[i].processed_pkts;
2309d3aeb18SJerin Jacob 
2319d3aeb18SJerin Jacob 	return total;
2329d3aeb18SJerin Jacob }
2339d3aeb18SJerin Jacob 
2349d3aeb18SJerin Jacob static inline uint64_t
2359d3aeb18SJerin Jacob total_latency(struct test_perf *t)
2369d3aeb18SJerin Jacob {
2379d3aeb18SJerin Jacob 	uint8_t i;
2389d3aeb18SJerin Jacob 	uint64_t total = 0;
2399d3aeb18SJerin Jacob 
2409d3aeb18SJerin Jacob 	rte_smp_rmb();
2419d3aeb18SJerin Jacob 	for (i = 0; i < t->nb_workers; i++)
2429d3aeb18SJerin Jacob 		total += t->worker[i].latency;
2439d3aeb18SJerin Jacob 
2449d3aeb18SJerin Jacob 	return total;
2459d3aeb18SJerin Jacob }
2469d3aeb18SJerin Jacob 
2479d3aeb18SJerin Jacob 
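/*
 * Launch workers and producers on the lcores selected by the wlcores and
 * plcores masks, then monitor from the main lcore: print the current and
 * average mpps (and average forward latency when enabled) once per second,
 * and, for the synthetic and timer producer types, stop the test once all
 * outstanding packets are processed or when no forward progress is seen
 * for roughly five seconds (deadlock detection).
 */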
2489d3aeb18SJerin Jacob int
2499d3aeb18SJerin Jacob perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
2509d3aeb18SJerin Jacob 		int (*worker)(void *))
2519d3aeb18SJerin Jacob {
2529d3aeb18SJerin Jacob 	int ret, lcore_id;
2539d3aeb18SJerin Jacob 	struct test_perf *t = evt_test_priv(test);
2549d3aeb18SJerin Jacob 
2559d3aeb18SJerin Jacob 	int port_idx = 0;
2569d3aeb18SJerin Jacob 	/* launch workers */
257*cb056611SStephen Hemminger 	RTE_LCORE_FOREACH_WORKER(lcore_id) {
2589d3aeb18SJerin Jacob 		if (!(opt->wlcores[lcore_id]))
2599d3aeb18SJerin Jacob 			continue;
2609d3aeb18SJerin Jacob 
2619d3aeb18SJerin Jacob 		ret = rte_eal_remote_launch(worker,
2629d3aeb18SJerin Jacob 				 &t->worker[port_idx], lcore_id);
2639d3aeb18SJerin Jacob 		if (ret) {
2649d3aeb18SJerin Jacob 			evt_err("failed to launch worker %d", lcore_id);
2659d3aeb18SJerin Jacob 			return ret;
2669d3aeb18SJerin Jacob 		}
2679d3aeb18SJerin Jacob 		port_idx++;
2689d3aeb18SJerin Jacob 	}
2699d3aeb18SJerin Jacob 
2709d3aeb18SJerin Jacob 	/* launch producers */
271*cb056611SStephen Hemminger 	RTE_LCORE_FOREACH_WORKER(lcore_id) {
2729d3aeb18SJerin Jacob 		if (!(opt->plcores[lcore_id]))
2739d3aeb18SJerin Jacob 			continue;
2749d3aeb18SJerin Jacob 
27559f697e3SPavan Nikhilesh 		ret = rte_eal_remote_launch(perf_producer_wrapper,
27659f697e3SPavan Nikhilesh 				&t->prod[port_idx], lcore_id);
2779d3aeb18SJerin Jacob 		if (ret) {
2789d3aeb18SJerin Jacob 			evt_err("failed to launch perf_producer %d", lcore_id);
2799d3aeb18SJerin Jacob 			return ret;
2809d3aeb18SJerin Jacob 		}
2819d3aeb18SJerin Jacob 		port_idx++;
2829d3aeb18SJerin Jacob 	}
2839d3aeb18SJerin Jacob 
284d008f20bSPavan Nikhilesh 	const uint64_t total_pkts = t->outstand_pkts;
2859d3aeb18SJerin Jacob 
2869d3aeb18SJerin Jacob 	uint64_t dead_lock_cycles = rte_get_timer_cycles();
2879d3aeb18SJerin Jacob 	int64_t dead_lock_remaining  =  total_pkts;
2889d3aeb18SJerin Jacob 	const uint64_t dead_lock_sample = rte_get_timer_hz() * 5;
2899d3aeb18SJerin Jacob 
2909d3aeb18SJerin Jacob 	uint64_t perf_cycles = rte_get_timer_cycles();
2919d3aeb18SJerin Jacob 	int64_t perf_remaining  = total_pkts;
2929d3aeb18SJerin Jacob 	const uint64_t perf_sample = rte_get_timer_hz();
2939d3aeb18SJerin Jacob 
2949d3aeb18SJerin Jacob 	static float total_mpps;
2959d3aeb18SJerin Jacob 	static uint64_t samples;
2969d3aeb18SJerin Jacob 
2979d3aeb18SJerin Jacob 	const uint64_t freq_mhz = rte_get_timer_hz() / 1000000;
2989d3aeb18SJerin Jacob 	int64_t remaining = t->outstand_pkts - processed_pkts(t);
2999d3aeb18SJerin Jacob 
3009d3aeb18SJerin Jacob 	while (t->done == false) {
3019d3aeb18SJerin Jacob 		const uint64_t new_cycles = rte_get_timer_cycles();
3029d3aeb18SJerin Jacob 
3039d3aeb18SJerin Jacob 		if ((new_cycles - perf_cycles) > perf_sample) {
3049d3aeb18SJerin Jacob 			const uint64_t latency = total_latency(t);
3059d3aeb18SJerin Jacob 			const uint64_t pkts = processed_pkts(t);
3069d3aeb18SJerin Jacob 
3079d3aeb18SJerin Jacob 			remaining = t->outstand_pkts - pkts;
3089d3aeb18SJerin Jacob 			float mpps = (float)(perf_remaining-remaining)/1000000;
3099d3aeb18SJerin Jacob 
3109d3aeb18SJerin Jacob 			perf_remaining = remaining;
3119d3aeb18SJerin Jacob 			perf_cycles = new_cycles;
3129d3aeb18SJerin Jacob 			total_mpps += mpps;
3139d3aeb18SJerin Jacob 			++samples;
31404716352SJerin Jacob 			if (opt->fwd_latency && pkts > 0) {
3159d3aeb18SJerin Jacob 				printf(CLGRN"\r%.3f mpps avg %.3f mpps [avg fwd latency %.3f us] "CLNRM,
3169d3aeb18SJerin Jacob 					mpps, total_mpps/samples,
3179d3aeb18SJerin Jacob 					(float)(latency/pkts)/freq_mhz);
3189d3aeb18SJerin Jacob 			} else {
3199d3aeb18SJerin Jacob 				printf(CLGRN"\r%.3f mpps avg %.3f mpps"CLNRM,
3209d3aeb18SJerin Jacob 					mpps, total_mpps/samples);
3219d3aeb18SJerin Jacob 			}
3229d3aeb18SJerin Jacob 			fflush(stdout);
3239d3aeb18SJerin Jacob 
3249d3aeb18SJerin Jacob 			if (remaining <= 0) {
3259d3aeb18SJerin Jacob 				t->result = EVT_TEST_SUCCESS;
326d008f20bSPavan Nikhilesh 				if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
327d008f20bSPavan Nikhilesh 					opt->prod_type ==
328d008f20bSPavan Nikhilesh 					EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
32959f697e3SPavan Nikhilesh 					t->done = true;
3309d3aeb18SJerin Jacob 					rte_smp_wmb();
3319d3aeb18SJerin Jacob 					break;
3329d3aeb18SJerin Jacob 				}
3339d3aeb18SJerin Jacob 			}
33459f697e3SPavan Nikhilesh 		}
3359d3aeb18SJerin Jacob 
33659f697e3SPavan Nikhilesh 		if (new_cycles - dead_lock_cycles > dead_lock_sample &&
33747303784SErik Gabriel Carrillo 		    (opt->prod_type == EVT_PROD_TYPE_SYNT ||
33847303784SErik Gabriel Carrillo 		     opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR)) {
3399d3aeb18SJerin Jacob 			remaining = t->outstand_pkts - processed_pkts(t);
3409d3aeb18SJerin Jacob 			if (dead_lock_remaining == remaining) {
3419d3aeb18SJerin Jacob 				rte_event_dev_dump(opt->dev_id, stdout);
3429d3aeb18SJerin Jacob 				evt_err("No scheduling progress for 5 seconds, deadlock");
3439d3aeb18SJerin Jacob 				t->done = true;
3449d3aeb18SJerin Jacob 				rte_smp_wmb();
3459d3aeb18SJerin Jacob 				break;
3469d3aeb18SJerin Jacob 			}
3479d3aeb18SJerin Jacob 			dead_lock_remaining = remaining;
3489d3aeb18SJerin Jacob 			dead_lock_cycles = new_cycles;
3499d3aeb18SJerin Jacob 		}
3509d3aeb18SJerin Jacob 	}
3519d3aeb18SJerin Jacob 	printf("\n");
3529d3aeb18SJerin Jacob 	return 0;
3539d3aeb18SJerin Jacob }
3549d3aeb18SJerin Jacob 
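/*
 * Create one ethernet Rx adapter per ethdev, map all of its Rx queues to
 * the event queue derived from the port id and queue stride, and set up a
 * service lcore for adapters that lack the internal port capability.
 */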
3553617aae5SPavan Nikhilesh static int
3563617aae5SPavan Nikhilesh perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
3573617aae5SPavan Nikhilesh 		struct rte_event_port_conf prod_conf)
3583617aae5SPavan Nikhilesh {
3593617aae5SPavan Nikhilesh 	int ret = 0;
3603617aae5SPavan Nikhilesh 	uint16_t prod;
3613617aae5SPavan Nikhilesh 	struct rte_event_eth_rx_adapter_queue_conf queue_conf;
3623617aae5SPavan Nikhilesh 
3633617aae5SPavan Nikhilesh 	memset(&queue_conf, 0,
3643617aae5SPavan Nikhilesh 			sizeof(struct rte_event_eth_rx_adapter_queue_conf));
3653617aae5SPavan Nikhilesh 	queue_conf.ev.sched_type = opt->sched_type_list[0];
3668728ccf3SThomas Monjalon 	RTE_ETH_FOREACH_DEV(prod) {
3673617aae5SPavan Nikhilesh 		uint32_t cap;
3683617aae5SPavan Nikhilesh 
3693617aae5SPavan Nikhilesh 		ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
3703617aae5SPavan Nikhilesh 				prod, &cap);
3713617aae5SPavan Nikhilesh 		if (ret) {
3723617aae5SPavan Nikhilesh 			evt_err("failed to get event rx adapter[%d]"
3733617aae5SPavan Nikhilesh 					" capabilities",
3743617aae5SPavan Nikhilesh 					opt->dev_id);
3753617aae5SPavan Nikhilesh 			return ret;
3763617aae5SPavan Nikhilesh 		}
3773617aae5SPavan Nikhilesh 		queue_conf.ev.queue_id = prod * stride;
3783617aae5SPavan Nikhilesh 		ret = rte_event_eth_rx_adapter_create(prod, opt->dev_id,
3793617aae5SPavan Nikhilesh 				&prod_conf);
3803617aae5SPavan Nikhilesh 		if (ret) {
3813617aae5SPavan Nikhilesh 			evt_err("failed to create rx adapter[%d]", prod);
3823617aae5SPavan Nikhilesh 			return ret;
3833617aae5SPavan Nikhilesh 		}
3843617aae5SPavan Nikhilesh 		ret = rte_event_eth_rx_adapter_queue_add(prod, prod, -1,
3853617aae5SPavan Nikhilesh 				&queue_conf);
3863617aae5SPavan Nikhilesh 		if (ret) {
3873617aae5SPavan Nikhilesh 			evt_err("failed to add rx queues to adapter[%d]", prod);
3883617aae5SPavan Nikhilesh 			return ret;
3893617aae5SPavan Nikhilesh 		}
3903617aae5SPavan Nikhilesh 
391b0333c55SPavan Nikhilesh 		if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
392b0333c55SPavan Nikhilesh 			uint32_t service_id;
393b0333c55SPavan Nikhilesh 
394b0333c55SPavan Nikhilesh 			rte_event_eth_rx_adapter_service_id_get(prod,
395b0333c55SPavan Nikhilesh 					&service_id);
396b0333c55SPavan Nikhilesh 			ret = evt_service_setup(service_id);
397b0333c55SPavan Nikhilesh 			if (ret) {
398b0333c55SPavan Nikhilesh 				evt_err("Failed to setup service core"
399b0333c55SPavan Nikhilesh 						" for Rx adapter\n");
400b0333c55SPavan Nikhilesh 				return ret;
401b0333c55SPavan Nikhilesh 			}
402b0333c55SPavan Nikhilesh 		}
4033617aae5SPavan Nikhilesh 	}
4043617aae5SPavan Nikhilesh 
4053617aae5SPavan Nikhilesh 	return ret;
4063617aae5SPavan Nikhilesh }
4073617aae5SPavan Nikhilesh 
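/*
 * Create the requested number of event timer adapters using the tick,
 * maximum timeout and pool size from the options, record the achievable
 * resolution in optm_timer_tick_nsec, and run adapters without the
 * internal port capability on a service lcore.
 */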
408d008f20bSPavan Nikhilesh static int
409d008f20bSPavan Nikhilesh perf_event_timer_adapter_setup(struct test_perf *t)
410d008f20bSPavan Nikhilesh {
411d008f20bSPavan Nikhilesh 	int i;
412d008f20bSPavan Nikhilesh 	int ret;
413d008f20bSPavan Nikhilesh 	struct rte_event_timer_adapter_info adapter_info;
414d008f20bSPavan Nikhilesh 	struct rte_event_timer_adapter *wl;
415d008f20bSPavan Nikhilesh 	uint8_t nb_producers = evt_nr_active_lcores(t->opt->plcores);
416d008f20bSPavan Nikhilesh 	uint8_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;
417d008f20bSPavan Nikhilesh 
418d008f20bSPavan Nikhilesh 	if (nb_producers == 1)
419d008f20bSPavan Nikhilesh 		flags |= RTE_EVENT_TIMER_ADAPTER_F_SP_PUT;
420d008f20bSPavan Nikhilesh 
421d008f20bSPavan Nikhilesh 	for (i = 0; i < t->opt->nb_timer_adptrs; i++) {
422d008f20bSPavan Nikhilesh 		struct rte_event_timer_adapter_conf config = {
423d008f20bSPavan Nikhilesh 			.event_dev_id = t->opt->dev_id,
424d008f20bSPavan Nikhilesh 			.timer_adapter_id = i,
425d008f20bSPavan Nikhilesh 			.timer_tick_ns = t->opt->timer_tick_nsec,
426d008f20bSPavan Nikhilesh 			.max_tmo_ns = t->opt->max_tmo_nsec,
427c13b1ad7SPavan Nikhilesh 			.nb_timers = t->opt->pool_sz,
428d008f20bSPavan Nikhilesh 			.flags = flags,
429d008f20bSPavan Nikhilesh 		};
430d008f20bSPavan Nikhilesh 
431d008f20bSPavan Nikhilesh 		wl = rte_event_timer_adapter_create(&config);
432d008f20bSPavan Nikhilesh 		if (wl == NULL) {
433d008f20bSPavan Nikhilesh 			evt_err("failed to create event timer ring %d", i);
434d008f20bSPavan Nikhilesh 			return rte_errno;
435d008f20bSPavan Nikhilesh 		}
436d008f20bSPavan Nikhilesh 
437d008f20bSPavan Nikhilesh 		memset(&adapter_info, 0,
438d008f20bSPavan Nikhilesh 				sizeof(struct rte_event_timer_adapter_info));
439d008f20bSPavan Nikhilesh 		rte_event_timer_adapter_get_info(wl, &adapter_info);
440d008f20bSPavan Nikhilesh 		t->opt->optm_timer_tick_nsec = adapter_info.min_resolution_ns;
441d008f20bSPavan Nikhilesh 
442d008f20bSPavan Nikhilesh 		if (!(adapter_info.caps &
443d008f20bSPavan Nikhilesh 				RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
44499c25664SAndrzej Ostruszka 			uint32_t service_id = -1U;
445d008f20bSPavan Nikhilesh 
446d008f20bSPavan Nikhilesh 			rte_event_timer_adapter_service_id_get(wl,
447d008f20bSPavan Nikhilesh 					&service_id);
448d008f20bSPavan Nikhilesh 			ret = evt_service_setup(service_id);
449d008f20bSPavan Nikhilesh 			if (ret) {
450d008f20bSPavan Nikhilesh 				evt_err("Failed to setup service core"
451d008f20bSPavan Nikhilesh 						" for timer adapter\n");
452d008f20bSPavan Nikhilesh 				return ret;
453d008f20bSPavan Nikhilesh 			}
454d008f20bSPavan Nikhilesh 			rte_service_runstate_set(service_id, 1);
455d008f20bSPavan Nikhilesh 		}
456d008f20bSPavan Nikhilesh 		t->timer_adptr[i] = wl;
457d008f20bSPavan Nikhilesh 	}
458d008f20bSPavan Nikhilesh 	return 0;
459d008f20bSPavan Nikhilesh }
460d008f20bSPavan Nikhilesh 
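/*
 * Set up one event port per worker lcore and link it to all queues, then
 * initialize the producer side: per-port state only for the Rx adapter
 * case, timer adapters for the timer case, or one unlinked event port per
 * producer lcore for the synthetic case.
 */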
461272de067SJerin Jacob int
46284a7513dSJerin Jacob perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
463535c630cSPavan Nikhilesh 				uint8_t stride, uint8_t nb_queues,
464535c630cSPavan Nikhilesh 				const struct rte_event_port_conf *port_conf)
46584a7513dSJerin Jacob {
46684a7513dSJerin Jacob 	struct test_perf *t = evt_test_priv(test);
4673617aae5SPavan Nikhilesh 	uint16_t port, prod;
46884a7513dSJerin Jacob 	int ret = -1;
46984a7513dSJerin Jacob 
47084a7513dSJerin Jacob 	/* setup one port per worker, linking to all queues */
47184a7513dSJerin Jacob 	for (port = 0; port < evt_nr_active_lcores(opt->wlcores);
47284a7513dSJerin Jacob 				port++) {
47384a7513dSJerin Jacob 		struct worker_data *w = &t->worker[port];
47484a7513dSJerin Jacob 
47584a7513dSJerin Jacob 		w->dev_id = opt->dev_id;
47684a7513dSJerin Jacob 		w->port_id = port;
47784a7513dSJerin Jacob 		w->t = t;
47884a7513dSJerin Jacob 		w->processed_pkts = 0;
47984a7513dSJerin Jacob 		w->latency = 0;
48084a7513dSJerin Jacob 
481535c630cSPavan Nikhilesh 		ret = rte_event_port_setup(opt->dev_id, port, port_conf);
48284a7513dSJerin Jacob 		if (ret) {
48384a7513dSJerin Jacob 			evt_err("failed to setup port %d", port);
48484a7513dSJerin Jacob 			return ret;
48584a7513dSJerin Jacob 		}
48684a7513dSJerin Jacob 
48784a7513dSJerin Jacob 		ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
48884a7513dSJerin Jacob 		if (ret != nb_queues) {
48984a7513dSJerin Jacob 			evt_err("failed to link all queues to port %d", port);
49084a7513dSJerin Jacob 			return -EINVAL;
49184a7513dSJerin Jacob 		}
49284a7513dSJerin Jacob 	}
49384a7513dSJerin Jacob 
49484a7513dSJerin Jacob 	/* port for producers, no links */
4953617aae5SPavan Nikhilesh 	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
4963617aae5SPavan Nikhilesh 		for ( ; port < perf_nb_event_ports(opt); port++) {
4973617aae5SPavan Nikhilesh 			struct prod_data *p = &t->prod[port];
4983617aae5SPavan Nikhilesh 			p->t = t;
4993617aae5SPavan Nikhilesh 		}
5003617aae5SPavan Nikhilesh 
501535c630cSPavan Nikhilesh 		ret = perf_event_rx_adapter_setup(opt, stride, *port_conf);
5023617aae5SPavan Nikhilesh 		if (ret)
5033617aae5SPavan Nikhilesh 			return ret;
504d008f20bSPavan Nikhilesh 	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
505d008f20bSPavan Nikhilesh 		prod = 0;
506d008f20bSPavan Nikhilesh 		for ( ; port < perf_nb_event_ports(opt); port++) {
507d008f20bSPavan Nikhilesh 			struct prod_data *p = &t->prod[port];
508d008f20bSPavan Nikhilesh 			p->queue_id = prod * stride;
509d008f20bSPavan Nikhilesh 			p->t = t;
510d008f20bSPavan Nikhilesh 			prod++;
511d008f20bSPavan Nikhilesh 		}
512d008f20bSPavan Nikhilesh 
513d008f20bSPavan Nikhilesh 		ret = perf_event_timer_adapter_setup(t);
514d008f20bSPavan Nikhilesh 		if (ret)
515d008f20bSPavan Nikhilesh 			return ret;
5163617aae5SPavan Nikhilesh 	} else {
51784a7513dSJerin Jacob 		prod = 0;
51884a7513dSJerin Jacob 		for ( ; port < perf_nb_event_ports(opt); port++) {
51984a7513dSJerin Jacob 			struct prod_data *p = &t->prod[port];
52084a7513dSJerin Jacob 
52184a7513dSJerin Jacob 			p->dev_id = opt->dev_id;
52284a7513dSJerin Jacob 			p->port_id = port;
52384a7513dSJerin Jacob 			p->queue_id = prod * stride;
52484a7513dSJerin Jacob 			p->t = t;
52584a7513dSJerin Jacob 
5263617aae5SPavan Nikhilesh 			ret = rte_event_port_setup(opt->dev_id, port,
527535c630cSPavan Nikhilesh 					port_conf);
52884a7513dSJerin Jacob 			if (ret) {
52984a7513dSJerin Jacob 				evt_err("failed to setup port %d", port);
53084a7513dSJerin Jacob 				return ret;
53184a7513dSJerin Jacob 			}
53284a7513dSJerin Jacob 			prod++;
53384a7513dSJerin Jacob 		}
5343617aae5SPavan Nikhilesh 	}
53584a7513dSJerin Jacob 
53684a7513dSJerin Jacob 	return ret;
53784a7513dSJerin Jacob }
53884a7513dSJerin Jacob 
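/*
 * Validate the command line: enough lcores, no overlap between worker,
 * producer and main lcores, valid stage and scheduling type lists, queue
 * and port counts within limits, plus a few fixups (forward latency,
 * queue priority and the default packet count).
 */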
53984a7513dSJerin Jacob int
540272de067SJerin Jacob perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
541272de067SJerin Jacob {
542272de067SJerin Jacob 	unsigned int lcores;
543272de067SJerin Jacob 
544*cb056611SStephen Hemminger 	/* N producers + N workers + main lcore when producer cores are
545*cb056611SStephen Hemminger 	 * used, else N workers + main lcore when the Rx adapter is used.
546b01974daSPavan Nikhilesh 	 */
547b01974daSPavan Nikhilesh 	lcores = opt->prod_type == EVT_PROD_TYPE_SYNT ? 3 : 2;
548272de067SJerin Jacob 
549272de067SJerin Jacob 	if (rte_lcore_count() < lcores) {
550272de067SJerin Jacob 		evt_err("test needs minimum %d lcores", lcores);
551272de067SJerin Jacob 		return -1;
552272de067SJerin Jacob 	}
553272de067SJerin Jacob 
554272de067SJerin Jacob 	/* Validate worker lcores */
555*cb056611SStephen Hemminger 	if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) {
556*cb056611SStephen Hemminger 		evt_err("worker lcores overlap with main lcore");
557272de067SJerin Jacob 		return -1;
558272de067SJerin Jacob 	}
559272de067SJerin Jacob 	if (evt_lcores_has_overlap_multi(opt->wlcores, opt->plcores)) {
560272de067SJerin Jacob 		evt_err("worker lcores overlap producer lcores");
561272de067SJerin Jacob 		return -1;
562272de067SJerin Jacob 	}
563272de067SJerin Jacob 	if (evt_has_disabled_lcore(opt->wlcores)) {
564272de067SJerin Jacob 		evt_err("one or more worker lcores are not enabled");
565272de067SJerin Jacob 		return -1;
566272de067SJerin Jacob 	}
567272de067SJerin Jacob 	if (!evt_has_active_lcore(opt->wlcores)) {
568272de067SJerin Jacob 		evt_err("minimum one worker is required");
569272de067SJerin Jacob 		return -1;
570272de067SJerin Jacob 	}
571272de067SJerin Jacob 
572902387eaSPavan Nikhilesh 	if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
573902387eaSPavan Nikhilesh 			opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
574272de067SJerin Jacob 		/* Validate producer lcores */
575b01974daSPavan Nikhilesh 		if (evt_lcores_has_overlap(opt->plcores,
576*cb056611SStephen Hemminger 					rte_get_main_lcore())) {
577*cb056611SStephen Hemminger 			evt_err("producer lcores overlap with main lcore");
578272de067SJerin Jacob 			return -1;
579272de067SJerin Jacob 		}
580272de067SJerin Jacob 		if (evt_has_disabled_lcore(opt->plcores)) {
581272de067SJerin Jacob 			evt_err("one or more producer lcores are not enabled");
582272de067SJerin Jacob 			return -1;
583272de067SJerin Jacob 		}
584272de067SJerin Jacob 		if (!evt_has_active_lcore(opt->plcores)) {
585272de067SJerin Jacob 			evt_err("minimum one producer is required");
586272de067SJerin Jacob 			return -1;
587272de067SJerin Jacob 		}
588b01974daSPavan Nikhilesh 	}
589272de067SJerin Jacob 
590272de067SJerin Jacob 	if (evt_has_invalid_stage(opt))
591272de067SJerin Jacob 		return -1;
592272de067SJerin Jacob 
593272de067SJerin Jacob 	if (evt_has_invalid_sched_type(opt))
594272de067SJerin Jacob 		return -1;
595272de067SJerin Jacob 
596272de067SJerin Jacob 	if (nb_queues > EVT_MAX_QUEUES) {
597272de067SJerin Jacob 		evt_err("number of queues exceeds %d", EVT_MAX_QUEUES);
598272de067SJerin Jacob 		return -1;
599272de067SJerin Jacob 	}
600272de067SJerin Jacob 	if (perf_nb_event_ports(opt) > EVT_MAX_PORTS) {
601272de067SJerin Jacob 		evt_err("number of ports exceeds %d", EVT_MAX_PORTS);
602272de067SJerin Jacob 		return -1;
603272de067SJerin Jacob 	}
604272de067SJerin Jacob 
605272de067SJerin Jacob 	/* Fixups */
606d008f20bSPavan Nikhilesh 	if ((opt->nb_stages == 1 &&
607d008f20bSPavan Nikhilesh 			opt->prod_type != EVT_PROD_TYPE_EVENT_TIMER_ADPTR) &&
608d008f20bSPavan Nikhilesh 			opt->fwd_latency) {
609272de067SJerin Jacob 		evt_info("fwd_latency is valid when nb_stages > 1, disabling");
610272de067SJerin Jacob 		opt->fwd_latency = 0;
611272de067SJerin Jacob 	}
612d008f20bSPavan Nikhilesh 
613272de067SJerin Jacob 	if (opt->fwd_latency && !opt->q_priority) {
614272de067SJerin Jacob 		evt_info("enabled queue priority for latency measurement");
615272de067SJerin Jacob 		opt->q_priority = 1;
616272de067SJerin Jacob 	}
6179d3aeb18SJerin Jacob 	if (opt->nb_pkts == 0)
6189d3aeb18SJerin Jacob 		opt->nb_pkts = INT64_MAX/evt_nr_active_lcores(opt->plcores);
619272de067SJerin Jacob 
620272de067SJerin Jacob 	return 0;
621272de067SJerin Jacob }
622272de067SJerin Jacob 
623272de067SJerin Jacob void
624272de067SJerin Jacob perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
625272de067SJerin Jacob {
626272de067SJerin Jacob 	evt_dump("nb_prod_lcores", "%d", evt_nr_active_lcores(opt->plcores));
627272de067SJerin Jacob 	evt_dump_producer_lcores(opt);
628272de067SJerin Jacob 	evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
629272de067SJerin Jacob 	evt_dump_worker_lcores(opt);
630272de067SJerin Jacob 	evt_dump_nb_stages(opt);
631272de067SJerin Jacob 	evt_dump("nb_evdev_ports", "%d", perf_nb_event_ports(opt));
632272de067SJerin Jacob 	evt_dump("nb_evdev_queues", "%d", nb_queues);
633272de067SJerin Jacob 	evt_dump_queue_priority(opt);
634272de067SJerin Jacob 	evt_dump_sched_type_list(opt);
635b01974daSPavan Nikhilesh 	evt_dump_producer_type(opt);
636272de067SJerin Jacob }
637272de067SJerin Jacob 
63841c219e6SJerin Jacob void
63941c219e6SJerin Jacob perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
64041c219e6SJerin Jacob {
641d008f20bSPavan Nikhilesh 	int i;
642d008f20bSPavan Nikhilesh 	struct test_perf *t = evt_test_priv(test);
64341c219e6SJerin Jacob 
644d008f20bSPavan Nikhilesh 	if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
645d008f20bSPavan Nikhilesh 		for (i = 0; i < opt->nb_timer_adptrs; i++)
646d008f20bSPavan Nikhilesh 			rte_event_timer_adapter_stop(t->timer_adptr[i]);
647d008f20bSPavan Nikhilesh 	}
64841c219e6SJerin Jacob 	rte_event_dev_stop(opt->dev_id);
64941c219e6SJerin Jacob 	rte_event_dev_close(opt->dev_id);
65041c219e6SJerin Jacob }
65141c219e6SJerin Jacob 
65241c219e6SJerin Jacob static inline void
65341c219e6SJerin Jacob perf_elt_init(struct rte_mempool *mp, void *arg __rte_unused,
65441c219e6SJerin Jacob 	    void *obj, unsigned i __rte_unused)
65541c219e6SJerin Jacob {
65641c219e6SJerin Jacob 	memset(obj, 0, mp->elt_size);
65741c219e6SJerin Jacob }
65841c219e6SJerin Jacob 
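/*
 * Configure every available ethdev with a single Rx and a single Tx queue
 * (NB_RX_DESC/NB_TX_DESC descriptors), RSS on IP restricted to what the
 * hardware supports, and promiscuous mode; skipped for the synthetic and
 * timer producer types, which do not use ethernet ports.
 */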
6593fc8de4fSPavan Nikhilesh #define NB_RX_DESC			128
6603fc8de4fSPavan Nikhilesh #define NB_TX_DESC			512
6613fc8de4fSPavan Nikhilesh int
6623fc8de4fSPavan Nikhilesh perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
6633fc8de4fSPavan Nikhilesh {
6648728ccf3SThomas Monjalon 	uint16_t i;
66577339255SIvan Ilchenko 	int ret;
6663fc8de4fSPavan Nikhilesh 	struct test_perf *t = evt_test_priv(test);
6673fc8de4fSPavan Nikhilesh 	struct rte_eth_conf port_conf = {
6683fc8de4fSPavan Nikhilesh 		.rxmode = {
6693fc8de4fSPavan Nikhilesh 			.mq_mode = ETH_MQ_RX_RSS,
67035b2d13fSOlivier Matz 			.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
6713fc8de4fSPavan Nikhilesh 			.split_hdr_size = 0,
6723fc8de4fSPavan Nikhilesh 		},
6733fc8de4fSPavan Nikhilesh 		.rx_adv_conf = {
6743fc8de4fSPavan Nikhilesh 			.rss_conf = {
6753fc8de4fSPavan Nikhilesh 				.rss_key = NULL,
6763fc8de4fSPavan Nikhilesh 				.rss_hf = ETH_RSS_IP,
6773fc8de4fSPavan Nikhilesh 			},
6783fc8de4fSPavan Nikhilesh 		},
6793fc8de4fSPavan Nikhilesh 	};
6803fc8de4fSPavan Nikhilesh 
681d008f20bSPavan Nikhilesh 	if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
682d008f20bSPavan Nikhilesh 			opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR)
6833fc8de4fSPavan Nikhilesh 		return 0;
6843fc8de4fSPavan Nikhilesh 
685d9a42a69SThomas Monjalon 	if (!rte_eth_dev_count_avail()) {
6863fc8de4fSPavan Nikhilesh 		evt_err("No ethernet ports found.");
6873fc8de4fSPavan Nikhilesh 		return -ENODEV;
6883fc8de4fSPavan Nikhilesh 	}
6893fc8de4fSPavan Nikhilesh 
6908728ccf3SThomas Monjalon 	RTE_ETH_FOREACH_DEV(i) {
6914f5701f2SFerruh Yigit 		struct rte_eth_dev_info dev_info;
6924f5701f2SFerruh Yigit 		struct rte_eth_conf local_port_conf = port_conf;
6933fc8de4fSPavan Nikhilesh 
69477339255SIvan Ilchenko 		ret = rte_eth_dev_info_get(i, &dev_info);
69577339255SIvan Ilchenko 		if (ret != 0) {
69677339255SIvan Ilchenko 			evt_err("Error during getting device (port %u) info: %s\n",
69777339255SIvan Ilchenko 					i, strerror(-ret));
69877339255SIvan Ilchenko 			return ret;
69977339255SIvan Ilchenko 		}
7004f5701f2SFerruh Yigit 
7014f5701f2SFerruh Yigit 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
7024f5701f2SFerruh Yigit 			dev_info.flow_type_rss_offloads;
7034f5701f2SFerruh Yigit 		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
7044f5701f2SFerruh Yigit 				port_conf.rx_adv_conf.rss_conf.rss_hf) {
7054f5701f2SFerruh Yigit 			evt_info("Port %u modified RSS hash function based on hardware support, "
7064f5701f2SFerruh Yigit 				"requested:%#"PRIx64" configured:%#"PRIx64"\n",
7074f5701f2SFerruh Yigit 				i,
7084f5701f2SFerruh Yigit 				port_conf.rx_adv_conf.rss_conf.rss_hf,
7094f5701f2SFerruh Yigit 				local_port_conf.rx_adv_conf.rss_conf.rss_hf);
7104f5701f2SFerruh Yigit 		}
7114f5701f2SFerruh Yigit 
7124f5701f2SFerruh Yigit 		if (rte_eth_dev_configure(i, 1, 1, &local_port_conf) < 0) {
7133fc8de4fSPavan Nikhilesh 			evt_err("Failed to configure eth port [%d]", i);
7143fc8de4fSPavan Nikhilesh 			return -EINVAL;
7153fc8de4fSPavan Nikhilesh 		}
7163fc8de4fSPavan Nikhilesh 
7173fc8de4fSPavan Nikhilesh 		if (rte_eth_rx_queue_setup(i, 0, NB_RX_DESC,
7183fc8de4fSPavan Nikhilesh 				rte_socket_id(), NULL, t->pool) < 0) {
7193fc8de4fSPavan Nikhilesh 			evt_err("Failed to setup eth port [%d] rx_queue: %d.",
7203fc8de4fSPavan Nikhilesh 					i, 0);
7213fc8de4fSPavan Nikhilesh 			return -EINVAL;
7223fc8de4fSPavan Nikhilesh 		}
7233fc8de4fSPavan Nikhilesh 
7243fc8de4fSPavan Nikhilesh 		if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC,
7253fc8de4fSPavan Nikhilesh 					rte_socket_id(), NULL) < 0) {
7263fc8de4fSPavan Nikhilesh 			evt_err("Failed to setup eth port [%d] tx_queue: %d.",
7273fc8de4fSPavan Nikhilesh 					i, 0);
7283fc8de4fSPavan Nikhilesh 			return -EINVAL;
7293fc8de4fSPavan Nikhilesh 		}
7303fc8de4fSPavan Nikhilesh 
73170e51a0eSIvan Ilchenko 		ret = rte_eth_promiscuous_enable(i);
73270e51a0eSIvan Ilchenko 		if (ret != 0) {
73370e51a0eSIvan Ilchenko 			evt_err("Failed to enable promiscuous mode for eth port [%d]: %s",
73470e51a0eSIvan Ilchenko 				i, rte_strerror(-ret));
73570e51a0eSIvan Ilchenko 			return ret;
73670e51a0eSIvan Ilchenko 		}
7373fc8de4fSPavan Nikhilesh 	}
7383fc8de4fSPavan Nikhilesh 
7393fc8de4fSPavan Nikhilesh 	return 0;
7403fc8de4fSPavan Nikhilesh }
7413fc8de4fSPavan Nikhilesh 
7427f3daf34SPavan Nikhilesh void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
7437f3daf34SPavan Nikhilesh {
7448728ccf3SThomas Monjalon 	uint16_t i;
7457f3daf34SPavan Nikhilesh 	RTE_SET_USED(test);
7467f3daf34SPavan Nikhilesh 
7477f3daf34SPavan Nikhilesh 	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
7488728ccf3SThomas Monjalon 		RTE_ETH_FOREACH_DEV(i) {
7493617aae5SPavan Nikhilesh 			rte_event_eth_rx_adapter_stop(i);
7507f3daf34SPavan Nikhilesh 			rte_eth_dev_stop(i);
7517f3daf34SPavan Nikhilesh 		}
7527f3daf34SPavan Nikhilesh 	}
7537f3daf34SPavan Nikhilesh }
7547f3daf34SPavan Nikhilesh 
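/*
 * Create the element pool: a plain mempool of zero-initialized perf_elt
 * objects for the synthetic and timer producers, or a pktmbuf pool when
 * events are produced by the ethernet Rx adapter.
 */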
75541c219e6SJerin Jacob int
75641c219e6SJerin Jacob perf_mempool_setup(struct evt_test *test, struct evt_options *opt)
75741c219e6SJerin Jacob {
75841c219e6SJerin Jacob 	struct test_perf *t = evt_test_priv(test);
75941c219e6SJerin Jacob 
760d008f20bSPavan Nikhilesh 	if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
761d008f20bSPavan Nikhilesh 			opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
76241c219e6SJerin Jacob 		t->pool = rte_mempool_create(test->name, /* mempool name */
76341c219e6SJerin Jacob 				opt->pool_sz, /* number of elements*/
76441c219e6SJerin Jacob 				sizeof(struct perf_elt), /* element size*/
76541c219e6SJerin Jacob 				512, /* cache size*/
76641c219e6SJerin Jacob 				0, NULL, NULL,
76741c219e6SJerin Jacob 				perf_elt_init, /* obj constructor */
76841c219e6SJerin Jacob 				NULL, opt->socket_id, 0); /* flags */
7698577cc1aSPavan Nikhilesh 	} else {
7708577cc1aSPavan Nikhilesh 		t->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */
7718577cc1aSPavan Nikhilesh 				opt->pool_sz, /* number of elements*/
7728577cc1aSPavan Nikhilesh 				512, /* cache size*/
7738577cc1aSPavan Nikhilesh 				0,
7748577cc1aSPavan Nikhilesh 				RTE_MBUF_DEFAULT_BUF_SIZE,
7758577cc1aSPavan Nikhilesh 				opt->socket_id); /* flags */
7768577cc1aSPavan Nikhilesh 
7778577cc1aSPavan Nikhilesh 	}
7788577cc1aSPavan Nikhilesh 
77941c219e6SJerin Jacob 	if (t->pool == NULL) {
78041c219e6SJerin Jacob 		evt_err("failed to create mempool");
78141c219e6SJerin Jacob 		return -ENOMEM;
78241c219e6SJerin Jacob 	}
78341c219e6SJerin Jacob 
78441c219e6SJerin Jacob 	return 0;
78541c219e6SJerin Jacob }
78641c219e6SJerin Jacob 
78741c219e6SJerin Jacob void
78841c219e6SJerin Jacob perf_mempool_destroy(struct evt_test *test, struct evt_options *opt)
78941c219e6SJerin Jacob {
79041c219e6SJerin Jacob 	RTE_SET_USED(opt);
79141c219e6SJerin Jacob 	struct test_perf *t = evt_test_priv(test);
79241c219e6SJerin Jacob 
79341c219e6SJerin Jacob 	rte_mempool_free(t->pool);
79441c219e6SJerin Jacob }
795ffbae86fSJerin Jacob 
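/*
 * Allocate the per-test private state and derive the total outstanding
 * packet budget from nb_timers or nb_pkts multiplied by the number of
 * producer lcores.
 */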
796ffbae86fSJerin Jacob int
797ffbae86fSJerin Jacob perf_test_setup(struct evt_test *test, struct evt_options *opt)
798ffbae86fSJerin Jacob {
799ffbae86fSJerin Jacob 	void *test_perf;
800ffbae86fSJerin Jacob 
801ffbae86fSJerin Jacob 	test_perf = rte_zmalloc_socket(test->name, sizeof(struct test_perf),
802ffbae86fSJerin Jacob 				RTE_CACHE_LINE_SIZE, opt->socket_id);
803ffbae86fSJerin Jacob 	if (test_perf  == NULL) {
804ffbae86fSJerin Jacob 		evt_err("failed to allocate test_perf memory");
805ffbae86fSJerin Jacob 		goto nomem;
806ffbae86fSJerin Jacob 	}
807ffbae86fSJerin Jacob 	test->test_priv = test_perf;
808ffbae86fSJerin Jacob 
809ffbae86fSJerin Jacob 	struct test_perf *t = evt_test_priv(test);
810ffbae86fSJerin Jacob 
811d008f20bSPavan Nikhilesh 	if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
812d008f20bSPavan Nikhilesh 		t->outstand_pkts = opt->nb_timers *
813d008f20bSPavan Nikhilesh 			evt_nr_active_lcores(opt->plcores);
814d008f20bSPavan Nikhilesh 		t->nb_pkts = opt->nb_timers;
815d008f20bSPavan Nikhilesh 	} else {
816d008f20bSPavan Nikhilesh 		t->outstand_pkts = opt->nb_pkts *
817d008f20bSPavan Nikhilesh 			evt_nr_active_lcores(opt->plcores);
818d008f20bSPavan Nikhilesh 		t->nb_pkts = opt->nb_pkts;
819d008f20bSPavan Nikhilesh 	}
820d008f20bSPavan Nikhilesh 
821ffbae86fSJerin Jacob 	t->nb_workers = evt_nr_active_lcores(opt->wlcores);
822ffbae86fSJerin Jacob 	t->done = false;
823ffbae86fSJerin Jacob 	t->nb_flows = opt->nb_flows;
824ffbae86fSJerin Jacob 	t->result = EVT_TEST_FAILED;
825ffbae86fSJerin Jacob 	t->opt = opt;
826ffbae86fSJerin Jacob 	memcpy(t->sched_type_list, opt->sched_type_list,
827ffbae86fSJerin Jacob 			sizeof(opt->sched_type_list));
828ffbae86fSJerin Jacob 	return 0;
829ffbae86fSJerin Jacob nomem:
830ffbae86fSJerin Jacob 	return -ENOMEM;
831ffbae86fSJerin Jacob }
832ffbae86fSJerin Jacob 
833ffbae86fSJerin Jacob void
834ffbae86fSJerin Jacob perf_test_destroy(struct evt_test *test, struct evt_options *opt)
835ffbae86fSJerin Jacob {
836ffbae86fSJerin Jacob 	RTE_SET_USED(opt);
837ffbae86fSJerin Jacob 
838ffbae86fSJerin Jacob 	rte_free(test->test_priv);
839ffbae86fSJerin Jacob }
840