/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#ifndef _TEST_PERF_COMMON_
#define _TEST_PERF_COMMON_

#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>

#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_timer_adapter.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>

#include "evt_common.h"
#include "evt_options.h"
#include "evt_test.h"

struct test_perf;

/* Per-worker state: packet and latency counters plus the event port polled. */
struct worker_data {
	uint64_t processed_pkts;
	uint64_t latency;
	uint8_t dev_id;
	uint8_t port_id;
	struct test_perf *t;
} __rte_cache_aligned;

/* Per-producer state: the event queue this producer lcore injects into. */
struct prod_data {
	uint8_t dev_id;
	uint8_t port_id;
	uint8_t queue_id;
	struct test_perf *t;
} __rte_cache_aligned;

struct test_perf {
	/* Don't change the offset of "done". The signal handler uses this
	 * field to tell all lcores to stop working.
	 */
	int done;
	uint64_t outstand_pkts;
	uint8_t nb_workers;
	enum evt_test_result result;
	uint32_t nb_flows;
	uint64_t nb_pkts;
	struct rte_mempool *pool;
	struct prod_data prod[EVT_MAX_PORTS];
	struct worker_data worker[EVT_MAX_PORTS];
	struct evt_options *opt;
	uint8_t sched_type_list[EVT_MAX_STAGES] __rte_cache_aligned;
	struct rte_event_timer_adapter *timer_adptr[
		RTE_EVENT_TIMER_ADAPTER_NUM_MAX] __rte_cache_aligned;
} __rte_cache_aligned;
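
/* Illustrative guard, not part of the original header: the comment above
 * requires "done" to stay at offset zero so the signal handler can clear it
 * through a bare pointer. A build-time check along these lines would catch
 * accidental reordering (offsetof is already available via the includes above).
 */
_Static_assert(offsetof(struct test_perf, done) == 0,
	       "signal handler expects 'done' at offset 0");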

/* Element carried through the pipeline. For timer producers it is an
 * rte_event_timer; the pad places "timestamp" over the timer's user_meta
 * area so a TSC stamp can be stored in either mode.
 */
struct perf_elt {
	union {
		struct rte_event_timer tim;
		struct {
			char pad[offsetof(struct rte_event_timer, user_meta)];
			uint64_t timestamp;
		};
	};
} __rte_cache_aligned;
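
/* Illustrative check, not in the original: it merely documents the aliasing
 * that the pad above is designed to guarantee.
 */
_Static_assert(offsetof(struct perf_elt, timestamp) ==
	       offsetof(struct rte_event_timer, user_meta),
	       "timestamp must overlay rte_event_timer's user_meta");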

#define BURST_SIZE 16
#define MAX_PROD_ENQ_BURST_SIZE 128

#define PERF_WORKER_INIT\
	struct worker_data *w = arg;\
	struct test_perf *t = w->t;\
	struct evt_options *opt = t->opt;\
	const uint8_t dev = w->dev_id;\
	const uint8_t port = w->port_id;\
	const uint8_t prod_timer_type = \
		opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR;\
	uint8_t *const sched_type_list = &t->sched_type_list[0];\
	struct rte_mempool *const pool = t->pool;\
	const uint8_t nb_stages = t->opt->nb_stages;\
	const uint8_t laststage = nb_stages - 1;\
	uint8_t cnt = 0;\
	void *bufs[BURST_SIZE] __rte_cache_aligned;\
	int const sz = RTE_DIM(bufs);\
	if (opt->verbose_level > 1)\
		printf("%s(): lcore %d dev_id %d port=%d\n", __func__,\
				rte_lcore_id(), dev, port)

static __rte_always_inline uint8_t
perf_process_last_stage(struct rte_mempool *const pool,
		struct rte_event *const ev, struct worker_data *const w,
		void *bufs[], int const buf_sz, uint8_t count)
{
	bufs[count++] = ev->event_ptr;

	/* The release fence ensures event_ptr is stored before the worker's
	 * processed packet count is updated.
	 */
	rte_atomic_thread_fence(__ATOMIC_RELEASE);
	w->processed_pkts++;

	if (unlikely(count == buf_sz)) {
		count = 0;
		rte_mempool_put_bulk(pool, bufs, buf_sz);
	}
	return count;
}

static __rte_always_inline uint8_t
perf_process_last_stage_latency(struct rte_mempool *const pool,
		struct rte_event *const ev, struct worker_data *const w,
		void *bufs[], int const buf_sz, uint8_t count)
{
	uint64_t latency;
	struct perf_elt *const m = ev->event_ptr;

	bufs[count++] = ev->event_ptr;

	/* The release fence ensures event_ptr is stored before the worker's
	 * processed packet count is updated.
	 */
	rte_atomic_thread_fence(__ATOMIC_RELEASE);
	w->processed_pkts++;

	/* Read the producer timestamp before the element can be returned to
	 * the mempool.
	 */
	latency = rte_get_timer_cycles() - m->timestamp;
	if (unlikely(count == buf_sz)) {
		count = 0;
		rte_mempool_put_bulk(pool, bufs, buf_sz);
	}

	w->latency += latency;
	return count;
}
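
/* Illustrative sketch, not part of the original header: how PERF_WORKER_INIT
 * and the helpers above typically compose into a worker loop. The real workers
 * live in test_perf_queue.c/test_perf_atq.c; perf_worker_sketch() and its
 * exact stage handling here are hypothetical.
 */
static __rte_always_inline int
perf_worker_sketch(void *arg)
{
	PERF_WORKER_INIT;
	struct rte_event ev;

	RTE_SET_USED(prod_timer_type); /* unused in this simplified loop */

	while (t->done == 0) {
		uint16_t nb_rx = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (nb_rx == 0)
			continue;
		if (ev.queue_id % nb_stages == laststage) {
			/* Last stage: recycle the element and count it. */
			cnt = perf_process_last_stage(pool, &ev, w, bufs,
						      sz, cnt);
		} else {
			/* Intermediate stage: forward to the next queue. */
			ev.queue_id++;
			ev.sched_type = sched_type_list[ev.queue_id % nb_stages];
			ev.op = RTE_EVENT_OP_FORWARD;
			while (rte_event_enqueue_burst(dev, port, &ev, 1) != 1)
				;
		}
	}
	/* Return any elements still batched in bufs[] to the pool. */
	if (cnt)
		rte_mempool_put_bulk(pool, bufs, cnt);
	return 0;
}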

/* Total event ports: one per worker lcore plus one per producer lcore. */
static inline int
perf_nb_event_ports(struct evt_options *opt)
{
	return evt_nr_active_lcores(opt->wlcores) +
			evt_nr_active_lcores(opt->plcores);
}

int perf_test_result(struct evt_test *test, struct evt_options *opt);
int perf_opt_check(struct evt_options *opt, uint64_t nb_queues);
int perf_test_setup(struct evt_test *test, struct evt_options *opt);
int perf_ethdev_setup(struct evt_test *test, struct evt_options *opt);
int perf_mempool_setup(struct evt_test *test, struct evt_options *opt);
int perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
				uint8_t stride, uint8_t nb_queues,
				const struct rte_event_port_conf *port_conf);
int perf_event_dev_service_setup(uint8_t dev_id);
int perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *));
void perf_opt_dump(struct evt_options *opt, uint8_t nb_queues);
void perf_test_destroy(struct evt_test *test, struct evt_options *opt);
void perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt);
void perf_mempool_destroy(struct evt_test *test, struct evt_options *opt);
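
/* Typical call order, sketched from how the perf tests wire these helpers
 * into their evt_test_ops (see test_perf_queue.c and test_perf_atq.c for the
 * authoritative sequence):
 *
 *   perf_test_setup()               allocate and initialize struct test_perf
 *   perf_mempool_setup()            create the pool of perf_elt elements
 *   perf_ethdev_setup()             only for ethdev Rx adapter producer types
 *   perf_event_dev_port_setup()     configure producer and worker ports
 *   perf_event_dev_service_setup()  map a service core if the PMD needs one
 *   perf_launch_lcores()            run producers and workers until done
 *   perf_test_result()              report, then release resources with the
 *                                   perf_*_destroy() helpers
 */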

#endif /* _TEST_PERF_COMMON_ */