xref: /dpdk/app/test-eventdev/test_pipeline_common.h (revision f12c41bf4074efb438fc21ab7db13f011f5a1e84)
/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 Cavium, Inc.
 */

#ifndef _TEST_PIPELINE_COMMON_
#define _TEST_PIPELINE_COMMON_

#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>

#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_eventdev.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
#include <rte_service.h>
#include <rte_service_component.h>
#include <rte_spinlock.h>

#include "evt_common.h"
#include "evt_options.h"
#include "evt_test.h"

struct test_pipeline;

/* Per-worker state; one instance per worker lcore/event port. */
struct worker_data {
	uint64_t processed_pkts;
	uint8_t dev_id;
	uint8_t port_id;
	struct test_pipeline *t;
} __rte_cache_aligned;

struct test_pipeline {
	/* Don't change the offset of "done". The signal handler uses this
	 * field to tell all worker lcores to stop.
	 */
	int done;
	uint8_t nb_workers;
	uint8_t internal_port;
	uint8_t tx_evqueue_id[RTE_MAX_ETHPORTS];
	enum evt_test_result result;
	uint32_t nb_flows;
	uint64_t outstand_pkts;
	struct rte_mempool *pool[RTE_MAX_ETHPORTS];
	struct worker_data worker[EVT_MAX_PORTS];
	struct evt_options *opt;
	uint8_t sched_type_list[EVT_MAX_STAGES] __rte_cache_aligned;
} __rte_cache_aligned;

#define BURST_SIZE 16

/* Common per-worker local-variable declarations shared by the worker loops. */
#define PIPELINE_WORKER_SINGLE_STAGE_INIT \
	struct worker_data *w  = arg;     \
	struct test_pipeline *t = w->t;   \
	const uint8_t dev = w->dev_id;    \
	const uint8_t port = w->port_id;  \
	struct rte_event ev __rte_cache_aligned

#define PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT \
	int i;                                  \
	struct worker_data *w  = arg;           \
	struct test_pipeline *t = w->t;         \
	const uint8_t dev = w->dev_id;          \
	const uint8_t port = w->port_id;        \
	struct rte_event ev[BURST_SIZE + 1] __rte_cache_aligned

#define PIPELINE_WORKER_MULTI_STAGE_INIT                         \
	struct worker_data *w  = arg;                            \
	struct test_pipeline *t = w->t;                          \
	uint8_t cq_id;                                           \
	const uint8_t dev = w->dev_id;                           \
	const uint8_t port = w->port_id;                         \
	const uint8_t last_queue = t->opt->nb_stages - 1;        \
	uint8_t *const sched_type_list = &t->sched_type_list[0]; \
	const uint8_t nb_stages = t->opt->nb_stages + 1;         \
	struct rte_event ev __rte_cache_aligned

#define PIPELINE_WORKER_MULTI_STAGE_BURST_INIT                   \
	int i;                                                   \
	struct worker_data *w  = arg;                            \
	struct test_pipeline *t = w->t;                          \
	uint8_t cq_id;                                           \
	const uint8_t dev = w->dev_id;                           \
	const uint8_t port = w->port_id;                         \
	const uint8_t last_queue = t->opt->nb_stages - 1;        \
	uint8_t *const sched_type_list = &t->sched_type_list[0]; \
	const uint8_t nb_stages = t->opt->nb_stages + 1;         \
	struct rte_event ev[BURST_SIZE + 1] __rte_cache_aligned
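
/*
 * Illustrative sketch (not part of the original header): a minimal
 * single-stage worker built from the macro above and the Tx/cleanup helpers
 * defined below. The function name and the bare-bones loop are assumptions;
 * the real workers additionally handle forwarding on non-atomic schedule
 * types and vector events.
 *
 *	static int worker_single_stage_tx(void *arg)
 *	{
 *		PIPELINE_WORKER_SINGLE_STAGE_INIT;
 *		uint8_t enq = 0, deq = 0;
 *
 *		while (!t->done) {
 *			deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
 *			if (!deq) {
 *				rte_pause();
 *				continue;
 *			}
 *			enq = pipeline_event_tx(dev, port, &ev, t);
 *			w->processed_pkts++;
 *		}
 *		pipeline_worker_cleanup(dev, port, &ev, enq, deq);
 *		return 0;
 *	}
 */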

/* Re-mark a dequeued event as CPU-originated and forward it with the
 * requested scheduling type.
 */
static __rte_always_inline void
pipeline_fwd_event(struct rte_event *ev, uint8_t sched)
{
	ev->event_type = RTE_EVENT_TYPE_CPU;
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->sched_type = sched;
}

/* Same as above, but for event vectors. */
static __rte_always_inline void
pipeline_fwd_event_vector(struct rte_event *ev, uint8_t sched)
{
	ev->event_type = RTE_EVENT_TYPE_CPU_VECTOR;
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->sched_type = sched;
}

/* Send one mbuf event through the Tx adapter, retrying until it is accepted
 * or the test is signalled done.
 */
static __rte_always_inline uint8_t
pipeline_event_tx(const uint8_t dev, const uint8_t port,
		  struct rte_event *const ev, struct test_pipeline *t)
{
	uint8_t enq;

	rte_event_eth_tx_adapter_txq_set(ev->mbuf, 0);
	do {
		enq = rte_event_eth_tx_adapter_enqueue(dev, port, ev, 1, 0);
	} while (!enq && !t->done);

	return enq;
}

/* Vector variant of pipeline_event_tx(). */
static __rte_always_inline uint8_t
pipeline_event_tx_vector(const uint8_t dev, const uint8_t port,
			 struct rte_event *const ev, struct test_pipeline *t)
{
	uint8_t enq;

	ev->vec->queue = 0;
	do {
		enq = rte_event_eth_tx_adapter_enqueue(dev, port, ev, 1, 0);
	} while (!enq && !t->done);

	return enq;
}
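
/*
 * Illustrative sketch (not part of the original header): a worker that may
 * receive either mbuf or vector events can pick the matching Tx helper by
 * testing the RTE_EVENT_TYPE_VECTOR bit of the event type. Treat this as an
 * example of how the two helpers above are meant to be combined, not as the
 * canonical implementation.
 *
 *	if (ev.event_type & RTE_EVENT_TYPE_VECTOR)
 *		enq = pipeline_event_tx_vector(dev, port, &ev, t);
 *	else
 *		enq = pipeline_event_tx(dev, port, &ev, t);
 */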

/* Burst Tx through the adapter; keep retrying the remainder until all
 * events are accepted or the test is signalled done.
 */
static __rte_always_inline uint16_t
pipeline_event_tx_burst(const uint8_t dev, const uint8_t port,
			struct rte_event *ev, const uint16_t nb_rx,
			struct test_pipeline *t)
{
	uint16_t enq;

	enq = rte_event_eth_tx_adapter_enqueue(dev, port, ev, nb_rx, 0);
	while (enq < nb_rx && !t->done) {
		enq += rte_event_eth_tx_adapter_enqueue(dev, port,
				ev + enq, nb_rx - enq, 0);
	}

	return enq;
}

/* Enqueue a single event back to the event device, retrying until it is
 * accepted or the test is signalled done.
 */
static __rte_always_inline uint8_t
pipeline_event_enqueue(const uint8_t dev, const uint8_t port,
		       struct rte_event *ev, struct test_pipeline *t)
{
	uint8_t enq;

	do {
		enq = rte_event_enqueue_burst(dev, port, ev, 1);
	} while (!enq && !t->done);

	return enq;
}

/* Burst enqueue back to the event device; keep retrying the remainder until
 * all events are accepted or the test is signalled done.
 */
static __rte_always_inline uint16_t
pipeline_event_enqueue_burst(const uint8_t dev, const uint8_t port,
			     struct rte_event *ev, const uint16_t nb_rx,
			     struct test_pipeline *t)
{
	uint16_t enq;

	enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
	while (enq < nb_rx && !t->done) {
		enq += rte_event_enqueue_burst(dev, port,
						ev + enq, nb_rx - enq);
	}

	return enq;
}
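
/*
 * Illustrative sketch (not part of the original header): the inner loop of a
 * multi-stage burst worker built from PIPELINE_WORKER_MULTI_STAGE_BURST_INIT
 * and the helpers above. The function name and the exact last-stage handling
 * are assumptions based on how the pipeline tests typically chain stages;
 * statistics accounting is omitted for brevity.
 *
 *	static int worker_multi_stage_burst_fwd(void *arg)
 *	{
 *		PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
 *		const uint8_t *tx_queue = t->tx_evqueue_id;
 *		uint16_t nb_rx = 0, enq = 0;
 *
 *		while (!t->done) {
 *			nb_rx = rte_event_dequeue_burst(dev, port, ev,
 *							BURST_SIZE, 0);
 *			if (!nb_rx) {
 *				rte_pause();
 *				continue;
 *			}
 *			for (i = 0; i < nb_rx; i++) {
 *				cq_id = ev[i].queue_id % nb_stages;
 *				if (cq_id == last_queue) {
 *					ev[i].queue_id = tx_queue[ev[i].mbuf->port];
 *					rte_event_eth_tx_adapter_txq_set(ev[i].mbuf, 0);
 *					pipeline_fwd_event(&ev[i], RTE_SCHED_TYPE_ATOMIC);
 *				} else {
 *					ev[i].queue_id++;
 *					pipeline_fwd_event(&ev[i], sched_type_list[cq_id]);
 *				}
 *			}
 *			enq = pipeline_event_enqueue_burst(dev, port, ev, nb_rx, t);
 *		}
 *		pipeline_worker_cleanup(dev, port, ev, enq, nb_rx);
 *		return 0;
 *	}
 */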

/* One event port is set up per worker lcore. */
static inline int
pipeline_nb_event_ports(struct evt_options *opt)
{
	return evt_nr_active_lcores(opt->wlcores);
}

/* Setup, teardown and dump helpers shared by the pipeline test variants. */
int pipeline_test_result(struct evt_test *test, struct evt_options *opt);
int pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues);
int pipeline_test_setup(struct evt_test *test, struct evt_options *opt);
int pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt);
int pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
		struct rte_event_port_conf prod_conf);
int pipeline_event_tx_adapter_setup(struct evt_options *opt,
		struct rte_event_port_conf prod_conf);
int pipeline_mempool_setup(struct evt_test *test, struct evt_options *opt);
int pipeline_event_port_setup(struct evt_test *test, struct evt_options *opt,
		uint8_t *queue_arr, uint8_t nb_queues,
		const struct rte_event_port_conf p_conf);
int pipeline_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *));
void pipeline_opt_dump(struct evt_options *opt, uint8_t nb_queues);
void pipeline_test_destroy(struct evt_test *test, struct evt_options *opt);
void pipeline_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
void pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt);
void pipeline_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt);
void pipeline_mempool_destroy(struct evt_test *test, struct evt_options *opt);
void pipeline_worker_cleanup(uint8_t dev, uint8_t port, struct rte_event ev[],
			     uint16_t enq, uint16_t deq);
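
/*
 * Illustrative sketch (not part of the original header): a concrete pipeline
 * test typically wires the helpers above into a struct evt_test_ops from
 * evt_test.h and registers it with EVT_TEST_REGISTER(). The names prefixed
 * with "my_pipeline_" are hypothetical; members whose callback signatures
 * differ from the helpers above (opt_check, opt_dump, eventdev_setup,
 * launch_lcores) need thin test-specific wrappers.
 *
 *	static const struct evt_test_ops my_pipeline = {
 *		.opt_check        = my_pipeline_opt_check,
 *		.opt_dump         = my_pipeline_opt_dump,
 *		.test_setup       = pipeline_test_setup,
 *		.mempool_setup    = pipeline_mempool_setup,
 *		.ethdev_setup     = pipeline_ethdev_setup,
 *		.eventdev_setup   = my_pipeline_eventdev_setup,
 *		.launch_lcores    = my_pipeline_launch_lcores,
 *		.ethdev_rx_stop   = pipeline_ethdev_rx_stop,
 *		.eventdev_destroy = pipeline_eventdev_destroy,
 *		.mempool_destroy  = pipeline_mempool_destroy,
 *		.ethdev_destroy   = pipeline_ethdev_destroy,
 *		.test_result      = pipeline_test_result,
 *		.test_destroy     = pipeline_test_destroy,
 *	};
 *	EVT_TEST_REGISTER(my_pipeline);
 */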

#endif /* _TEST_PIPELINE_COMMON_ */