1 /*
2 * SPDX-License-Identifier: BSD-3-Clause
3 * Copyright 2017 Cavium, Inc.
4 */
5
6 #ifndef _TEST_PIPELINE_COMMON_
7 #define _TEST_PIPELINE_COMMON_
8
9 #include <stdio.h>
10 #include <stdbool.h>
11 #include <unistd.h>
12
13 #include <rte_cycles.h>
14 #include <rte_ethdev.h>
15 #include <rte_ether.h>
16 #include <rte_event_eth_rx_adapter.h>
17 #include <rte_event_eth_tx_adapter.h>
18 #include <rte_eventdev.h>
19 #include <rte_lcore.h>
20 #include <rte_malloc.h>
21 #include <rte_mempool.h>
22 #include <rte_prefetch.h>
23 #include <rte_service.h>
24 #include <rte_service_component.h>
25 #include <rte_spinlock.h>
26 #include <rte_udp.h>
27
28 #include "evt_common.h"
29 #include "evt_options.h"
30 #include "evt_test.h"
31
32 struct test_pipeline;
33
/* Per-lcore worker state; one instance per active worker lcore. */
struct __rte_cache_aligned worker_data {
	uint64_t processed_pkts;	/* events handled by this worker */
	uint8_t dev_id;			/* event device this worker uses */
	uint8_t port_id;		/* event port owned by this worker */
	struct test_pipeline *t;	/* back-pointer to the test context */
};
40
/* Global context shared by all worker lcores of a pipeline test run. */
struct __rte_cache_aligned test_pipeline {
	/* Don't change the offset of "done". Signal handler uses this memory
	 * to terminate all lcores work.
	 */
	int done;			/* set non-zero to stop all workers */
	uint8_t nb_workers;		/* number of active worker lcores */
	uint8_t internal_port;		/* device has internal Tx port capability — presumably; confirm in setup code */
	uint8_t tx_evqueue_id[RTE_MAX_ETHPORTS];	/* per-ethdev-port Tx event queue id */
	enum evt_test_result result;	/* overall test outcome */
	uint32_t nb_flows;		/* number of flows exercised */
	uint64_t outstand_pkts;		/* packets still in flight — TODO confirm exact accounting */
	struct rte_mempool *pool[RTE_MAX_ETHPORTS];	/* per-port mbuf pools */
	struct worker_data worker[EVT_MAX_PORTS];	/* per-worker state */
	struct evt_options *opt;	/* parsed command-line options */
	/* Own cache line: read-only schedule-type table for the hot path. */
	alignas(RTE_CACHE_LINE_SIZE) uint8_t sched_type_list[EVT_MAX_STAGES];
};
57
58 #define BURST_SIZE 16
59
/* Common local-variable prologue for single-stage, single-event workers.
 * Expects `arg` (the worker's struct worker_data *) to be in scope.
 */
#define PIPELINE_WORKER_SINGLE_STAGE_INIT \
	struct worker_data *w = arg;      \
	struct test_pipeline *t = w->t;   \
	const uint8_t dev = w->dev_id;    \
	const uint8_t port = w->port_id;  \
	alignas(RTE_CACHE_LINE_SIZE) struct rte_event ev
66
/* Prologue for single-stage burst workers; declares an event array of
 * BURST_SIZE + 1 (extra slot — presumably headroom for one carried-over
 * event; confirm against the worker implementations).
 */
#define PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT \
	int i;                                  \
	struct worker_data *w = arg;            \
	struct test_pipeline *t = w->t;         \
	const uint8_t dev = w->dev_id;          \
	const uint8_t port = w->port_id;        \
	alignas(RTE_CACHE_LINE_SIZE) struct rte_event ev[BURST_SIZE + 1]
74
/* Prologue for multi-stage, single-event workers. Derives per-stage
 * bookkeeping (last_queue, sched_type_list, nb_stages) from the options.
 * Note nb_stages is opt->nb_stages + 1 — presumably to account for the
 * final Tx stage; confirm against the stage-advance logic in the workers.
 */
#define PIPELINE_WORKER_MULTI_STAGE_INIT                         \
	struct worker_data *w = arg;                             \
	struct test_pipeline *t = w->t;                          \
	uint8_t cq_id;                                           \
	const uint8_t dev = w->dev_id;                           \
	const uint8_t port = w->port_id;                         \
	const uint8_t last_queue = t->opt->nb_stages - 1;        \
	uint8_t *const sched_type_list = &t->sched_type_list[0]; \
	const uint8_t nb_stages = t->opt->nb_stages + 1;         \
	alignas(RTE_CACHE_LINE_SIZE) struct rte_event ev
85
/* Burst variant of PIPELINE_WORKER_MULTI_STAGE_INIT: same derived
 * per-stage locals plus a BURST_SIZE + 1 event array and loop index.
 */
#define PIPELINE_WORKER_MULTI_STAGE_BURST_INIT                   \
	int i;                                                   \
	struct worker_data *w = arg;                             \
	struct test_pipeline *t = w->t;                          \
	uint8_t cq_id;                                           \
	const uint8_t dev = w->dev_id;                           \
	const uint8_t port = w->port_id;                         \
	const uint8_t last_queue = t->opt->nb_stages - 1;        \
	uint8_t *const sched_type_list = &t->sched_type_list[0]; \
	const uint8_t nb_stages = t->opt->nb_stages + 1;         \
	alignas(RTE_CACHE_LINE_SIZE) struct rte_event ev[BURST_SIZE + 1]
97
98 static __rte_always_inline void
pipeline_fwd_event(struct rte_event * ev,uint8_t sched)99 pipeline_fwd_event(struct rte_event *ev, uint8_t sched)
100 {
101 ev->event_type = RTE_EVENT_TYPE_CPU;
102 ev->op = RTE_EVENT_OP_FORWARD;
103 ev->sched_type = sched;
104 }
105
106 static __rte_always_inline void
pipeline_fwd_event_vector(struct rte_event * ev,uint8_t sched)107 pipeline_fwd_event_vector(struct rte_event *ev, uint8_t sched)
108 {
109 ev->event_type = RTE_EVENT_TYPE_CPU_VECTOR;
110 ev->op = RTE_EVENT_OP_FORWARD;
111 ev->sched_type = sched;
112 }
113
114 static __rte_always_inline uint8_t
pipeline_event_tx(const uint8_t dev,const uint8_t port,struct rte_event * const ev,struct test_pipeline * t)115 pipeline_event_tx(const uint8_t dev, const uint8_t port,
116 struct rte_event *const ev, struct test_pipeline *t)
117 {
118 uint8_t enq;
119
120 rte_event_eth_tx_adapter_txq_set(ev->mbuf, 0);
121 do {
122 enq = rte_event_eth_tx_adapter_enqueue(dev, port, ev, 1, 0);
123 } while (!enq && !t->done);
124
125 return enq;
126 }
127
128 static __rte_always_inline uint8_t
pipeline_event_tx_vector(const uint8_t dev,const uint8_t port,struct rte_event * const ev,struct test_pipeline * t)129 pipeline_event_tx_vector(const uint8_t dev, const uint8_t port,
130 struct rte_event *const ev, struct test_pipeline *t)
131 {
132 uint8_t enq;
133
134 ev->vec->queue = 0;
135 do {
136 enq = rte_event_eth_tx_adapter_enqueue(dev, port, ev, 1, 0);
137 } while (!enq && !t->done);
138
139 return enq;
140 }
141
142 static __rte_always_inline uint16_t
pipeline_event_tx_burst(const uint8_t dev,const uint8_t port,struct rte_event * ev,const uint16_t nb_rx,struct test_pipeline * t)143 pipeline_event_tx_burst(const uint8_t dev, const uint8_t port,
144 struct rte_event *ev, const uint16_t nb_rx,
145 struct test_pipeline *t)
146 {
147 uint16_t enq;
148
149 enq = rte_event_eth_tx_adapter_enqueue(dev, port, ev, nb_rx, 0);
150 while (enq < nb_rx && !t->done) {
151 enq += rte_event_eth_tx_adapter_enqueue(dev, port,
152 ev + enq, nb_rx - enq, 0);
153 }
154
155 return enq;
156 }
157
158 static __rte_always_inline uint8_t
pipeline_event_enqueue(const uint8_t dev,const uint8_t port,struct rte_event * ev,struct test_pipeline * t)159 pipeline_event_enqueue(const uint8_t dev, const uint8_t port,
160 struct rte_event *ev, struct test_pipeline *t)
161 {
162 uint8_t enq;
163
164 do {
165 enq = rte_event_enqueue_burst(dev, port, ev, 1);
166 } while (!enq && !t->done);
167
168 return enq;
169 }
170
171 static __rte_always_inline uint16_t
pipeline_event_enqueue_burst(const uint8_t dev,const uint8_t port,struct rte_event * ev,const uint16_t nb_rx,struct test_pipeline * t)172 pipeline_event_enqueue_burst(const uint8_t dev, const uint8_t port,
173 struct rte_event *ev, const uint16_t nb_rx,
174 struct test_pipeline *t)
175 {
176 uint16_t enq;
177
178 enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
179 while (enq < nb_rx && !t->done) {
180 enq += rte_event_enqueue_burst(dev, port,
181 ev + enq, nb_rx - enq);
182 }
183
184 return enq;
185 }
186
187
/* Number of event ports needed: one per active worker lcore. */
static inline int
pipeline_nb_event_ports(struct evt_options *opt)
{
	return evt_nr_active_lcores(opt->wlcores);
}
193
/* Shared helpers implemented in the companion .c file and used by the
 * pipeline test variants. Setup routines return 0 on success, negative
 * on error — presumably; confirm against the implementations.
 */
int pipeline_test_result(struct evt_test *test, struct evt_options *opt);
int pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues);
int pipeline_test_setup(struct evt_test *test, struct evt_options *opt);
int pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt);
/* Rx/Tx adapter plumbing between ethdev ports and the event device. */
int pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
		struct rte_event_port_conf prod_conf);
int pipeline_event_tx_adapter_setup(struct evt_options *opt,
		struct rte_event_port_conf prod_conf);
int pipeline_mempool_setup(struct evt_test *test, struct evt_options *opt);
int pipeline_event_port_setup(struct evt_test *test, struct evt_options *opt,
		uint8_t *queue_arr, uint8_t nb_queues,
		const struct rte_event_port_conf p_conf);
int pipeline_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *));
void pipeline_opt_dump(struct evt_options *opt, uint8_t nb_queues);
/* Teardown helpers, mirroring the setup routines above. */
void pipeline_test_destroy(struct evt_test *test, struct evt_options *opt);
void pipeline_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
void pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt);
void pipeline_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt);
void pipeline_mempool_destroy(struct evt_test *test, struct evt_options *opt);
/* Drain/release events still held by a worker port at shutdown. */
void pipeline_worker_cleanup(uint8_t dev, uint8_t port, struct rte_event ev[],
		uint16_t enq, uint16_t deq);
216
217 #endif /* _TEST_PIPELINE_COMMON_ */
218