xref: /dpdk/app/test-eventdev/test_pipeline_common.h (revision 89f0711f9ddfb5822da9d34f384b92f72a61c4dc)
1 /*
2  * SPDX-License-Identifier: BSD-3-Clause
3  * Copyright 2017 Cavium, Inc.
4  */
5 
6 #ifndef _TEST_PIPELINE_COMMON_
7 #define _TEST_PIPELINE_COMMON_
8 
9 #include <stdio.h>
10 #include <stdbool.h>
11 #include <unistd.h>
12 
13 #include <rte_cycles.h>
14 #include <rte_ethdev.h>
15 #include <rte_eventdev.h>
16 #include <rte_event_eth_rx_adapter.h>
17 #include <rte_lcore.h>
18 #include <rte_malloc.h>
19 #include <rte_mempool.h>
20 #include <rte_prefetch.h>
21 #include <rte_spinlock.h>
22 #include <rte_service.h>
23 #include <rte_service_component.h>
24 
25 #include "evt_common.h"
26 #include "evt_options.h"
27 #include "evt_test.h"
28 
29 struct test_pipeline;
30 
/* Per-lcore worker context; one instance per worker event port. */
struct worker_data {
	uint64_t processed_pkts; /* events handled by this worker */
	uint8_t dev_id;          /* event device id */
	uint8_t port_id;         /* event port owned by this worker */
	struct test_pipeline *t; /* back-pointer to the test context */
} __rte_cache_aligned;
37 
/* Context for the dedicated Tx service core.
 * NOTE(review): presumably used when the ethdev is mt_unsafe (see
 * test_pipeline.mt_unsafe) so that a single service serializes Tx —
 * confirm against test_pipeline_common.c.
 */
struct tx_service_data {
	uint8_t dev_id;     /* event device id */
	uint8_t queue_id;   /* event queue the Tx service dequeues from */
	uint8_t port_id;    /* event port used by the Tx service */
	uint32_t service_id; /* id of the registered rte_service */
	uint64_t processed_pkts; /* packets transmitted by the service */
	uint16_t nb_ethports;    /* number of ethdev ports in use */
	/* One buffered-Tx handle per ethdev port. */
	struct rte_eth_dev_tx_buffer *tx_buf[RTE_MAX_ETHPORTS];
	struct test_pipeline *t; /* back-pointer to the test context */
} __rte_cache_aligned;
48 
/* Top-level state for one pipeline test run. */
struct test_pipeline {
	/* Don't change the offset of "done". Signal handler use this memory
	 * to terminate all lcores work.
	 */
	int done;                   /* set non-zero to stop all lcores */
	uint8_t nb_workers;         /* number of worker lcores/ports */
	uint8_t mt_unsafe;          /* ethdev Tx is not multi-thread safe */
	enum evt_test_result result; /* final pass/fail verdict */
	uint32_t nb_flows;          /* number of flows in the test */
	uint64_t outstand_pkts;     /* packets still in flight */
	struct rte_mempool *pool;   /* mbuf pool for producers */
	struct worker_data worker[EVT_MAX_PORTS]; /* per-worker contexts */
	struct tx_service_data tx_service;        /* Tx service context */
	struct evt_options *opt;    /* parsed command-line options */
	/* Per-stage schedule types; cache aligned as it is read per event. */
	uint8_t sched_type_list[EVT_MAX_STAGES] __rte_cache_aligned;
} __rte_cache_aligned;
65 
/* Number of events moved per burst dequeue/enqueue call. */
#define BURST_SIZE 16
67 
/* Common local-variable prologue for single-stage, single-event worker
 * loops. Expands to declarations only; usable at the top of a function.
 * NOTE(review): "WROKER" is a typo for "WORKER", but the name is used by
 * sibling test files, so it cannot be renamed here alone.
 */
#define PIPELINE_WROKER_SINGLE_STAGE_INIT \
	struct worker_data *w  = arg;     \
	struct test_pipeline *t = w->t;   \
	const uint8_t dev = w->dev_id;    \
	const uint8_t port = w->port_id;  \
	struct rte_event ev
74 
/* Prologue for single-stage, burst-mode workers: same as the single-event
 * variant plus a loop index and an event array of BURST_SIZE + 1 entries.
 */
#define PIPELINE_WROKER_SINGLE_STAGE_BURST_INIT \
	int i;                                  \
	struct worker_data *w  = arg;           \
	struct test_pipeline *t = w->t;         \
	const uint8_t dev = w->dev_id;          \
	const uint8_t port = w->port_id;        \
	struct rte_event ev[BURST_SIZE + 1]
82 
/* Prologue for multi-stage, single-event workers: adds the current-stage
 * queue id, the index of the last pipeline stage and a pointer to the
 * per-stage schedule-type table.
 */
#define PIPELINE_WROKER_MULTI_STAGE_INIT                         \
	struct worker_data *w  = arg;                            \
	struct test_pipeline *t = w->t;                          \
	uint8_t cq_id;                                           \
	const uint8_t dev = w->dev_id;                           \
	const uint8_t port = w->port_id;                         \
	const uint8_t last_queue = t->opt->nb_stages - 1;        \
	uint8_t *const sched_type_list = &t->sched_type_list[0]; \
	struct rte_event ev
92 
/* Prologue for multi-stage, burst-mode workers: multi-stage locals plus a
 * loop index and an event array of BURST_SIZE + 1 entries.
 * (Fix: align the "int i;" continuation backslash with the rest of the
 * macro, matching the sibling burst macro's layout.)
 */
#define PIPELINE_WROKER_MULTI_STAGE_BURST_INIT                   \
	int i;                                                   \
	struct worker_data *w  = arg;                            \
	struct test_pipeline *t = w->t;                          \
	uint8_t cq_id;                                           \
	const uint8_t dev = w->dev_id;                           \
	const uint8_t port = w->port_id;                         \
	const uint8_t last_queue = t->opt->nb_stages - 1;        \
	uint8_t *const sched_type_list = &t->sched_type_list[0]; \
	struct rte_event ev[BURST_SIZE + 1]
103 
104 static __rte_always_inline void
105 pipeline_fwd_event(struct rte_event *ev, uint8_t sched)
106 {
107 	ev->event_type = RTE_EVENT_TYPE_CPU;
108 	ev->op = RTE_EVENT_OP_FORWARD;
109 	ev->sched_type = sched;
110 }
111 
112 static __rte_always_inline void
113 pipeline_event_enqueue(const uint8_t dev, const uint8_t port,
114 		struct rte_event *ev)
115 {
116 	while (rte_event_enqueue_burst(dev, port, ev, 1) != 1)
117 		rte_pause();
118 }
119 
120 static __rte_always_inline void
121 pipeline_event_enqueue_burst(const uint8_t dev, const uint8_t port,
122 		struct rte_event *ev, const uint16_t nb_rx)
123 {
124 	uint16_t enq;
125 
126 	enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
127 	while (enq < nb_rx) {
128 		enq += rte_event_enqueue_burst(dev, port,
129 						ev + enq, nb_rx - enq);
130 	}
131 }
132 
133 static __rte_always_inline void
134 pipeline_tx_pkt(struct rte_mbuf *mbuf)
135 {
136 	while (rte_eth_tx_burst(mbuf->port, 0, &mbuf, 1) != 1)
137 		rte_pause();
138 }
139 
/* One event port is required per active worker lcore. */
static inline int
pipeline_nb_event_ports(struct evt_options *opt)
{
	return evt_nr_active_lcores(opt->wlcores);
}
145 
/* Result reporting and option validation. */
int pipeline_test_result(struct evt_test *test, struct evt_options *opt);
int pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues);
/* Per-resource setup helpers, implemented in test_pipeline_common.c. */
int pipeline_test_setup(struct evt_test *test, struct evt_options *opt);
int pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt);
int pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
		struct rte_event_port_conf prod_conf);
int pipeline_event_tx_service_setup(struct evt_test *test,
		struct evt_options *opt, uint8_t tx_queue_id,
		uint8_t tx_port_id, const struct rte_event_port_conf p_conf);
int pipeline_mempool_setup(struct evt_test *test, struct evt_options *opt);
int pipeline_event_port_setup(struct evt_test *test, struct evt_options *opt,
		uint8_t *queue_arr, uint8_t nb_queues,
		const struct rte_event_port_conf p_conf);
/* Launch the given worker function on all worker lcores. */
int pipeline_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *));
/* Dump and teardown counterparts of the setup helpers above. */
void pipeline_opt_dump(struct evt_options *opt, uint8_t nb_queues);
void pipeline_test_destroy(struct evt_test *test, struct evt_options *opt);
void pipeline_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
void pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt);
void pipeline_mempool_destroy(struct evt_test *test, struct evt_options *opt);
166 
167 #endif /* _TEST_PIPELINE_COMMON_ */
168