xref: /dpdk/app/test-eventdev/evt_common.h (revision 7917b0d38e92e8b9ec5a870415b791420e10f11a)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Cavium, Inc
3  */
4 
5 #ifndef _EVT_COMMON_
6 #define _EVT_COMMON_
7 
8 #include <rte_common.h>
9 #include <rte_crypto.h>
10 #include <rte_debug.h>
11 #include <rte_event_crypto_adapter.h>
12 #include <rte_event_dma_adapter.h>
13 #include <rte_eventdev.h>
14 #include <rte_service.h>
15 
/* ANSI terminal color escape sequences used by the logging macros below. */
#define CLNRM  "\x1b[0m"
#define CLRED  "\x1b[31m"
#define CLGRN  "\x1b[32m"
#define CLYEL  "\x1b[33m"

/* Print an error (red) to stderr, prefixed with the calling function name. */
#define evt_err(fmt, args...) \
	fprintf(stderr, CLRED"error: %s() "fmt CLNRM "\n", __func__, ## args)

/* Print an informational message (yellow) to stdout. */
#define evt_info(fmt, args...) \
	fprintf(stdout, CLYEL""fmt CLNRM "\n", ## args)

/* Field width used to left-align the key column in dump output. */
#define EVT_STR_FMT 20

/* Dump one "key : value" line, key padded to EVT_STR_FMT columns. */
#define evt_dump(str, fmt, val...) \
	printf("\t%-*s : "fmt"\n", EVT_STR_FMT, str, ## val)

/* Open a "key : {" aggregate dump; terminate it with evt_dump_end. */
#define evt_dump_begin(str) printf("\t%-*s : {", EVT_STR_FMT, str)

/* Close an aggregate dump; the backspace overwrites the caller's
 * trailing separator character before printing '}'.
 */
#define evt_dump_end printf("\b}\n")

/* Compile-time upper bounds used to size per-test arrays. */
#define EVT_MAX_STAGES           64
#define EVT_MAX_PORTS            256
#define EVT_MAX_QUEUES           256
39 
/* Kind of producer used to inject events into the event device under test. */
enum evt_prod_type {
	EVT_PROD_TYPE_NONE,
	EVT_PROD_TYPE_SYNT,          /* Producer type Synthetic i.e. CPU. */
	EVT_PROD_TYPE_ETH_RX_ADPTR,  /* Producer type Eth Rx Adapter. */
	EVT_PROD_TYPE_EVENT_TIMER_ADPTR,  /* Producer type Timer Adapter. */
	EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR,  /* Producer type Crypto Adapter. */
	EVT_PROD_TYPE_EVENT_DMA_ADPTR,  /* Producer type DMA Adapter. */
	EVT_PROD_TYPE_MAX,           /* Sentinel: number of producer types. */
};
49 
/* Aggregated configuration for a test-eventdev run, populated from the
 * command line and consumed by the individual tests.
 *
 * NOTE(review): field summaries below are inferred from field names and
 * their use in this header; verify against the option parser for fields
 * not referenced in this file.
 */
struct evt_options {
#define EVT_TEST_NAME_MAX_LEN     32
#define EVT_CRYPTO_MAX_KEY_SIZE   256
#define EVT_CRYPTO_MAX_IV_SIZE    16
	char test_name[EVT_TEST_NAME_MAX_LEN]; /* Selected test name. */
	bool plcores[RTE_MAX_LCORE]; /* Producer lcore map (true = in use). */
	bool wlcores[RTE_MAX_LCORE]; /* Worker lcore map (true = in use). */
	bool crypto_cipher_bit_mode; /* Cipher in bit mode — confirm in parser. */
	int pool_sz; /* Mempool size. */
	int socket_id; /* NUMA socket id for allocations. */
	int nb_stages; /* Number of pipeline stages (<= EVT_MAX_STAGES). */
	int verbose_level; /* Output verbosity. */
	uint8_t dev_id; /* Event device id under test. */
	uint8_t timdev_cnt; /* Timer device count — confirm in parser. */
	uint8_t nb_timer_adptrs; /* Number of event timer adapters. */
	uint8_t timdev_use_burst; /* Timer adapter burst mode flag. */
	uint8_t per_port_pool; /* Use one mempool per port when set. */
	uint8_t preschedule; /* Requested RTE_EVENT_PRESCHEDULE_* type. */
	uint8_t preschedule_opted; /* Set when user explicitly chose preschedule. */
	uint8_t sched_type_list[EVT_MAX_STAGES]; /* Schedule type per stage. */
	uint16_t mbuf_sz; /* mbuf data room size. */
	uint16_t wkr_deq_dep; /* Worker dequeue depth. */
	uint16_t vector_size; /* Event vector size. */
	uint16_t eth_queues; /* Ethdev Rx queues — confirm in parser. */
	uint16_t crypto_cipher_iv_sz; /* Cipher IV size (<= EVT_CRYPTO_MAX_IV_SIZE). */
	uint32_t nb_flows; /* Flow count; used for nb_event_queue_flows. */
	uint32_t tx_first; /* Packets to Tx before Rx — confirm in parser. */
	uint16_t tx_pkt_sz; /* Tx packet size. */
	uint32_t max_pkt_sz; /* Maximum packet size. */
	uint32_t prod_enq_burst_sz; /* Producer enqueue burst size. */
	uint32_t deq_tmo_nsec; /* Dequeue timeout (ns); clamped to device range. */
	uint32_t crypto_cipher_key_sz; /* Cipher key size (<= EVT_CRYPTO_MAX_KEY_SIZE). */
	uint32_t q_priority:1; /* Enable queue priority. */
	uint32_t fwd_latency:1; /* Measure forward latency. */
	uint32_t ena_vector : 1; /* Enable event vectorization. */
	uint64_t nb_pkts; /* Number of packets to process. */
	uint64_t nb_timers; /* Number of timers to arm. */
	uint64_t expiry_nsec; /* Timer expiry time (ns). */
	uint64_t max_tmo_nsec; /* Maximum timer timeout (ns). */
	uint64_t vector_tmo_nsec; /* Vector aggregation timeout (ns). */
	uint64_t timer_tick_nsec; /* Requested timer tick (ns). */
	uint64_t optm_timer_tick_nsec; /* Optimized timer tick — confirm. */
	enum evt_prod_type prod_type; /* Producer type (see evt_prod_type). */
	enum rte_event_dma_adapter_mode dma_adptr_mode; /* DMA adapter mode. */
	enum rte_event_crypto_adapter_mode crypto_adptr_mode; /* Crypto adapter mode. */
	enum rte_crypto_op_type crypto_op_type; /* Crypto operation type. */
	enum rte_crypto_cipher_algorithm crypto_cipher_alg; /* Cipher algorithm. */
	uint8_t crypto_cipher_key[EVT_CRYPTO_MAX_KEY_SIZE]; /* Cipher key bytes. */
};
99 
100 static inline bool
101 evt_has_distributed_sched(uint8_t dev_id)
102 {
103 	struct rte_event_dev_info dev_info;
104 
105 	rte_event_dev_info_get(dev_id, &dev_info);
106 	return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED) ?
107 			true : false;
108 }
109 
110 static inline bool
111 evt_has_burst_mode(uint8_t dev_id)
112 {
113 	struct rte_event_dev_info dev_info;
114 
115 	rte_event_dev_info_get(dev_id, &dev_info);
116 	return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) ?
117 			true : false;
118 }
119 
120 
121 static inline bool
122 evt_has_all_types_queue(uint8_t dev_id)
123 {
124 	struct rte_event_dev_info dev_info;
125 
126 	rte_event_dev_info_get(dev_id, &dev_info);
127 	return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES) ?
128 			true : false;
129 }
130 
131 static inline bool
132 evt_has_flow_id(uint8_t dev_id)
133 {
134 	struct rte_event_dev_info dev_info;
135 
136 	rte_event_dev_info_get(dev_id, &dev_info);
137 	return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_CARRY_FLOW_ID) ?
138 			true : false;
139 }
140 
141 static inline int
142 evt_service_setup(uint32_t service_id)
143 {
144 	int32_t core_cnt;
145 	unsigned int lcore = 0;
146 	uint32_t core_array[RTE_MAX_LCORE];
147 	uint8_t cnt;
148 	uint8_t min_cnt = UINT8_MAX;
149 
150 	if (!rte_service_lcore_count())
151 		return -ENOENT;
152 
153 	core_cnt = rte_service_lcore_list(core_array,
154 			RTE_MAX_LCORE);
155 	if (core_cnt < 0)
156 		return -ENOENT;
157 	/* Get the core which has least number of services running. */
158 	while (core_cnt--) {
159 		/* Reset default mapping */
160 		rte_service_map_lcore_set(service_id,
161 				core_array[core_cnt], 0);
162 		cnt = rte_service_lcore_count_services(
163 				core_array[core_cnt]);
164 		if (cnt < min_cnt) {
165 			lcore = core_array[core_cnt];
166 			min_cnt = cnt;
167 		}
168 	}
169 	if (rte_service_map_lcore_set(service_id, lcore, 1))
170 		return -ENOENT;
171 
172 	return 0;
173 }
174 
175 static inline int
176 evt_configure_eventdev(struct evt_options *opt, uint8_t nb_queues,
177 		uint8_t nb_ports)
178 {
179 	struct rte_event_dev_info info;
180 	int ret;
181 
182 	memset(&info, 0, sizeof(struct rte_event_dev_info));
183 	ret = rte_event_dev_info_get(opt->dev_id, &info);
184 	if (ret) {
185 		evt_err("failed to get eventdev info %d", opt->dev_id);
186 		return ret;
187 	}
188 
189 	if (opt->preschedule_opted && opt->preschedule) {
190 		switch (opt->preschedule) {
191 		case RTE_EVENT_PRESCHEDULE_ADAPTIVE:
192 			if (!(info.event_dev_cap & RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE_ADAPTIVE)) {
193 				evt_err("Preschedule type %d not supported", opt->preschedule);
194 				return -EINVAL;
195 			}
196 			break;
197 		case RTE_EVENT_PRESCHEDULE:
198 			if (!(info.event_dev_cap & RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE)) {
199 				evt_err("Preschedule type %d not supported", opt->preschedule);
200 				return -EINVAL;
201 			}
202 			break;
203 		default:
204 			break;
205 		}
206 	}
207 
208 	if (!opt->preschedule_opted) {
209 		if (info.event_dev_cap & RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE_ADAPTIVE)
210 			opt->preschedule = RTE_EVENT_PRESCHEDULE_ADAPTIVE;
211 	}
212 
213 	if (opt->deq_tmo_nsec) {
214 		if (opt->deq_tmo_nsec < info.min_dequeue_timeout_ns) {
215 			opt->deq_tmo_nsec = info.min_dequeue_timeout_ns;
216 			evt_info("dequeue_timeout_ns too low, using %d",
217 					opt->deq_tmo_nsec);
218 		}
219 		if (opt->deq_tmo_nsec > info.max_dequeue_timeout_ns) {
220 			opt->deq_tmo_nsec = info.max_dequeue_timeout_ns;
221 			evt_info("dequeue_timeout_ns too high, using %d",
222 					opt->deq_tmo_nsec);
223 		}
224 	}
225 
226 	const struct rte_event_dev_config config = {
227 		.dequeue_timeout_ns = opt->deq_tmo_nsec,
228 		.nb_event_queues = nb_queues,
229 		.nb_event_ports = nb_ports,
230 		.nb_single_link_event_port_queues = 0,
231 		.nb_events_limit = info.max_num_events,
232 		.nb_event_queue_flows = opt->nb_flows,
233 		.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth,
234 		.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth,
235 		.preschedule_type = opt->preschedule,
236 	};
237 
238 	return rte_event_dev_configure(opt->dev_id, &config);
239 }
240 
241 #endif /*  _EVT_COMMON_*/
242