/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#ifndef _EVT_COMMON_
#define _EVT_COMMON_

#include <rte_common.h>
#include <rte_crypto.h>
#include <rte_debug.h>
#include <rte_event_crypto_adapter.h>
#include <rte_eventdev.h>
#include <rte_service.h>

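/* ANSI escape sequences used to colorize console output. */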
#define CLNRM  "\x1b[0m"
#define CLRED  "\x1b[31m"
#define CLGRN  "\x1b[32m"
#define CLYEL  "\x1b[33m"

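/*
 * Logging helpers: evt_err() prints a red error message prefixed with the
 * calling function's name; evt_info() prints a yellow informational message.
 */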
#define evt_err(fmt, args...) \
	fprintf(stderr, CLRED"error: %s() "fmt CLNRM "\n", __func__, ## args)

#define evt_info(fmt, args...) \
	fprintf(stdout, CLYEL""fmt CLNRM "\n", ## args)

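/*
 * Pretty-printing helpers for configuration dumps; EVT_STR_FMT is the
 * field width used to align the label column.
 */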
#define EVT_STR_FMT 20

#define evt_dump(str, fmt, val...) \
	printf("\t%-*s : "fmt"\n", EVT_STR_FMT, str, ## val)

#define evt_dump_begin(str) printf("\t%-*s : {", EVT_STR_FMT, str)

#define evt_dump_end printf("\b}\n")

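/* Upper bounds used to size per-test arrays. */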
#define EVT_MAX_STAGES           64
#define EVT_MAX_PORTS            256
#define EVT_MAX_QUEUES           256

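/* Type of event producer feeding the event device under test. */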
enum evt_prod_type {
	EVT_PROD_TYPE_NONE,
	EVT_PROD_TYPE_SYNT,          /* Synthetic producer, i.e. the CPU. */
	EVT_PROD_TYPE_ETH_RX_ADPTR,  /* Ethernet Rx adapter producer. */
	EVT_PROD_TYPE_EVENT_TIMER_ADPTR,  /* Event timer adapter producer. */
	EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR,  /* Event crypto adapter producer. */
	EVT_PROD_TYPE_MAX,
};

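/* Parsed command-line options shared by all test-eventdev test cases. */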
struct evt_options {
#define EVT_TEST_NAME_MAX_LEN     32
	char test_name[EVT_TEST_NAME_MAX_LEN];
	bool plcores[RTE_MAX_LCORE];
	bool wlcores[RTE_MAX_LCORE];
	int pool_sz;
	int socket_id;
	int nb_stages;
	int verbose_level;
	uint8_t dev_id;
	uint8_t timdev_cnt;
	uint8_t nb_timer_adptrs;
	uint8_t timdev_use_burst;
	uint8_t per_port_pool;
	uint8_t sched_type_list[EVT_MAX_STAGES];
	uint16_t mbuf_sz;
	uint16_t wkr_deq_dep;
	uint16_t vector_size;
	uint16_t eth_queues;
	uint32_t nb_flows;
	uint32_t tx_first;
	uint16_t tx_pkt_sz;
	uint32_t max_pkt_sz;
	uint32_t prod_enq_burst_sz;
	uint32_t deq_tmo_nsec;
	uint32_t q_priority:1;
	uint32_t fwd_latency:1;
	uint32_t ena_vector:1;
	uint64_t nb_pkts;
	uint64_t nb_timers;
	uint64_t expiry_nsec;
	uint64_t max_tmo_nsec;
	uint64_t vector_tmo_nsec;
	uint64_t timer_tick_nsec;
	uint64_t optm_timer_tick_nsec;
	enum evt_prod_type prod_type;
	enum rte_event_crypto_adapter_mode crypto_adptr_mode;
	enum rte_crypto_op_type crypto_op_type;
};

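/* Check whether the device supports distributed scheduling
 * (RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED), i.e. it does not need a
 * dedicated scheduling service core.
 */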
static inline bool
evt_has_distributed_sched(uint8_t dev_id)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(dev_id, &dev_info);
	return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED) ?
			true : false;
}

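/* Check whether the device supports burst enqueue/dequeue
 * (RTE_EVENT_DEV_CAP_BURST_MODE).
 */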
static inline bool
evt_has_burst_mode(uint8_t dev_id)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(dev_id, &dev_info);
	return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) ?
			true : false;
}

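/* Check whether a single event queue can service all schedule types
 * (RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES).
 */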
static inline bool
evt_has_all_types_queue(uint8_t dev_id)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(dev_id, &dev_info);
	return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES) ?
			true : false;
}

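/* Check whether the device preserves the event flow ID from enqueue to
 * dequeue (RTE_EVENT_DEV_CAP_CARRY_FLOW_ID).
 */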
static inline bool
evt_has_flow_id(uint8_t dev_id)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(dev_id, &dev_info);
	return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_CARRY_FLOW_ID) ?
			true : false;
}

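/* Map the given service to the service lcore currently running the fewest
 * services, clearing any default mappings first. Return 0 on success,
 * -ENOENT if no service lcore is available or the mapping fails.
 */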
static inline int
evt_service_setup(uint32_t service_id)
{
	int32_t core_cnt;
	unsigned int lcore = 0;
	uint32_t core_array[RTE_MAX_LCORE];
	uint8_t cnt;
	uint8_t min_cnt = UINT8_MAX;

	if (!rte_service_lcore_count())
		return -ENOENT;

	core_cnt = rte_service_lcore_list(core_array,
			RTE_MAX_LCORE);
	if (core_cnt < 0)
		return -ENOENT;
	/* Find the core running the fewest services. */
	while (core_cnt--) {
		/* Reset the default mapping. */
		rte_service_map_lcore_set(service_id,
				core_array[core_cnt], 0);
		cnt = rte_service_lcore_count_services(
				core_array[core_cnt]);
		if (cnt < min_cnt) {
			lcore = core_array[core_cnt];
			min_cnt = cnt;
		}
	}
	if (rte_service_map_lcore_set(service_id, lcore, 1))
		return -ENOENT;

	return 0;
}

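/*
 * Typical usage (a sketch, not taken from this file): software event
 * devices expose their scheduler as a service, which must be mapped to a
 * service core before the device is started.
 *
 *	uint32_t service_id;
 *
 *	if (rte_event_dev_service_id_get(opt->dev_id, &service_id) == 0 &&
 *			evt_service_setup(service_id))
 *		evt_err("failed to map eventdev service to a service core");
 */

/* Configure the event device, clamping the dequeue timeout to the
 * device's reported limits and sizing queue flows and port depths from
 * the device capabilities.
 */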
static inline int
evt_configure_eventdev(struct evt_options *opt, uint8_t nb_queues,
		uint8_t nb_ports)
{
	struct rte_event_dev_info info;
	int ret;

	memset(&info, 0, sizeof(struct rte_event_dev_info));
	ret = rte_event_dev_info_get(opt->dev_id, &info);
	if (ret) {
		evt_err("failed to get eventdev info %d", opt->dev_id);
		return ret;
	}

	if (opt->deq_tmo_nsec) {
		if (opt->deq_tmo_nsec < info.min_dequeue_timeout_ns) {
			opt->deq_tmo_nsec = info.min_dequeue_timeout_ns;
			evt_info("dequeue_timeout_ns too low, using %u",
					opt->deq_tmo_nsec);
		}
		if (opt->deq_tmo_nsec > info.max_dequeue_timeout_ns) {
			opt->deq_tmo_nsec = info.max_dequeue_timeout_ns;
			evt_info("dequeue_timeout_ns too high, using %u",
					opt->deq_tmo_nsec);
		}
	}

189 
190 	const struct rte_event_dev_config config = {
191 			.dequeue_timeout_ns = opt->deq_tmo_nsec,
192 			.nb_event_queues = nb_queues,
193 			.nb_event_ports = nb_ports,
194 			.nb_single_link_event_port_queues = 0,
195 			.nb_events_limit  = info.max_num_events,
196 			.nb_event_queue_flows = opt->nb_flows,
197 			.nb_event_port_dequeue_depth =
198 				info.max_event_port_dequeue_depth,
199 			.nb_event_port_enqueue_depth =
200 				info.max_event_port_enqueue_depth,
201 	};
202 
203 	return rte_event_dev_configure(opt->dev_id, &config);
204 }
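
/*
 * Typical usage (a sketch; nb_queues and nb_ports are derived by each
 * test case from its stage and lcore counts):
 *
 *	ret = evt_configure_eventdev(opt, nb_queues, nb_ports);
 *	if (ret) {
 *		evt_err("failed to configure eventdev %d", opt->dev_id);
 *		return ret;
 *	}
 */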

#endif /* _EVT_COMMON_ */