xref: /dpdk/app/test-flow-perf/items_gen.c (revision 68a03efeed657e6e05f281479b33b51102797e15)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2020 Mellanox Technologies, Ltd
3  *
4  * This file contains the implementations of the items
5  * related methods. Each item has a method to prepare
6  * the item and add it into the items array at a given index.
7  */
8 
9 #include <stdint.h>
10 #include <rte_flow.h>
11 
12 #include "items_gen.h"
13 #include "config.h"
14 
/* Storage for additional parameters passed down to the item builders. */
struct additional_para {
	/* Outer IPv4 source address in host byte order; converted to
	 * big endian at the point of use (see add_ipv4). It was
	 * previously declared rte_be32_t, which wrongly implied the
	 * stored value is already big endian. */
	uint32_t src_ip;
	/* Index of the generating core; selects the per-core slot in
	 * the static spec/mask arrays. */
	uint8_t core_idx;
};
20 
21 static void
22 add_ether(struct rte_flow_item *items,
23 	uint8_t items_counter,
24 	__rte_unused struct additional_para para)
25 {
26 	static struct rte_flow_item_eth eth_spec;
27 	static struct rte_flow_item_eth eth_mask;
28 
29 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_ETH;
30 	items[items_counter].spec = &eth_spec;
31 	items[items_counter].mask = &eth_mask;
32 }
33 
34 static void
35 add_vlan(struct rte_flow_item *items,
36 	uint8_t items_counter,
37 	__rte_unused struct additional_para para)
38 {
39 	static struct rte_flow_item_vlan vlan_spec = {
40 		.tci = RTE_BE16(VLAN_VALUE),
41 	};
42 	static struct rte_flow_item_vlan vlan_mask = {
43 		.tci = RTE_BE16(0xffff),
44 	};
45 
46 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_VLAN;
47 	items[items_counter].spec = &vlan_spec;
48 	items[items_counter].mask = &vlan_mask;
49 }
50 
51 static void
52 add_ipv4(struct rte_flow_item *items,
53 	uint8_t items_counter, struct additional_para para)
54 {
55 	static struct rte_flow_item_ipv4 ipv4_specs[RTE_MAX_LCORE] __rte_cache_aligned;
56 	static struct rte_flow_item_ipv4 ipv4_masks[RTE_MAX_LCORE] __rte_cache_aligned;
57 	uint8_t ti = para.core_idx;
58 
59 	ipv4_specs[ti].hdr.src_addr = RTE_BE32(para.src_ip);
60 	ipv4_masks[ti].hdr.src_addr = RTE_BE32(0xffffffff);
61 
62 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_IPV4;
63 	items[items_counter].spec = &ipv4_specs[ti];
64 	items[items_counter].mask = &ipv4_masks[ti];
65 }
66 
67 
68 static void
69 add_ipv6(struct rte_flow_item *items,
70 	uint8_t items_counter, struct additional_para para)
71 {
72 	static struct rte_flow_item_ipv6 ipv6_specs[RTE_MAX_LCORE] __rte_cache_aligned;
73 	static struct rte_flow_item_ipv6 ipv6_masks[RTE_MAX_LCORE] __rte_cache_aligned;
74 	uint8_t ti = para.core_idx;
75 
76 	/** Set ipv6 src **/
77 	memset(&ipv6_specs[ti].hdr.src_addr, para.src_ip,
78 		sizeof(ipv6_specs->hdr.src_addr) / 2);
79 
80 	/** Full mask **/
81 	memset(&ipv6_masks[ti].hdr.src_addr, 0xff,
82 		sizeof(ipv6_specs->hdr.src_addr));
83 
84 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_IPV6;
85 	items[items_counter].spec = &ipv6_specs[ti];
86 	items[items_counter].mask = &ipv6_masks[ti];
87 }
88 
89 static void
90 add_tcp(struct rte_flow_item *items,
91 	uint8_t items_counter,
92 	__rte_unused struct additional_para para)
93 {
94 	static struct rte_flow_item_tcp tcp_spec;
95 	static struct rte_flow_item_tcp tcp_mask;
96 
97 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_TCP;
98 	items[items_counter].spec = &tcp_spec;
99 	items[items_counter].mask = &tcp_mask;
100 }
101 
102 static void
103 add_udp(struct rte_flow_item *items,
104 	uint8_t items_counter,
105 	__rte_unused struct additional_para para)
106 {
107 	static struct rte_flow_item_udp udp_spec;
108 	static struct rte_flow_item_udp udp_mask;
109 
110 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_UDP;
111 	items[items_counter].spec = &udp_spec;
112 	items[items_counter].mask = &udp_mask;
113 }
114 
115 static void
116 add_vxlan(struct rte_flow_item *items,
117 	uint8_t items_counter,
118 	struct additional_para para)
119 {
120 	static struct rte_flow_item_vxlan vxlan_specs[RTE_MAX_LCORE] __rte_cache_aligned;
121 	static struct rte_flow_item_vxlan vxlan_masks[RTE_MAX_LCORE] __rte_cache_aligned;
122 	uint8_t ti = para.core_idx;
123 	uint32_t vni_value;
124 	uint8_t i;
125 
126 	vni_value = VNI_VALUE;
127 
128 	/* Set standard vxlan vni */
129 	for (i = 0; i < 3; i++) {
130 		vxlan_specs[ti].vni[2 - i] = vni_value >> (i * 8);
131 		vxlan_masks[ti].vni[2 - i] = 0xff;
132 	}
133 
134 	/* Standard vxlan flags */
135 	vxlan_specs[ti].flags = 0x8;
136 
137 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_VXLAN;
138 	items[items_counter].spec = &vxlan_specs[ti];
139 	items[items_counter].mask = &vxlan_masks[ti];
140 }
141 
142 static void
143 add_vxlan_gpe(struct rte_flow_item *items,
144 	uint8_t items_counter,
145 	__rte_unused struct additional_para para)
146 {
147 	static struct rte_flow_item_vxlan_gpe vxlan_gpe_specs[RTE_MAX_LCORE] __rte_cache_aligned;
148 	static struct rte_flow_item_vxlan_gpe vxlan_gpe_masks[RTE_MAX_LCORE] __rte_cache_aligned;
149 	uint8_t ti = para.core_idx;
150 	uint32_t vni_value;
151 	uint8_t i;
152 
153 	vni_value = VNI_VALUE;
154 
155 	/* Set vxlan-gpe vni */
156 	for (i = 0; i < 3; i++) {
157 		vxlan_gpe_specs[ti].vni[2 - i] = vni_value >> (i * 8);
158 		vxlan_gpe_masks[ti].vni[2 - i] = 0xff;
159 	}
160 
161 	/* vxlan-gpe flags */
162 	vxlan_gpe_specs[ti].flags = 0x0c;
163 
164 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE;
165 	items[items_counter].spec = &vxlan_gpe_specs[ti];
166 	items[items_counter].mask = &vxlan_gpe_masks[ti];
167 }
168 
169 static void
170 add_gre(struct rte_flow_item *items,
171 	uint8_t items_counter,
172 	__rte_unused struct additional_para para)
173 {
174 	static struct rte_flow_item_gre gre_spec = {
175 		.protocol = RTE_BE16(RTE_ETHER_TYPE_TEB),
176 	};
177 	static struct rte_flow_item_gre gre_mask = {
178 		.protocol = RTE_BE16(0xffff),
179 	};
180 
181 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_GRE;
182 	items[items_counter].spec = &gre_spec;
183 	items[items_counter].mask = &gre_mask;
184 }
185 
186 static void
187 add_geneve(struct rte_flow_item *items,
188 	uint8_t items_counter,
189 	__rte_unused struct additional_para para)
190 {
191 	static struct rte_flow_item_geneve geneve_specs[RTE_MAX_LCORE] __rte_cache_aligned;
192 	static struct rte_flow_item_geneve geneve_masks[RTE_MAX_LCORE] __rte_cache_aligned;
193 	uint8_t ti = para.core_idx;
194 	uint32_t vni_value;
195 	uint8_t i;
196 
197 	vni_value = VNI_VALUE;
198 
199 	for (i = 0; i < 3; i++) {
200 		geneve_specs[ti].vni[2 - i] = vni_value >> (i * 8);
201 		geneve_masks[ti].vni[2 - i] = 0xff;
202 	}
203 
204 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_GENEVE;
205 	items[items_counter].spec = &geneve_specs[ti];
206 	items[items_counter].mask = &geneve_masks[ti];
207 }
208 
209 static void
210 add_gtp(struct rte_flow_item *items,
211 	uint8_t items_counter,
212 	__rte_unused struct additional_para para)
213 {
214 	static struct rte_flow_item_gtp gtp_spec = {
215 		.teid = RTE_BE32(TEID_VALUE),
216 	};
217 	static struct rte_flow_item_gtp gtp_mask = {
218 		.teid = RTE_BE32(0xffffffff),
219 	};
220 
221 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_GTP;
222 	items[items_counter].spec = &gtp_spec;
223 	items[items_counter].mask = &gtp_mask;
224 }
225 
226 static void
227 add_meta_data(struct rte_flow_item *items,
228 	uint8_t items_counter,
229 	__rte_unused struct additional_para para)
230 {
231 	static struct rte_flow_item_meta meta_spec = {
232 		.data = RTE_BE32(META_DATA),
233 	};
234 	static struct rte_flow_item_meta meta_mask = {
235 		.data = RTE_BE32(0xffffffff),
236 	};
237 
238 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_META;
239 	items[items_counter].spec = &meta_spec;
240 	items[items_counter].mask = &meta_mask;
241 }
242 
243 
244 static void
245 add_meta_tag(struct rte_flow_item *items,
246 	uint8_t items_counter,
247 	__rte_unused struct additional_para para)
248 {
249 	static struct rte_flow_item_tag tag_spec = {
250 		.data = RTE_BE32(META_DATA),
251 		.index = TAG_INDEX,
252 	};
253 	static struct rte_flow_item_tag tag_mask = {
254 		.data = RTE_BE32(0xffffffff),
255 		.index = 0xff,
256 	};
257 
258 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_TAG;
259 	items[items_counter].spec = &tag_spec;
260 	items[items_counter].mask = &tag_mask;
261 }
262 
263 static void
264 add_icmpv4(struct rte_flow_item *items,
265 	uint8_t items_counter,
266 	__rte_unused struct additional_para para)
267 {
268 	static struct rte_flow_item_icmp icmpv4_spec;
269 	static struct rte_flow_item_icmp icmpv4_mask;
270 
271 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_ICMP;
272 	items[items_counter].spec = &icmpv4_spec;
273 	items[items_counter].mask = &icmpv4_mask;
274 }
275 
276 static void
277 add_icmpv6(struct rte_flow_item *items,
278 	uint8_t items_counter,
279 	__rte_unused struct additional_para para)
280 {
281 	static struct rte_flow_item_icmp6 icmpv6_spec;
282 	static struct rte_flow_item_icmp6 icmpv6_mask;
283 
284 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_ICMP6;
285 	items[items_counter].spec = &icmpv6_spec;
286 	items[items_counter].mask = &icmpv6_mask;
287 }
288 
289 void
290 fill_items(struct rte_flow_item *items,
291 	uint64_t *flow_items, uint32_t outer_ip_src,
292 	uint8_t core_idx)
293 {
294 	uint8_t items_counter = 0;
295 	uint8_t i, j;
296 	struct additional_para additional_para_data = {
297 		.src_ip = outer_ip_src,
298 		.core_idx = core_idx,
299 	};
300 
301 	/* Support outer items up to tunnel layer only. */
302 	static const struct items_dict {
303 		uint64_t mask;
304 		void (*funct)(
305 			struct rte_flow_item *items,
306 			uint8_t items_counter,
307 			struct additional_para para
308 			);
309 	} items_list[] = {
310 		{
311 			.mask = RTE_FLOW_ITEM_TYPE_META,
312 			.funct = add_meta_data,
313 		},
314 		{
315 			.mask = RTE_FLOW_ITEM_TYPE_TAG,
316 			.funct = add_meta_tag,
317 		},
318 		{
319 			.mask = RTE_FLOW_ITEM_TYPE_ETH,
320 			.funct = add_ether,
321 		},
322 		{
323 			.mask = RTE_FLOW_ITEM_TYPE_VLAN,
324 			.funct = add_vlan,
325 		},
326 		{
327 			.mask = RTE_FLOW_ITEM_TYPE_IPV4,
328 			.funct = add_ipv4,
329 		},
330 		{
331 			.mask = RTE_FLOW_ITEM_TYPE_IPV6,
332 			.funct = add_ipv6,
333 		},
334 		{
335 			.mask = RTE_FLOW_ITEM_TYPE_TCP,
336 			.funct = add_tcp,
337 		},
338 		{
339 			.mask = RTE_FLOW_ITEM_TYPE_UDP,
340 			.funct = add_udp,
341 		},
342 		{
343 			.mask = RTE_FLOW_ITEM_TYPE_VXLAN,
344 			.funct = add_vxlan,
345 		},
346 		{
347 			.mask = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
348 			.funct = add_vxlan_gpe,
349 		},
350 		{
351 			.mask = RTE_FLOW_ITEM_TYPE_GRE,
352 			.funct = add_gre,
353 		},
354 		{
355 			.mask = RTE_FLOW_ITEM_TYPE_GENEVE,
356 			.funct = add_geneve,
357 		},
358 		{
359 			.mask = RTE_FLOW_ITEM_TYPE_GTP,
360 			.funct = add_gtp,
361 		},
362 		{
363 			.mask = RTE_FLOW_ITEM_TYPE_ICMP,
364 			.funct = add_icmpv4,
365 		},
366 		{
367 			.mask = RTE_FLOW_ITEM_TYPE_ICMP6,
368 			.funct = add_icmpv6,
369 		},
370 	};
371 
372 	for (j = 0; j < MAX_ITEMS_NUM; j++) {
373 		if (flow_items[j] == 0)
374 			break;
375 		for (i = 0; i < RTE_DIM(items_list); i++) {
376 			if ((flow_items[j] &
377 				FLOW_ITEM_MASK(items_list[i].mask)) == 0)
378 				continue;
379 			items_list[i].funct(
380 				items, items_counter++,
381 				additional_para_data
382 			);
383 			break;
384 		}
385 	}
386 
387 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_END;
388 }
389