/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 *
 * This file contains the implementations of the action generators.
 * Each generator is responsible for preparing its action instance
 * and initializing it with the needed data.
 */

#include <stdlib.h>
#include <sys/types.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_ethdev.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_gre.h>
#include <rte_geneve.h>

#include "actions_gen.h"
#include "flow_gen.h"
#include "config.h"

/* Storage for additional parameters for actions */
struct additional_para {
	uint16_t queue;
	uint16_t next_table;
	uint16_t *queues;
	uint16_t queues_number;
	uint32_t counter;
	uint64_t encap_data;
	uint64_t decap_data;
	uint16_t dst_port;
	uint8_t core_idx;
	bool unique_data;
};

/* Storage for struct rte_flow_action_raw_encap including external data. */
struct action_raw_encap_data {
	struct rte_flow_action_raw_encap conf;
	uint8_t data[128];
	uint8_t preserve[128];
	uint16_t idx;
};

/* Storage for struct rte_flow_action_raw_decap including external data. */
struct action_raw_decap_data {
	struct rte_flow_action_raw_decap conf;
	uint8_t data[128];
	uint16_t idx;
};

/* Storage for struct rte_flow_action_rss including external data. */
struct action_rss_data {
	struct rte_flow_action_rss conf;
	uint8_t key[40];
	uint16_t queue[128];
};

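/*
 * The generators below share a common pattern: each keeps one static
 * configuration slot per lcore, cache-line aligned so that lcores do not
 * false-share, and points the action's conf at its own lcore's slot.
 */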
static void
add_mark(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static alignas(RTE_CACHE_LINE_SIZE)
	    struct rte_flow_action_mark mark_actions[RTE_MAX_LCORE];
	uint32_t counter = para.counter;

	/* Values from 1 to 255, derived from the flow counter */
	mark_actions[para.core_idx].id = (counter % 255) + 1;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_MARK;
	actions[actions_counter].conf = &mark_actions[para.core_idx];
}

static void
add_queue(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static alignas(RTE_CACHE_LINE_SIZE)
	    struct rte_flow_action_queue queue_actions[RTE_MAX_LCORE];

	queue_actions[para.core_idx].index = para.queue;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	actions[actions_counter].conf = &queue_actions[para.core_idx];
}

static void
add_jump(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_jump jump_action;

	jump_action.group = para.next_table;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_JUMP;
	actions[actions_counter].conf = &jump_action;
}

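/*
 * The RSS configuration is allocated lazily, once per lcore, then rewritten
 * in place for every flow; conf.key and conf.queue point back into the same
 * allocation, so each lcore's configuration stays self-contained.
 */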
static void
add_rss(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static alignas(RTE_CACHE_LINE_SIZE) struct action_rss_data *rss_data[RTE_MAX_LCORE];

	uint16_t queue;

	if (rss_data[para.core_idx] == NULL)
		rss_data[para.core_idx] = rte_malloc("rss_data",
			sizeof(struct action_rss_data), 0);

	if (rss_data[para.core_idx] == NULL)
		rte_exit(EXIT_FAILURE, "No Memory available!");

	*rss_data[para.core_idx] = (struct action_rss_data){
		.conf = (struct rte_flow_action_rss){
			.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
			.level = 0,
			.types = GET_RSS_HF(),
			.key_len = sizeof(rss_data[para.core_idx]->key),
			.queue_num = para.queues_number,
			.key = rss_data[para.core_idx]->key,
			.queue = rss_data[para.core_idx]->queue,
		},
		.key = { 1 },
		.queue = { 0 },
	};

	for (queue = 0; queue < para.queues_number; queue++)
		rss_data[para.core_idx]->queue[queue] = para.queues[queue];

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RSS;
	actions[actions_counter].conf = &rss_data[para.core_idx]->conf;
}

static void
add_set_meta(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_action_set_meta meta_action = {
		.data = RTE_BE32(META_DATA),
		.mask = RTE_BE32(0xffffffff),
	};

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_META;
	actions[actions_counter].conf = &meta_action;
}

static void
add_set_tag(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_action_set_tag tag_action = {
		.data = RTE_BE32(META_DATA),
		.mask = RTE_BE32(0xffffffff),
		.index = TAG_INDEX,
	};

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TAG;
	actions[actions_counter].conf = &tag_action;
}

static void
add_port_id(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_port_id port_id = {
		.id = PORT_ID_DST,
	};

	port_id.id = para.dst_port;
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_PORT_ID;
	actions[actions_counter].conf = &port_id;
}

static void
add_drop(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DROP;
}

static void
add_count(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_action_count count_action;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_COUNT;
	actions[actions_counter].conf = &count_action;
}

static void
add_set_src_mac(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static alignas(RTE_CACHE_LINE_SIZE) struct rte_flow_action_set_mac set_macs[RTE_MAX_LCORE];
	uint32_t mac = para.counter;
	uint16_t i;

	/* Fixed value */
	if (!para.unique_data)
		mac = 1;

	/* MAC address derived from the flow counter, so it differs per flow */
	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
		set_macs[para.core_idx].mac_addr[i] = mac & 0xff;
		mac = mac >> 8;
	}

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_MAC_SRC;
	actions[actions_counter].conf = &set_macs[para.core_idx];
}

static void
add_set_dst_mac(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static alignas(RTE_CACHE_LINE_SIZE) struct rte_flow_action_set_mac set_macs[RTE_MAX_LCORE];
	uint32_t mac = para.counter;
	uint16_t i;

	/* Fixed value */
	if (!para.unique_data)
		mac = 1;

	/* MAC address derived from the flow counter, so it differs per flow */
	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
		set_macs[para.core_idx].mac_addr[i] = mac & 0xff;
		mac = mac >> 8;
	}

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_MAC_DST;
	actions[actions_counter].conf = &set_macs[para.core_idx];
}

static void
add_set_src_ipv4(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static alignas(RTE_CACHE_LINE_SIZE) struct rte_flow_action_set_ipv4 set_ipv4[RTE_MAX_LCORE];
	uint32_t ip = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		ip = 1;

	/* IPv4 address derived from the flow counter, so it differs per flow */
	set_ipv4[para.core_idx].ipv4_addr = RTE_BE32(ip + 1);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC;
	actions[actions_counter].conf = &set_ipv4[para.core_idx];
}

static void
add_set_dst_ipv4(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static alignas(RTE_CACHE_LINE_SIZE) struct rte_flow_action_set_ipv4 set_ipv4[RTE_MAX_LCORE];
	uint32_t ip = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		ip = 1;

	/* IPv4 address derived from the flow counter, so it differs per flow */
	set_ipv4[para.core_idx].ipv4_addr = RTE_BE32(ip + 1);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DST;
	actions[actions_counter].conf = &set_ipv4[para.core_idx];
}

static void
add_set_src_ipv6(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static alignas(RTE_CACHE_LINE_SIZE) struct rte_flow_action_set_ipv6 set_ipv6[RTE_MAX_LCORE];
	uint32_t ipv6 = para.counter;
	uint8_t i;

	/* Fixed value */
	if (!para.unique_data)
		ipv6 = 1;

	/* IPv6 address derived from the flow counter, so it differs per flow */
	for (i = 0; i < 16; i++) {
		set_ipv6[para.core_idx].ipv6_addr.a[i] = ipv6 & 0xff;
		ipv6 = ipv6 >> 8;
	}

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC;
	actions[actions_counter].conf = &set_ipv6[para.core_idx];
}

static void
add_set_dst_ipv6(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static alignas(RTE_CACHE_LINE_SIZE) struct rte_flow_action_set_ipv6 set_ipv6[RTE_MAX_LCORE];
	uint32_t ipv6 = para.counter;
	uint8_t i;

	/* Fixed value */
	if (!para.unique_data)
		ipv6 = 1;

	/* IPv6 address derived from the flow counter, so it differs per flow */
	for (i = 0; i < 16; i++) {
		set_ipv6[para.core_idx].ipv6_addr.a[i] = ipv6 & 0xff;
		ipv6 = ipv6 >> 8;
	}

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_DST;
	actions[actions_counter].conf = &set_ipv6[para.core_idx];
}

static void
add_set_src_tp(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static alignas(RTE_CACHE_LINE_SIZE) struct rte_flow_action_set_tp set_tp[RTE_MAX_LCORE];
	uint32_t tp = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		tp = 100;

	/* Source port derived from the flow counter, kept within 16 bits */
	tp = tp % 0xffff;

	set_tp[para.core_idx].port = RTE_BE16(tp & 0xffff);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_SRC;
	actions[actions_counter].conf = &set_tp[para.core_idx];
}

static void
add_set_dst_tp(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static alignas(RTE_CACHE_LINE_SIZE) struct rte_flow_action_set_tp set_tp[RTE_MAX_LCORE];
	uint32_t tp = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		tp = 100;

	/* Destination port derived from the flow counter, kept within 16 bits */
	if (tp > 0xffff)
		tp = tp >> 16;

	set_tp[para.core_idx].port = RTE_BE16(tp & 0xffff);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_DST;
	actions[actions_counter].conf = &set_tp[para.core_idx];
}

static void
add_inc_tcp_ack(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static alignas(RTE_CACHE_LINE_SIZE) rte_be32_t value[RTE_MAX_LCORE];
	uint32_t ack_value = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		ack_value = 1;

	value[para.core_idx] = RTE_BE32(ack_value);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_ACK;
	actions[actions_counter].conf = &value[para.core_idx];
}

static void
add_dec_tcp_ack(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static alignas(RTE_CACHE_LINE_SIZE) rte_be32_t value[RTE_MAX_LCORE];
	uint32_t ack_value = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		ack_value = 1;

	value[para.core_idx] = RTE_BE32(ack_value);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK;
	actions[actions_counter].conf = &value[para.core_idx];
}

static void
add_inc_tcp_seq(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static alignas(RTE_CACHE_LINE_SIZE) rte_be32_t value[RTE_MAX_LCORE];
	uint32_t seq_value = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		seq_value = 1;

	value[para.core_idx] = RTE_BE32(seq_value);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ;
	actions[actions_counter].conf = &value[para.core_idx];
}

static void
add_dec_tcp_seq(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static alignas(RTE_CACHE_LINE_SIZE) rte_be32_t value[RTE_MAX_LCORE];
	uint32_t seq_value = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		seq_value = 1;

	value[para.core_idx] = RTE_BE32(seq_value);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ;
	actions[actions_counter].conf = &value[para.core_idx];
}

static void
add_set_ttl(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static alignas(RTE_CACHE_LINE_SIZE) struct rte_flow_action_set_ttl set_ttl[RTE_MAX_LCORE];
	uint32_t ttl_value = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		ttl_value = 1;

	/* TTL derived from the flow counter, kept within 8 bits */
	ttl_value = ttl_value % 0xff;

	set_ttl[para.core_idx].ttl_value = ttl_value;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TTL;
	actions[actions_counter].conf = &set_ttl[para.core_idx];
}

static void
add_dec_ttl(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TTL;
}

static void
add_set_ipv4_dscp(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static alignas(RTE_CACHE_LINE_SIZE) struct rte_flow_action_set_dscp set_dscp[RTE_MAX_LCORE];
	uint32_t dscp_value = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		dscp_value = 1;

	/* DSCP value derived from the flow counter */
	dscp_value = dscp_value % 0xff;

	set_dscp[para.core_idx].dscp = dscp_value;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP;
	actions[actions_counter].conf = &set_dscp[para.core_idx];
}

static void
add_set_ipv6_dscp(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static alignas(RTE_CACHE_LINE_SIZE) struct rte_flow_action_set_dscp set_dscp[RTE_MAX_LCORE];
	uint32_t dscp_value = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		dscp_value = 1;

	/* DSCP value derived from the flow counter */
	dscp_value = dscp_value % 0xff;

	set_dscp[para.core_idx].dscp = dscp_value;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP;
	actions[actions_counter].conf = &set_dscp[para.core_idx];
}

static void
add_flag(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_FLAG;
}

static void
add_ether_header(uint8_t **header, uint64_t data,
	__rte_unused struct additional_para para)
{
	struct rte_ether_hdr eth_hdr;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_ETH)))
		return;

	memset(&eth_hdr, 0, sizeof(struct rte_ether_hdr));
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VLAN))
		eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
	else if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4))
		eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
	else if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6))
		eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
	memcpy(*header, &eth_hdr, sizeof(eth_hdr));
	*header += sizeof(eth_hdr);
}

static void
add_vlan_header(uint8_t **header, uint64_t data,
	__rte_unused struct additional_para para)
{
	struct rte_vlan_hdr vlan_hdr;
	uint16_t vlan_value;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VLAN)))
		return;

	vlan_value = VLAN_VALUE;

	memset(&vlan_hdr, 0, sizeof(struct rte_vlan_hdr));
	vlan_hdr.vlan_tci = RTE_BE16(vlan_value);

	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4))
		vlan_hdr.eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6))
		vlan_hdr.eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
	memcpy(*header, &vlan_hdr, sizeof(vlan_hdr));
	*header += sizeof(vlan_hdr);
}

static void
add_ipv4_header(uint8_t **header, uint64_t data,
	struct additional_para para)
{
	struct rte_ipv4_hdr ipv4_hdr;
	uint32_t ip_dst = para.counter;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4)))
		return;

	/* Fixed value */
	if (!para.unique_data)
		ip_dst = 1;

	memset(&ipv4_hdr, 0, sizeof(struct rte_ipv4_hdr));
	ipv4_hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
	ipv4_hdr.dst_addr = RTE_BE32(ip_dst);
	ipv4_hdr.version_ihl = RTE_IPV4_VHL_DEF;
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP))
		ipv4_hdr.next_proto_id = RTE_IP_TYPE_UDP;
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE))
		ipv4_hdr.next_proto_id = RTE_IP_TYPE_GRE;
	memcpy(*header, &ipv4_hdr, sizeof(ipv4_hdr));
	*header += sizeof(ipv4_hdr);
}

static void
add_ipv6_header(uint8_t **header, uint64_t data,
	__rte_unused struct additional_para para)
{
	struct rte_ipv6_hdr ipv6_hdr;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6)))
		return;

	memset(&ipv6_hdr, 0, sizeof(struct rte_ipv6_hdr));
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP))
		ipv6_hdr.proto = RTE_IP_TYPE_UDP;
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE))
		ipv6_hdr.proto = RTE_IP_TYPE_GRE;
	memcpy(*header, &ipv6_hdr, sizeof(ipv6_hdr));
	*header += sizeof(ipv6_hdr);
}

static void
add_udp_header(uint8_t **header, uint64_t data,
	__rte_unused struct additional_para para)
{
	struct rte_udp_hdr udp_hdr;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP)))
		return;

	memset(&udp_hdr, 0, sizeof(struct rte_udp_hdr));
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN))
		udp_hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN_GPE))
		udp_hdr.dst_port = RTE_BE16(RTE_VXLAN_GPE_UDP_PORT);
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GENEVE))
		udp_hdr.dst_port = RTE_BE16(RTE_GENEVE_UDP_PORT);
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GTP))
		udp_hdr.dst_port = RTE_BE16(RTE_GTPU_UDP_PORT);
	memcpy(*header, &udp_hdr, sizeof(udp_hdr));
	*header += sizeof(udp_hdr);
}

static void
add_vxlan_header(uint8_t **header, uint64_t data,
	struct additional_para para)
{
	struct rte_vxlan_hdr vxlan_hdr;
	uint32_t vni_value = para.counter;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN)))
		return;

	/* Fixed value */
	if (!para.unique_data)
		vni_value = 1;

	memset(&vxlan_hdr, 0, sizeof(struct rte_vxlan_hdr));

	vxlan_hdr.vx_vni = (RTE_BE32(vni_value)) >> 16;
	vxlan_hdr.vx_flags = 0x8;

	memcpy(*header, &vxlan_hdr, sizeof(vxlan_hdr));
	*header += sizeof(vxlan_hdr);
}

static void
add_vxlan_gpe_header(uint8_t **header, uint64_t data,
	struct additional_para para)
{
	struct rte_vxlan_gpe_hdr vxlan_gpe_hdr;
	uint32_t vni_value = para.counter;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN_GPE)))
		return;

	/* Fixed value */
	if (!para.unique_data)
		vni_value = 1;

	memset(&vxlan_gpe_hdr, 0, sizeof(struct rte_vxlan_gpe_hdr));

	vxlan_gpe_hdr.vx_vni = (RTE_BE32(vni_value)) >> 16;
	vxlan_gpe_hdr.vx_flags = 0x0c;

	memcpy(*header, &vxlan_gpe_hdr, sizeof(vxlan_gpe_hdr));
	*header += sizeof(vxlan_gpe_hdr);
}

static void
add_gre_header(uint8_t **header, uint64_t data,
	__rte_unused struct additional_para para)
{
	struct rte_gre_hdr gre_hdr;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE)))
		return;

	memset(&gre_hdr, 0, sizeof(struct rte_gre_hdr));

	gre_hdr.proto = RTE_BE16(RTE_ETHER_TYPE_TEB);

	memcpy(*header, &gre_hdr, sizeof(gre_hdr));
	*header += sizeof(gre_hdr);
}

static void
add_geneve_header(uint8_t **header, uint64_t data,
	struct additional_para para)
{
	struct rte_geneve_hdr geneve_hdr;
	uint32_t vni_value = para.counter;
	uint8_t i;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GENEVE)))
		return;

	/* Fixed value */
	if (!para.unique_data)
		vni_value = 1;

	memset(&geneve_hdr, 0, sizeof(struct rte_geneve_hdr));

	for (i = 0; i < 3; i++)
		geneve_hdr.vni[2 - i] = vni_value >> (i * 8);

	memcpy(*header, &geneve_hdr, sizeof(geneve_hdr));
	*header += sizeof(geneve_hdr);
}

static void
add_gtp_header(uint8_t **header, uint64_t data,
	struct additional_para para)
{
	struct rte_gtp_hdr gtp_hdr;
	uint32_t teid_value = para.counter;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GTP)))
		return;

	/* Fixed value */
	if (!para.unique_data)
		teid_value = 1;

	memset(&gtp_hdr, 0, sizeof(struct rte_gtp_hdr));

	gtp_hdr.teid = RTE_BE32(teid_value);
	gtp_hdr.msg_type = 255;

	memcpy(*header, &gtp_hdr, sizeof(gtp_hdr));
	*header += sizeof(gtp_hdr);
}

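/*
 * Header builders used by the raw encap/decap generators, listed in
 * outer-to-inner order. Each builder appends its header only when the
 * matching FLOW_ITEM_MASK() bit is set in "data", then advances the
 * write cursor past the bytes it wrote.
 */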
static const struct encap_decap_headers {
	void (*funct)(
		uint8_t **header,
		uint64_t data,
		struct additional_para para
		);
} headers[] = {
	{.funct = add_ether_header},
	{.funct = add_vlan_header},
	{.funct = add_ipv4_header},
	{.funct = add_ipv6_header},
	{.funct = add_udp_header},
	{.funct = add_vxlan_header},
	{.funct = add_vxlan_gpe_header},
	{.funct = add_gre_header},
	{.funct = add_geneve_header},
	{.funct = add_gtp_header},
};

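/*
 * Build the raw encap buffer by running every header builder above in
 * order; conf.size ends up as the distance the write cursor moved from
 * the start of the per-lcore buffer.
 */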
static void
add_raw_encap(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static alignas(RTE_CACHE_LINE_SIZE)
	    struct action_raw_encap_data *action_encap_data[RTE_MAX_LCORE];
	uint64_t encap_data = para.encap_data;
	uint8_t *header;
	uint8_t i;

	/* Avoid double allocation. */
	if (action_encap_data[para.core_idx] == NULL)
		action_encap_data[para.core_idx] = rte_malloc("encap_data",
			sizeof(struct action_raw_encap_data), 0);

	/* Check if allocation failed. */
	if (action_encap_data[para.core_idx] == NULL)
		rte_exit(EXIT_FAILURE, "No Memory available!");

	*action_encap_data[para.core_idx] = (struct action_raw_encap_data) {
		.conf = (struct rte_flow_action_raw_encap) {
			.data = action_encap_data[para.core_idx]->data,
		},
		.data = {},
	};
	header = action_encap_data[para.core_idx]->data;

	for (i = 0; i < RTE_DIM(headers); i++)
		headers[i].funct(&header, encap_data, para);

	action_encap_data[para.core_idx]->conf.size = header -
		action_encap_data[para.core_idx]->data;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
	actions[actions_counter].conf = &action_encap_data[para.core_idx]->conf;
}

static void
add_raw_decap(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static alignas(RTE_CACHE_LINE_SIZE)
	    struct action_raw_decap_data *action_decap_data[RTE_MAX_LCORE];
	uint64_t decap_data = para.decap_data;
	uint8_t *header;
	uint8_t i;

	/* Avoid double allocation. */
	if (action_decap_data[para.core_idx] == NULL)
		action_decap_data[para.core_idx] = rte_malloc("decap_data",
			sizeof(struct action_raw_decap_data), 0);

	/* Check if allocation failed. */
	if (action_decap_data[para.core_idx] == NULL)
		rte_exit(EXIT_FAILURE, "No Memory available!");

	*action_decap_data[para.core_idx] = (struct action_raw_decap_data) {
		.conf = (struct rte_flow_action_raw_decap) {
			.data = action_decap_data[para.core_idx]->data,
		},
		.data = {},
	};
	header = action_decap_data[para.core_idx]->data;

	for (i = 0; i < RTE_DIM(headers); i++)
		headers[i].funct(&header, decap_data, para);

	action_decap_data[para.core_idx]->conf.size = header -
		action_decap_data[para.core_idx]->data;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RAW_DECAP;
	actions[actions_counter].conf = &action_decap_data[para.core_idx]->conf;
}

static void
add_vxlan_encap(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static alignas(RTE_CACHE_LINE_SIZE)
	    struct rte_flow_action_vxlan_encap vxlan_encap[RTE_MAX_LCORE];
	static struct rte_flow_item items[5];
	static struct rte_flow_item_eth item_eth;
	static struct rte_flow_item_ipv4 item_ipv4;
	static struct rte_flow_item_udp item_udp;
	static struct rte_flow_item_vxlan item_vxlan;
	uint32_t ip_dst = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		ip_dst = 1;

	items[0].spec = &item_eth;
	items[0].mask = &item_eth;
	items[0].type = RTE_FLOW_ITEM_TYPE_ETH;

	item_ipv4.hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
	item_ipv4.hdr.dst_addr = RTE_BE32(ip_dst);
	item_ipv4.hdr.version_ihl = RTE_IPV4_VHL_DEF;
	items[1].spec = &item_ipv4;
	items[1].mask = &item_ipv4;
	items[1].type = RTE_FLOW_ITEM_TYPE_IPV4;

	item_udp.hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
	items[2].spec = &item_udp;
	items[2].mask = &item_udp;
	items[2].type = RTE_FLOW_ITEM_TYPE_UDP;

	item_vxlan.hdr.vni[2] = 1;
	items[3].spec = &item_vxlan;
	items[3].mask = &item_vxlan;
	items[3].type = RTE_FLOW_ITEM_TYPE_VXLAN;

	items[4].type = RTE_FLOW_ITEM_TYPE_END;

	vxlan_encap[para.core_idx].definition = items;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
	actions[actions_counter].conf = &vxlan_encap[para.core_idx];
}

static void
add_vxlan_decap(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
}

static void
add_meter(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static alignas(RTE_CACHE_LINE_SIZE) struct rte_flow_action_meter
		meters[RTE_MAX_LCORE];

	meters[para.core_idx].mtr_id = para.counter;
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_METER;
	actions[actions_counter].conf = &meters[para.core_idx];
}

void
fill_actions(struct rte_flow_action *actions, uint64_t *flow_actions,
	uint32_t counter, uint16_t next_table, uint16_t hairpinq,
	uint64_t encap_data, uint64_t decap_data, uint8_t core_idx,
	bool unique_data, uint8_t rx_queues_count, uint16_t dst_port)
{
	struct additional_para additional_para_data;
	uint8_t actions_counter = 0;
	uint16_t hairpin_queues[hairpinq];
	uint16_t queues[rx_queues_count];
	uint16_t i, j;

	for (i = 0; i < rx_queues_count; i++)
		queues[i] = i;

	for (i = 0; i < hairpinq; i++)
		hairpin_queues[i] = i + rx_queues_count;

	additional_para_data = (struct additional_para){
		.queue = counter % rx_queues_count,
		.next_table = next_table,
		.queues = queues,
		.queues_number = rx_queues_count,
		.counter = counter,
		.encap_data = encap_data,
		.decap_data = decap_data,
		.core_idx = core_idx,
		.unique_data = unique_data,
		.dst_port = dst_port,
	};

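	/*
	 * Hairpin queues take precedence when configured: queue and RSS
	 * actions are then pointed at the hairpin queues, which are numbered
	 * right after the regular Rx queues.
	 */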
	if (hairpinq != 0) {
		additional_para_data.queues = hairpin_queues;
		additional_para_data.queues_number = hairpinq;
		additional_para_data.queue = (counter % hairpinq) + rx_queues_count;
	}

	static const struct actions_dict {
		uint64_t mask;
		void (*funct)(
			struct rte_flow_action *actions,
			uint8_t actions_counter,
			struct additional_para para
			);
	} actions_list[] = {
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_MARK),
			.funct = add_mark,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_COUNT),
			.funct = add_count,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_META),
			.funct = add_set_meta,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_TAG),
			.funct = add_set_tag,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_FLAG
			),
			.funct = add_flag,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_SET_MAC_SRC
			),
			.funct = add_set_src_mac,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_SET_MAC_DST
			),
			.funct = add_set_dst_mac,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC
			),
			.funct = add_set_src_ipv4,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_SET_IPV4_DST
			),
			.funct = add_set_dst_ipv4,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC
			),
			.funct = add_set_src_ipv6,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_SET_IPV6_DST
			),
			.funct = add_set_dst_ipv6,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_SET_TP_SRC
			),
			.funct = add_set_src_tp,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_SET_TP_DST
			),
			.funct = add_set_dst_tp,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_INC_TCP_ACK
			),
			.funct = add_inc_tcp_ack,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK
			),
			.funct = add_dec_tcp_ack,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ
			),
			.funct = add_inc_tcp_seq,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ
			),
			.funct = add_dec_tcp_seq,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_SET_TTL
			),
			.funct = add_set_ttl,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_DEC_TTL
			),
			.funct = add_dec_ttl,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP
			),
			.funct = add_set_ipv4_dscp,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP
			),
			.funct = add_set_ipv6_dscp,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_QUEUE),
			.funct = add_queue,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_RSS),
			.funct = add_rss,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_JUMP),
			.funct = add_jump,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_PORT_ID),
			.funct = add_port_id,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_DROP),
			.funct = add_drop,
		},
		{
			.mask = HAIRPIN_QUEUE_ACTION,
			.funct = add_queue,
		},
		{
			.mask = HAIRPIN_RSS_ACTION,
			.funct = add_rss,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_RAW_ENCAP
			),
			.funct = add_raw_encap,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_RAW_DECAP
			),
			.funct = add_raw_decap,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP
			),
			.funct = add_vxlan_encap,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_VXLAN_DECAP
			),
			.funct = add_vxlan_decap,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_METER
			),
			.funct = add_meter,
		},
	};

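	/*
	 * Dispatch loop: for each requested action mask, find its generator
	 * in actions_list, let it fill the next slot of "actions", and stop
	 * at the first empty entry in flow_actions.
	 */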
	for (j = 0; j < MAX_ACTIONS_NUM; j++) {
		if (flow_actions[j] == 0)
			break;
		for (i = 0; i < RTE_DIM(actions_list); i++) {
			if ((flow_actions[j] &
				actions_list[i].mask) == 0)
				continue;
			actions_list[i].funct(
				actions, actions_counter++,
				additional_para_data
			);
			break;
		}
	}
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_END;
}