/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 *
 * The file contains the implementations of actions generators.
 * Each generator is responsible for preparing its action instance
 * and initializing it with the needed data.
 */

#include <sys/types.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_ethdev.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_gre.h>
#include <rte_geneve.h>

#include "actions_gen.h"
#include "flow_gen.h"
#include "config.h"

/* Storage for additional parameters for actions */
struct additional_para {
	uint16_t queue;
	uint16_t next_table;
	uint16_t *queues;
	uint16_t queues_number;
	uint32_t counter;
	uint64_t encap_data;
	uint64_t decap_data;
	uint8_t core_idx;
};
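
/*
 * fill_actions() prepares one instance of this struct per generated flow
 * and passes it by value to every generator below, so each generator can
 * read the per-flow counter and the per-core index without shared state.
 */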

/* Storage for struct rte_flow_action_raw_encap including external data. */
struct action_raw_encap_data {
	struct rte_flow_action_raw_encap conf;
	uint8_t data[128];
	uint8_t preserve[128];
	uint16_t idx;
};

/* Storage for struct rte_flow_action_raw_decap including external data. */
struct action_raw_decap_data {
	struct rte_flow_action_raw_decap conf;
	uint8_t data[128];
	uint16_t idx;
};

/* Storage for struct rte_flow_action_rss including external data. */
struct action_rss_data {
	struct rte_flow_action_rss conf;
	uint8_t key[40];
	uint16_t queue[128];
};
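
/*
 * In each storage struct above, the embedded conf is meant to point at the
 * trailing arrays of the same allocation, so a single rte_malloc() yields a
 * fully self-contained action configuration. A minimal sketch of the wiring,
 * mirroring what add_rss() does below:
 *
 *	struct action_rss_data *rss = rte_malloc("rss_data", sizeof(*rss), 0);
 *	rss->conf.key = rss->key;	// key lives in the same block
 *	rss->conf.queue = rss->queue;	// so does the queue list
 */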

static void
add_mark(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_mark mark_actions[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t counter = para.counter;

	/* Counter-derived ID in the range 1 to 255 */
	mark_actions[para.core_idx].id = (counter % 255) + 1;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_MARK;
	actions[actions_counter].conf = &mark_actions[para.core_idx];
}
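
/*
 * Most generators below follow the same pattern as add_mark(): the action
 * configuration lives in a static per-lcore slot indexed by para.core_idx,
 * so every generating core owns a stable object whose address can be stored
 * in actions[].conf without racing with the other cores.
 */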

static void
add_queue(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_queue queue_actions[RTE_MAX_LCORE] __rte_cache_aligned;

	queue_actions[para.core_idx].index = para.queue;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	actions[actions_counter].conf = &queue_actions[para.core_idx];
}

static void
add_jump(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_jump jump_action;

	jump_action.group = para.next_table;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_JUMP;
	actions[actions_counter].conf = &jump_action;
}

static void
add_rss(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct action_rss_data *rss_data[RTE_MAX_LCORE] __rte_cache_aligned;

	uint16_t queue;

	if (rss_data[para.core_idx] == NULL)
		rss_data[para.core_idx] = rte_malloc("rss_data",
			sizeof(struct action_rss_data), 0);

	if (rss_data[para.core_idx] == NULL)
		rte_exit(EXIT_FAILURE, "No Memory available!");

	*rss_data[para.core_idx] = (struct action_rss_data){
		.conf = (struct rte_flow_action_rss){
			.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
			.level = 0,
			.types = GET_RSS_HF(),
			.key_len = sizeof(rss_data[para.core_idx]->key),
			.queue_num = para.queues_number,
			.key = rss_data[para.core_idx]->key,
			.queue = rss_data[para.core_idx]->queue,
		},
		.key = { 1 },
		.queue = { 0 },
	};

	for (queue = 0; queue < para.queues_number; queue++)
		rss_data[para.core_idx]->queue[queue] = para.queues[queue];

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RSS;
	actions[actions_counter].conf = &rss_data[para.core_idx]->conf;
}

static void
add_set_meta(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_action_set_meta meta_action = {
		.data = RTE_BE32(META_DATA),
		.mask = RTE_BE32(0xffffffff),
	};

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_META;
	actions[actions_counter].conf = &meta_action;
}

static void
add_set_tag(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_action_set_tag tag_action = {
		.data = RTE_BE32(META_DATA),
		.mask = RTE_BE32(0xffffffff),
		.index = TAG_INDEX,
	};

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TAG;
	actions[actions_counter].conf = &tag_action;
}

static void
add_port_id(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_action_port_id port_id = {
		.id = PORT_ID_DST,
	};

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_PORT_ID;
	actions[actions_counter].conf = &port_id;
}

static void
add_drop(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DROP;
}

static void
add_count(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_action_count count_action;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_COUNT;
	actions[actions_counter].conf = &count_action;
}

static void
add_set_src_mac(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_set_mac set_macs[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t mac = para.counter;
	uint16_t i;

	/* Fixed value */
	if (FIXED_VALUES)
		mac = 1;

	/* MAC address to set is derived from the flow counter each time */
	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
		set_macs[para.core_idx].mac_addr[i] = mac & 0xff;
		mac = mac >> 8;
	}

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_MAC_SRC;
	actions[actions_counter].conf = &set_macs[para.core_idx];
}

static void
add_set_dst_mac(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_set_mac set_macs[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t mac = para.counter;
	uint16_t i;

	/* Fixed value */
	if (FIXED_VALUES)
		mac = 1;

	/* MAC address to set is derived from the flow counter each time */
	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
		set_macs[para.core_idx].mac_addr[i] = mac & 0xff;
		mac = mac >> 8;
	}

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_MAC_DST;
	actions[actions_counter].conf = &set_macs[para.core_idx];
}

static void
add_set_src_ipv4(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_set_ipv4 set_ipv4[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t ip = para.counter;

	/* Fixed value */
	if (FIXED_VALUES)
		ip = 1;

	/* IPv4 value to set is derived from the flow counter each time */
	set_ipv4[para.core_idx].ipv4_addr = RTE_BE32(ip + 1);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC;
	actions[actions_counter].conf = &set_ipv4[para.core_idx];
}

static void
add_set_dst_ipv4(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_set_ipv4 set_ipv4[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t ip = para.counter;

	/* Fixed value */
	if (FIXED_VALUES)
		ip = 1;

	/* IPv4 value to set is derived from the flow counter each time */
	set_ipv4[para.core_idx].ipv4_addr = RTE_BE32(ip + 1);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DST;
	actions[actions_counter].conf = &set_ipv4[para.core_idx];
}

static void
add_set_src_ipv6(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_set_ipv6 set_ipv6[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t ipv6 = para.counter;
	uint8_t i;

	/* Fixed value */
	if (FIXED_VALUES)
		ipv6 = 1;

	/* IPv6 value to set is derived from the flow counter each time */
	for (i = 0; i < 16; i++) {
		set_ipv6[para.core_idx].ipv6_addr[i] = ipv6 & 0xff;
		ipv6 = ipv6 >> 8;
	}

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC;
	actions[actions_counter].conf = &set_ipv6[para.core_idx];
}

static void
add_set_dst_ipv6(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_set_ipv6 set_ipv6[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t ipv6 = para.counter;
	uint8_t i;

	/* Fixed value */
	if (FIXED_VALUES)
		ipv6 = 1;

	/* IPv6 value to set is derived from the flow counter each time */
	for (i = 0; i < 16; i++) {
		set_ipv6[para.core_idx].ipv6_addr[i] = ipv6 & 0xff;
		ipv6 = ipv6 >> 8;
	}

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_DST;
	actions[actions_counter].conf = &set_ipv6[para.core_idx];
}

static void
add_set_src_tp(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_set_tp set_tp[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t tp = para.counter;

	/* Fixed value */
	if (FIXED_VALUES)
		tp = 100;

	/* TP src port is derived from the flow counter, limited to 16 bits */
	tp = tp % 0xffff;

	set_tp[para.core_idx].port = RTE_BE16(tp & 0xffff);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_SRC;
	actions[actions_counter].conf = &set_tp[para.core_idx];
}

static void
add_set_dst_tp(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_set_tp set_tp[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t tp = para.counter;

	/* Fixed value */
	if (FIXED_VALUES)
		tp = 100;

	/* TP dst port is derived from the flow counter, limited to 16 bits */
	if (tp > 0xffff)
		tp = tp >> 16;

	set_tp[para.core_idx].port = RTE_BE16(tp & 0xffff);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_DST;
	actions[actions_counter].conf = &set_tp[para.core_idx];
}

static void
add_inc_tcp_ack(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t ack_value = para.counter;

	/* Fixed value */
	if (FIXED_VALUES)
		ack_value = 1;

	value[para.core_idx] = RTE_BE32(ack_value);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_ACK;
	actions[actions_counter].conf = &value[para.core_idx];
}

static void
add_dec_tcp_ack(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t ack_value = para.counter;

	/* Fixed value */
	if (FIXED_VALUES)
		ack_value = 1;

	value[para.core_idx] = RTE_BE32(ack_value);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK;
	actions[actions_counter].conf = &value[para.core_idx];
}

static void
add_inc_tcp_seq(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t seq_value = para.counter;

	/* Fixed value */
	if (FIXED_VALUES)
		seq_value = 1;

	value[para.core_idx] = RTE_BE32(seq_value);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ;
	actions[actions_counter].conf = &value[para.core_idx];
}

static void
add_dec_tcp_seq(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t seq_value = para.counter;

	/* Fixed value */
	if (FIXED_VALUES)
		seq_value = 1;

	value[para.core_idx] = RTE_BE32(seq_value);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ;
	actions[actions_counter].conf = &value[para.core_idx];
}

static void
add_set_ttl(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_set_ttl set_ttl[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t ttl_value = para.counter;

	/* Fixed value */
	if (FIXED_VALUES)
		ttl_value = 1;

	/* TTL is derived from the flow counter, limited to 8 bits */
	ttl_value = ttl_value % 0xff;

	set_ttl[para.core_idx].ttl_value = ttl_value;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TTL;
	actions[actions_counter].conf = &set_ttl[para.core_idx];
}

static void
add_dec_ttl(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TTL;
}

static void
add_set_ipv4_dscp(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_set_dscp set_dscp[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t dscp_value = para.counter;

	/* Fixed value */
	if (FIXED_VALUES)
		dscp_value = 1;

	/* DSCP is derived from the flow counter, limited to 8 bits */
	dscp_value = dscp_value % 0xff;

	set_dscp[para.core_idx].dscp = dscp_value;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP;
	actions[actions_counter].conf = &set_dscp[para.core_idx];
}

static void
add_set_ipv6_dscp(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_set_dscp set_dscp[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t dscp_value = para.counter;

	/* Fixed value */
	if (FIXED_VALUES)
		dscp_value = 1;

	/* DSCP is derived from the flow counter, limited to 8 bits */
	dscp_value = dscp_value % 0xff;

	set_dscp[para.core_idx].dscp = dscp_value;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP;
	actions[actions_counter].conf = &set_dscp[para.core_idx];
}

static void
add_flag(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_FLAG;
}

static void
add_ether_header(uint8_t **header, uint64_t data,
	__rte_unused struct additional_para para)
{
	struct rte_ether_hdr eth_hdr;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_ETH)))
		return;

	memset(&eth_hdr, 0, sizeof(struct rte_ether_hdr));
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VLAN))
		eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
	else if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4))
		eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
	else if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6))
		eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
	memcpy(*header, &eth_hdr, sizeof(eth_hdr));
	*header += sizeof(eth_hdr);
}

static void
add_vlan_header(uint8_t **header, uint64_t data,
	__rte_unused struct additional_para para)
{
	struct rte_vlan_hdr vlan_hdr;
	uint16_t vlan_value;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VLAN)))
		return;

	vlan_value = VLAN_VALUE;

	memset(&vlan_hdr, 0, sizeof(struct rte_vlan_hdr));
	vlan_hdr.vlan_tci = RTE_BE16(vlan_value);

	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4))
		vlan_hdr.eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6))
		vlan_hdr.eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
	memcpy(*header, &vlan_hdr, sizeof(vlan_hdr));
	*header += sizeof(vlan_hdr);
}

static void
add_ipv4_header(uint8_t **header, uint64_t data,
	struct additional_para para)
{
	struct rte_ipv4_hdr ipv4_hdr;
	uint32_t ip_dst = para.counter;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4)))
		return;

	/* Fixed value */
	if (FIXED_VALUES)
		ip_dst = 1;

	memset(&ipv4_hdr, 0, sizeof(struct rte_ipv4_hdr));
	ipv4_hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
	ipv4_hdr.dst_addr = RTE_BE32(ip_dst);
	ipv4_hdr.version_ihl = RTE_IPV4_VHL_DEF;
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP))
		ipv4_hdr.next_proto_id = RTE_IP_TYPE_UDP;
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE))
		ipv4_hdr.next_proto_id = RTE_IP_TYPE_GRE;
	memcpy(*header, &ipv4_hdr, sizeof(ipv4_hdr));
	*header += sizeof(ipv4_hdr);
}

static void
add_ipv6_header(uint8_t **header, uint64_t data,
	__rte_unused struct additional_para para)
{
	struct rte_ipv6_hdr ipv6_hdr;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6)))
		return;

	memset(&ipv6_hdr, 0, sizeof(struct rte_ipv6_hdr));
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP))
		ipv6_hdr.proto = RTE_IP_TYPE_UDP;
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE))
		ipv6_hdr.proto = RTE_IP_TYPE_GRE;
	memcpy(*header, &ipv6_hdr, sizeof(ipv6_hdr));
	*header += sizeof(ipv6_hdr);
}

static void
add_udp_header(uint8_t **header, uint64_t data,
	__rte_unused struct additional_para para)
{
	struct rte_udp_hdr udp_hdr;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP)))
		return;

	memset(&udp_hdr, 0, sizeof(struct rte_udp_hdr));
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN))
		udp_hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN_GPE))
		udp_hdr.dst_port = RTE_BE16(RTE_VXLAN_GPE_UDP_PORT);
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GENEVE))
		udp_hdr.dst_port = RTE_BE16(RTE_GENEVE_UDP_PORT);
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GTP))
		udp_hdr.dst_port = RTE_BE16(RTE_GTPU_UDP_PORT);
	memcpy(*header, &udp_hdr, sizeof(udp_hdr));
	*header += sizeof(udp_hdr);
}

static void
add_vxlan_header(uint8_t **header, uint64_t data,
	struct additional_para para)
{
	struct rte_vxlan_hdr vxlan_hdr;
	uint32_t vni_value = para.counter;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN)))
		return;

	/* Fixed value */
	if (FIXED_VALUES)
		vni_value = 1;

	memset(&vxlan_hdr, 0, sizeof(struct rte_vxlan_hdr));

	vxlan_hdr.vx_vni = (RTE_BE32(vni_value)) >> 16;
	vxlan_hdr.vx_flags = 0x8;

	memcpy(*header, &vxlan_hdr, sizeof(vxlan_hdr));
	*header += sizeof(vxlan_hdr);
}

static void
add_vxlan_gpe_header(uint8_t **header, uint64_t data,
	struct additional_para para)
{
	struct rte_vxlan_gpe_hdr vxlan_gpe_hdr;
	uint32_t vni_value = para.counter;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN_GPE)))
		return;

	/* Fixed value */
	if (FIXED_VALUES)
		vni_value = 1;

	memset(&vxlan_gpe_hdr, 0, sizeof(struct rte_vxlan_gpe_hdr));

	vxlan_gpe_hdr.vx_vni = (RTE_BE32(vni_value)) >> 16;
	vxlan_gpe_hdr.vx_flags = 0x0c;

	memcpy(*header, &vxlan_gpe_hdr, sizeof(vxlan_gpe_hdr));
	*header += sizeof(vxlan_gpe_hdr);
}

static void
add_gre_header(uint8_t **header, uint64_t data,
	__rte_unused struct additional_para para)
{
	struct rte_gre_hdr gre_hdr;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE)))
		return;

	memset(&gre_hdr, 0, sizeof(struct rte_gre_hdr));

	gre_hdr.proto = RTE_BE16(RTE_ETHER_TYPE_TEB);

	memcpy(*header, &gre_hdr, sizeof(gre_hdr));
	*header += sizeof(gre_hdr);
}

static void
add_geneve_header(uint8_t **header, uint64_t data,
	struct additional_para para)
{
	struct rte_geneve_hdr geneve_hdr;
	uint32_t vni_value = para.counter;
	uint8_t i;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GENEVE)))
		return;

	/* Fixed value */
	if (FIXED_VALUES)
		vni_value = 1;

	memset(&geneve_hdr, 0, sizeof(struct rte_geneve_hdr));

	for (i = 0; i < 3; i++)
		geneve_hdr.vni[2 - i] = vni_value >> (i * 8);

	memcpy(*header, &geneve_hdr, sizeof(geneve_hdr));
	*header += sizeof(geneve_hdr);
}

static void
add_gtp_header(uint8_t **header, uint64_t data,
	struct additional_para para)
{
	struct rte_gtp_hdr gtp_hdr;
	uint32_t teid_value = para.counter;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GTP)))
		return;

	/* Fixed value */
	if (FIXED_VALUES)
		teid_value = 1;

	memset(&gtp_hdr, 0, sizeof(struct rte_gtp_hdr));

	gtp_hdr.teid = RTE_BE32(teid_value);
	gtp_hdr.msg_type = 255;

	memcpy(*header, &gtp_hdr, sizeof(gtp_hdr));
	*header += sizeof(gtp_hdr);
}

static const struct encap_decap_headers {
	void (*funct)(
		uint8_t **header,
		uint64_t data,
		struct additional_para para
		);
} headers[] = {
	{.funct = add_ether_header},
	{.funct = add_vlan_header},
	{.funct = add_ipv4_header},
	{.funct = add_ipv6_header},
	{.funct = add_udp_header},
	{.funct = add_vxlan_header},
	{.funct = add_vxlan_gpe_header},
	{.funct = add_gre_header},
	{.funct = add_geneve_header},
	{.funct = add_gtp_header},
};
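
/*
 * add_raw_encap()/add_raw_decap() walk this table in order, and each
 * generator appends its header only when the matching FLOW_ITEM_MASK()
 * bit is set in the data word, so the table order fixes the resulting
 * encapsulation layout. A minimal sketch of how a hypothetical caller
 * could request an Eth/IPv4/UDP/VXLAN stack (the data word and the
 * prepared `para' here are illustrative, not the app's actual parsing):
 *
 *	uint64_t data = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_ETH) |
 *			FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4) |
 *			FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP) |
 *			FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN);
 *	uint8_t buf[128], *pos = buf;
 *	unsigned int i;
 *
 *	for (i = 0; i < RTE_DIM(headers); i++)
 *		headers[i].funct(&pos, data, para);
 *	// pos - buf now equals the total size of the four headers
 */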

static void
add_raw_encap(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct action_raw_encap_data *action_encap_data[RTE_MAX_LCORE] __rte_cache_aligned;
	uint64_t encap_data = para.encap_data;
	uint8_t *header;
	uint8_t i;

	/* Avoid double allocation. */
	if (action_encap_data[para.core_idx] == NULL)
		action_encap_data[para.core_idx] = rte_malloc("encap_data",
			sizeof(struct action_raw_encap_data), 0);

	/* Check if allocation failed. */
	if (action_encap_data[para.core_idx] == NULL)
		rte_exit(EXIT_FAILURE, "No Memory available!");

	*action_encap_data[para.core_idx] = (struct action_raw_encap_data) {
		.conf = (struct rte_flow_action_raw_encap) {
			.data = action_encap_data[para.core_idx]->data,
		},
		.data = {},
	};
	header = action_encap_data[para.core_idx]->data;

	for (i = 0; i < RTE_DIM(headers); i++)
		headers[i].funct(&header, encap_data, para);

	action_encap_data[para.core_idx]->conf.size = header -
		action_encap_data[para.core_idx]->data;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
	actions[actions_counter].conf = &action_encap_data[para.core_idx]->conf;
}

static void
add_raw_decap(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct action_raw_decap_data *action_decap_data[RTE_MAX_LCORE] __rte_cache_aligned;
	uint64_t decap_data = para.decap_data;
	uint8_t *header;
	uint8_t i;

	/* Avoid double allocation. */
	if (action_decap_data[para.core_idx] == NULL)
		action_decap_data[para.core_idx] = rte_malloc("decap_data",
			sizeof(struct action_raw_decap_data), 0);

	/* Check if allocation failed. */
	if (action_decap_data[para.core_idx] == NULL)
		rte_exit(EXIT_FAILURE, "No Memory available!");

	*action_decap_data[para.core_idx] = (struct action_raw_decap_data) {
		.conf = (struct rte_flow_action_raw_decap) {
			.data = action_decap_data[para.core_idx]->data,
		},
		.data = {},
	};
	header = action_decap_data[para.core_idx]->data;

	for (i = 0; i < RTE_DIM(headers); i++)
		headers[i].funct(&header, decap_data, para);

	action_decap_data[para.core_idx]->conf.size = header -
		action_decap_data[para.core_idx]->data;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RAW_DECAP;
	actions[actions_counter].conf = &action_decap_data[para.core_idx]->conf;
}

static void
add_vxlan_encap(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_vxlan_encap vxlan_encap[RTE_MAX_LCORE] __rte_cache_aligned;
	/* Unlike the per-lcore action storage above, the item templates
	 * below are shared by all generating cores.
	 */
	static struct rte_flow_item items[5];
	static struct rte_flow_item_eth item_eth;
	static struct rte_flow_item_ipv4 item_ipv4;
	static struct rte_flow_item_udp item_udp;
	static struct rte_flow_item_vxlan item_vxlan;
	uint32_t ip_dst = para.counter;

	/* Fixed value */
	if (FIXED_VALUES)
		ip_dst = 1;

	items[0].spec = &item_eth;
	items[0].mask = &item_eth;
	items[0].type = RTE_FLOW_ITEM_TYPE_ETH;

	item_ipv4.hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
	item_ipv4.hdr.dst_addr = RTE_BE32(ip_dst);
	item_ipv4.hdr.version_ihl = RTE_IPV4_VHL_DEF;
	items[1].spec = &item_ipv4;
	items[1].mask = &item_ipv4;
	items[1].type = RTE_FLOW_ITEM_TYPE_IPV4;

	item_udp.hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
	items[2].spec = &item_udp;
	items[2].mask = &item_udp;
	items[2].type = RTE_FLOW_ITEM_TYPE_UDP;

	item_vxlan.vni[2] = 1;
	items[3].spec = &item_vxlan;
	items[3].mask = &item_vxlan;
	items[3].type = RTE_FLOW_ITEM_TYPE_VXLAN;

	items[4].type = RTE_FLOW_ITEM_TYPE_END;

	vxlan_encap[para.core_idx].definition = items;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
	actions[actions_counter].conf = &vxlan_encap[para.core_idx];
}

static void
add_vxlan_decap(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
}

static void
add_meter(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_meter
		meters[RTE_MAX_LCORE] __rte_cache_aligned;

	meters[para.core_idx].mtr_id = para.counter;
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_METER;
	actions[actions_counter].conf = &meters[para.core_idx];
}

void
fill_actions(struct rte_flow_action *actions, uint64_t *flow_actions,
	uint32_t counter, uint16_t next_table, uint16_t hairpinq,
	uint64_t encap_data, uint64_t decap_data, uint8_t core_idx)
{
	struct additional_para additional_para_data;
	uint8_t actions_counter = 0;
	uint16_t hairpin_queues[hairpinq];
	uint16_t queues[RXQ_NUM];
	uint16_t i, j;

	for (i = 0; i < RXQ_NUM; i++)
		queues[i] = i;

	for (i = 0; i < hairpinq; i++)
		hairpin_queues[i] = i + RXQ_NUM;

	additional_para_data = (struct additional_para){
		.queue = counter % RXQ_NUM,
		.next_table = next_table,
		.queues = queues,
		.queues_number = RXQ_NUM,
		.counter = counter,
		.encap_data = encap_data,
		.decap_data = decap_data,
		.core_idx = core_idx,
	};

	if (hairpinq != 0) {
		additional_para_data.queues = hairpin_queues;
		additional_para_data.queues_number = hairpinq;
		additional_para_data.queue = (counter % hairpinq) + RXQ_NUM;
	}

	static const struct actions_dict {
		uint64_t mask;
		void (*funct)(
			struct rte_flow_action *actions,
			uint8_t actions_counter,
			struct additional_para para
			);
	} actions_list[] = {
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_MARK),
			.funct = add_mark,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_COUNT),
			.funct = add_count,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_META),
			.funct = add_set_meta,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_TAG),
			.funct = add_set_tag,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_FLAG
			),
			.funct = add_flag,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_SET_MAC_SRC
			),
			.funct = add_set_src_mac,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_SET_MAC_DST
			),
			.funct = add_set_dst_mac,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC
			),
			.funct = add_set_src_ipv4,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_SET_IPV4_DST
			),
			.funct = add_set_dst_ipv4,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC
			),
			.funct = add_set_src_ipv6,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_SET_IPV6_DST
			),
			.funct = add_set_dst_ipv6,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_SET_TP_SRC
			),
			.funct = add_set_src_tp,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_SET_TP_DST
			),
			.funct = add_set_dst_tp,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_INC_TCP_ACK
			),
			.funct = add_inc_tcp_ack,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK
			),
			.funct = add_dec_tcp_ack,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ
			),
			.funct = add_inc_tcp_seq,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ
			),
			.funct = add_dec_tcp_seq,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_SET_TTL
			),
			.funct = add_set_ttl,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_DEC_TTL
			),
			.funct = add_dec_ttl,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP
			),
			.funct = add_set_ipv4_dscp,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP
			),
			.funct = add_set_ipv6_dscp,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_QUEUE),
			.funct = add_queue,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_RSS),
			.funct = add_rss,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_JUMP),
			.funct = add_jump,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_PORT_ID),
			.funct = add_port_id,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_DROP),
			.funct = add_drop,
		},
		{
			.mask = HAIRPIN_QUEUE_ACTION,
			.funct = add_queue,
		},
		{
			.mask = HAIRPIN_RSS_ACTION,
			.funct = add_rss,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_RAW_ENCAP
			),
			.funct = add_raw_encap,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_RAW_DECAP
			),
			.funct = add_raw_decap,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP
			),
			.funct = add_vxlan_encap,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_VXLAN_DECAP
			),
			.funct = add_vxlan_decap,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_METER
			),
			.funct = add_meter,
		},
	};

	for (j = 0; j < MAX_ACTIONS_NUM; j++) {
		if (flow_actions[j] == 0)
			break;
		for (i = 0; i < RTE_DIM(actions_list); i++) {
			if ((flow_actions[j] &
				actions_list[i].mask) == 0)
				continue;
			actions_list[i].funct(
				actions, actions_counter++,
				additional_para_data
			);
			break;
		}
	}
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_END;
}
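
/*
 * Example usage, a hypothetical caller (in the app itself, flow_gen.c
 * drives this): build an actions array that marks, counts, then queues
 * each packet. The zero-initialized tail of flow_actions[] terminates
 * the scan inside fill_actions().
 *
 *	struct rte_flow_action actions[MAX_ACTIONS_NUM];
 *	uint64_t flow_actions[MAX_ACTIONS_NUM] = {
 *		FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_MARK),
 *		FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_COUNT),
 *		FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_QUEUE),
 *	};
 *
 *	fill_actions(actions, flow_actions, counter, 0, 0, 0, 0, core_idx);
 *
 * actions[] is now RTE_FLOW_ACTION_TYPE_END-terminated and ready to be
 * passed to rte_flow_create() together with a matching pattern.
 */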