xref: /dpdk/lib/pipeline/rte_table_action.c (revision 4d23d39fd06ed89b2d2566273b95bbecbd48ed83)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2018 Intel Corporation
3  */
4 #include <stdlib.h>
5 #include <string.h>
6 
7 #include <rte_common.h>
8 #include <rte_byteorder.h>
9 #include <rte_cycles.h>
10 #include <rte_malloc.h>
11 #include <rte_memcpy.h>
12 #include <rte_ether.h>
13 #include <rte_ip.h>
14 #include <rte_tcp.h>
15 #include <rte_udp.h>
16 #include <rte_vxlan.h>
17 #include <rte_cryptodev.h>
18 
19 #include "rte_table_action.h"
20 
21 #define rte_htons rte_cpu_to_be_16
22 #define rte_htonl rte_cpu_to_be_32
23 
24 #define rte_ntohs rte_be_to_cpu_16
25 #define rte_ntohl rte_be_to_cpu_32
26 
27 /**
28  * RTE_TABLE_ACTION_FWD
29  */
30 #define fwd_data rte_pipeline_table_entry
31 
32 static int
33 fwd_apply(struct fwd_data *data,
34 	struct rte_table_action_fwd_params *p)
35 {
36 	data->action = p->action;
37 
38 	if (p->action == RTE_PIPELINE_ACTION_PORT)
39 		data->port_id = p->id;
40 
41 	if (p->action == RTE_PIPELINE_ACTION_TABLE)
42 		data->table_id = p->id;
43 
44 	return 0;
45 }
46 
47 /**
48  * RTE_TABLE_ACTION_LB
49  */
50 static int
51 lb_cfg_check(struct rte_table_action_lb_config *cfg)
52 {
53 	if ((cfg == NULL) ||
54 		(cfg->key_size < RTE_TABLE_ACTION_LB_KEY_SIZE_MIN) ||
55 		(cfg->key_size > RTE_TABLE_ACTION_LB_KEY_SIZE_MAX) ||
56 		(!rte_is_power_of_2(cfg->key_size)) ||
57 		(cfg->f_hash == NULL))
58 		return -1;
59 
60 	return 0;
61 }
62 
/* Per-entry load balancing data: translation table mapping the low bits
 * of the packet key hash digest to an output value. */
struct lb_data {
	uint32_t out[RTE_TABLE_ACTION_LB_TABLE_SIZE];
} __rte_packed;
66 
67 static int
68 lb_apply(struct lb_data *data,
69 	struct rte_table_action_lb_params *p)
70 {
71 	memcpy(data->out, p->out, sizeof(data->out));
72 
73 	return 0;
74 }
75 
/* Fast-path LB handler: hash the packet key found in the mbuf metadata,
 * use the low bits of the digest to index the entry's translation table
 * and write the selected value back into the metadata at out_offset. */
static __rte_always_inline void
pkt_work_lb(struct rte_mbuf *mbuf,
	struct lb_data *data,
	struct rte_table_action_lb_config *cfg)
{
	uint8_t *pkt_key = RTE_MBUF_METADATA_UINT8_PTR(mbuf, cfg->key_offset);
	uint32_t *out = RTE_MBUF_METADATA_UINT32_PTR(mbuf, cfg->out_offset);
	uint64_t digest, pos;
	uint32_t out_val;

	/* Hash of the masked packet key. */
	digest = cfg->f_hash(pkt_key,
		cfg->key_mask,
		cfg->key_size,
		cfg->seed);
	/* Table size is a power of 2, so masking implements modulo. */
	pos = digest & (RTE_TABLE_ACTION_LB_TABLE_SIZE - 1);
	out_val = data->out[pos];

	*out = out_val;
}
95 
96 /**
97  * RTE_TABLE_ACTION_MTR
98  */
99 static int
100 mtr_cfg_check(struct rte_table_action_mtr_config *mtr)
101 {
102 	if ((mtr->alg == RTE_TABLE_ACTION_METER_SRTCM) ||
103 		((mtr->n_tc != 1) && (mtr->n_tc != 4)) ||
104 		(mtr->n_bytes_enabled != 0))
105 		return -ENOTSUP;
106 	return 0;
107 }
108 
/* Per-TC meter data. The stats[] slots are overloaded: bits 63:8 hold the
 * per-output-color packet counter, while the low 8 bits of selected slots
 * store control fields (meter profile index, policer actions). */
struct mtr_trtcm_data {
	struct rte_meter_trtcm trtcm;
	uint64_t stats[RTE_COLORS];
};

/* Meter profile table index: 5 bits stored at bits 7:3 of the GREEN slot. */
#define MTR_TRTCM_DATA_METER_PROFILE_ID_GET(data)          \
	(((data)->stats[RTE_COLOR_GREEN] & 0xF8LLU) >> 3)
116 
117 static void
118 mtr_trtcm_data_meter_profile_id_set(struct mtr_trtcm_data *data,
119 	uint32_t profile_id)
120 {
121 	data->stats[RTE_COLOR_GREEN] &= ~0xF8LLU;
122 	data->stats[RTE_COLOR_GREEN] |= (profile_id % 32) << 3;
123 }
124 
/* Policer drop flag for a given meter output color: bit 2 of the slot. */
#define MTR_TRTCM_DATA_POLICER_ACTION_DROP_GET(data, color)\
	(((data)->stats[(color)] & 4LLU) >> 2)

/* Policer output color for a given meter output color: bits 1:0. */
#define MTR_TRTCM_DATA_POLICER_ACTION_COLOR_GET(data, color)\
	((enum rte_color)((data)->stats[(color)] & 3LLU))
130 
131 static void
132 mtr_trtcm_data_policer_action_set(struct mtr_trtcm_data *data,
133 	enum rte_color color,
134 	enum rte_table_action_policer action)
135 {
136 	if (action == RTE_TABLE_ACTION_POLICER_DROP) {
137 		data->stats[color] |= 4LLU;
138 	} else {
139 		data->stats[color] &= ~7LLU;
140 		data->stats[color] |= color & 3LLU;
141 	}
142 }
143 
144 static uint64_t
145 mtr_trtcm_data_stats_get(struct mtr_trtcm_data *data,
146 	enum rte_color color)
147 {
148 	return data->stats[color] >> 8;
149 }
150 
151 static void
152 mtr_trtcm_data_stats_reset(struct mtr_trtcm_data *data,
153 	enum rte_color color)
154 {
155 	data->stats[color] &= 0xFFLU;
156 }
157 
/* Increment the per-color packet counter (bits 63:8 of the slot). */
#define MTR_TRTCM_DATA_STATS_INC(data, color)              \
	((data)->stats[(color)] += (1LLU << 8))
160 
161 static size_t
162 mtr_data_size(struct rte_table_action_mtr_config *mtr)
163 {
164 	return mtr->n_tc * sizeof(struct mtr_trtcm_data);
165 }
166 
/* DSCP translation table entry: per-DSCP packet color and traffic
 * class/queue, shared by the meter and TM actions. */
struct dscp_table_entry_data {
	enum rte_color color;
	uint16_t tc;
	uint16_t tc_queue;
};

/* Full DSCP translation table: one entry per 6-bit DSCP value. */
struct dscp_table_data {
	struct dscp_table_entry_data entry[64];
};

/* Slot in the meter profile table; valid == 0 marks a free slot. */
struct meter_profile_data {
	struct rte_meter_trtcm_profile profile;
	uint32_t profile_id;
	int valid;
};
182 
183 static struct meter_profile_data *
184 meter_profile_data_find(struct meter_profile_data *mp,
185 	uint32_t mp_size,
186 	uint32_t profile_id)
187 {
188 	uint32_t i;
189 
190 	for (i = 0; i < mp_size; i++) {
191 		struct meter_profile_data *mp_data = &mp[i];
192 
193 		if (mp_data->valid && (mp_data->profile_id == profile_id))
194 			return mp_data;
195 	}
196 
197 	return NULL;
198 }
199 
200 static struct meter_profile_data *
201 meter_profile_data_find_unused(struct meter_profile_data *mp,
202 	uint32_t mp_size)
203 {
204 	uint32_t i;
205 
206 	for (i = 0; i < mp_size; i++) {
207 		struct meter_profile_data *mp_data = &mp[i];
208 
209 		if (!mp_data->valid)
210 			return mp_data;
211 	}
212 
213 	return NULL;
214 }
215 
216 static int
217 mtr_apply_check(struct rte_table_action_mtr_params *p,
218 	struct rte_table_action_mtr_config *cfg,
219 	struct meter_profile_data *mp,
220 	uint32_t mp_size)
221 {
222 	uint32_t i;
223 
224 	if (p->tc_mask > RTE_LEN2MASK(cfg->n_tc, uint32_t))
225 		return -EINVAL;
226 
227 	for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
228 		struct rte_table_action_mtr_tc_params *p_tc = &p->mtr[i];
229 		struct meter_profile_data *mp_data;
230 
231 		if ((p->tc_mask & (1LLU << i)) == 0)
232 			continue;
233 
234 		mp_data = meter_profile_data_find(mp,
235 			mp_size,
236 			p_tc->meter_profile_id);
237 		if (!mp_data)
238 			return -EINVAL;
239 	}
240 
241 	return 0;
242 }
243 
/* Install per-TC meter state for a table entry: configure the trTCM
 * run-time context from the referenced profile, record the profile table
 * index and the per-color policer actions.
 * Returns 0 on success or a negative errno-style value. */
static int
mtr_apply(struct mtr_trtcm_data *data,
	struct rte_table_action_mtr_params *p,
	struct rte_table_action_mtr_config *cfg,
	struct meter_profile_data *mp,
	uint32_t mp_size)
{
	uint32_t i;
	int status;

	/* Check input arguments */
	status = mtr_apply_check(p, cfg, mp, mp_size);
	if (status)
		return status;

	/* Apply */
	for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
		struct rte_table_action_mtr_tc_params *p_tc = &p->mtr[i];
		struct mtr_trtcm_data *data_tc = &data[i];
		struct meter_profile_data *mp_data;

		/* Skip traffic classes not selected by the caller. */
		if ((p->tc_mask & (1LLU << i)) == 0)
			continue;

		/* Find profile */
		mp_data = meter_profile_data_find(mp,
			mp_size,
			p_tc->meter_profile_id);
		if (!mp_data)
			return -EINVAL;

		memset(data_tc, 0, sizeof(*data_tc));

		/* Meter object */
		status = rte_meter_trtcm_config(&data_tc->trtcm,
			&mp_data->profile);
		if (status)
			return status;

		/* Meter profile: store the profile TABLE INDEX (mp_data - mp),
		 * not the user-visible profile_id. */
		mtr_trtcm_data_meter_profile_id_set(data_tc,
			mp_data - mp);

		/* Policer actions */
		mtr_trtcm_data_policer_action_set(data_tc,
			RTE_COLOR_GREEN,
			p_tc->policer[RTE_COLOR_GREEN]);

		mtr_trtcm_data_policer_action_set(data_tc,
			RTE_COLOR_YELLOW,
			p_tc->policer[RTE_COLOR_YELLOW]);

		mtr_trtcm_data_policer_action_set(data_tc,
			RTE_COLOR_RED,
			p_tc->policer[RTE_COLOR_RED]);
	}

	return 0;
}
303 
/* Fast-path meter handler: select the per-TC trTCM context via the
 * packet's DSCP translation entry, run the color-aware meter, bump the
 * per-color packet counter, apply the policer action and return a
 * non-zero drop mask when the policer verdict is drop. */
static __rte_always_inline uint64_t
pkt_work_mtr(struct rte_mbuf *mbuf,
	struct mtr_trtcm_data *data,
	struct dscp_table_data *dscp_table,
	struct meter_profile_data *mp,
	uint64_t time,
	uint32_t dscp,
	uint16_t total_length)
{
	uint64_t drop_mask;
	struct dscp_table_entry_data *dscp_entry = &dscp_table->entry[dscp];
	enum rte_color color_in, color_meter, color_policer;
	uint32_t tc, mp_id;

	tc = dscp_entry->tc;
	color_in = dscp_entry->color;
	/* Advance to this packet's traffic class context. */
	data += tc;
	/* Profile table index stored in the entry's control bits. */
	mp_id = MTR_TRTCM_DATA_METER_PROFILE_ID_GET(data);

	/* Meter */
	color_meter = rte_meter_trtcm_color_aware_check(
		&data->trtcm,
		&mp[mp_id].profile,
		time,
		total_length,
		color_in);

	/* Stats */
	MTR_TRTCM_DATA_STATS_INC(data, color_meter);

	/* Police */
	drop_mask = MTR_TRTCM_DATA_POLICER_ACTION_DROP_GET(data, color_meter);
	color_policer =
		MTR_TRTCM_DATA_POLICER_ACTION_COLOR_GET(data, color_meter);
	rte_mbuf_sched_color_set(mbuf, (uint8_t)color_policer);

	return drop_mask;
}
342 
343 /**
344  * RTE_TABLE_ACTION_TM
345  */
346 static int
347 tm_cfg_check(struct rte_table_action_tm_config *tm)
348 {
349 	if ((tm->n_subports_per_port == 0) ||
350 		(rte_is_power_of_2(tm->n_subports_per_port) == 0) ||
351 		(tm->n_subports_per_port > UINT16_MAX) ||
352 		(tm->n_pipes_per_subport == 0) ||
353 		(rte_is_power_of_2(tm->n_pipes_per_subport) == 0))
354 		return -ENOTSUP;
355 
356 	return 0;
357 }
358 
/* Per-entry TM data: precomputed scheduler queue ID base (subport and
 * pipe bits; the low 4 TC-queue bits are filled per packet). */
struct tm_data {
	uint32_t queue_id;
	uint32_t reserved;	/* pad to 8 bytes */
} __rte_packed;
363 
364 static int
365 tm_apply_check(struct rte_table_action_tm_params *p,
366 	struct rte_table_action_tm_config *cfg)
367 {
368 	if ((p->subport_id >= cfg->n_subports_per_port) ||
369 		(p->pipe_id >= cfg->n_pipes_per_subport))
370 		return -EINVAL;
371 
372 	return 0;
373 }
374 
375 static int
376 tm_apply(struct tm_data *data,
377 	struct rte_table_action_tm_params *p,
378 	struct rte_table_action_tm_config *cfg)
379 {
380 	int status;
381 
382 	/* Check input arguments */
383 	status = tm_apply_check(p, cfg);
384 	if (status)
385 		return status;
386 
387 	/* Apply */
388 	data->queue_id = p->subport_id <<
389 				(rte_ctz32(cfg->n_pipes_per_subport) + 4) |
390 				p->pipe_id << 4;
391 
392 	return 0;
393 }
394 
/* Fast-path TM handler: combine the entry's precomputed subport/pipe
 * queue ID base with the TC queue from the DSCP translation entry, then
 * store queue ID, traffic class and color into the mbuf sched fields. */
static __rte_always_inline void
pkt_work_tm(struct rte_mbuf *mbuf,
	struct tm_data *data,
	struct dscp_table_data *dscp_table,
	uint32_t dscp)
{
	struct dscp_table_entry_data *dscp_entry = &dscp_table->entry[dscp];
	uint32_t queue_id = data->queue_id |
				dscp_entry->tc_queue;
	rte_mbuf_sched_set(mbuf, queue_id, dscp_entry->tc,
				(uint8_t)dscp_entry->color);
}
407 
408 /**
409  * RTE_TABLE_ACTION_ENCAP
410  */
411 static int
412 encap_valid(enum rte_table_action_encap_type encap)
413 {
414 	switch (encap) {
415 	case RTE_TABLE_ACTION_ENCAP_ETHER:
416 	case RTE_TABLE_ACTION_ENCAP_VLAN:
417 	case RTE_TABLE_ACTION_ENCAP_QINQ:
418 	case RTE_TABLE_ACTION_ENCAP_MPLS:
419 	case RTE_TABLE_ACTION_ENCAP_PPPOE:
420 	case RTE_TABLE_ACTION_ENCAP_VXLAN:
421 	case RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
422 		return 1;
423 	default:
424 		return 0;
425 	}
426 }
427 
428 static int
429 encap_cfg_check(struct rte_table_action_encap_config *encap)
430 {
431 	if ((encap->encap_mask == 0) ||
432 		(rte_popcount64(encap->encap_mask) != 1))
433 		return -ENOTSUP;
434 
435 	return 0;
436 }
437 
/* Ethernet-only encap header template. */
struct encap_ether_data {
	struct rte_ether_hdr ether;
};
441 
/* Build a host-order 802.1Q tag control field:
 * PCP (3 bits) | DEI (1 bit) | VID (12 bits).
 * The stray trailing line-continuation backslash was removed: it silently
 * spliced the following (blank) line into the macro, and would swallow
 * real code if anything were ever added directly below. */
#define VLAN(pcp, dei, vid)                                \
	((uint16_t)((((uint64_t)(pcp)) & 0x7LLU) << 13) |  \
	((((uint64_t)(dei)) & 0x1LLU) << 12) |             \
	(((uint64_t)(vid)) & 0xFFFLLU))

/* VLAN encap: Ethernet header followed by one 802.1Q tag. */
struct encap_vlan_data {
	struct rte_ether_hdr ether;
	struct rte_vlan_hdr vlan;
};

/* QinQ encap: Ethernet header followed by S-VLAN and C-VLAN tags. */
struct encap_qinq_data {
	struct rte_ether_hdr ether;
	struct rte_vlan_hdr svlan;
	struct rte_vlan_hdr cvlan;
};

#define ETHER_TYPE_MPLS_UNICAST                            0x8847

#define ETHER_TYPE_MPLS_MULTICAST                          0x8848

/* Build a host-order MPLS label stack entry:
 * label (20 bits) | TC (3 bits) | bottom-of-stack S (1 bit) | TTL (8 bits). */
#define MPLS(label, tc, s, ttl)                            \
	((uint32_t)(((((uint64_t)(label)) & 0xFFFFFLLU) << 12) |\
	((((uint64_t)(tc)) & 0x7LLU) << 9) |               \
	((((uint64_t)(s)) & 0x1LLU) << 8) |                \
	(((uint64_t)(ttl)) & 0xFFLLU)))
467 
/* MPLS encap: Ethernet header plus up to RTE_TABLE_ACTION_MPLS_LABELS_MAX
 * label stack entries. mpls_count is entry-side bookkeeping (only the
 * first sizeof(ether) + 4 * mpls_count bytes go on the wire). */
struct __rte_aligned(2) encap_mpls_data {
	struct rte_ether_hdr ether;
	uint32_t mpls[RTE_TABLE_ACTION_MPLS_LABELS_MAX];
	uint32_t mpls_count;
} __rte_packed;

/* PPP protocol field value for IPv4 payloads. */
#define PPP_PROTOCOL_IP                                    0x0021

/* PPPoE session header (ver/type/code, session ID, length) immediately
 * followed by the 16-bit PPP protocol field. */
struct pppoe_ppp_hdr {
	uint16_t ver_type_code;
	uint16_t session_id;
	uint16_t length;
	uint16_t protocol;
};

/* PPPoE encap: Ethernet + PPPoE/PPP headers. */
struct encap_pppoe_data {
	struct rte_ether_hdr ether;
	struct pppoe_ppp_hdr pppoe_ppp;
};

/* IPv4 protocol number for UDP. */
#define IP_PROTO_UDP                                       17

/* VXLAN encap header templates, one per layout (IPv4/IPv6, with or
 * without VLAN tag); prepended verbatim to the packet in the fast path. */
struct __rte_aligned(2) encap_vxlan_ipv4_data {
	struct rte_ether_hdr ether;
	struct rte_ipv4_hdr ipv4;
	struct rte_udp_hdr udp;
	struct rte_vxlan_hdr vxlan;
} __rte_packed;

struct __rte_aligned(2) encap_vxlan_ipv4_vlan_data {
	struct rte_ether_hdr ether;
	struct rte_vlan_hdr vlan;
	struct rte_ipv4_hdr ipv4;
	struct rte_udp_hdr udp;
	struct rte_vxlan_hdr vxlan;
} __rte_packed;

struct __rte_aligned(2) encap_vxlan_ipv6_data {
	struct rte_ether_hdr ether;
	struct rte_ipv6_hdr ipv6;
	struct rte_udp_hdr udp;
	struct rte_vxlan_hdr vxlan;
} __rte_packed;

struct __rte_aligned(2) encap_vxlan_ipv6_vlan_data {
	struct rte_ether_hdr ether;
	struct rte_vlan_hdr vlan;
	struct rte_ipv6_hdr ipv6;
	struct rte_udp_hdr udp;
	struct rte_vxlan_hdr vxlan;
} __rte_packed;

/* QinQ + PPPoE encap: Ethernet + S-VLAN + C-VLAN + PPPoE/PPP headers. */
struct __rte_aligned(2) encap_qinq_pppoe_data {
	struct rte_ether_hdr ether;
	struct rte_vlan_hdr svlan;
	struct rte_vlan_hdr cvlan;
	struct pppoe_ppp_hdr pppoe_ppp;
} __rte_packed;
526 
527 static size_t
528 encap_data_size(struct rte_table_action_encap_config *encap)
529 {
530 	switch (encap->encap_mask) {
531 	case 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER:
532 		return sizeof(struct encap_ether_data);
533 
534 	case 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN:
535 		return sizeof(struct encap_vlan_data);
536 
537 	case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ:
538 		return sizeof(struct encap_qinq_data);
539 
540 	case 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS:
541 		return sizeof(struct encap_mpls_data);
542 
543 	case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE:
544 		return sizeof(struct encap_pppoe_data);
545 
546 	case 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN:
547 		if (encap->vxlan.ip_version)
548 			if (encap->vxlan.vlan)
549 				return sizeof(struct encap_vxlan_ipv4_vlan_data);
550 			else
551 				return sizeof(struct encap_vxlan_ipv4_data);
552 		else
553 			if (encap->vxlan.vlan)
554 				return sizeof(struct encap_vxlan_ipv6_vlan_data);
555 			else
556 				return sizeof(struct encap_vxlan_ipv6_data);
557 
558 	case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
559 			return sizeof(struct encap_qinq_pppoe_data);
560 
561 	default:
562 		return 0;
563 	}
564 }
565 
566 static int
567 encap_apply_check(struct rte_table_action_encap_params *p,
568 	struct rte_table_action_encap_config *cfg)
569 {
570 	if ((encap_valid(p->type) == 0) ||
571 		((cfg->encap_mask & (1LLU << p->type)) == 0))
572 		return -EINVAL;
573 
574 	switch (p->type) {
575 	case RTE_TABLE_ACTION_ENCAP_ETHER:
576 		return 0;
577 
578 	case RTE_TABLE_ACTION_ENCAP_VLAN:
579 		return 0;
580 
581 	case RTE_TABLE_ACTION_ENCAP_QINQ:
582 		return 0;
583 
584 	case RTE_TABLE_ACTION_ENCAP_MPLS:
585 		if ((p->mpls.mpls_count == 0) ||
586 			(p->mpls.mpls_count > RTE_TABLE_ACTION_MPLS_LABELS_MAX))
587 			return -EINVAL;
588 
589 		return 0;
590 
591 	case RTE_TABLE_ACTION_ENCAP_PPPOE:
592 		return 0;
593 
594 	case RTE_TABLE_ACTION_ENCAP_VXLAN:
595 		return 0;
596 
597 	case RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
598 		return 0;
599 
600 	default:
601 		return -EINVAL;
602 	}
603 }
604 
605 static int
606 encap_ether_apply(void *data,
607 	struct rte_table_action_encap_params *p,
608 	struct rte_table_action_common_config *common_cfg)
609 {
610 	struct encap_ether_data *d = data;
611 	uint16_t ethertype = (common_cfg->ip_version) ?
612 		RTE_ETHER_TYPE_IPV4 :
613 		RTE_ETHER_TYPE_IPV6;
614 
615 	/* Ethernet */
616 	rte_ether_addr_copy(&p->ether.ether.da, &d->ether.dst_addr);
617 	rte_ether_addr_copy(&p->ether.ether.sa, &d->ether.src_addr);
618 	d->ether.ether_type = rte_htons(ethertype);
619 
620 	return 0;
621 }
622 
623 static int
624 encap_vlan_apply(void *data,
625 	struct rte_table_action_encap_params *p,
626 	struct rte_table_action_common_config *common_cfg)
627 {
628 	struct encap_vlan_data *d = data;
629 	uint16_t ethertype = (common_cfg->ip_version) ?
630 		RTE_ETHER_TYPE_IPV4 :
631 		RTE_ETHER_TYPE_IPV6;
632 
633 	/* Ethernet */
634 	rte_ether_addr_copy(&p->vlan.ether.da, &d->ether.dst_addr);
635 	rte_ether_addr_copy(&p->vlan.ether.sa, &d->ether.src_addr);
636 	d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_VLAN);
637 
638 	/* VLAN */
639 	d->vlan.vlan_tci = rte_htons(VLAN(p->vlan.vlan.pcp,
640 		p->vlan.vlan.dei,
641 		p->vlan.vlan.vid));
642 	d->vlan.eth_proto = rte_htons(ethertype);
643 
644 	return 0;
645 }
646 
647 static int
648 encap_qinq_apply(void *data,
649 	struct rte_table_action_encap_params *p,
650 	struct rte_table_action_common_config *common_cfg)
651 {
652 	struct encap_qinq_data *d = data;
653 	uint16_t ethertype = (common_cfg->ip_version) ?
654 		RTE_ETHER_TYPE_IPV4 :
655 		RTE_ETHER_TYPE_IPV6;
656 
657 	/* Ethernet */
658 	rte_ether_addr_copy(&p->qinq.ether.da, &d->ether.dst_addr);
659 	rte_ether_addr_copy(&p->qinq.ether.sa, &d->ether.src_addr);
660 	d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_QINQ);
661 
662 	/* SVLAN */
663 	d->svlan.vlan_tci = rte_htons(VLAN(p->qinq.svlan.pcp,
664 		p->qinq.svlan.dei,
665 		p->qinq.svlan.vid));
666 	d->svlan.eth_proto = rte_htons(RTE_ETHER_TYPE_VLAN);
667 
668 	/* CVLAN */
669 	d->cvlan.vlan_tci = rte_htons(VLAN(p->qinq.cvlan.pcp,
670 		p->qinq.cvlan.dei,
671 		p->qinq.cvlan.vid));
672 	d->cvlan.eth_proto = rte_htons(ethertype);
673 
674 	return 0;
675 }
676 
/* Build the QinQ + PPPoE session header template for a table entry.
 * NOTE(review): Ethernet/S-VLAN/C-VLAN fields are read via the p->qinq
 * union member while the PPPoE session ID comes from p->qinq_pppoe; this
 * relies on both union members sharing the leading field layout —
 * confirm against rte_table_action_encap_params. */
static int
encap_qinq_pppoe_apply(void *data,
	struct rte_table_action_encap_params *p)
{
	struct encap_qinq_pppoe_data *d = data;

	/* Ethernet */
	rte_ether_addr_copy(&p->qinq.ether.da, &d->ether.dst_addr);
	rte_ether_addr_copy(&p->qinq.ether.sa, &d->ether.src_addr);
	d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_VLAN);

	/* SVLAN */
	d->svlan.vlan_tci = rte_htons(VLAN(p->qinq.svlan.pcp,
		p->qinq.svlan.dei,
		p->qinq.svlan.vid));
	d->svlan.eth_proto = rte_htons(RTE_ETHER_TYPE_VLAN);

	/* CVLAN */
	d->cvlan.vlan_tci = rte_htons(VLAN(p->qinq.cvlan.pcp,
		p->qinq.cvlan.dei,
		p->qinq.cvlan.vid));
	d->cvlan.eth_proto = rte_htons(RTE_ETHER_TYPE_PPPOE_SESSION);

	/* PPPoE and PPP*/
	/* 0x1100 = PPPoE ver 1, type 1, code 0 (session data). */
	d->pppoe_ppp.ver_type_code = rte_htons(0x1100);
	d->pppoe_ppp.session_id = rte_htons(p->qinq_pppoe.pppoe.session_id);
	d->pppoe_ppp.length = 0; /* not pre-computed */
	d->pppoe_ppp.protocol = rte_htons(PPP_PROTOCOL_IP);

	return 0;
}
708 
/* Build the Ethernet + MPLS label stack template for a table entry.
 * encap_apply_check() guarantees 1 <= mpls_count <= LABELS_MAX, so the
 * loop writes labels 0..count-2 with S=0 and, reusing i == count-1 after
 * the loop, writes the final label with the bottom-of-stack bit set. */
static int
encap_mpls_apply(void *data,
	struct rte_table_action_encap_params *p)
{
	struct encap_mpls_data *d = data;
	uint16_t ethertype = (p->mpls.unicast) ?
		ETHER_TYPE_MPLS_UNICAST :
		ETHER_TYPE_MPLS_MULTICAST;
	uint32_t i;

	/* Ethernet */
	rte_ether_addr_copy(&p->mpls.ether.da, &d->ether.dst_addr);
	rte_ether_addr_copy(&p->mpls.ether.sa, &d->ether.src_addr);
	d->ether.ether_type = rte_htons(ethertype);

	/* MPLS */
	for (i = 0; i < p->mpls.mpls_count - 1; i++)
		d->mpls[i] = rte_htonl(MPLS(p->mpls.mpls[i].label,
			p->mpls.mpls[i].tc,
			0,
			p->mpls.mpls[i].ttl));

	/* Last label: bottom-of-stack flag set. */
	d->mpls[i] = rte_htonl(MPLS(p->mpls.mpls[i].label,
		p->mpls.mpls[i].tc,
		1,
		p->mpls.mpls[i].ttl));

	d->mpls_count = p->mpls.mpls_count;
	return 0;
}
739 
740 static int
741 encap_pppoe_apply(void *data,
742 	struct rte_table_action_encap_params *p)
743 {
744 	struct encap_pppoe_data *d = data;
745 
746 	/* Ethernet */
747 	rte_ether_addr_copy(&p->pppoe.ether.da, &d->ether.dst_addr);
748 	rte_ether_addr_copy(&p->pppoe.ether.sa, &d->ether.src_addr);
749 	d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_PPPOE_SESSION);
750 
751 	/* PPPoE and PPP*/
752 	d->pppoe_ppp.ver_type_code = rte_htons(0x1100);
753 	d->pppoe_ppp.session_id = rte_htons(p->pppoe.pppoe.session_id);
754 	d->pppoe_ppp.length = 0; /* not pre-computed */
755 	d->pppoe_ppp.protocol = rte_htons(PPP_PROTOCOL_IP);
756 
757 	return 0;
758 }
759 
/* Build the VXLAN encap header template for a table entry. Validates the
 * field ranges, then fills one of four layouts (IPv4/IPv6, with/without
 * VLAN) selected by the action configuration. Length-dependent fields
 * (IP total/payload length, UDP length) are left zero here and patched
 * per packet in the fast path. */
static int
encap_vxlan_apply(void *data,
	struct rte_table_action_encap_params *p,
	struct rte_table_action_encap_config *cfg)
{
	/* Range checks: VNI 24 bits, DSCP 6 bits, IPv6 flow label 20 bits,
	 * VLAN ID 12 bits. */
	if ((p->vxlan.vxlan.vni > 0xFFFFFF) ||
		(cfg->vxlan.ip_version && (p->vxlan.ipv4.dscp > 0x3F)) ||
		(!cfg->vxlan.ip_version && (p->vxlan.ipv6.flow_label > 0xFFFFF)) ||
		(!cfg->vxlan.ip_version && (p->vxlan.ipv6.dscp > 0x3F)) ||
		(cfg->vxlan.vlan && (p->vxlan.vlan.vid > 0xFFF)))
		return -1;

	if (cfg->vxlan.ip_version)
		if (cfg->vxlan.vlan) {
			struct encap_vxlan_ipv4_vlan_data *d = data;

			/* Ethernet */
			rte_ether_addr_copy(&p->vxlan.ether.da,
					&d->ether.dst_addr);
			rte_ether_addr_copy(&p->vxlan.ether.sa,
					&d->ether.src_addr);
			d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_VLAN);

			/* VLAN */
			d->vlan.vlan_tci = rte_htons(VLAN(p->vxlan.vlan.pcp,
				p->vxlan.vlan.dei,
				p->vxlan.vlan.vid));
			d->vlan.eth_proto = rte_htons(RTE_ETHER_TYPE_IPV4);

			/* IPv4*/
			d->ipv4.version_ihl = 0x45;
			d->ipv4.type_of_service = p->vxlan.ipv4.dscp << 2;
			d->ipv4.total_length = 0; /* not pre-computed */
			d->ipv4.packet_id = 0;
			d->ipv4.fragment_offset = 0;
			d->ipv4.time_to_live = p->vxlan.ipv4.ttl;
			d->ipv4.next_proto_id = IP_PROTO_UDP;
			d->ipv4.hdr_checksum = 0;
			d->ipv4.src_addr = rte_htonl(p->vxlan.ipv4.sa);
			d->ipv4.dst_addr = rte_htonl(p->vxlan.ipv4.da);

			/* Checksum over the template (total_length == 0);
			 * updated incrementally in the fast path. */
			d->ipv4.hdr_checksum = rte_ipv4_cksum(&d->ipv4);

			/* UDP */
			d->udp.src_port = rte_htons(p->vxlan.udp.sp);
			d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
			d->udp.dgram_len = 0; /* not pre-computed */
			d->udp.dgram_cksum = 0;

			/* VXLAN: I flag set, VNI in bits 31:8 of vx_vni. */
			d->vxlan.vx_flags = rte_htonl(0x08000000);
			d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);

			return 0;
		} else {
			struct encap_vxlan_ipv4_data *d = data;

			/* Ethernet */
			rte_ether_addr_copy(&p->vxlan.ether.da,
					&d->ether.dst_addr);
			rte_ether_addr_copy(&p->vxlan.ether.sa,
					&d->ether.src_addr);
			d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_IPV4);

			/* IPv4*/
			d->ipv4.version_ihl = 0x45;
			d->ipv4.type_of_service = p->vxlan.ipv4.dscp << 2;
			d->ipv4.total_length = 0; /* not pre-computed */
			d->ipv4.packet_id = 0;
			d->ipv4.fragment_offset = 0;
			d->ipv4.time_to_live = p->vxlan.ipv4.ttl;
			d->ipv4.next_proto_id = IP_PROTO_UDP;
			d->ipv4.hdr_checksum = 0;
			d->ipv4.src_addr = rte_htonl(p->vxlan.ipv4.sa);
			d->ipv4.dst_addr = rte_htonl(p->vxlan.ipv4.da);

			/* Checksum over the template (total_length == 0);
			 * updated incrementally in the fast path. */
			d->ipv4.hdr_checksum = rte_ipv4_cksum(&d->ipv4);

			/* UDP */
			d->udp.src_port = rte_htons(p->vxlan.udp.sp);
			d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
			d->udp.dgram_len = 0; /* not pre-computed */
			d->udp.dgram_cksum = 0;

			/* VXLAN: I flag set, VNI in bits 31:8 of vx_vni. */
			d->vxlan.vx_flags = rte_htonl(0x08000000);
			d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);

			return 0;
		}
	else
		if (cfg->vxlan.vlan) {
			struct encap_vxlan_ipv6_vlan_data *d = data;

			/* Ethernet */
			rte_ether_addr_copy(&p->vxlan.ether.da,
					&d->ether.dst_addr);
			rte_ether_addr_copy(&p->vxlan.ether.sa,
					&d->ether.src_addr);
			d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_VLAN);

			/* VLAN */
			d->vlan.vlan_tci = rte_htons(VLAN(p->vxlan.vlan.pcp,
				p->vxlan.vlan.dei,
				p->vxlan.vlan.vid));
			d->vlan.eth_proto = rte_htons(RTE_ETHER_TYPE_IPV6);

			/* IPv6: version | traffic class (DSCP) | flow label. */
			d->ipv6.vtc_flow = rte_htonl((6 << 28) |
				(p->vxlan.ipv6.dscp << 22) |
				p->vxlan.ipv6.flow_label);
			d->ipv6.payload_len = 0; /* not pre-computed */
			d->ipv6.proto = IP_PROTO_UDP;
			d->ipv6.hop_limits = p->vxlan.ipv6.hop_limit;
			d->ipv6.src_addr = p->vxlan.ipv6.sa;
			d->ipv6.dst_addr = p->vxlan.ipv6.da;

			/* UDP */
			d->udp.src_port = rte_htons(p->vxlan.udp.sp);
			d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
			d->udp.dgram_len = 0; /* not pre-computed */
			d->udp.dgram_cksum = 0;

			/* VXLAN: I flag set, VNI in bits 31:8 of vx_vni. */
			d->vxlan.vx_flags = rte_htonl(0x08000000);
			d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);

			return 0;
		} else {
			struct encap_vxlan_ipv6_data *d = data;

			/* Ethernet */
			rte_ether_addr_copy(&p->vxlan.ether.da,
					&d->ether.dst_addr);
			rte_ether_addr_copy(&p->vxlan.ether.sa,
					&d->ether.src_addr);
			d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_IPV6);

			/* IPv6: version | traffic class (DSCP) | flow label. */
			d->ipv6.vtc_flow = rte_htonl((6 << 28) |
				(p->vxlan.ipv6.dscp << 22) |
				p->vxlan.ipv6.flow_label);
			d->ipv6.payload_len = 0; /* not pre-computed */
			d->ipv6.proto = IP_PROTO_UDP;
			d->ipv6.hop_limits = p->vxlan.ipv6.hop_limit;
			d->ipv6.src_addr = p->vxlan.ipv6.sa;
			d->ipv6.dst_addr = p->vxlan.ipv6.da;

			/* UDP */
			d->udp.src_port = rte_htons(p->vxlan.udp.sp);
			d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
			d->udp.dgram_len = 0; /* not pre-computed */
			d->udp.dgram_cksum = 0;

			/* VXLAN: I flag set, VNI in bits 31:8 of vx_vni. */
			d->vxlan.vx_flags = rte_htonl(0x08000000);
			d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);

			return 0;
		}
}
921 
922 static int
923 encap_apply(void *data,
924 	struct rte_table_action_encap_params *p,
925 	struct rte_table_action_encap_config *cfg,
926 	struct rte_table_action_common_config *common_cfg)
927 {
928 	int status;
929 
930 	/* Check input arguments */
931 	status = encap_apply_check(p, cfg);
932 	if (status)
933 		return status;
934 
935 	switch (p->type) {
936 	case RTE_TABLE_ACTION_ENCAP_ETHER:
937 		return encap_ether_apply(data, p, common_cfg);
938 
939 	case RTE_TABLE_ACTION_ENCAP_VLAN:
940 		return encap_vlan_apply(data, p, common_cfg);
941 
942 	case RTE_TABLE_ACTION_ENCAP_QINQ:
943 		return encap_qinq_apply(data, p, common_cfg);
944 
945 	case RTE_TABLE_ACTION_ENCAP_MPLS:
946 		return encap_mpls_apply(data, p);
947 
948 	case RTE_TABLE_ACTION_ENCAP_PPPOE:
949 		return encap_pppoe_apply(data, p);
950 
951 	case RTE_TABLE_ACTION_ENCAP_VXLAN:
952 		return encap_vxlan_apply(data, p, cfg);
953 
954 	case RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
955 		return encap_qinq_pppoe_apply(data, p);
956 
957 	default:
958 		return -EINVAL;
959 	}
960 }
961 
962 static __rte_always_inline uint16_t
963 encap_vxlan_ipv4_checksum_update(uint16_t cksum0,
964 	uint16_t total_length)
965 {
966 	int32_t cksum1;
967 
968 	cksum1 = cksum0;
969 	cksum1 = ~cksum1 & 0xFFFF;
970 
971 	/* Add total length (one's complement logic) */
972 	cksum1 += total_length;
973 	cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
974 	cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
975 
976 	return (uint16_t)(~cksum1);
977 }
978 
979 static __rte_always_inline void *
980 encap(void *dst, const void *src, size_t n)
981 {
982 	dst = ((uint8_t *) dst) - n;
983 	return rte_memcpy(dst, src, n);
984 }
985 
/* Fast-path VXLAN-over-IPv4 encap: prepend the entry's pre-built header
 * template in front of the frame, then patch the length-dependent fields
 * (IPv4 total length + checksum, UDP datagram length). */
static __rte_always_inline void
pkt_work_encap_vxlan_ipv4(struct rte_mbuf *mbuf,
	struct encap_vxlan_ipv4_data *vxlan_tbl,
	struct rte_table_action_encap_config *cfg)
{
	uint32_t ether_offset = cfg->vxlan.data_offset;
	void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
	struct encap_vxlan_ipv4_data *vxlan_pkt;
	uint16_t ether_length, ipv4_total_length, ipv4_hdr_cksum, udp_length;

	ether_length = (uint16_t)mbuf->pkt_len;
	ipv4_total_length = ether_length +
		(sizeof(struct rte_vxlan_hdr) +
		sizeof(struct rte_udp_hdr) +
		sizeof(struct rte_ipv4_hdr));
	/* Incremental update: template checksum was computed with
	 * total_length == 0. */
	ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum,
		rte_htons(ipv4_total_length));
	udp_length = ether_length +
		(sizeof(struct rte_vxlan_hdr) +
		sizeof(struct rte_udp_hdr));

	/* Copy the template immediately before the current frame start. */
	vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
	vxlan_pkt->ipv4.total_length = rte_htons(ipv4_total_length);
	vxlan_pkt->ipv4.hdr_checksum = ipv4_hdr_cksum;
	vxlan_pkt->udp.dgram_len = rte_htons(udp_length);

	/* NOTE(review): metadata offsets are measured from the mbuf struct
	 * start, data_off from the buffer that follows it — hence the
	 * sizeof(struct rte_mbuf) correction; confirm against mbuf layout. */
	mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
	mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
}
1015 
/* Fast-path VXLAN-over-IPv4-with-VLAN encap: same as the plain IPv4
 * variant, using the VLAN-tagged header template. */
static __rte_always_inline void
pkt_work_encap_vxlan_ipv4_vlan(struct rte_mbuf *mbuf,
	struct encap_vxlan_ipv4_vlan_data *vxlan_tbl,
	struct rte_table_action_encap_config *cfg)
{
	uint32_t ether_offset = cfg->vxlan.data_offset;
	void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
	struct encap_vxlan_ipv4_vlan_data *vxlan_pkt;
	uint16_t ether_length, ipv4_total_length, ipv4_hdr_cksum, udp_length;

	ether_length = (uint16_t)mbuf->pkt_len;
	ipv4_total_length = ether_length +
		(sizeof(struct rte_vxlan_hdr) +
		sizeof(struct rte_udp_hdr) +
		sizeof(struct rte_ipv4_hdr));
	/* Incremental update: template checksum was computed with
	 * total_length == 0. */
	ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum,
		rte_htons(ipv4_total_length));
	udp_length = ether_length +
		(sizeof(struct rte_vxlan_hdr) +
		sizeof(struct rte_udp_hdr));

	/* Copy the template immediately before the current frame start. */
	vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
	vxlan_pkt->ipv4.total_length = rte_htons(ipv4_total_length);
	vxlan_pkt->ipv4.hdr_checksum = ipv4_hdr_cksum;
	vxlan_pkt->udp.dgram_len = rte_htons(udp_length);

	mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
	mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
}
1045 
/* Fast-path VXLAN-over-IPv6 encap: prepend the entry's pre-built header
 * template, then patch the length-dependent fields (IPv6 payload length,
 * UDP datagram length). No IP checksum in IPv6. */
static __rte_always_inline void
pkt_work_encap_vxlan_ipv6(struct rte_mbuf *mbuf,
	struct encap_vxlan_ipv6_data *vxlan_tbl,
	struct rte_table_action_encap_config *cfg)
{
	uint32_t ether_offset = cfg->vxlan.data_offset;
	void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
	struct encap_vxlan_ipv6_data *vxlan_pkt;
	uint16_t ether_length, ipv6_payload_length, udp_length;

	ether_length = (uint16_t)mbuf->pkt_len;
	ipv6_payload_length = ether_length +
		(sizeof(struct rte_vxlan_hdr) +
		sizeof(struct rte_udp_hdr));
	udp_length = ether_length +
		(sizeof(struct rte_vxlan_hdr) +
		sizeof(struct rte_udp_hdr));

	/* Copy the template immediately before the current frame start. */
	vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
	vxlan_pkt->ipv6.payload_len = rte_htons(ipv6_payload_length);
	vxlan_pkt->udp.dgram_len = rte_htons(udp_length);

	mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
	mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
}
1071 
/* Fast-path VXLAN-over-IPv6-with-VLAN encap: same as the plain IPv6
 * variant, using the VLAN-tagged header template. */
static __rte_always_inline void
pkt_work_encap_vxlan_ipv6_vlan(struct rte_mbuf *mbuf,
	struct encap_vxlan_ipv6_vlan_data *vxlan_tbl,
	struct rte_table_action_encap_config *cfg)
{
	uint32_t ether_offset = cfg->vxlan.data_offset;
	void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
	struct encap_vxlan_ipv6_vlan_data *vxlan_pkt;
	uint16_t ether_length, ipv6_payload_length, udp_length;

	ether_length = (uint16_t)mbuf->pkt_len;
	ipv6_payload_length = ether_length +
		(sizeof(struct rte_vxlan_hdr) +
		sizeof(struct rte_udp_hdr));
	udp_length = ether_length +
		(sizeof(struct rte_vxlan_hdr) +
		sizeof(struct rte_udp_hdr));

	/* Copy the template immediately before the current frame start. */
	vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
	vxlan_pkt->ipv6.payload_len = rte_htons(ipv6_payload_length);
	vxlan_pkt->udp.dgram_len = rte_htons(udp_length);

	mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
	mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
}
1097 
1098 static __rte_always_inline void
1099 pkt_work_encap(struct rte_mbuf *mbuf,
1100 	void *data,
1101 	struct rte_table_action_encap_config *cfg,
1102 	void *ip,
1103 	uint16_t total_length,
1104 	uint32_t ip_offset)
1105 {
1106 	switch (cfg->encap_mask) {
1107 	case 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER:
1108 		encap(ip, data, sizeof(struct encap_ether_data));
1109 		mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
1110 			sizeof(struct encap_ether_data));
1111 		mbuf->pkt_len = mbuf->data_len = total_length +
1112 			sizeof(struct encap_ether_data);
1113 		break;
1114 
1115 	case 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN:
1116 		encap(ip, data, sizeof(struct encap_vlan_data));
1117 		mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
1118 			sizeof(struct encap_vlan_data));
1119 		mbuf->pkt_len = mbuf->data_len = total_length +
1120 			sizeof(struct encap_vlan_data);
1121 		break;
1122 
1123 	case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ:
1124 		encap(ip, data, sizeof(struct encap_qinq_data));
1125 		mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
1126 			sizeof(struct encap_qinq_data));
1127 		mbuf->pkt_len = mbuf->data_len = total_length +
1128 			sizeof(struct encap_qinq_data);
1129 		break;
1130 
1131 	case 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS:
1132 	{
1133 		struct encap_mpls_data *mpls = data;
1134 		size_t size = sizeof(struct rte_ether_hdr) +
1135 			mpls->mpls_count * 4;
1136 
1137 		encap(ip, data, size);
1138 		mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) + size);
1139 		mbuf->pkt_len = mbuf->data_len = total_length + size;
1140 		break;
1141 	}
1142 
1143 	case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE:
1144 	{
1145 		struct encap_pppoe_data *pppoe =
1146 			encap(ip, data, sizeof(struct encap_pppoe_data));
1147 		pppoe->pppoe_ppp.length = rte_htons(total_length + 2);
1148 		mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
1149 			sizeof(struct encap_pppoe_data));
1150 		mbuf->pkt_len = mbuf->data_len = total_length +
1151 			sizeof(struct encap_pppoe_data);
1152 		break;
1153 	}
1154 
1155 	case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
1156 	{
1157 		struct encap_qinq_pppoe_data *qinq_pppoe =
1158 			encap(ip, data, sizeof(struct encap_qinq_pppoe_data));
1159 		qinq_pppoe->pppoe_ppp.length = rte_htons(total_length + 2);
1160 		mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
1161 			sizeof(struct encap_qinq_pppoe_data));
1162 		mbuf->pkt_len = mbuf->data_len = total_length +
1163 			sizeof(struct encap_qinq_pppoe_data);
1164 		break;
1165 	}
1166 
1167 	case 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN:
1168 	{
1169 		if (cfg->vxlan.ip_version)
1170 			if (cfg->vxlan.vlan)
1171 				pkt_work_encap_vxlan_ipv4_vlan(mbuf, data, cfg);
1172 			else
1173 				pkt_work_encap_vxlan_ipv4(mbuf, data, cfg);
1174 		else
1175 			if (cfg->vxlan.vlan)
1176 				pkt_work_encap_vxlan_ipv6_vlan(mbuf, data, cfg);
1177 			else
1178 				pkt_work_encap_vxlan_ipv6(mbuf, data, cfg);
1179 	}
1180 
1181 	default:
1182 		break;
1183 	}
1184 }
1185 
1186 /**
1187  * RTE_TABLE_ACTION_NAT
1188  */
1189 static int
1190 nat_cfg_check(struct rte_table_action_nat_config *nat)
1191 {
1192 	if ((nat->proto != 0x06) &&
1193 		(nat->proto != 0x11))
1194 		return -ENOTSUP;
1195 
1196 	return 0;
1197 }
1198 
/* Per-entry NAT translation values for IPv4 flows. Both fields are
 * stored in network byte order (converted in nat_apply()) so they can
 * be written straight into the packet headers on the data path.
 */
struct nat_ipv4_data {
	uint32_t addr; /* Replacement IPv4 address (big endian). */
	uint16_t port; /* Replacement L4 port (big endian). */
} __rte_packed;
1203 
/* Per-entry NAT translation values for IPv6 flows. The port is stored
 * in network byte order (converted in nat_apply()); the address is
 * copied verbatim from the action parameters.
 */
struct nat_ipv6_data {
	struct rte_ipv6_addr addr; /* Replacement IPv6 address. */
	uint16_t port; /* Replacement L4 port (big endian). */
} __rte_packed;
1208 
1209 static size_t
1210 nat_data_size(struct rte_table_action_nat_config *nat __rte_unused,
1211 	struct rte_table_action_common_config *common)
1212 {
1213 	int ip_version = common->ip_version;
1214 
1215 	return (ip_version) ?
1216 		sizeof(struct nat_ipv4_data) :
1217 		sizeof(struct nat_ipv6_data);
1218 }
1219 
1220 static int
1221 nat_apply_check(struct rte_table_action_nat_params *p,
1222 	struct rte_table_action_common_config *cfg)
1223 {
1224 	if ((p->ip_version && (cfg->ip_version == 0)) ||
1225 		((p->ip_version == 0) && cfg->ip_version))
1226 		return -EINVAL;
1227 
1228 	return 0;
1229 }
1230 
1231 static int
1232 nat_apply(void *data,
1233 	struct rte_table_action_nat_params *p,
1234 	struct rte_table_action_common_config *cfg)
1235 {
1236 	int status;
1237 
1238 	/* Check input arguments */
1239 	status = nat_apply_check(p, cfg);
1240 	if (status)
1241 		return status;
1242 
1243 	/* Apply */
1244 	if (p->ip_version) {
1245 		struct nat_ipv4_data *d = data;
1246 
1247 		d->addr = rte_htonl(p->addr.ipv4);
1248 		d->port = rte_htons(p->port);
1249 	} else {
1250 		struct nat_ipv6_data *d = data;
1251 
1252 		d->addr = p->addr.ipv6;
1253 		d->port = rte_htons(p->port);
1254 	}
1255 
1256 	return 0;
1257 }
1258 
/* Incrementally update an IPv4 header checksum when the address ip0 is
 * replaced by ip1 (one's complement arithmetic, RFC 1624 style); no
 * full header re-summation is needed.
 */
static __rte_always_inline uint16_t
nat_ipv4_checksum_update(uint16_t cksum0,
	uint32_t ip0,
	uint32_t ip1)
{
	int32_t sum;

	/* Work on the one's complement of the current checksum. */
	sum = (~(int32_t)cksum0) & 0xFFFF;

	/* Remove the contribution of the old address. */
	sum -= (ip0 >> 16) + (ip0 & 0xFFFF);
	sum = (sum & 0xFFFF) + (sum >> 16);
	sum = (sum & 0xFFFF) + (sum >> 16);

	/* Account for the new address. */
	sum += (ip1 >> 16) + (ip1 & 0xFFFF);
	sum = (sum & 0xFFFF) + (sum >> 16);
	sum = (sum & 0xFFFF) + (sum >> 16);

	return (uint16_t)(~sum);
}
1281 
/* Incrementally update a TCP/UDP checksum over IPv4 when the address
 * ip0/port0 pair is replaced by ip1/port1 (one's complement arithmetic,
 * RFC 1624 style). The address enters via the pseudo-header.
 */
static __rte_always_inline uint16_t
nat_ipv4_tcp_udp_checksum_update(uint16_t cksum0,
	uint32_t ip0,
	uint32_t ip1,
	uint16_t port0,
	uint16_t port1)
{
	int32_t sum;

	/* Work on the one's complement of the current checksum. */
	sum = (~(int32_t)cksum0) & 0xFFFF;

	/* Remove the contribution of the old address and port. */
	sum -= (ip0 >> 16) + (ip0 & 0xFFFF) + port0;
	sum = (sum & 0xFFFF) + (sum >> 16);
	sum = (sum & 0xFFFF) + (sum >> 16);

	/* Account for the new address and port. */
	sum += (ip1 >> 16) + (ip1 & 0xFFFF) + port1;
	sum = (sum & 0xFFFF) + (sum >> 16);
	sum = (sum & 0xFFFF) + (sum >> 16);

	return (uint16_t)(~sum);
}
1306 
/* Incrementally update a TCP/UDP checksum over IPv6 when the address
 * ip0 (8 big-endian 16-bit words) and port0 are replaced by ip1/port1
 * (one's complement arithmetic, RFC 1624 style).
 */
static __rte_always_inline uint16_t
nat_ipv6_tcp_udp_checksum_update(uint16_t cksum0,
	uint16_t *ip0,
	uint16_t *ip1,
	uint16_t port0,
	uint16_t port1)
{
	int32_t sum, old_words, new_words;
	uint32_t i;

	/* Sum the 16-bit words of each address together with its port. */
	old_words = port0;
	new_words = port1;
	for (i = 0; i < 8; i++) {
		old_words += ip0[i];
		new_words += ip1[i];
	}

	/* Work on the one's complement of the current checksum. */
	sum = (~(int32_t)cksum0) & 0xFFFF;

	/* Remove the old contribution. */
	sum -= old_words;
	sum = (sum & 0xFFFF) + (sum >> 16);
	sum = (sum & 0xFFFF) + (sum >> 16);

	/* Account for the new values. */
	sum += new_words;
	sum = (sum & 0xFFFF) + (sum >> 16);
	sum = (sum & 0xFFFF) + (sum >> 16);

	return (uint16_t)(~sum);
}
1333 
1334 static __rte_always_inline void
1335 pkt_ipv4_work_nat(struct rte_ipv4_hdr *ip,
1336 	struct nat_ipv4_data *data,
1337 	struct rte_table_action_nat_config *cfg)
1338 {
1339 	if (cfg->source_nat) {
1340 		if (cfg->proto == 0x6) {
1341 			struct rte_tcp_hdr *tcp = (struct rte_tcp_hdr *) &ip[1];
1342 			uint16_t ip_cksum, tcp_cksum;
1343 
1344 			ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
1345 				ip->src_addr,
1346 				data->addr);
1347 
1348 			tcp_cksum = nat_ipv4_tcp_udp_checksum_update(tcp->cksum,
1349 				ip->src_addr,
1350 				data->addr,
1351 				tcp->src_port,
1352 				data->port);
1353 
1354 			ip->src_addr = data->addr;
1355 			ip->hdr_checksum = ip_cksum;
1356 			tcp->src_port = data->port;
1357 			tcp->cksum = tcp_cksum;
1358 		} else {
1359 			struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
1360 			uint16_t ip_cksum, udp_cksum;
1361 
1362 			ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
1363 				ip->src_addr,
1364 				data->addr);
1365 
1366 			udp_cksum = nat_ipv4_tcp_udp_checksum_update(udp->dgram_cksum,
1367 				ip->src_addr,
1368 				data->addr,
1369 				udp->src_port,
1370 				data->port);
1371 
1372 			ip->src_addr = data->addr;
1373 			ip->hdr_checksum = ip_cksum;
1374 			udp->src_port = data->port;
1375 			if (udp->dgram_cksum)
1376 				udp->dgram_cksum = udp_cksum;
1377 		}
1378 	} else {
1379 		if (cfg->proto == 0x6) {
1380 			struct rte_tcp_hdr *tcp = (struct rte_tcp_hdr *) &ip[1];
1381 			uint16_t ip_cksum, tcp_cksum;
1382 
1383 			ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
1384 				ip->dst_addr,
1385 				data->addr);
1386 
1387 			tcp_cksum = nat_ipv4_tcp_udp_checksum_update(tcp->cksum,
1388 				ip->dst_addr,
1389 				data->addr,
1390 				tcp->dst_port,
1391 				data->port);
1392 
1393 			ip->dst_addr = data->addr;
1394 			ip->hdr_checksum = ip_cksum;
1395 			tcp->dst_port = data->port;
1396 			tcp->cksum = tcp_cksum;
1397 		} else {
1398 			struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
1399 			uint16_t ip_cksum, udp_cksum;
1400 
1401 			ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
1402 				ip->dst_addr,
1403 				data->addr);
1404 
1405 			udp_cksum = nat_ipv4_tcp_udp_checksum_update(udp->dgram_cksum,
1406 				ip->dst_addr,
1407 				data->addr,
1408 				udp->dst_port,
1409 				data->port);
1410 
1411 			ip->dst_addr = data->addr;
1412 			ip->hdr_checksum = ip_cksum;
1413 			udp->dst_port = data->port;
1414 			if (udp->dgram_cksum)
1415 				udp->dgram_cksum = udp_cksum;
1416 		}
1417 	}
1418 }
1419 
1420 static __rte_always_inline void
1421 pkt_ipv6_work_nat(struct rte_ipv6_hdr *ip,
1422 	struct nat_ipv6_data *data,
1423 	struct rte_table_action_nat_config *cfg)
1424 {
1425 	if (cfg->source_nat) {
1426 		if (cfg->proto == 0x6) {
1427 			struct rte_tcp_hdr *tcp = (struct rte_tcp_hdr *) &ip[1];
1428 			uint16_t tcp_cksum;
1429 
1430 			tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
1431 				(uint16_t *)&ip->src_addr,
1432 				(uint16_t *)&data->addr,
1433 				tcp->src_port,
1434 				data->port);
1435 
1436 			ip->src_addr = data->addr;
1437 			tcp->src_port = data->port;
1438 			tcp->cksum = tcp_cksum;
1439 		} else {
1440 			struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
1441 			uint16_t udp_cksum;
1442 
1443 			udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
1444 				(uint16_t *)&ip->src_addr,
1445 				(uint16_t *)&data->addr,
1446 				udp->src_port,
1447 				data->port);
1448 
1449 			ip->src_addr = data->addr;
1450 			udp->src_port = data->port;
1451 			udp->dgram_cksum = udp_cksum;
1452 		}
1453 	} else {
1454 		if (cfg->proto == 0x6) {
1455 			struct rte_tcp_hdr *tcp = (struct rte_tcp_hdr *) &ip[1];
1456 			uint16_t tcp_cksum;
1457 
1458 			tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
1459 				(uint16_t *)&ip->dst_addr,
1460 				(uint16_t *)&data->addr,
1461 				tcp->dst_port,
1462 				data->port);
1463 
1464 			ip->dst_addr = data->addr;
1465 			tcp->dst_port = data->port;
1466 			tcp->cksum = tcp_cksum;
1467 		} else {
1468 			struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
1469 			uint16_t udp_cksum;
1470 
1471 			udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
1472 				(uint16_t *)&ip->dst_addr,
1473 				(uint16_t *)&data->addr,
1474 				udp->dst_port,
1475 				data->port);
1476 
1477 			ip->dst_addr = data->addr;
1478 			udp->dst_port = data->port;
1479 			udp->dgram_cksum = udp_cksum;
1480 		}
1481 	}
1482 }
1483 
1484 /**
1485  * RTE_TABLE_ACTION_TTL
1486  */
1487 static int
1488 ttl_cfg_check(struct rte_table_action_ttl_config *ttl)
1489 {
1490 	if (ttl->drop == 0)
1491 		return -ENOTSUP;
1492 
1493 	return 0;
1494 }
1495 
/* Per-entry TTL state. n_packets packs two things (see the TTL_*
 * macros): bit 0 is the "decrement TTL" flag, bits 1..31 count the
 * packets whose TTL/hop limit reached zero.
 */
struct ttl_data {
	uint32_t n_packets;
} __rte_packed;
1499 
/* Initialize the packed field: set the decrement flag (bit 0) and
 * implicitly clear the drop counter (bits 1..31).
 */
#define TTL_INIT(data, decrement)                         \
	((data)->n_packets = (decrement) ? 1 : 0)

/* Read the decrement flag (0 or 1). */
#define TTL_DEC_GET(data)                                  \
	((uint8_t)((data)->n_packets & 1))

/* Clear the drop counter while preserving the decrement flag. */
#define TTL_STATS_RESET(data)                             \
	((data)->n_packets = ((data)->n_packets & 1))

/* Read the drop counter (stored in bits 1..31). */
#define TTL_STATS_READ(data)                               \
	((data)->n_packets >> 1)

/* Add value to the drop counter without disturbing the flag bit. */
#define TTL_STATS_ADD(data, value)                        \
	((data)->n_packets =                                  \
		(((((data)->n_packets >> 1) + (value)) << 1) |    \
		((data)->n_packets & 1)))
1516 
1517 static int
1518 ttl_apply(void *data,
1519 	struct rte_table_action_ttl_params *p)
1520 {
1521 	struct ttl_data *d = data;
1522 
1523 	TTL_INIT(d, p->decrement);
1524 
1525 	return 0;
1526 }
1527 
1528 static __rte_always_inline uint64_t
1529 pkt_ipv4_work_ttl(struct rte_ipv4_hdr *ip,
1530 	struct ttl_data *data)
1531 {
1532 	uint32_t drop;
1533 	uint16_t cksum = ip->hdr_checksum;
1534 	uint8_t ttl = ip->time_to_live;
1535 	uint8_t ttl_diff = TTL_DEC_GET(data);
1536 
1537 	cksum += ttl_diff;
1538 	ttl -= ttl_diff;
1539 
1540 	ip->hdr_checksum = cksum;
1541 	ip->time_to_live = ttl;
1542 
1543 	drop = (ttl == 0) ? 1 : 0;
1544 	TTL_STATS_ADD(data, drop);
1545 
1546 	return drop;
1547 }
1548 
1549 static __rte_always_inline uint64_t
1550 pkt_ipv6_work_ttl(struct rte_ipv6_hdr *ip,
1551 	struct ttl_data *data)
1552 {
1553 	uint32_t drop;
1554 	uint8_t ttl = ip->hop_limits;
1555 	uint8_t ttl_diff = TTL_DEC_GET(data);
1556 
1557 	ttl -= ttl_diff;
1558 
1559 	ip->hop_limits = ttl;
1560 
1561 	drop = (ttl == 0) ? 1 : 0;
1562 	TTL_STATS_ADD(data, drop);
1563 
1564 	return drop;
1565 }
1566 
1567 /**
1568  * RTE_TABLE_ACTION_STATS
1569  */
1570 static int
1571 stats_cfg_check(struct rte_table_action_stats_config *stats)
1572 {
1573 	if ((stats->n_packets_enabled == 0) && (stats->n_bytes_enabled == 0))
1574 		return -EINVAL;
1575 
1576 	return 0;
1577 }
1578 
/* Per-entry statistics, updated on the data path by pkt_work_stats(). */
struct stats_data {
	uint64_t n_packets; /* Packets that hit this entry. */
	uint64_t n_bytes; /* Bytes accounted for this entry. */
} __rte_packed;
1583 
1584 static int
1585 stats_apply(struct stats_data *data,
1586 	struct rte_table_action_stats_params *p)
1587 {
1588 	data->n_packets = p->n_packets;
1589 	data->n_bytes = p->n_bytes;
1590 
1591 	return 0;
1592 }
1593 
1594 static __rte_always_inline void
1595 pkt_work_stats(struct stats_data *data,
1596 	uint16_t total_length)
1597 {
1598 	data->n_packets++;
1599 	data->n_bytes += total_length;
1600 }
1601 
1602 /**
1603  * RTE_TABLE_ACTION_TIME
1604  */
/* Per-entry timestamp, refreshed by pkt_work_time() on each hit. */
struct time_data {
	uint64_t time; /* Last recorded time value for this entry. */
} __rte_packed;
1608 
1609 static int
1610 time_apply(struct time_data *data,
1611 	struct rte_table_action_time_params *p)
1612 {
1613 	data->time = p->time;
1614 	return 0;
1615 }
1616 
1617 static __rte_always_inline void
1618 pkt_work_time(struct time_data *data,
1619 	uint64_t time)
1620 {
1621 	data->time = time;
1622 }
1623 
1624 
1625 /**
1626  * RTE_TABLE_ACTION_CRYPTO
1627  */
1628 
/* Bits of sym_crypto_data.op_mask: which crypto stages the rule uses. */
#define CRYPTO_OP_MASK_CIPHER	0x1
#define CRYPTO_OP_MASK_AUTH	0x2
#define CRYPTO_OP_MASK_AEAD	0x4

/* Layout of the crypto operation built in the mbuf private area at
 * cfg->op_offset: the crypto op and symmetric op headers followed by
 * the per-packet IV/AAD scratch space the op's pointers refer to.
 */
struct crypto_op_sym_iv_aad {
	struct rte_crypto_op op;
	struct rte_crypto_sym_op sym_op;
	union {
		/* Used when cipher and/or auth transforms are configured. */
		struct {
			uint8_t cipher_iv[
				RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
			uint8_t auth_iv[
				RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
		} cipher_auth;

		/* Used when an AEAD transform is configured. */
		struct {
			uint8_t iv[RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
			uint8_t aad[RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX];
		} aead_iv_aad;

	} iv_aad;
};
1651 
/* Per-entry state for the SYM_CRYPTO action, filled in by
 * sym_crypto_apply() and consumed by pkt_work_sym_crypto().
 */
struct sym_crypto_data {

	union {
		struct {

			/** Length of cipher iv. */
			uint16_t cipher_iv_len;

			/** Offset from start of IP header to the cipher iv. */
			uint16_t cipher_iv_data_offset;

			/** Length of cipher iv to be updated in the mbuf. */
			uint16_t cipher_iv_update_len;

			/** Offset from start of IP header to the auth iv. */
			uint16_t auth_iv_data_offset;

			/** Length of auth iv in the mbuf. */
			uint16_t auth_iv_len;

			/** Length of auth iv to be updated in the mbuf. */
			uint16_t auth_iv_update_len;

		} cipher_auth;
		struct {

			/** Length of iv. */
			uint16_t iv_len;

			/** Offset from start of IP header to the aead iv. */
			uint16_t iv_data_offset;

			/** Length of iv to be updated in the mbuf. */
			uint16_t iv_update_len;

			/** Length of aad */
			uint16_t aad_len;

			/** Offset from start of IP header to the aad. */
			uint16_t aad_data_offset;

			/** Length of aad to updated in the mbuf. */
			uint16_t aad_update_len;

		} aead;
	};

	/** Offset from start of IP header to the data. */
	uint16_t data_offset;

	/** Digest length. */
	uint16_t digest_len;

	/** block size */
	uint16_t block_size;

	/** Mask of crypto operation (CRYPTO_OP_MASK_*). */
	uint16_t op_mask;

	/** Session pointer. */
	struct rte_cryptodev_sym_session *session;

	/** Direction of crypto, encrypt or decrypt */
	uint16_t direction;

	/** Scratch buffer holding the template cipher IV + auth IV, or the
	 * AEAD IV followed by the AAD (not a size, despite its name).
	 */
	uint8_t iv_aad_data[32];

} __rte_packed;
1721 
1722 static int
1723 sym_crypto_cfg_check(struct rte_table_action_sym_crypto_config *cfg)
1724 {
1725 	if (!rte_cryptodev_is_valid_dev(cfg->cryptodev_id))
1726 		return -EINVAL;
1727 	if (cfg->mp_create == NULL || cfg->mp_init == NULL)
1728 		return -EINVAL;
1729 
1730 	return 0;
1731 }
1732 
1733 static int
1734 get_block_size(const struct rte_crypto_sym_xform *xform, uint8_t cdev_id)
1735 {
1736 	struct rte_cryptodev_info dev_info;
1737 	const struct rte_cryptodev_capabilities *cap;
1738 	uint32_t i;
1739 
1740 	rte_cryptodev_info_get(cdev_id, &dev_info);
1741 
1742 	for (i = 0; dev_info.capabilities[i].op != RTE_CRYPTO_OP_TYPE_UNDEFINED;
1743 			i++) {
1744 		cap = &dev_info.capabilities[i];
1745 
1746 		if (cap->sym.xform_type != xform->type)
1747 			continue;
1748 
1749 		if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
1750 				(cap->sym.cipher.algo == xform->cipher.algo))
1751 			return cap->sym.cipher.block_size;
1752 
1753 		if ((xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) &&
1754 				(cap->sym.aead.algo == xform->aead.algo))
1755 			return cap->sym.aead.block_size;
1756 
1757 		if (xform->type == RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED)
1758 			break;
1759 	}
1760 
1761 	return -1;
1762 }
1763 
/* Build the per-entry crypto state from the rule parameters: walk the
 * transform chain (cipher / auth / AEAD), validate IV/AAD sizes and
 * offsets against the fixed layout of struct crypto_op_sym_iv_aad,
 * cache the template IV/AAD bytes in data->iv_aad_data, and create the
 * cryptodev session.
 *
 * Returns 0 on success; -EINVAL on invalid transform/offsets, -ENOMEM
 * when an IV/AAD does not fit or the session cannot be created, -1 when
 * the device does not advertise the algorithm's block size.
 */
static int
sym_crypto_apply(struct sym_crypto_data *data,
	struct rte_table_action_sym_crypto_config *cfg,
	struct rte_table_action_sym_crypto_params *p)
{
	const struct rte_crypto_cipher_xform *cipher_xform = NULL;
	const struct rte_crypto_auth_xform *auth_xform = NULL;
	const struct rte_crypto_aead_xform *aead_xform = NULL;
	struct rte_crypto_sym_xform *xform = p->xform;
	struct rte_cryptodev_sym_session *session;
	int ret;

	memset(data, 0, sizeof(*data));

	while (xform) {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
			cipher_xform = &xform->cipher;

			/* The cipher IV must fit the scratch area and sit at
			 * the fixed offset expected by the data path.
			 */
			if (cipher_xform->iv.length >
				RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX)
				return -ENOMEM;
			if (cipher_xform->iv.offset !=
					RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET)
				return -EINVAL;

			ret = get_block_size(xform, cfg->cryptodev_id);
			if (ret < 0)
				return -1;
			data->block_size = (uint16_t)ret;
			data->op_mask |= CRYPTO_OP_MASK_CIPHER;

			data->cipher_auth.cipher_iv_len =
					cipher_xform->iv.length;
			data->cipher_auth.cipher_iv_data_offset = (uint16_t)
					p->cipher_auth.cipher_iv_update.offset;
			data->cipher_auth.cipher_iv_update_len = (uint16_t)
					p->cipher_auth.cipher_iv_update.length;

			/* Cache the template cipher IV. */
			rte_memcpy(data->iv_aad_data,
					p->cipher_auth.cipher_iv.val,
					p->cipher_auth.cipher_iv.length);

			data->direction = cipher_xform->op;

		} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
			auth_xform = &xform->auth;
			if (auth_xform->iv.length >
				RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX)
				return -ENOMEM;
			data->op_mask |= CRYPTO_OP_MASK_AUTH;

			data->cipher_auth.auth_iv_len = auth_xform->iv.length;
			data->cipher_auth.auth_iv_data_offset = (uint16_t)
					p->cipher_auth.auth_iv_update.offset;
			data->cipher_auth.auth_iv_update_len = (uint16_t)
					p->cipher_auth.auth_iv_update.length;
			data->digest_len = auth_xform->digest_length;

			/* Map the auth op onto the encrypt/decrypt notion
			 * used by the data path.
			 */
			data->direction = (auth_xform->op ==
					RTE_CRYPTO_AUTH_OP_GENERATE) ?
					RTE_CRYPTO_CIPHER_OP_ENCRYPT :
					RTE_CRYPTO_CIPHER_OP_DECRYPT;

		} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			aead_xform = &xform->aead;

			if ((aead_xform->iv.length >
				RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX) || (
				aead_xform->aad_length >
				RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX))
				return -EINVAL;
			if (aead_xform->iv.offset !=
					RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET)
				return -EINVAL;

			ret = get_block_size(xform, cfg->cryptodev_id);
			if (ret < 0)
				return -1;
			data->block_size = (uint16_t)ret;
			data->op_mask |= CRYPTO_OP_MASK_AEAD;

			data->digest_len = aead_xform->digest_length;
			data->aead.iv_len = aead_xform->iv.length;
			data->aead.aad_len = aead_xform->aad_length;

			data->aead.iv_data_offset = (uint16_t)
					p->aead.iv_update.offset;
			data->aead.iv_update_len = (uint16_t)
					p->aead.iv_update.length;
			data->aead.aad_data_offset = (uint16_t)
					p->aead.aad_update.offset;
			data->aead.aad_update_len = (uint16_t)
					p->aead.aad_update.length;

			/* Cache the template IV followed by the AAD. */
			rte_memcpy(data->iv_aad_data,
					p->aead.iv.val,
					p->aead.iv.length);

			rte_memcpy(data->iv_aad_data + p->aead.iv.length,
					p->aead.aad.val,
					p->aead.aad.length);

			data->direction = (aead_xform->op ==
					RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
					RTE_CRYPTO_CIPHER_OP_ENCRYPT :
					RTE_CRYPTO_CIPHER_OP_DECRYPT;
		} else
			return -EINVAL;

		xform = xform->next;
	}

	/* With cipher+auth chains the auth IV lives right after the cipher
	 * IV, both in the op scratch area and in iv_aad_data.
	 */
	if (auth_xform && auth_xform->iv.length) {
		if (cipher_xform) {
			if (auth_xform->iv.offset !=
					RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET +
					cipher_xform->iv.length)
				return -EINVAL;

			rte_memcpy(data->iv_aad_data + cipher_xform->iv.length,
					p->cipher_auth.auth_iv.val,
					p->cipher_auth.auth_iv.length);
		} else {
			rte_memcpy(data->iv_aad_data,
					p->cipher_auth.auth_iv.val,
					p->cipher_auth.auth_iv.length);
		}
	}

	session = rte_cryptodev_sym_session_create(cfg->cryptodev_id,
			p->xform, cfg->mp_create);
	if (!session)
		return -ENOMEM;

	data->data_offset = (uint16_t)p->data_offset;
	data->session = session;

	return 0;
}
1903 
1904 static __rte_always_inline uint64_t
1905 pkt_work_sym_crypto(struct rte_mbuf *mbuf, struct sym_crypto_data *data,
1906 		struct rte_table_action_sym_crypto_config *cfg,
1907 		uint16_t ip_offset)
1908 {
1909 	struct crypto_op_sym_iv_aad *crypto_op = (struct crypto_op_sym_iv_aad *)
1910 			RTE_MBUF_METADATA_UINT8_PTR(mbuf, cfg->op_offset);
1911 	struct rte_crypto_op *op = &crypto_op->op;
1912 	struct rte_crypto_sym_op *sym = op->sym;
1913 	uint32_t pkt_offset = sizeof(*mbuf) + mbuf->data_off;
1914 	uint32_t payload_len = pkt_offset + mbuf->data_len - data->data_offset;
1915 
1916 	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
1917 	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
1918 	op->phys_addr = rte_mbuf_iova_get(mbuf) + cfg->op_offset - sizeof(*mbuf);
1919 	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
1920 	sym->m_src = mbuf;
1921 	sym->m_dst = NULL;
1922 	sym->session = data->session;
1923 
1924 	/** pad the packet */
1925 	if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1926 		uint32_t append_len = RTE_ALIGN_CEIL(payload_len,
1927 				data->block_size) - payload_len;
1928 
1929 		if (unlikely(rte_pktmbuf_append(mbuf, append_len +
1930 				data->digest_len) == NULL))
1931 			return 1;
1932 
1933 		payload_len += append_len;
1934 	} else
1935 		payload_len -= data->digest_len;
1936 
1937 	if (data->op_mask & CRYPTO_OP_MASK_CIPHER) {
1938 		/** prepare cipher op */
1939 		uint8_t *iv = crypto_op->iv_aad.cipher_auth.cipher_iv;
1940 
1941 		sym->cipher.data.length = payload_len;
1942 		sym->cipher.data.offset = data->data_offset - pkt_offset;
1943 
1944 		if (data->cipher_auth.cipher_iv_update_len) {
1945 			uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
1946 				data->cipher_auth.cipher_iv_data_offset
1947 				+ ip_offset);
1948 
1949 			/** For encryption, update the pkt iv field, otherwise
1950 			 *  update the iv_aad_field
1951 			 */
1952 			if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
1953 				rte_memcpy(pkt_iv, data->iv_aad_data,
1954 					data->cipher_auth.cipher_iv_update_len);
1955 			else
1956 				rte_memcpy(data->iv_aad_data, pkt_iv,
1957 					data->cipher_auth.cipher_iv_update_len);
1958 		}
1959 
1960 		/** write iv */
1961 		rte_memcpy(iv, data->iv_aad_data,
1962 				data->cipher_auth.cipher_iv_len);
1963 	}
1964 
1965 	if (data->op_mask & CRYPTO_OP_MASK_AUTH) {
1966 		/** authentication always start from IP header. */
1967 		sym->auth.data.offset = ip_offset - pkt_offset;
1968 		sym->auth.data.length = mbuf->data_len - sym->auth.data.offset -
1969 				data->digest_len;
1970 		sym->auth.digest.data = rte_pktmbuf_mtod_offset(mbuf,
1971 				uint8_t *, rte_pktmbuf_pkt_len(mbuf) -
1972 				data->digest_len);
1973 		sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf,
1974 				rte_pktmbuf_pkt_len(mbuf) - data->digest_len);
1975 
1976 		if (data->cipher_auth.auth_iv_update_len) {
1977 			uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
1978 					data->cipher_auth.auth_iv_data_offset
1979 					+ ip_offset);
1980 			uint8_t *data_iv = data->iv_aad_data +
1981 					data->cipher_auth.cipher_iv_len;
1982 
1983 			if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
1984 				rte_memcpy(pkt_iv, data_iv,
1985 					data->cipher_auth.auth_iv_update_len);
1986 			else
1987 				rte_memcpy(data_iv, pkt_iv,
1988 					data->cipher_auth.auth_iv_update_len);
1989 		}
1990 
1991 		if (data->cipher_auth.auth_iv_len) {
1992 			/** prepare cipher op */
1993 			uint8_t *iv = crypto_op->iv_aad.cipher_auth.auth_iv;
1994 
1995 			rte_memcpy(iv, data->iv_aad_data +
1996 					data->cipher_auth.cipher_iv_len,
1997 					data->cipher_auth.auth_iv_len);
1998 		}
1999 	}
2000 
2001 	if (data->op_mask & CRYPTO_OP_MASK_AEAD) {
2002 		uint8_t *iv = crypto_op->iv_aad.aead_iv_aad.iv;
2003 		uint8_t *aad = crypto_op->iv_aad.aead_iv_aad.aad;
2004 
2005 		sym->aead.aad.data = aad;
2006 		sym->aead.aad.phys_addr = rte_pktmbuf_iova_offset(mbuf,
2007 				aad - rte_pktmbuf_mtod(mbuf, uint8_t *));
2008 		sym->aead.digest.data = rte_pktmbuf_mtod_offset(mbuf,
2009 				uint8_t *, rte_pktmbuf_pkt_len(mbuf) -
2010 				data->digest_len);
2011 		sym->aead.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf,
2012 				rte_pktmbuf_pkt_len(mbuf) - data->digest_len);
2013 		sym->aead.data.offset = data->data_offset - pkt_offset;
2014 		sym->aead.data.length = payload_len;
2015 
2016 		if (data->aead.iv_update_len) {
2017 			uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
2018 					data->aead.iv_data_offset + ip_offset);
2019 			uint8_t *data_iv = data->iv_aad_data;
2020 
2021 			if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
2022 				rte_memcpy(pkt_iv, data_iv,
2023 						data->aead.iv_update_len);
2024 			else
2025 				rte_memcpy(data_iv, pkt_iv,
2026 					data->aead.iv_update_len);
2027 		}
2028 
2029 		rte_memcpy(iv, data->iv_aad_data, data->aead.iv_len);
2030 
2031 		if (data->aead.aad_update_len) {
2032 			uint8_t *pkt_aad = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
2033 					data->aead.aad_data_offset + ip_offset);
2034 			uint8_t *data_aad = data->iv_aad_data +
2035 					data->aead.iv_len;
2036 
2037 			if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
2038 				rte_memcpy(pkt_aad, data_aad,
2039 						data->aead.iv_update_len);
2040 			else
2041 				rte_memcpy(data_aad, pkt_aad,
2042 					data->aead.iv_update_len);
2043 		}
2044 
2045 		rte_memcpy(aad, data->iv_aad_data + data->aead.iv_len,
2046 					data->aead.aad_len);
2047 	}
2048 
2049 	return 0;
2050 }
2051 
2052 /**
2053  * RTE_TABLE_ACTION_TAG
2054  */
/* Per-entry tag, written into the mbuf flow director hash field by
 * pkt_work_tag()/pkt4_work_tag().
 */
struct tag_data {
	uint32_t tag;
} __rte_packed;
2058 
2059 static int
2060 tag_apply(struct tag_data *data,
2061 	struct rte_table_action_tag_params *p)
2062 {
2063 	data->tag = p->tag;
2064 	return 0;
2065 }
2066 
2067 static __rte_always_inline void
2068 pkt_work_tag(struct rte_mbuf *mbuf,
2069 	struct tag_data *data)
2070 {
2071 	mbuf->hash.fdir.hi = data->tag;
2072 	mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
2073 }
2074 
2075 static __rte_always_inline void
2076 pkt4_work_tag(struct rte_mbuf *mbuf0,
2077 	struct rte_mbuf *mbuf1,
2078 	struct rte_mbuf *mbuf2,
2079 	struct rte_mbuf *mbuf3,
2080 	struct tag_data *data0,
2081 	struct tag_data *data1,
2082 	struct tag_data *data2,
2083 	struct tag_data *data3)
2084 {
2085 	mbuf0->hash.fdir.hi = data0->tag;
2086 	mbuf1->hash.fdir.hi = data1->tag;
2087 	mbuf2->hash.fdir.hi = data2->tag;
2088 	mbuf3->hash.fdir.hi = data3->tag;
2089 
2090 	mbuf0->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
2091 	mbuf1->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
2092 	mbuf2->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
2093 	mbuf3->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
2094 }
2095 
2096 /**
2097  * RTE_TABLE_ACTION_DECAP
2098  */
/* Per-entry decap amount: number of bytes stripped from the front of
 * the packet by pkt_work_decap()/pkt4_work_decap().
 */
struct decap_data {
	uint16_t n;
} __rte_packed;
2102 
2103 static int
2104 decap_apply(struct decap_data *data,
2105 	struct rte_table_action_decap_params *p)
2106 {
2107 	data->n = p->n;
2108 	return 0;
2109 }
2110 
2111 static __rte_always_inline void
2112 pkt_work_decap(struct rte_mbuf *mbuf,
2113 	struct decap_data *data)
2114 {
2115 	uint16_t data_off = mbuf->data_off;
2116 	uint16_t data_len = mbuf->data_len;
2117 	uint32_t pkt_len = mbuf->pkt_len;
2118 	uint16_t n = data->n;
2119 
2120 	mbuf->data_off = data_off + n;
2121 	mbuf->data_len = data_len - n;
2122 	mbuf->pkt_len = pkt_len - n;
2123 }
2124 
2125 static __rte_always_inline void
2126 pkt4_work_decap(struct rte_mbuf *mbuf0,
2127 	struct rte_mbuf *mbuf1,
2128 	struct rte_mbuf *mbuf2,
2129 	struct rte_mbuf *mbuf3,
2130 	struct decap_data *data0,
2131 	struct decap_data *data1,
2132 	struct decap_data *data2,
2133 	struct decap_data *data3)
2134 {
2135 	uint16_t data_off0 = mbuf0->data_off;
2136 	uint16_t data_len0 = mbuf0->data_len;
2137 	uint32_t pkt_len0 = mbuf0->pkt_len;
2138 
2139 	uint16_t data_off1 = mbuf1->data_off;
2140 	uint16_t data_len1 = mbuf1->data_len;
2141 	uint32_t pkt_len1 = mbuf1->pkt_len;
2142 
2143 	uint16_t data_off2 = mbuf2->data_off;
2144 	uint16_t data_len2 = mbuf2->data_len;
2145 	uint32_t pkt_len2 = mbuf2->pkt_len;
2146 
2147 	uint16_t data_off3 = mbuf3->data_off;
2148 	uint16_t data_len3 = mbuf3->data_len;
2149 	uint32_t pkt_len3 = mbuf3->pkt_len;
2150 
2151 	uint16_t n0 = data0->n;
2152 	uint16_t n1 = data1->n;
2153 	uint16_t n2 = data2->n;
2154 	uint16_t n3 = data3->n;
2155 
2156 	mbuf0->data_off = data_off0 + n0;
2157 	mbuf0->data_len = data_len0 - n0;
2158 	mbuf0->pkt_len = pkt_len0 - n0;
2159 
2160 	mbuf1->data_off = data_off1 + n1;
2161 	mbuf1->data_len = data_len1 - n1;
2162 	mbuf1->pkt_len = pkt_len1 - n1;
2163 
2164 	mbuf2->data_off = data_off2 + n2;
2165 	mbuf2->data_len = data_len2 - n2;
2166 	mbuf2->pkt_len = pkt_len2 - n2;
2167 
2168 	mbuf3->data_off = data_off3 + n3;
2169 	mbuf3->data_len = data_len3 - n3;
2170 	mbuf3->pkt_len = pkt_len3 - n3;
2171 }
2172 
2173 /**
2174  * Action profile
2175  */
2176 static int
2177 action_valid(enum rte_table_action_type action)
2178 {
2179 	switch (action) {
2180 	case RTE_TABLE_ACTION_FWD:
2181 	case RTE_TABLE_ACTION_LB:
2182 	case RTE_TABLE_ACTION_MTR:
2183 	case RTE_TABLE_ACTION_TM:
2184 	case RTE_TABLE_ACTION_ENCAP:
2185 	case RTE_TABLE_ACTION_NAT:
2186 	case RTE_TABLE_ACTION_TTL:
2187 	case RTE_TABLE_ACTION_STATS:
2188 	case RTE_TABLE_ACTION_TIME:
2189 	case RTE_TABLE_ACTION_SYM_CRYPTO:
2190 	case RTE_TABLE_ACTION_TAG:
2191 	case RTE_TABLE_ACTION_DECAP:
2192 		return 1;
2193 	default:
2194 		return 0;
2195 	}
2196 }
2197 
2198 
2199 #define RTE_TABLE_ACTION_MAX                      64
2200 
/* Aggregate of all per-action configurations registered on a profile. */
struct ap_config {
	uint64_t action_mask; /* Bit i set => action type i is enabled. */
	struct rte_table_action_common_config common;
	struct rte_table_action_lb_config lb;
	struct rte_table_action_mtr_config mtr;
	struct rte_table_action_tm_config tm;
	struct rte_table_action_encap_config encap;
	struct rte_table_action_nat_config nat;
	struct rte_table_action_ttl_config ttl;
	struct rte_table_action_stats_config stats;
	struct rte_table_action_sym_crypto_config sym_crypto;
};
2213 
2214 static size_t
2215 action_cfg_size(enum rte_table_action_type action)
2216 {
2217 	switch (action) {
2218 	case RTE_TABLE_ACTION_LB:
2219 		return sizeof(struct rte_table_action_lb_config);
2220 	case RTE_TABLE_ACTION_MTR:
2221 		return sizeof(struct rte_table_action_mtr_config);
2222 	case RTE_TABLE_ACTION_TM:
2223 		return sizeof(struct rte_table_action_tm_config);
2224 	case RTE_TABLE_ACTION_ENCAP:
2225 		return sizeof(struct rte_table_action_encap_config);
2226 	case RTE_TABLE_ACTION_NAT:
2227 		return sizeof(struct rte_table_action_nat_config);
2228 	case RTE_TABLE_ACTION_TTL:
2229 		return sizeof(struct rte_table_action_ttl_config);
2230 	case RTE_TABLE_ACTION_STATS:
2231 		return sizeof(struct rte_table_action_stats_config);
2232 	case RTE_TABLE_ACTION_SYM_CRYPTO:
2233 		return sizeof(struct rte_table_action_sym_crypto_config);
2234 	default:
2235 		return 0;
2236 	}
2237 }
2238 
2239 static void*
2240 action_cfg_get(struct ap_config *ap_config,
2241 	enum rte_table_action_type type)
2242 {
2243 	switch (type) {
2244 	case RTE_TABLE_ACTION_LB:
2245 		return &ap_config->lb;
2246 
2247 	case RTE_TABLE_ACTION_MTR:
2248 		return &ap_config->mtr;
2249 
2250 	case RTE_TABLE_ACTION_TM:
2251 		return &ap_config->tm;
2252 
2253 	case RTE_TABLE_ACTION_ENCAP:
2254 		return &ap_config->encap;
2255 
2256 	case RTE_TABLE_ACTION_NAT:
2257 		return &ap_config->nat;
2258 
2259 	case RTE_TABLE_ACTION_TTL:
2260 		return &ap_config->ttl;
2261 
2262 	case RTE_TABLE_ACTION_STATS:
2263 		return &ap_config->stats;
2264 
2265 	case RTE_TABLE_ACTION_SYM_CRYPTO:
2266 		return &ap_config->sym_crypto;
2267 	default:
2268 		return NULL;
2269 	}
2270 }
2271 
2272 static void
2273 action_cfg_set(struct ap_config *ap_config,
2274 	enum rte_table_action_type type,
2275 	void *action_cfg)
2276 {
2277 	void *dst = action_cfg_get(ap_config, type);
2278 
2279 	if (dst)
2280 		memcpy(dst, action_cfg, action_cfg_size(type));
2281 
2282 	ap_config->action_mask |= 1LLU << type;
2283 }
2284 
/* Byte layout of the per-table-entry action data area. */
struct ap_data {
	size_t offset[RTE_TABLE_ACTION_MAX]; /* Data offset of each enabled action. */
	size_t total_size; /* Total size of the action data area (bytes). */
};
2289 
2290 static size_t
2291 action_data_size(enum rte_table_action_type action,
2292 	struct ap_config *ap_config)
2293 {
2294 	switch (action) {
2295 	case RTE_TABLE_ACTION_FWD:
2296 		return sizeof(struct fwd_data);
2297 
2298 	case RTE_TABLE_ACTION_LB:
2299 		return sizeof(struct lb_data);
2300 
2301 	case RTE_TABLE_ACTION_MTR:
2302 		return mtr_data_size(&ap_config->mtr);
2303 
2304 	case RTE_TABLE_ACTION_TM:
2305 		return sizeof(struct tm_data);
2306 
2307 	case RTE_TABLE_ACTION_ENCAP:
2308 		return encap_data_size(&ap_config->encap);
2309 
2310 	case RTE_TABLE_ACTION_NAT:
2311 		return nat_data_size(&ap_config->nat,
2312 			&ap_config->common);
2313 
2314 	case RTE_TABLE_ACTION_TTL:
2315 		return sizeof(struct ttl_data);
2316 
2317 	case RTE_TABLE_ACTION_STATS:
2318 		return sizeof(struct stats_data);
2319 
2320 	case RTE_TABLE_ACTION_TIME:
2321 		return sizeof(struct time_data);
2322 
2323 	case RTE_TABLE_ACTION_SYM_CRYPTO:
2324 		return (sizeof(struct sym_crypto_data));
2325 
2326 	case RTE_TABLE_ACTION_TAG:
2327 		return sizeof(struct tag_data);
2328 
2329 	case RTE_TABLE_ACTION_DECAP:
2330 		return sizeof(struct decap_data);
2331 
2332 	default:
2333 		return 0;
2334 	}
2335 }
2336 
2337 
2338 static void
2339 action_data_offset_set(struct ap_data *ap_data,
2340 	struct ap_config *ap_config)
2341 {
2342 	uint64_t action_mask = ap_config->action_mask;
2343 	size_t offset;
2344 	uint32_t action;
2345 
2346 	memset(ap_data->offset, 0, sizeof(ap_data->offset));
2347 
2348 	offset = 0;
2349 	for (action = 0; action < RTE_TABLE_ACTION_MAX; action++)
2350 		if (action_mask & (1LLU << action)) {
2351 			ap_data->offset[action] = offset;
2352 			offset += action_data_size((enum rte_table_action_type)action,
2353 				ap_config);
2354 		}
2355 
2356 	ap_data->total_size = offset;
2357 }
2358 
/* Action profile: set of registered actions plus their derived data layout. */
struct rte_table_action_profile {
	struct ap_config cfg; /* Registered action configurations. */
	struct ap_data data; /* Data layout computed at freeze time. */
	int frozen; /* Once set, no further actions can be registered. */
};
2364 
2365 struct rte_table_action_profile *
2366 rte_table_action_profile_create(struct rte_table_action_common_config *common)
2367 {
2368 	struct rte_table_action_profile *ap;
2369 
2370 	/* Check input arguments */
2371 	if (common == NULL)
2372 		return NULL;
2373 
2374 	/* Memory allocation */
2375 	ap = calloc(1, sizeof(struct rte_table_action_profile));
2376 	if (ap == NULL)
2377 		return NULL;
2378 
2379 	/* Initialization */
2380 	memcpy(&ap->cfg.common, common, sizeof(*common));
2381 
2382 	return ap;
2383 }
2384 
2385 
2386 int
2387 rte_table_action_profile_action_register(struct rte_table_action_profile *profile,
2388 	enum rte_table_action_type type,
2389 	void *action_config)
2390 {
2391 	int status;
2392 
2393 	/* Check input arguments */
2394 	if ((profile == NULL) ||
2395 		profile->frozen ||
2396 		(action_valid(type) == 0) ||
2397 		(profile->cfg.action_mask & (1LLU << type)) ||
2398 		((action_cfg_size(type) == 0) && action_config) ||
2399 		(action_cfg_size(type) && (action_config == NULL)))
2400 		return -EINVAL;
2401 
2402 	switch (type) {
2403 	case RTE_TABLE_ACTION_LB:
2404 		status = lb_cfg_check(action_config);
2405 		break;
2406 
2407 	case RTE_TABLE_ACTION_MTR:
2408 		status = mtr_cfg_check(action_config);
2409 		break;
2410 
2411 	case RTE_TABLE_ACTION_TM:
2412 		status = tm_cfg_check(action_config);
2413 		break;
2414 
2415 	case RTE_TABLE_ACTION_ENCAP:
2416 		status = encap_cfg_check(action_config);
2417 		break;
2418 
2419 	case RTE_TABLE_ACTION_NAT:
2420 		status = nat_cfg_check(action_config);
2421 		break;
2422 
2423 	case RTE_TABLE_ACTION_TTL:
2424 		status = ttl_cfg_check(action_config);
2425 		break;
2426 
2427 	case RTE_TABLE_ACTION_STATS:
2428 		status = stats_cfg_check(action_config);
2429 		break;
2430 
2431 	case RTE_TABLE_ACTION_SYM_CRYPTO:
2432 		status = sym_crypto_cfg_check(action_config);
2433 		break;
2434 
2435 	default:
2436 		status = 0;
2437 		break;
2438 	}
2439 
2440 	if (status)
2441 		return status;
2442 
2443 	/* Action enable */
2444 	action_cfg_set(&profile->cfg, type, action_config);
2445 
2446 	return 0;
2447 }
2448 
2449 int
2450 rte_table_action_profile_freeze(struct rte_table_action_profile *profile)
2451 {
2452 	if (profile->frozen)
2453 		return -EBUSY;
2454 
2455 	profile->cfg.action_mask |= 1LLU << RTE_TABLE_ACTION_FWD;
2456 	action_data_offset_set(&profile->data, &profile->cfg);
2457 	profile->frozen = 1;
2458 
2459 	return 0;
2460 }
2461 
/**
 * Release an action profile. Accepts NULL (no-op) and always returns 0.
 */
int
rte_table_action_profile_free(struct rte_table_action_profile *profile)
{
	free(profile); /* free(NULL) is a no-op. */
	return 0;
}
2471 
2472 /**
2473  * Action
2474  */
2475 #define METER_PROFILES_MAX                                 32
2476 
/* Run-time action object instantiated from a frozen profile. */
struct rte_table_action {
	struct ap_config cfg; /* Snapshot of the profile configuration. */
	struct ap_data data; /* Snapshot of the profile data layout. */
	struct dscp_table_data dscp_table; /* DSCP -> (color, tc, tc queue) map. */
	struct meter_profile_data mp[METER_PROFILES_MAX]; /* Meter profile table. */
};
2483 
2484 struct rte_table_action *
2485 rte_table_action_create(struct rte_table_action_profile *profile,
2486 	uint32_t socket_id)
2487 {
2488 	struct rte_table_action *action;
2489 
2490 	/* Check input arguments */
2491 	if ((profile == NULL) ||
2492 		(profile->frozen == 0))
2493 		return NULL;
2494 
2495 	/* Memory allocation */
2496 	action = rte_zmalloc_socket(NULL,
2497 		sizeof(struct rte_table_action),
2498 		RTE_CACHE_LINE_SIZE,
2499 		socket_id);
2500 	if (action == NULL)
2501 		return NULL;
2502 
2503 	/* Initialization */
2504 	memcpy(&action->cfg, &profile->cfg, sizeof(profile->cfg));
2505 	memcpy(&action->data, &profile->data, sizeof(profile->data));
2506 
2507 	return action;
2508 }
2509 
2510 static __rte_always_inline void *
2511 action_data_get(void *data,
2512 	struct rte_table_action *action,
2513 	enum rte_table_action_type type)
2514 {
2515 	size_t offset = action->data.offset[type];
2516 	uint8_t *data_bytes = data;
2517 
2518 	return &data_bytes[offset];
2519 }
2520 
/**
 * Write the parameters of one action into a table entry's action data.
 *
 * @param action Action object the entry belongs to.
 * @param data Table entry action data area (struct rte_pipeline_table_entry).
 * @param type Action type to apply; must be enabled on this action object.
 * @param action_params Per-type parameter structure.
 * @return 0 on success, -EINVAL on bad arguments, or the per-type
 *   apply function's error code.
 */
int
rte_table_action_apply(struct rte_table_action *action,
	void *data,
	enum rte_table_action_type type,
	void *action_params)
{
	void *action_data;

	/* Check input arguments */
	if ((action == NULL) ||
		(data == NULL) ||
		(action_valid(type) == 0) ||
		((action->cfg.action_mask & (1LLU << type)) == 0) ||
		(action_params == NULL))
		return -EINVAL;

	/* Data update */
	action_data = action_data_get(data, action, type);

	/* Dispatch to the per-type apply function. */
	switch (type) {
	case RTE_TABLE_ACTION_FWD:
		return fwd_apply(action_data,
			action_params);

	case RTE_TABLE_ACTION_LB:
		return lb_apply(action_data,
			action_params);

	case RTE_TABLE_ACTION_MTR:
		return mtr_apply(action_data,
			action_params,
			&action->cfg.mtr,
			action->mp,
			RTE_DIM(action->mp));

	case RTE_TABLE_ACTION_TM:
		return tm_apply(action_data,
			action_params,
			&action->cfg.tm);

	case RTE_TABLE_ACTION_ENCAP:
		return encap_apply(action_data,
			action_params,
			&action->cfg.encap,
			&action->cfg.common);

	case RTE_TABLE_ACTION_NAT:
		return nat_apply(action_data,
			action_params,
			&action->cfg.common);

	case RTE_TABLE_ACTION_TTL:
		return ttl_apply(action_data,
			action_params);

	case RTE_TABLE_ACTION_STATS:
		return stats_apply(action_data,
			action_params);

	case RTE_TABLE_ACTION_TIME:
		return time_apply(action_data,
			action_params);

	case RTE_TABLE_ACTION_SYM_CRYPTO:
		return sym_crypto_apply(action_data,
				&action->cfg.sym_crypto,
				action_params);

	case RTE_TABLE_ACTION_TAG:
		return tag_apply(action_data,
			action_params);

	case RTE_TABLE_ACTION_DECAP:
		return decap_apply(action_data,
			action_params);

	default:
		return -EINVAL;
	}
}
2601 
2602 int
2603 rte_table_action_dscp_table_update(struct rte_table_action *action,
2604 	uint64_t dscp_mask,
2605 	struct rte_table_action_dscp_table *table)
2606 {
2607 	uint32_t i;
2608 
2609 	/* Check input arguments */
2610 	if ((action == NULL) ||
2611 		((action->cfg.action_mask & ((1LLU << RTE_TABLE_ACTION_MTR) |
2612 		(1LLU << RTE_TABLE_ACTION_TM))) == 0) ||
2613 		(dscp_mask == 0) ||
2614 		(table == NULL))
2615 		return -EINVAL;
2616 
2617 	for (i = 0; i < RTE_DIM(table->entry); i++) {
2618 		struct dscp_table_entry_data *data =
2619 			&action->dscp_table.entry[i];
2620 		struct rte_table_action_dscp_table_entry *entry =
2621 			&table->entry[i];
2622 
2623 		if ((dscp_mask & (1LLU << i)) == 0)
2624 			continue;
2625 
2626 		data->color = entry->color;
2627 		data->tc = entry->tc_id;
2628 		data->tc_queue = entry->tc_queue_id;
2629 	}
2630 
2631 	return 0;
2632 }
2633 
2634 int
2635 rte_table_action_meter_profile_add(struct rte_table_action *action,
2636 	uint32_t meter_profile_id,
2637 	struct rte_table_action_meter_profile *profile)
2638 {
2639 	struct meter_profile_data *mp_data;
2640 	uint32_t status;
2641 
2642 	/* Check input arguments */
2643 	if ((action == NULL) ||
2644 		((action->cfg.action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0) ||
2645 		(profile == NULL))
2646 		return -EINVAL;
2647 
2648 	if (profile->alg != RTE_TABLE_ACTION_METER_TRTCM)
2649 		return -ENOTSUP;
2650 
2651 	mp_data = meter_profile_data_find(action->mp,
2652 		RTE_DIM(action->mp),
2653 		meter_profile_id);
2654 	if (mp_data)
2655 		return -EEXIST;
2656 
2657 	mp_data = meter_profile_data_find_unused(action->mp,
2658 		RTE_DIM(action->mp));
2659 	if (!mp_data)
2660 		return -ENOSPC;
2661 
2662 	/* Install new profile */
2663 	status = rte_meter_trtcm_profile_config(&mp_data->profile,
2664 		&profile->trtcm);
2665 	if (status)
2666 		return status;
2667 
2668 	mp_data->profile_id = meter_profile_id;
2669 	mp_data->valid = 1;
2670 
2671 	return 0;
2672 }
2673 
2674 int
2675 rte_table_action_meter_profile_delete(struct rte_table_action *action,
2676 	uint32_t meter_profile_id)
2677 {
2678 	struct meter_profile_data *mp_data;
2679 
2680 	/* Check input arguments */
2681 	if ((action == NULL) ||
2682 		((action->cfg.action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0))
2683 		return -EINVAL;
2684 
2685 	mp_data = meter_profile_data_find(action->mp,
2686 		RTE_DIM(action->mp),
2687 		meter_profile_id);
2688 	if (!mp_data)
2689 		return 0;
2690 
2691 	/* Uninstall profile */
2692 	mp_data->valid = 0;
2693 
2694 	return 0;
2695 }
2696 
/**
 * Read and/or clear the per-traffic-class meter statistics of one table
 * entry.
 *
 * @param action Action object with the MTR action enabled.
 * @param data Table entry action data area.
 * @param tc_mask Bit i selects traffic class i; must fit within the
 *   number of traffic classes configured for the MTR action.
 * @param stats Output counters; may be NULL to skip reading.
 * @param clear When non-zero, reset the selected counters after reading.
 * @return 0 on success, -EINVAL on bad arguments.
 */
int
rte_table_action_meter_read(struct rte_table_action *action,
	void *data,
	uint32_t tc_mask,
	struct rte_table_action_mtr_counters *stats,
	int clear)
{
	struct mtr_trtcm_data *mtr_data;
	uint32_t i;

	/* Check input arguments */
	if ((action == NULL) ||
		((action->cfg.action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0) ||
		(data == NULL) ||
		(tc_mask > RTE_LEN2MASK(action->cfg.mtr.n_tc, uint32_t)))
		return -EINVAL;

	mtr_data = action_data_get(data, action, RTE_TABLE_ACTION_MTR);

	/* Read */
	if (stats) {
		for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
			struct rte_table_action_mtr_counters_tc *dst =
				&stats->stats[i];
			struct mtr_trtcm_data *src = &mtr_data[i];

			if ((tc_mask & (1 << i)) == 0)
				continue;

			dst->n_packets[RTE_COLOR_GREEN] =
				mtr_trtcm_data_stats_get(src, RTE_COLOR_GREEN);

			dst->n_packets[RTE_COLOR_YELLOW] =
				mtr_trtcm_data_stats_get(src, RTE_COLOR_YELLOW);

			dst->n_packets[RTE_COLOR_RED] =
				mtr_trtcm_data_stats_get(src, RTE_COLOR_RED);

			/* Only packet counters are maintained by this action. */
			dst->n_packets_valid = 1;
			dst->n_bytes_valid = 0;
		}

		stats->tc_mask = tc_mask;
	}

	/* Clear */
	if (clear)
		for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
			struct mtr_trtcm_data *src = &mtr_data[i];

			if ((tc_mask & (1 << i)) == 0)
				continue;

			mtr_trtcm_data_stats_reset(src, RTE_COLOR_GREEN);
			mtr_trtcm_data_stats_reset(src, RTE_COLOR_YELLOW);
			mtr_trtcm_data_stats_reset(src, RTE_COLOR_RED);
		}


	return 0;
}
2758 
2759 int
2760 rte_table_action_ttl_read(struct rte_table_action *action,
2761 	void *data,
2762 	struct rte_table_action_ttl_counters *stats,
2763 	int clear)
2764 {
2765 	struct ttl_data *ttl_data;
2766 
2767 	/* Check input arguments */
2768 	if ((action == NULL) ||
2769 		((action->cfg.action_mask &
2770 		(1LLU << RTE_TABLE_ACTION_TTL)) == 0) ||
2771 		(data == NULL))
2772 		return -EINVAL;
2773 
2774 	ttl_data = action_data_get(data, action, RTE_TABLE_ACTION_TTL);
2775 
2776 	/* Read */
2777 	if (stats)
2778 		stats->n_packets = TTL_STATS_READ(ttl_data);
2779 
2780 	/* Clear */
2781 	if (clear)
2782 		TTL_STATS_RESET(ttl_data);
2783 
2784 	return 0;
2785 }
2786 
2787 int
2788 rte_table_action_stats_read(struct rte_table_action *action,
2789 	void *data,
2790 	struct rte_table_action_stats_counters *stats,
2791 	int clear)
2792 {
2793 	struct stats_data *stats_data;
2794 
2795 	/* Check input arguments */
2796 	if ((action == NULL) ||
2797 		((action->cfg.action_mask &
2798 		(1LLU << RTE_TABLE_ACTION_STATS)) == 0) ||
2799 		(data == NULL))
2800 		return -EINVAL;
2801 
2802 	stats_data = action_data_get(data, action,
2803 		RTE_TABLE_ACTION_STATS);
2804 
2805 	/* Read */
2806 	if (stats) {
2807 		stats->n_packets = stats_data->n_packets;
2808 		stats->n_bytes = stats_data->n_bytes;
2809 		stats->n_packets_valid = 1;
2810 		stats->n_bytes_valid = 1;
2811 	}
2812 
2813 	/* Clear */
2814 	if (clear) {
2815 		stats_data->n_packets = 0;
2816 		stats_data->n_bytes = 0;
2817 	}
2818 
2819 	return 0;
2820 }
2821 
2822 int
2823 rte_table_action_time_read(struct rte_table_action *action,
2824 	void *data,
2825 	uint64_t *timestamp)
2826 {
2827 	struct time_data *time_data;
2828 
2829 	/* Check input arguments */
2830 	if ((action == NULL) ||
2831 		((action->cfg.action_mask &
2832 		(1LLU << RTE_TABLE_ACTION_TIME)) == 0) ||
2833 		(data == NULL) ||
2834 		(timestamp == NULL))
2835 		return -EINVAL;
2836 
2837 	time_data = action_data_get(data, action, RTE_TABLE_ACTION_TIME);
2838 
2839 	/* Read */
2840 	*timestamp = time_data->time;
2841 
2842 	return 0;
2843 }
2844 
2845 struct rte_cryptodev_sym_session *
2846 rte_table_action_crypto_sym_session_get(struct rte_table_action *action,
2847 	void *data)
2848 {
2849 	struct sym_crypto_data *sym_crypto_data;
2850 
2851 	/* Check input arguments */
2852 	if ((action == NULL) ||
2853 		((action->cfg.action_mask &
2854 		(1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) == 0) ||
2855 		(data == NULL))
2856 		return NULL;
2857 
2858 	sym_crypto_data = action_data_get(data, action,
2859 			RTE_TABLE_ACTION_SYM_CRYPTO);
2860 
2861 	return sym_crypto_data->session;
2862 }
2863 
/**
 * Fast path: run every enabled action on a single packet, in a fixed
 * order (LB, MTR, TM, DECAP, ENCAP, NAT, TTL, STATS, TIME, SYM_CRYPTO,
 * TAG). Returns a non-zero drop mask when any action decides to drop
 * the packet.
 */
static __rte_always_inline uint64_t
pkt_work(struct rte_mbuf *mbuf,
	struct rte_pipeline_table_entry *table_entry,
	uint64_t time,
	struct rte_table_action *action,
	struct ap_config *cfg)
{
	uint64_t drop_mask = 0;

	uint32_t ip_offset = action->cfg.common.ip_offset;
	void *ip = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ip_offset);

	uint32_t dscp;
	uint16_t total_length;

	/* Extract DSCP and total length once; several actions consume them. */
	if (cfg->common.ip_version) {
		struct rte_ipv4_hdr *hdr = ip;

		/* IPv4: DSCP is the top 6 bits of the TOS byte. */
		dscp = hdr->type_of_service >> 2;
		total_length = rte_ntohs(hdr->total_length);
	} else {
		struct rte_ipv6_hdr *hdr = ip;

		/* NOTE(review): mask 0x0F600000 >> 18 does not match the
		 * usual IPv6 DSCP extraction ((vtc_flow >> 22) & 0x3F) —
		 * confirm this is intentional.
		 */
		dscp = (rte_ntohl(hdr->vtc_flow) & 0x0F600000) >> 18;
		/* IPv6 payload_len excludes the fixed header; add it back. */
		total_length = rte_ntohs(hdr->payload_len) +
			sizeof(struct rte_ipv6_hdr);
	}

	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
		void *data =
			action_data_get(table_entry, action, RTE_TABLE_ACTION_LB);

		pkt_work_lb(mbuf,
			data,
			&cfg->lb);
	}
	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
		void *data =
			action_data_get(table_entry, action, RTE_TABLE_ACTION_MTR);

		drop_mask |= pkt_work_mtr(mbuf,
			data,
			&action->dscp_table,
			action->mp,
			time,
			dscp,
			total_length);
	}

	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
		void *data =
			action_data_get(table_entry, action, RTE_TABLE_ACTION_TM);

		pkt_work_tm(mbuf,
			data,
			&action->dscp_table,
			dscp);
	}

	/* DECAP runs before ENCAP so the new headers wrap the stripped packet. */
	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
		void *data = action_data_get(table_entry,
			action,
			RTE_TABLE_ACTION_DECAP);

		pkt_work_decap(mbuf, data);
	}

	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
		void *data =
			action_data_get(table_entry, action, RTE_TABLE_ACTION_ENCAP);

		pkt_work_encap(mbuf,
			data,
			&cfg->encap,
			ip,
			total_length,
			ip_offset);
	}

	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
		void *data =
			action_data_get(table_entry, action, RTE_TABLE_ACTION_NAT);

		if (cfg->common.ip_version)
			pkt_ipv4_work_nat(ip, data, &cfg->nat);
		else
			pkt_ipv6_work_nat(ip, data, &cfg->nat);
	}

	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
		void *data =
			action_data_get(table_entry, action, RTE_TABLE_ACTION_TTL);

		if (cfg->common.ip_version)
			drop_mask |= pkt_ipv4_work_ttl(ip, data);
		else
			drop_mask |= pkt_ipv6_work_ttl(ip, data);
	}

	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
		void *data =
			action_data_get(table_entry, action, RTE_TABLE_ACTION_STATS);

		pkt_work_stats(data, total_length);
	}

	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
		void *data =
			action_data_get(table_entry, action, RTE_TABLE_ACTION_TIME);

		pkt_work_time(data, time);
	}

	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) {
		void *data = action_data_get(table_entry, action,
				RTE_TABLE_ACTION_SYM_CRYPTO);

		drop_mask |= pkt_work_sym_crypto(mbuf, data, &cfg->sym_crypto,
				ip_offset);
	}

	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) {
		void *data = action_data_get(table_entry,
			action,
			RTE_TABLE_ACTION_TAG);

		pkt_work_tag(mbuf, data);
	}

	return drop_mask;
}
2995 
/**
 * Four-packet unrolled variant of pkt_work(): runs every enabled action
 * on four packets in the same fixed action order. Returns a 4-bit drop
 * mask with bit i set when packet i is to be dropped.
 */
static __rte_always_inline uint64_t
pkt4_work(struct rte_mbuf **mbufs,
	struct rte_pipeline_table_entry **table_entries,
	uint64_t time,
	struct rte_table_action *action,
	struct ap_config *cfg)
{
	uint64_t drop_mask0 = 0;
	uint64_t drop_mask1 = 0;
	uint64_t drop_mask2 = 0;
	uint64_t drop_mask3 = 0;

	struct rte_mbuf *mbuf0 = mbufs[0];
	struct rte_mbuf *mbuf1 = mbufs[1];
	struct rte_mbuf *mbuf2 = mbufs[2];
	struct rte_mbuf *mbuf3 = mbufs[3];

	struct rte_pipeline_table_entry *table_entry0 = table_entries[0];
	struct rte_pipeline_table_entry *table_entry1 = table_entries[1];
	struct rte_pipeline_table_entry *table_entry2 = table_entries[2];
	struct rte_pipeline_table_entry *table_entry3 = table_entries[3];

	uint32_t ip_offset = action->cfg.common.ip_offset;
	void *ip0 = RTE_MBUF_METADATA_UINT32_PTR(mbuf0, ip_offset);
	void *ip1 = RTE_MBUF_METADATA_UINT32_PTR(mbuf1, ip_offset);
	void *ip2 = RTE_MBUF_METADATA_UINT32_PTR(mbuf2, ip_offset);
	void *ip3 = RTE_MBUF_METADATA_UINT32_PTR(mbuf3, ip_offset);

	uint32_t dscp0, dscp1, dscp2, dscp3;
	uint16_t total_length0, total_length1, total_length2, total_length3;

	/* Extract DSCP and total length once per packet (see pkt_work()). */
	if (cfg->common.ip_version) {
		struct rte_ipv4_hdr *hdr0 = ip0;
		struct rte_ipv4_hdr *hdr1 = ip1;
		struct rte_ipv4_hdr *hdr2 = ip2;
		struct rte_ipv4_hdr *hdr3 = ip3;

		dscp0 = hdr0->type_of_service >> 2;
		dscp1 = hdr1->type_of_service >> 2;
		dscp2 = hdr2->type_of_service >> 2;
		dscp3 = hdr3->type_of_service >> 2;

		total_length0 = rte_ntohs(hdr0->total_length);
		total_length1 = rte_ntohs(hdr1->total_length);
		total_length2 = rte_ntohs(hdr2->total_length);
		total_length3 = rte_ntohs(hdr3->total_length);
	} else {
		struct rte_ipv6_hdr *hdr0 = ip0;
		struct rte_ipv6_hdr *hdr1 = ip1;
		struct rte_ipv6_hdr *hdr2 = ip2;
		struct rte_ipv6_hdr *hdr3 = ip3;

		/* NOTE(review): same unusual IPv6 DSCP mask as pkt_work() —
		 * confirm intended.
		 */
		dscp0 = (rte_ntohl(hdr0->vtc_flow) & 0x0F600000) >> 18;
		dscp1 = (rte_ntohl(hdr1->vtc_flow) & 0x0F600000) >> 18;
		dscp2 = (rte_ntohl(hdr2->vtc_flow) & 0x0F600000) >> 18;
		dscp3 = (rte_ntohl(hdr3->vtc_flow) & 0x0F600000) >> 18;

		total_length0 = rte_ntohs(hdr0->payload_len) +
			sizeof(struct rte_ipv6_hdr);
		total_length1 = rte_ntohs(hdr1->payload_len) +
			sizeof(struct rte_ipv6_hdr);
		total_length2 = rte_ntohs(hdr2->payload_len) +
			sizeof(struct rte_ipv6_hdr);
		total_length3 = rte_ntohs(hdr3->payload_len) +
			sizeof(struct rte_ipv6_hdr);
	}

	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
		void *data0 =
			action_data_get(table_entry0, action, RTE_TABLE_ACTION_LB);
		void *data1 =
			action_data_get(table_entry1, action, RTE_TABLE_ACTION_LB);
		void *data2 =
			action_data_get(table_entry2, action, RTE_TABLE_ACTION_LB);
		void *data3 =
			action_data_get(table_entry3, action, RTE_TABLE_ACTION_LB);

		pkt_work_lb(mbuf0,
			data0,
			&cfg->lb);

		pkt_work_lb(mbuf1,
			data1,
			&cfg->lb);

		pkt_work_lb(mbuf2,
			data2,
			&cfg->lb);

		pkt_work_lb(mbuf3,
			data3,
			&cfg->lb);
	}

	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
		void *data0 =
			action_data_get(table_entry0, action, RTE_TABLE_ACTION_MTR);
		void *data1 =
			action_data_get(table_entry1, action, RTE_TABLE_ACTION_MTR);
		void *data2 =
			action_data_get(table_entry2, action, RTE_TABLE_ACTION_MTR);
		void *data3 =
			action_data_get(table_entry3, action, RTE_TABLE_ACTION_MTR);

		drop_mask0 |= pkt_work_mtr(mbuf0,
			data0,
			&action->dscp_table,
			action->mp,
			time,
			dscp0,
			total_length0);

		drop_mask1 |= pkt_work_mtr(mbuf1,
			data1,
			&action->dscp_table,
			action->mp,
			time,
			dscp1,
			total_length1);

		drop_mask2 |= pkt_work_mtr(mbuf2,
			data2,
			&action->dscp_table,
			action->mp,
			time,
			dscp2,
			total_length2);

		drop_mask3 |= pkt_work_mtr(mbuf3,
			data3,
			&action->dscp_table,
			action->mp,
			time,
			dscp3,
			total_length3);
	}

	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
		void *data0 =
			action_data_get(table_entry0, action, RTE_TABLE_ACTION_TM);
		void *data1 =
			action_data_get(table_entry1, action, RTE_TABLE_ACTION_TM);
		void *data2 =
			action_data_get(table_entry2, action, RTE_TABLE_ACTION_TM);
		void *data3 =
			action_data_get(table_entry3, action, RTE_TABLE_ACTION_TM);

		pkt_work_tm(mbuf0,
			data0,
			&action->dscp_table,
			dscp0);

		pkt_work_tm(mbuf1,
			data1,
			&action->dscp_table,
			dscp1);

		pkt_work_tm(mbuf2,
			data2,
			&action->dscp_table,
			dscp2);

		pkt_work_tm(mbuf3,
			data3,
			&action->dscp_table,
			dscp3);
	}

	/* DECAP runs before ENCAP, matching pkt_work(). */
	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
		void *data0 = action_data_get(table_entry0,
			action,
			RTE_TABLE_ACTION_DECAP);
		void *data1 = action_data_get(table_entry1,
			action,
			RTE_TABLE_ACTION_DECAP);
		void *data2 = action_data_get(table_entry2,
			action,
			RTE_TABLE_ACTION_DECAP);
		void *data3 = action_data_get(table_entry3,
			action,
			RTE_TABLE_ACTION_DECAP);

		pkt4_work_decap(mbuf0, mbuf1, mbuf2, mbuf3,
			data0, data1, data2, data3);
	}

	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
		void *data0 =
			action_data_get(table_entry0, action, RTE_TABLE_ACTION_ENCAP);
		void *data1 =
			action_data_get(table_entry1, action, RTE_TABLE_ACTION_ENCAP);
		void *data2 =
			action_data_get(table_entry2, action, RTE_TABLE_ACTION_ENCAP);
		void *data3 =
			action_data_get(table_entry3, action, RTE_TABLE_ACTION_ENCAP);

		pkt_work_encap(mbuf0,
			data0,
			&cfg->encap,
			ip0,
			total_length0,
			ip_offset);

		pkt_work_encap(mbuf1,
			data1,
			&cfg->encap,
			ip1,
			total_length1,
			ip_offset);

		pkt_work_encap(mbuf2,
			data2,
			&cfg->encap,
			ip2,
			total_length2,
			ip_offset);

		pkt_work_encap(mbuf3,
			data3,
			&cfg->encap,
			ip3,
			total_length3,
			ip_offset);
	}

	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
		void *data0 =
			action_data_get(table_entry0, action, RTE_TABLE_ACTION_NAT);
		void *data1 =
			action_data_get(table_entry1, action, RTE_TABLE_ACTION_NAT);
		void *data2 =
			action_data_get(table_entry2, action, RTE_TABLE_ACTION_NAT);
		void *data3 =
			action_data_get(table_entry3, action, RTE_TABLE_ACTION_NAT);

		if (cfg->common.ip_version) {
			pkt_ipv4_work_nat(ip0, data0, &cfg->nat);
			pkt_ipv4_work_nat(ip1, data1, &cfg->nat);
			pkt_ipv4_work_nat(ip2, data2, &cfg->nat);
			pkt_ipv4_work_nat(ip3, data3, &cfg->nat);
		} else {
			pkt_ipv6_work_nat(ip0, data0, &cfg->nat);
			pkt_ipv6_work_nat(ip1, data1, &cfg->nat);
			pkt_ipv6_work_nat(ip2, data2, &cfg->nat);
			pkt_ipv6_work_nat(ip3, data3, &cfg->nat);
		}
	}

	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
		void *data0 =
			action_data_get(table_entry0, action, RTE_TABLE_ACTION_TTL);
		void *data1 =
			action_data_get(table_entry1, action, RTE_TABLE_ACTION_TTL);
		void *data2 =
			action_data_get(table_entry2, action, RTE_TABLE_ACTION_TTL);
		void *data3 =
			action_data_get(table_entry3, action, RTE_TABLE_ACTION_TTL);

		if (cfg->common.ip_version) {
			drop_mask0 |= pkt_ipv4_work_ttl(ip0, data0);
			drop_mask1 |= pkt_ipv4_work_ttl(ip1, data1);
			drop_mask2 |= pkt_ipv4_work_ttl(ip2, data2);
			drop_mask3 |= pkt_ipv4_work_ttl(ip3, data3);
		} else {
			drop_mask0 |= pkt_ipv6_work_ttl(ip0, data0);
			drop_mask1 |= pkt_ipv6_work_ttl(ip1, data1);
			drop_mask2 |= pkt_ipv6_work_ttl(ip2, data2);
			drop_mask3 |= pkt_ipv6_work_ttl(ip3, data3);
		}
	}

	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
		void *data0 =
			action_data_get(table_entry0, action, RTE_TABLE_ACTION_STATS);
		void *data1 =
			action_data_get(table_entry1, action, RTE_TABLE_ACTION_STATS);
		void *data2 =
			action_data_get(table_entry2, action, RTE_TABLE_ACTION_STATS);
		void *data3 =
			action_data_get(table_entry3, action, RTE_TABLE_ACTION_STATS);

		pkt_work_stats(data0, total_length0);
		pkt_work_stats(data1, total_length1);
		pkt_work_stats(data2, total_length2);
		pkt_work_stats(data3, total_length3);
	}

	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
		void *data0 =
			action_data_get(table_entry0, action, RTE_TABLE_ACTION_TIME);
		void *data1 =
			action_data_get(table_entry1, action, RTE_TABLE_ACTION_TIME);
		void *data2 =
			action_data_get(table_entry2, action, RTE_TABLE_ACTION_TIME);
		void *data3 =
			action_data_get(table_entry3, action, RTE_TABLE_ACTION_TIME);

		pkt_work_time(data0, time);
		pkt_work_time(data1, time);
		pkt_work_time(data2, time);
		pkt_work_time(data3, time);
	}

	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) {
		void *data0 = action_data_get(table_entry0, action,
				RTE_TABLE_ACTION_SYM_CRYPTO);
		void *data1 = action_data_get(table_entry1, action,
				RTE_TABLE_ACTION_SYM_CRYPTO);
		void *data2 = action_data_get(table_entry2, action,
				RTE_TABLE_ACTION_SYM_CRYPTO);
		void *data3 = action_data_get(table_entry3, action,
				RTE_TABLE_ACTION_SYM_CRYPTO);

		drop_mask0 |= pkt_work_sym_crypto(mbuf0, data0, &cfg->sym_crypto,
				ip_offset);
		drop_mask1 |= pkt_work_sym_crypto(mbuf1, data1, &cfg->sym_crypto,
				ip_offset);
		drop_mask2 |= pkt_work_sym_crypto(mbuf2, data2, &cfg->sym_crypto,
				ip_offset);
		drop_mask3 |= pkt_work_sym_crypto(mbuf3, data3, &cfg->sym_crypto,
				ip_offset);
	}

	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) {
		void *data0 = action_data_get(table_entry0,
			action,
			RTE_TABLE_ACTION_TAG);
		void *data1 = action_data_get(table_entry1,
			action,
			RTE_TABLE_ACTION_TAG);
		void *data2 = action_data_get(table_entry2,
			action,
			RTE_TABLE_ACTION_TAG);
		void *data3 = action_data_get(table_entry3,
			action,
			RTE_TABLE_ACTION_TAG);

		pkt4_work_tag(mbuf0, mbuf1, mbuf2, mbuf3,
			data0, data1, data2, data3);
	}

	/* Pack per-packet drop decisions into one 4-bit mask. */
	return drop_mask0 |
		(drop_mask1 << 1) |
		(drop_mask2 << 2) |
		(drop_mask3 << 3);
}
3342 
3343 static __rte_always_inline int
3344 ah(struct rte_pipeline *p,
3345 	struct rte_mbuf **pkts,
3346 	uint64_t pkts_mask,
3347 	struct rte_pipeline_table_entry **entries,
3348 	struct rte_table_action *action,
3349 	struct ap_config *cfg)
3350 {
3351 	uint64_t pkts_drop_mask = 0;
3352 	uint64_t time = 0;
3353 
3354 	if (cfg->action_mask & ((1LLU << RTE_TABLE_ACTION_MTR) |
3355 		(1LLU << RTE_TABLE_ACTION_TIME)))
3356 		time = rte_rdtsc();
3357 
3358 	if ((pkts_mask & (pkts_mask + 1)) == 0) {
3359 		uint64_t n_pkts = rte_popcount64(pkts_mask);
3360 		uint32_t i;
3361 
3362 		for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) {
3363 			uint64_t drop_mask;
3364 
3365 			drop_mask = pkt4_work(&pkts[i],
3366 				&entries[i],
3367 				time,
3368 				action,
3369 				cfg);
3370 
3371 			pkts_drop_mask |= drop_mask << i;
3372 		}
3373 
3374 		for ( ; i < n_pkts; i++) {
3375 			uint64_t drop_mask;
3376 
3377 			drop_mask = pkt_work(pkts[i],
3378 				entries[i],
3379 				time,
3380 				action,
3381 				cfg);
3382 
3383 			pkts_drop_mask |= drop_mask << i;
3384 		}
3385 	} else
3386 		for ( ; pkts_mask; ) {
3387 			uint32_t pos = rte_ctz64(pkts_mask);
3388 			uint64_t pkt_mask = 1LLU << pos;
3389 			uint64_t drop_mask;
3390 
3391 			drop_mask = pkt_work(pkts[pos],
3392 				entries[pos],
3393 				time,
3394 				action,
3395 				cfg);
3396 
3397 			pkts_mask &= ~pkt_mask;
3398 			pkts_drop_mask |= drop_mask << pos;
3399 		}
3400 
3401 	rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
3402 
3403 	return 0;
3404 }
3405 
3406 static int
3407 ah_default(struct rte_pipeline *p,
3408 	struct rte_mbuf **pkts,
3409 	uint64_t pkts_mask,
3410 	struct rte_pipeline_table_entry **entries,
3411 	void *arg)
3412 {
3413 	struct rte_table_action *action = arg;
3414 
3415 	return ah(p,
3416 		pkts,
3417 		pkts_mask,
3418 		entries,
3419 		action,
3420 		&action->cfg);
3421 }
3422 
3423 static rte_pipeline_table_action_handler_hit
3424 ah_selector(struct rte_table_action *action)
3425 {
3426 	if (action->cfg.action_mask == (1LLU << RTE_TABLE_ACTION_FWD))
3427 		return NULL;
3428 
3429 	return ah_default;
3430 }
3431 
3432 int
3433 rte_table_action_table_params_get(struct rte_table_action *action,
3434 	struct rte_pipeline_table_params *params)
3435 {
3436 	rte_pipeline_table_action_handler_hit f_action_hit;
3437 	uint32_t total_size;
3438 
3439 	/* Check input arguments */
3440 	if ((action == NULL) ||
3441 		(params == NULL))
3442 		return -EINVAL;
3443 
3444 	f_action_hit = ah_selector(action);
3445 	total_size = rte_align32pow2(action->data.total_size);
3446 
3447 	/* Fill in params */
3448 	params->f_action_hit = f_action_hit;
3449 	params->f_action_miss = NULL;
3450 	params->arg_ah = (f_action_hit) ? action : NULL;
3451 	params->action_data_size = total_size -
3452 		sizeof(struct rte_pipeline_table_entry);
3453 
3454 	return 0;
3455 }
3456 
3457 int
3458 rte_table_action_free(struct rte_table_action *action)
3459 {
3460 	if (action == NULL)
3461 		return 0;
3462 
3463 	rte_free(action);
3464 
3465 	return 0;
3466 }
3467