xref: /dpdk/drivers/net/dpaa2/dpaa2_flow.c (revision e6bf3256b95c77ee4d0b2874e1896d01c41c2d7c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2022 NXP
3  */
4 
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <sys/mman.h>
13 
14 #include <rte_ethdev.h>
15 #include <rte_log.h>
16 #include <rte_malloc.h>
17 #include <rte_flow_driver.h>
18 #include <rte_tailq.h>
19 
20 #include <fsl_dpni.h>
21 #include <fsl_dpkg.h>
22 
23 #include <dpaa2_ethdev.h>
24 #include <dpaa2_pmd_logs.h>
25 
26 static char *dpaa2_flow_control_log;
27 static uint16_t dpaa2_flow_miss_flow_id; /* Default miss flow id is 0. */
28 static int dpaa2_sp_loaded = -1;
29 
30 enum dpaa2_flow_entry_size {
31 	DPAA2_FLOW_ENTRY_MIN_SIZE = (DPNI_MAX_KEY_SIZE / 2),
32 	DPAA2_FLOW_ENTRY_MAX_SIZE = DPNI_MAX_KEY_SIZE
33 };
34 
35 enum dpaa2_flow_dist_type {
36 	DPAA2_FLOW_QOS_TYPE = 1 << 0,
37 	DPAA2_FLOW_FS_TYPE = 1 << 1
38 };
39 
40 #define DPAA2_FLOW_RAW_OFFSET_FIELD_SHIFT	16
41 #define DPAA2_FLOW_MAX_KEY_SIZE			16
42 #define DPAA2_PROT_FIELD_STRING_SIZE		16
43 #define VXLAN_HF_VNI 0x08
44 
45 struct dpaa2_dev_flow {
46 	LIST_ENTRY(dpaa2_dev_flow) next;
47 	struct dpni_rule_cfg qos_rule;
48 	uint8_t *qos_key_addr;
49 	uint8_t *qos_mask_addr;
50 	uint16_t qos_rule_size;
51 	struct dpni_rule_cfg fs_rule;
52 	uint8_t qos_real_key_size;
53 	uint8_t fs_real_key_size;
54 	uint8_t *fs_key_addr;
55 	uint8_t *fs_mask_addr;
56 	uint16_t fs_rule_size;
57 	uint8_t tc_id; /** Traffic Class ID. */
58 	uint8_t tc_index; /** index within this Traffic Class. */
59 	enum rte_flow_action_type action_type;
60 	struct dpni_fs_action_cfg fs_action_cfg;
61 };
62 
63 struct rte_dpaa2_flow_item {
64 	struct rte_flow_item generic_item;
65 	int in_tunnel;
66 };
67 
68 static const
69 enum rte_flow_item_type dpaa2_hp_supported_pattern_type[] = {
70 	RTE_FLOW_ITEM_TYPE_END,
71 	RTE_FLOW_ITEM_TYPE_ETH,
72 	RTE_FLOW_ITEM_TYPE_VLAN,
73 	RTE_FLOW_ITEM_TYPE_IPV4,
74 	RTE_FLOW_ITEM_TYPE_IPV6,
75 	RTE_FLOW_ITEM_TYPE_ICMP,
76 	RTE_FLOW_ITEM_TYPE_UDP,
77 	RTE_FLOW_ITEM_TYPE_TCP,
78 	RTE_FLOW_ITEM_TYPE_SCTP,
79 	RTE_FLOW_ITEM_TYPE_GRE,
80 	RTE_FLOW_ITEM_TYPE_GTP,
81 	RTE_FLOW_ITEM_TYPE_ESP,
82 	RTE_FLOW_ITEM_TYPE_AH,
83 	RTE_FLOW_ITEM_TYPE_RAW
84 };
85 
86 static const
87 enum rte_flow_item_type dpaa2_sp_supported_pattern_type[] = {
88 	RTE_FLOW_ITEM_TYPE_VXLAN,
89 	RTE_FLOW_ITEM_TYPE_ECPRI
90 };
91 
92 static const
93 enum rte_flow_action_type dpaa2_supported_action_type[] = {
94 	RTE_FLOW_ACTION_TYPE_END,
95 	RTE_FLOW_ACTION_TYPE_QUEUE,
96 	RTE_FLOW_ACTION_TYPE_PORT_ID,
97 	RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
98 	RTE_FLOW_ACTION_TYPE_RSS
99 };
100 
101 #ifndef __cplusplus
102 static const struct rte_flow_item_eth dpaa2_flow_item_eth_mask = {
103 	.hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
104 	.hdr.src_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
105 	.hdr.ether_type = RTE_BE16(0xffff),
106 };
107 
108 static const struct rte_flow_item_vlan dpaa2_flow_item_vlan_mask = {
109 	.hdr.vlan_tci = RTE_BE16(0xffff),
110 };
111 
112 static const struct rte_flow_item_ipv4 dpaa2_flow_item_ipv4_mask = {
113 	.hdr.src_addr = RTE_BE32(0xffffffff),
114 	.hdr.dst_addr = RTE_BE32(0xffffffff),
115 	.hdr.next_proto_id = 0xff,
116 };
117 
118 static const struct rte_flow_item_ipv6 dpaa2_flow_item_ipv6_mask = {
119 	.hdr = {
120 		.src_addr = RTE_IPV6_MASK_FULL,
121 		.dst_addr = RTE_IPV6_MASK_FULL,
122 		.proto = 0xff
123 	},
124 };
125 
126 static const struct rte_flow_item_icmp dpaa2_flow_item_icmp_mask = {
127 	.hdr.icmp_type = 0xff,
128 	.hdr.icmp_code = 0xff,
129 };
130 
131 static const struct rte_flow_item_udp dpaa2_flow_item_udp_mask = {
132 	.hdr = {
133 		.src_port = RTE_BE16(0xffff),
134 		.dst_port = RTE_BE16(0xffff),
135 	},
136 };
137 
138 static const struct rte_flow_item_tcp dpaa2_flow_item_tcp_mask = {
139 	.hdr = {
140 		.src_port = RTE_BE16(0xffff),
141 		.dst_port = RTE_BE16(0xffff),
142 	},
143 };
144 
145 static const struct rte_flow_item_sctp dpaa2_flow_item_sctp_mask = {
146 	.hdr = {
147 		.src_port = RTE_BE16(0xffff),
148 		.dst_port = RTE_BE16(0xffff),
149 	},
150 };
151 
152 static const struct rte_flow_item_esp dpaa2_flow_item_esp_mask = {
153 	.hdr = {
154 		.spi = RTE_BE32(0xffffffff),
155 		.seq = RTE_BE32(0xffffffff),
156 	},
157 };
158 
159 static const struct rte_flow_item_ah dpaa2_flow_item_ah_mask = {
160 	.spi = RTE_BE32(0xffffffff),
161 };
162 
163 static const struct rte_flow_item_gre dpaa2_flow_item_gre_mask = {
164 	.protocol = RTE_BE16(0xffff),
165 };
166 
167 static const struct rte_flow_item_vxlan dpaa2_flow_item_vxlan_mask = {
168 	.flags = 0xff,
169 	.vni = { 0xff, 0xff, 0xff },
170 };
171 
172 static const struct rte_flow_item_ecpri dpaa2_flow_item_ecpri_mask = {
173 	.hdr.common.type = 0xff,
174 	.hdr.dummy[0] = RTE_BE32(0xffffffff),
175 	.hdr.dummy[1] = RTE_BE32(0xffffffff),
176 	.hdr.dummy[2] = RTE_BE32(0xffffffff),
177 };
178 
179 static const struct rte_flow_item_gtp dpaa2_flow_item_gtp_mask = {
180 	.teid = RTE_BE32(0xffffffff),
181 };
182 
183 #endif
184 
185 #define DPAA2_FLOW_DUMP printf
186 
187 static inline void
188 dpaa2_prot_field_string(uint32_t prot, uint32_t field,
189 	char *string)
190 {
191 	if (!dpaa2_flow_control_log)
192 		return;
193 
194 	if (prot == NET_PROT_ETH) {
195 		strcpy(string, "eth");
196 		if (field == NH_FLD_ETH_DA)
197 			strcat(string, ".dst");
198 		else if (field == NH_FLD_ETH_SA)
199 			strcat(string, ".src");
200 		else if (field == NH_FLD_ETH_TYPE)
201 			strcat(string, ".type");
202 		else
203 			strcat(string, ".unknown field");
204 	} else if (prot == NET_PROT_VLAN) {
205 		strcpy(string, "vlan");
206 		if (field == NH_FLD_VLAN_TCI)
207 			strcat(string, ".tci");
208 		else
209 			strcat(string, ".unknown field");
210 	} else if (prot == NET_PROT_IP) {
211 		strcpy(string, "ip");
212 		if (field == NH_FLD_IP_SRC)
213 			strcat(string, ".src");
214 		else if (field == NH_FLD_IP_DST)
215 			strcat(string, ".dst");
216 		else if (field == NH_FLD_IP_PROTO)
217 			strcat(string, ".proto");
218 		else
219 			strcat(string, ".unknown field");
220 	} else if (prot == NET_PROT_TCP) {
221 		strcpy(string, "tcp");
222 		if (field == NH_FLD_TCP_PORT_SRC)
223 			strcat(string, ".src");
224 		else if (field == NH_FLD_TCP_PORT_DST)
225 			strcat(string, ".dst");
226 		else
227 			strcat(string, ".unknown field");
228 	} else if (prot == NET_PROT_UDP) {
229 		strcpy(string, "udp");
230 		if (field == NH_FLD_UDP_PORT_SRC)
231 			strcat(string, ".src");
232 		else if (field == NH_FLD_UDP_PORT_DST)
233 			strcat(string, ".dst");
234 		else
235 			strcat(string, ".unknown field");
236 	} else if (prot == NET_PROT_ICMP) {
237 		strcpy(string, "icmp");
238 		if (field == NH_FLD_ICMP_TYPE)
239 			strcat(string, ".type");
240 		else if (field == NH_FLD_ICMP_CODE)
241 			strcat(string, ".code");
242 		else
243 			strcat(string, ".unknown field");
244 	} else if (prot == NET_PROT_SCTP) {
245 		strcpy(string, "sctp");
246 		if (field == NH_FLD_SCTP_PORT_SRC)
247 			strcat(string, ".src");
248 		else if (field == NH_FLD_SCTP_PORT_DST)
249 			strcat(string, ".dst");
250 		else
251 			strcat(string, ".unknown field");
252 	} else if (prot == NET_PROT_GRE) {
253 		strcpy(string, "gre");
254 		if (field == NH_FLD_GRE_TYPE)
255 			strcat(string, ".type");
256 		else
257 			strcat(string, ".unknown field");
258 	} else if (prot == NET_PROT_GTP) {
259 		rte_strscpy(string, "gtp", DPAA2_PROT_FIELD_STRING_SIZE);
260 		if (field == NH_FLD_GTP_TEID)
261 			strcat(string, ".teid");
262 		else
263 			strcat(string, ".unknown field");
264 	} else if (prot == NET_PROT_IPSEC_ESP) {
265 		rte_strscpy(string, "esp", DPAA2_PROT_FIELD_STRING_SIZE);
266 		if (field == NH_FLD_IPSEC_ESP_SPI)
267 			strcat(string, ".spi");
268 		else if (field == NH_FLD_IPSEC_ESP_SEQUENCE_NUM)
269 			strcat(string, ".seq");
270 		else
271 			strcat(string, ".unknown field");
272 	} else {
273 		sprintf(string, "unknown protocol(%d)", prot);
274 	}
275 }
276 
277 static inline void
278 dpaa2_flow_qos_extracts_log(const struct dpaa2_dev_priv *priv)
279 {
280 	int idx;
281 	char string[32];
282 	const struct dpkg_profile_cfg *dpkg =
283 		&priv->extract.qos_key_extract.dpkg;
284 	const struct dpkg_extract *extract;
285 	enum dpkg_extract_type type;
286 	enum net_prot prot;
287 	uint32_t field;
288 
289 	if (!dpaa2_flow_control_log)
290 		return;
291 
292 	DPAA2_FLOW_DUMP("QoS table: %d extracts\r\n",
293 		dpkg->num_extracts);
294 	for (idx = 0; idx < dpkg->num_extracts; idx++) {
295 		extract = &dpkg->extracts[idx];
296 		type = extract->type;
297 		if (type == DPKG_EXTRACT_FROM_HDR) {
298 			prot = extract->extract.from_hdr.prot;
299 			field = extract->extract.from_hdr.field;
300 			dpaa2_prot_field_string(prot, field,
301 				string);
302 		} else if (type == DPKG_EXTRACT_FROM_DATA) {
303 			sprintf(string, "raw offset/len: %d/%d",
304 				extract->extract.from_data.offset,
305 				extract->extract.from_data.size);
306 		} else if (type == DPKG_EXTRACT_FROM_PARSE) {
307 			sprintf(string, "parse offset/len: %d/%d",
308 				extract->extract.from_parse.offset,
309 				extract->extract.from_parse.size);
310 		}
311 		DPAA2_FLOW_DUMP("%s", string);
312 		if ((idx + 1) < dpkg->num_extracts)
313 			DPAA2_FLOW_DUMP(" / ");
314 	}
315 	DPAA2_FLOW_DUMP("\r\n");
316 }
317 
318 static inline void
319 dpaa2_flow_fs_extracts_log(const struct dpaa2_dev_priv *priv,
320 	int tc_id)
321 {
322 	int idx;
323 	char string[32];
324 	const struct dpkg_profile_cfg *dpkg =
325 		&priv->extract.tc_key_extract[tc_id].dpkg;
326 	const struct dpkg_extract *extract;
327 	enum dpkg_extract_type type;
328 	enum net_prot prot;
329 	uint32_t field;
330 
331 	if (!dpaa2_flow_control_log)
332 		return;
333 
334 	DPAA2_FLOW_DUMP("FS table: %d extracts in TC[%d]\r\n",
335 		dpkg->num_extracts, tc_id);
336 	for (idx = 0; idx < dpkg->num_extracts; idx++) {
337 		extract = &dpkg->extracts[idx];
338 		type = extract->type;
339 		if (type == DPKG_EXTRACT_FROM_HDR) {
340 			prot = extract->extract.from_hdr.prot;
341 			field = extract->extract.from_hdr.field;
342 			dpaa2_prot_field_string(prot, field,
343 				string);
344 		} else if (type == DPKG_EXTRACT_FROM_DATA) {
345 			sprintf(string, "raw offset/len: %d/%d",
346 				extract->extract.from_data.offset,
347 				extract->extract.from_data.size);
348 		} else if (type == DPKG_EXTRACT_FROM_PARSE) {
349 			sprintf(string, "parse offset/len: %d/%d",
350 				extract->extract.from_parse.offset,
351 				extract->extract.from_parse.size);
352 		}
353 		DPAA2_FLOW_DUMP("%s", string);
354 		if ((idx + 1) < dpkg->num_extracts)
355 			DPAA2_FLOW_DUMP(" / ");
356 	}
357 	DPAA2_FLOW_DUMP("\r\n");
358 }
359 
360 static inline void
361 dpaa2_flow_qos_entry_log(const char *log_info,
362 	const struct dpaa2_dev_flow *flow, int qos_index)
363 {
364 	int idx;
365 	uint8_t *key, *mask;
366 
367 	if (!dpaa2_flow_control_log)
368 		return;
369 
370 	if (qos_index >= 0) {
371 		DPAA2_FLOW_DUMP("%s QoS entry[%d](size %d/%d) for TC[%d]\r\n",
372 			log_info, qos_index, flow->qos_rule_size,
373 			flow->qos_rule.key_size,
374 			flow->tc_id);
375 	} else {
376 		DPAA2_FLOW_DUMP("%s QoS entry(size %d/%d) for TC[%d]\r\n",
377 			log_info, flow->qos_rule_size,
378 			flow->qos_rule.key_size,
379 			flow->tc_id);
380 	}
381 
382 	key = flow->qos_key_addr;
383 	mask = flow->qos_mask_addr;
384 
385 	DPAA2_FLOW_DUMP("key:\r\n");
386 	for (idx = 0; idx < flow->qos_rule_size; idx++)
387 		DPAA2_FLOW_DUMP("%02x ", key[idx]);
388 
389 	DPAA2_FLOW_DUMP("\r\nmask:\r\n");
390 	for (idx = 0; idx < flow->qos_rule_size; idx++)
391 		DPAA2_FLOW_DUMP("%02x ", mask[idx]);
392 	DPAA2_FLOW_DUMP("\r\n");
393 }
394 
395 static inline void
396 dpaa2_flow_fs_entry_log(const char *log_info,
397 	const struct dpaa2_dev_flow *flow)
398 {
399 	int idx;
400 	uint8_t *key, *mask;
401 
402 	if (!dpaa2_flow_control_log)
403 		return;
404 
405 	DPAA2_FLOW_DUMP("%s FS/TC entry[%d](size %d/%d) of TC[%d]\r\n",
406 		log_info, flow->tc_index,
407 		flow->fs_rule_size, flow->fs_rule.key_size,
408 		flow->tc_id);
409 
410 	key = flow->fs_key_addr;
411 	mask = flow->fs_mask_addr;
412 
413 	DPAA2_FLOW_DUMP("key:\r\n");
414 	for (idx = 0; idx < flow->fs_rule_size; idx++)
415 		DPAA2_FLOW_DUMP("%02x ", key[idx]);
416 
417 	DPAA2_FLOW_DUMP("\r\nmask:\r\n");
418 	for (idx = 0; idx < flow->fs_rule_size; idx++)
419 		DPAA2_FLOW_DUMP("%02x ", mask[idx]);
420 	DPAA2_FLOW_DUMP("\r\n");
421 }
422 
423 /** For LX2160A, LS2088A and LS1088A*/
424 #define WRIOP_CCSR_BASE 0x8b80000
425 #define WRIOP_CCSR_CTLU_OFFSET 0
426 #define WRIOP_CCSR_CTLU_PARSER_OFFSET 0
427 #define WRIOP_CCSR_CTLU_PARSER_INGRESS_OFFSET 0
428 
429 #define WRIOP_INGRESS_PARSER_PHY \
430 	(WRIOP_CCSR_BASE + WRIOP_CCSR_CTLU_OFFSET + \
431 	WRIOP_CCSR_CTLU_PARSER_OFFSET + \
432 	WRIOP_CCSR_CTLU_PARSER_INGRESS_OFFSET)
433 
434 struct dpaa2_parser_ccsr {
435 	uint32_t psr_cfg;
436 	uint32_t psr_idle;
437 	uint32_t psr_pclm;
438 	uint8_t psr_ver_min;
439 	uint8_t psr_ver_maj;
440 	uint8_t psr_id1_l;
441 	uint8_t psr_id1_h;
442 	uint32_t psr_rev2;
443 	uint8_t rsv[0x2c];
444 	uint8_t sp_ins[4032];
445 };
446 
447 int
448 dpaa2_soft_parser_loaded(void)
449 {
450 	int fd, i, ret = 0;
451 	struct dpaa2_parser_ccsr *parser_ccsr = NULL;
452 
453 	dpaa2_flow_control_log = getenv("DPAA2_FLOW_CONTROL_LOG");
454 
455 	if (dpaa2_sp_loaded >= 0)
456 		return dpaa2_sp_loaded;
457 
458 	fd = open("/dev/mem", O_RDWR | O_SYNC);
459 	if (fd < 0) {
460 		DPAA2_PMD_ERR("open \"/dev/mem\" ERROR(%d)", fd);
461 		ret = fd;
462 		goto exit;
463 	}
464 
465 	parser_ccsr = mmap(NULL, sizeof(struct dpaa2_parser_ccsr),
466 		PROT_READ | PROT_WRITE, MAP_SHARED, fd,
467 		WRIOP_INGRESS_PARSER_PHY);
468 	if (!parser_ccsr) {
469 		DPAA2_PMD_ERR("Map 0x%" PRIx64 "(size=0x%x) failed",
470 			(uint64_t)WRIOP_INGRESS_PARSER_PHY,
471 			(uint32_t)sizeof(struct dpaa2_parser_ccsr));
472 		ret = -ENOBUFS;
473 		goto exit;
474 	}
475 
476 	DPAA2_PMD_INFO("Parser ID:0x%02x%02x, Rev:major(%02x), minor(%02x)",
477 		parser_ccsr->psr_id1_h, parser_ccsr->psr_id1_l,
478 		parser_ccsr->psr_ver_maj, parser_ccsr->psr_ver_min);
479 
480 	if (dpaa2_flow_control_log) {
481 		for (i = 0; i < 64; i++) {
482 			DPAA2_FLOW_DUMP("%02x ",
483 				parser_ccsr->sp_ins[i]);
484 			if (!((i + 1) % 16))
485 				DPAA2_FLOW_DUMP("\r\n");
486 		}
487 	}
488 
489 	for (i = 0; i < 16; i++) {
490 		if (parser_ccsr->sp_ins[i]) {
491 			dpaa2_sp_loaded = 1;
492 			break;
493 		}
494 	}
495 	if (dpaa2_sp_loaded < 0)
496 		dpaa2_sp_loaded = 0;
497 
498 	ret = dpaa2_sp_loaded;
499 
500 exit:
501 	if (parser_ccsr)
502 		munmap(parser_ccsr, sizeof(struct dpaa2_parser_ccsr));
503 	if (fd >= 0)
504 		close(fd);
505 
506 	return ret;
507 }
508 
509 static int
510 dpaa2_flow_ip_address_extract(enum net_prot prot,
511 	uint32_t field)
512 {
513 	if (prot == NET_PROT_IPV4 &&
514 		(field == NH_FLD_IPV4_SRC_IP ||
515 		field == NH_FLD_IPV4_DST_IP))
516 		return true;
517 	else if (prot == NET_PROT_IPV6 &&
518 		(field == NH_FLD_IPV6_SRC_IP ||
519 		field == NH_FLD_IPV6_DST_IP))
520 		return true;
521 	else if (prot == NET_PROT_IP &&
522 		(field == NH_FLD_IP_SRC ||
523 		field == NH_FLD_IP_DST))
524 		return true;
525 
526 	return false;
527 }
528 
529 static int
530 dpaa2_flow_l4_src_port_extract(enum net_prot prot,
531 	uint32_t field)
532 {
533 	if (prot == NET_PROT_TCP &&
534 		field == NH_FLD_TCP_PORT_SRC)
535 		return true;
536 	else if (prot == NET_PROT_UDP &&
537 		field == NH_FLD_UDP_PORT_SRC)
538 		return true;
539 	else if (prot == NET_PROT_SCTP &&
540 		field == NH_FLD_SCTP_PORT_SRC)
541 		return true;
542 
543 	return false;
544 }
545 
546 static int
547 dpaa2_flow_l4_dst_port_extract(enum net_prot prot,
548 	uint32_t field)
549 {
550 	if (prot == NET_PROT_TCP &&
551 		field == NH_FLD_TCP_PORT_DST)
552 		return true;
553 	else if (prot == NET_PROT_UDP &&
554 		field == NH_FLD_UDP_PORT_DST)
555 		return true;
556 	else if (prot == NET_PROT_SCTP &&
557 		field == NH_FLD_SCTP_PORT_DST)
558 		return true;
559 
560 	return false;
561 }
562 
563 static int
564 dpaa2_flow_add_qos_rule(struct dpaa2_dev_priv *priv,
565 	struct dpaa2_dev_flow *flow)
566 {
567 	uint16_t qos_index;
568 	int ret;
569 	struct fsl_mc_io *dpni = priv->hw;
570 
571 	if (priv->num_rx_tc <= 1 &&
572 		flow->action_type != RTE_FLOW_ACTION_TYPE_RSS) {
573 		DPAA2_PMD_WARN("No QoS Table for FS");
574 		return -EINVAL;
575 	}
576 
577 	/* QoS entry added is only effective for multiple TCs.*/
578 	qos_index = flow->tc_id * priv->fs_entries + flow->tc_index;
579 	if (qos_index >= priv->qos_entries) {
580 		DPAA2_PMD_ERR("QoS table full(%d >= %d)",
581 			qos_index, priv->qos_entries);
582 		return -EINVAL;
583 	}
584 
585 	dpaa2_flow_qos_entry_log("Start add", flow, qos_index);
586 
587 	ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
588 			priv->token, &flow->qos_rule,
589 			flow->tc_id, qos_index,
590 			0, 0);
591 	if (ret < 0) {
592 		DPAA2_PMD_ERR("Add entry(%d) to table(%d) failed",
593 			qos_index, flow->tc_id);
594 		return ret;
595 	}
596 
597 	return 0;
598 }
599 
600 static int
601 dpaa2_flow_add_fs_rule(struct dpaa2_dev_priv *priv,
602 	struct dpaa2_dev_flow *flow)
603 {
604 	int ret;
605 	struct fsl_mc_io *dpni = priv->hw;
606 
607 	if (flow->tc_index >= priv->fs_entries) {
608 		DPAA2_PMD_ERR("FS table full(%d >= %d)",
609 			flow->tc_index, priv->fs_entries);
610 		return -EINVAL;
611 	}
612 
613 	dpaa2_flow_fs_entry_log("Start add", flow);
614 
615 	ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW,
616 			priv->token, flow->tc_id,
617 			flow->tc_index, &flow->fs_rule,
618 			&flow->fs_action_cfg);
619 	if (ret < 0) {
620 		DPAA2_PMD_ERR("Add rule(%d) to FS table(%d) failed",
621 			flow->tc_index, flow->tc_id);
622 		return ret;
623 	}
624 
625 	return 0;
626 }
627 
628 static int
629 dpaa2_flow_rule_insert_hole(struct dpaa2_dev_flow *flow,
630 	int offset, int size,
631 	enum dpaa2_flow_dist_type dist_type)
632 {
633 	int end;
634 
635 	if (dist_type & DPAA2_FLOW_QOS_TYPE) {
636 		end = flow->qos_rule_size;
637 		if (end > offset) {
638 			memmove(flow->qos_key_addr + offset + size,
639 					flow->qos_key_addr + offset,
640 					end - offset);
641 			memset(flow->qos_key_addr + offset,
642 					0, size);
643 
644 			memmove(flow->qos_mask_addr + offset + size,
645 					flow->qos_mask_addr + offset,
646 					end - offset);
647 			memset(flow->qos_mask_addr + offset,
648 					0, size);
649 		}
650 		flow->qos_rule_size += size;
651 	}
652 
653 	if (dist_type & DPAA2_FLOW_FS_TYPE) {
654 		end = flow->fs_rule_size;
655 		if (end > offset) {
656 			memmove(flow->fs_key_addr + offset + size,
657 					flow->fs_key_addr + offset,
658 					end - offset);
659 			memset(flow->fs_key_addr + offset,
660 					0, size);
661 
662 			memmove(flow->fs_mask_addr + offset + size,
663 					flow->fs_mask_addr + offset,
664 					end - offset);
665 			memset(flow->fs_mask_addr + offset,
666 					0, size);
667 		}
668 		flow->fs_rule_size += size;
669 	}
670 
671 	return 0;
672 }
673 
674 static int
675 dpaa2_flow_rule_add_all(struct dpaa2_dev_priv *priv,
676 	enum dpaa2_flow_dist_type dist_type,
677 	uint16_t entry_size, uint8_t tc_id)
678 {
679 	struct dpaa2_dev_flow *curr = LIST_FIRST(&priv->flows);
680 	int ret;
681 
682 	while (curr) {
683 		if (dist_type & DPAA2_FLOW_QOS_TYPE) {
684 			if (priv->num_rx_tc > 1 ||
685 				curr->action_type ==
686 				RTE_FLOW_ACTION_TYPE_RSS) {
687 				curr->qos_rule.key_size = entry_size;
688 				ret = dpaa2_flow_add_qos_rule(priv, curr);
689 				if (ret)
690 					return ret;
691 			}
692 		}
693 		if (dist_type & DPAA2_FLOW_FS_TYPE &&
694 			curr->tc_id == tc_id) {
695 			curr->fs_rule.key_size = entry_size;
696 			ret = dpaa2_flow_add_fs_rule(priv, curr);
697 			if (ret)
698 				return ret;
699 		}
700 		curr = LIST_NEXT(curr, next);
701 	}
702 
703 	return 0;
704 }
705 
706 static int
707 dpaa2_flow_qos_rule_insert_hole(struct dpaa2_dev_priv *priv,
708 	int offset, int size)
709 {
710 	struct dpaa2_dev_flow *curr;
711 	int ret;
712 
713 	curr = priv->curr;
714 	if (!curr) {
715 		DPAA2_PMD_ERR("Current qos flow insert hole failed.");
716 		return -EINVAL;
717 	} else {
718 		ret = dpaa2_flow_rule_insert_hole(curr, offset, size,
719 				DPAA2_FLOW_QOS_TYPE);
720 		if (ret)
721 			return ret;
722 	}
723 
724 	curr = LIST_FIRST(&priv->flows);
725 	while (curr) {
726 		ret = dpaa2_flow_rule_insert_hole(curr, offset, size,
727 				DPAA2_FLOW_QOS_TYPE);
728 		if (ret)
729 			return ret;
730 		curr = LIST_NEXT(curr, next);
731 	}
732 
733 	return 0;
734 }
735 
736 static int
737 dpaa2_flow_fs_rule_insert_hole(struct dpaa2_dev_priv *priv,
738 	int offset, int size, int tc_id)
739 {
740 	struct dpaa2_dev_flow *curr;
741 	int ret;
742 
743 	curr = priv->curr;
744 	if (!curr || curr->tc_id != tc_id) {
745 		DPAA2_PMD_ERR("Current flow insert hole failed.");
746 		return -EINVAL;
747 	} else {
748 		ret = dpaa2_flow_rule_insert_hole(curr, offset, size,
749 				DPAA2_FLOW_FS_TYPE);
750 		if (ret)
751 			return ret;
752 	}
753 
754 	curr = LIST_FIRST(&priv->flows);
755 
756 	while (curr) {
757 		if (curr->tc_id != tc_id) {
758 			curr = LIST_NEXT(curr, next);
759 			continue;
760 		}
761 		ret = dpaa2_flow_rule_insert_hole(curr, offset, size,
762 				DPAA2_FLOW_FS_TYPE);
763 		if (ret)
764 			return ret;
765 		curr = LIST_NEXT(curr, next);
766 	}
767 
768 	return 0;
769 }
770 
771 static int
772 dpaa2_flow_faf_advance(struct dpaa2_dev_priv *priv,
773 	int faf_byte, enum dpaa2_flow_dist_type dist_type, int tc_id,
774 	int *insert_offset)
775 {
776 	int offset, ret;
777 	struct dpaa2_key_profile *key_profile;
778 	int num, pos;
779 
780 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
781 		key_profile = &priv->extract.qos_key_extract.key_profile;
782 	else
783 		key_profile = &priv->extract.tc_key_extract[tc_id].key_profile;
784 
785 	num = key_profile->num;
786 
787 	if (num >= DPKG_MAX_NUM_OF_EXTRACTS) {
788 		DPAA2_PMD_ERR("Number of extracts overflows");
789 		return -EINVAL;
790 	}
791 
792 	if (key_profile->ip_addr_type != IP_NONE_ADDR_EXTRACT) {
793 		offset = key_profile->ip_addr_extract_off;
794 		pos = key_profile->ip_addr_extract_pos;
795 		key_profile->ip_addr_extract_pos++;
796 		key_profile->ip_addr_extract_off++;
797 		if (dist_type == DPAA2_FLOW_QOS_TYPE) {
798 			ret = dpaa2_flow_qos_rule_insert_hole(priv,
799 					offset, 1);
800 		} else {
801 			ret = dpaa2_flow_fs_rule_insert_hole(priv,
802 				offset, 1, tc_id);
803 		}
804 		if (ret)
805 			return ret;
806 	} else {
807 		pos = num;
808 	}
809 
810 	if (pos > 0) {
811 		key_profile->key_offset[pos] =
812 			key_profile->key_offset[pos - 1] +
813 			key_profile->key_size[pos - 1];
814 	} else {
815 		key_profile->key_offset[pos] = 0;
816 	}
817 
818 	key_profile->key_size[pos] = 1;
819 	key_profile->prot_field[pos].type = DPAA2_FAF_KEY;
820 	key_profile->prot_field[pos].key_field = faf_byte;
821 	key_profile->num++;
822 
823 	if (insert_offset)
824 		*insert_offset = key_profile->key_offset[pos];
825 
826 	key_profile->key_max_size++;
827 
828 	return pos;
829 }
830 
831 static int
832 dpaa2_flow_pr_advance(struct dpaa2_dev_priv *priv,
833 	uint32_t pr_offset, uint32_t pr_size,
834 	enum dpaa2_flow_dist_type dist_type, int tc_id,
835 	int *insert_offset)
836 {
837 	int offset, ret;
838 	struct dpaa2_key_profile *key_profile;
839 	int num, pos;
840 
841 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
842 		key_profile = &priv->extract.qos_key_extract.key_profile;
843 	else
844 		key_profile = &priv->extract.tc_key_extract[tc_id].key_profile;
845 
846 	num = key_profile->num;
847 
848 	if (num >= DPKG_MAX_NUM_OF_EXTRACTS) {
849 		DPAA2_PMD_ERR("Number of extracts overflows");
850 		return -EINVAL;
851 	}
852 
853 	if (key_profile->ip_addr_type != IP_NONE_ADDR_EXTRACT) {
854 		offset = key_profile->ip_addr_extract_off;
855 		pos = key_profile->ip_addr_extract_pos;
856 		key_profile->ip_addr_extract_pos++;
857 		key_profile->ip_addr_extract_off += pr_size;
858 		if (dist_type == DPAA2_FLOW_QOS_TYPE) {
859 			ret = dpaa2_flow_qos_rule_insert_hole(priv,
860 					offset, pr_size);
861 		} else {
862 			ret = dpaa2_flow_fs_rule_insert_hole(priv,
863 				offset, pr_size, tc_id);
864 		}
865 		if (ret)
866 			return ret;
867 	} else {
868 		pos = num;
869 	}
870 
871 	if (pos > 0) {
872 		key_profile->key_offset[pos] =
873 			key_profile->key_offset[pos - 1] +
874 			key_profile->key_size[pos - 1];
875 	} else {
876 		key_profile->key_offset[pos] = 0;
877 	}
878 
879 	key_profile->key_size[pos] = pr_size;
880 	key_profile->prot_field[pos].type = DPAA2_PR_KEY;
881 	key_profile->prot_field[pos].key_field =
882 		(pr_offset << 16) | pr_size;
883 	key_profile->num++;
884 
885 	if (insert_offset)
886 		*insert_offset = key_profile->key_offset[pos];
887 
888 	key_profile->key_max_size += pr_size;
889 
890 	return pos;
891 }
892 
893 /* Move IPv4/IPv6 addresses to fill new extract previous IP address.
894  * Current MC/WRIOP only support generic IP extract but IP address
895  * is not fixed, so we have to put them at end of extracts, otherwise,
896  * the extracts position following them can't be identified.
897  */
898 static int
899 dpaa2_flow_key_profile_advance(enum net_prot prot,
900 	uint32_t field, uint8_t field_size,
901 	struct dpaa2_dev_priv *priv,
902 	enum dpaa2_flow_dist_type dist_type, int tc_id,
903 	int *insert_offset)
904 {
905 	int offset, ret;
906 	struct dpaa2_key_profile *key_profile;
907 	int num, pos;
908 
909 	if (dpaa2_flow_ip_address_extract(prot, field)) {
910 		DPAA2_PMD_ERR("%s only for none IP address extract",
911 			__func__);
912 		return -EINVAL;
913 	}
914 
915 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
916 		key_profile = &priv->extract.qos_key_extract.key_profile;
917 	else
918 		key_profile = &priv->extract.tc_key_extract[tc_id].key_profile;
919 
920 	num = key_profile->num;
921 
922 	if (num >= DPKG_MAX_NUM_OF_EXTRACTS) {
923 		DPAA2_PMD_ERR("Number of extracts overflows");
924 		return -EINVAL;
925 	}
926 
927 	if (key_profile->ip_addr_type != IP_NONE_ADDR_EXTRACT) {
928 		offset = key_profile->ip_addr_extract_off;
929 		pos = key_profile->ip_addr_extract_pos;
930 		key_profile->ip_addr_extract_pos++;
931 		key_profile->ip_addr_extract_off += field_size;
932 		if (dist_type == DPAA2_FLOW_QOS_TYPE) {
933 			ret = dpaa2_flow_qos_rule_insert_hole(priv,
934 					offset, field_size);
935 		} else {
936 			ret = dpaa2_flow_fs_rule_insert_hole(priv,
937 				offset, field_size, tc_id);
938 		}
939 		if (ret)
940 			return ret;
941 	} else {
942 		pos = num;
943 	}
944 
945 	if (pos > 0) {
946 		key_profile->key_offset[pos] =
947 			key_profile->key_offset[pos - 1] +
948 			key_profile->key_size[pos - 1];
949 	} else {
950 		key_profile->key_offset[pos] = 0;
951 	}
952 
953 	key_profile->key_size[pos] = field_size;
954 	key_profile->prot_field[pos].type = DPAA2_NET_PROT_KEY;
955 	key_profile->prot_field[pos].prot = prot;
956 	key_profile->prot_field[pos].key_field = field;
957 	key_profile->num++;
958 
959 	if (insert_offset)
960 		*insert_offset = key_profile->key_offset[pos];
961 
962 	if (dpaa2_flow_l4_src_port_extract(prot, field)) {
963 		key_profile->l4_src_port_present = 1;
964 		key_profile->l4_src_port_pos = pos;
965 		key_profile->l4_src_port_offset =
966 			key_profile->key_offset[pos];
967 	} else if (dpaa2_flow_l4_dst_port_extract(prot, field)) {
968 		key_profile->l4_dst_port_present = 1;
969 		key_profile->l4_dst_port_pos = pos;
970 		key_profile->l4_dst_port_offset =
971 			key_profile->key_offset[pos];
972 	}
973 	key_profile->key_max_size += field_size;
974 
975 	return pos;
976 }
977 
978 static int
979 dpaa2_flow_faf_add_hdr(int faf_byte,
980 	struct dpaa2_dev_priv *priv,
981 	enum dpaa2_flow_dist_type dist_type, int tc_id,
982 	int *insert_offset)
983 {
984 	int pos, i, offset;
985 	struct dpaa2_key_extract *key_extract;
986 	struct dpkg_profile_cfg *dpkg;
987 	struct dpkg_extract *extracts;
988 
989 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
990 		key_extract = &priv->extract.qos_key_extract;
991 	else
992 		key_extract = &priv->extract.tc_key_extract[tc_id];
993 
994 	dpkg = &key_extract->dpkg;
995 	extracts = dpkg->extracts;
996 
997 	if (dpkg->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
998 		DPAA2_PMD_ERR("Number of extracts overflows");
999 		return -EINVAL;
1000 	}
1001 
1002 	pos = dpaa2_flow_faf_advance(priv,
1003 			faf_byte, dist_type, tc_id,
1004 			insert_offset);
1005 	if (pos < 0)
1006 		return pos;
1007 
1008 	if (pos != dpkg->num_extracts) {
1009 		/* Not the last pos, must have IP address extract.*/
1010 		for (i = dpkg->num_extracts - 1; i >= pos; i--) {
1011 			memcpy(&extracts[i + 1],
1012 				&extracts[i], sizeof(struct dpkg_extract));
1013 		}
1014 	}
1015 
1016 	offset = DPAA2_FAFE_PSR_OFFSET + faf_byte;
1017 
1018 	extracts[pos].type = DPKG_EXTRACT_FROM_PARSE;
1019 	extracts[pos].extract.from_parse.offset = offset;
1020 	extracts[pos].extract.from_parse.size = 1;
1021 
1022 	dpkg->num_extracts++;
1023 
1024 	return 0;
1025 }
1026 
1027 static int
1028 dpaa2_flow_pr_add_hdr(uint32_t pr_offset,
1029 	uint32_t pr_size, struct dpaa2_dev_priv *priv,
1030 	enum dpaa2_flow_dist_type dist_type, int tc_id,
1031 	int *insert_offset)
1032 {
1033 	int pos, i;
1034 	struct dpaa2_key_extract *key_extract;
1035 	struct dpkg_profile_cfg *dpkg;
1036 	struct dpkg_extract *extracts;
1037 
1038 	if ((pr_offset + pr_size) > DPAA2_FAPR_SIZE) {
1039 		DPAA2_PMD_ERR("PR extracts(%d:%d) overflow",
1040 			pr_offset, pr_size);
1041 		return -EINVAL;
1042 	}
1043 
1044 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
1045 		key_extract = &priv->extract.qos_key_extract;
1046 	else
1047 		key_extract = &priv->extract.tc_key_extract[tc_id];
1048 
1049 	dpkg = &key_extract->dpkg;
1050 	extracts = dpkg->extracts;
1051 
1052 	if (dpkg->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
1053 		DPAA2_PMD_ERR("Number of extracts overflows");
1054 		return -EINVAL;
1055 	}
1056 
1057 	pos = dpaa2_flow_pr_advance(priv,
1058 			pr_offset, pr_size, dist_type, tc_id,
1059 			insert_offset);
1060 	if (pos < 0)
1061 		return pos;
1062 
1063 	if (pos != dpkg->num_extracts) {
1064 		/* Not the last pos, must have IP address extract.*/
1065 		for (i = dpkg->num_extracts - 1; i >= pos; i--) {
1066 			memcpy(&extracts[i + 1],
1067 				&extracts[i], sizeof(struct dpkg_extract));
1068 		}
1069 	}
1070 
1071 	extracts[pos].type = DPKG_EXTRACT_FROM_PARSE;
1072 	extracts[pos].extract.from_parse.offset = pr_offset;
1073 	extracts[pos].extract.from_parse.size = pr_size;
1074 
1075 	dpkg->num_extracts++;
1076 
1077 	return 0;
1078 }
1079 
1080 static int
1081 dpaa2_flow_extract_add_hdr(enum net_prot prot,
1082 	uint32_t field, uint8_t field_size,
1083 	struct dpaa2_dev_priv *priv,
1084 	enum dpaa2_flow_dist_type dist_type, int tc_id,
1085 	int *insert_offset)
1086 {
1087 	int pos, i;
1088 	struct dpaa2_key_extract *key_extract;
1089 	struct dpkg_profile_cfg *dpkg;
1090 	struct dpkg_extract *extracts;
1091 
1092 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
1093 		key_extract = &priv->extract.qos_key_extract;
1094 	else
1095 		key_extract = &priv->extract.tc_key_extract[tc_id];
1096 
1097 	dpkg = &key_extract->dpkg;
1098 	extracts = dpkg->extracts;
1099 
1100 	if (dpaa2_flow_ip_address_extract(prot, field)) {
1101 		DPAA2_PMD_ERR("%s only for none IP address extract",
1102 			__func__);
1103 		return -EINVAL;
1104 	}
1105 
1106 	if (dpkg->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
1107 		DPAA2_PMD_ERR("Number of extracts overflows");
1108 		return -EINVAL;
1109 	}
1110 
1111 	pos = dpaa2_flow_key_profile_advance(prot,
1112 			field, field_size, priv,
1113 			dist_type, tc_id,
1114 			insert_offset);
1115 	if (pos < 0)
1116 		return pos;
1117 
1118 	if (pos != dpkg->num_extracts) {
1119 		/* Not the last pos, must have IP address extract.*/
1120 		for (i = dpkg->num_extracts - 1; i >= pos; i--) {
1121 			memcpy(&extracts[i + 1],
1122 				&extracts[i], sizeof(struct dpkg_extract));
1123 		}
1124 	}
1125 
1126 	extracts[pos].type = DPKG_EXTRACT_FROM_HDR;
1127 	extracts[pos].extract.from_hdr.prot = prot;
1128 	extracts[pos].extract.from_hdr.type = DPKG_FULL_FIELD;
1129 	extracts[pos].extract.from_hdr.field = field;
1130 
1131 	dpkg->num_extracts++;
1132 
1133 	return 0;
1134 }
1135 
1136 static int
1137 dpaa2_flow_extract_new_raw(struct dpaa2_dev_priv *priv,
1138 	int offset, int size,
1139 	enum dpaa2_flow_dist_type dist_type, int tc_id)
1140 {
1141 	struct dpaa2_key_extract *key_extract;
1142 	struct dpkg_profile_cfg *dpkg;
1143 	struct dpaa2_key_profile *key_profile;
1144 	int last_extract_size, index, pos, item_size;
1145 	uint8_t num_extracts;
1146 	uint32_t field;
1147 
1148 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
1149 		key_extract = &priv->extract.qos_key_extract;
1150 	else
1151 		key_extract = &priv->extract.tc_key_extract[tc_id];
1152 
1153 	dpkg = &key_extract->dpkg;
1154 	key_profile = &key_extract->key_profile;
1155 
1156 	key_profile->raw_region.raw_start = 0;
1157 	key_profile->raw_region.raw_size = 0;
1158 
1159 	last_extract_size = (size % DPAA2_FLOW_MAX_KEY_SIZE);
1160 	num_extracts = (size / DPAA2_FLOW_MAX_KEY_SIZE);
1161 	if (last_extract_size)
1162 		num_extracts++;
1163 	else
1164 		last_extract_size = DPAA2_FLOW_MAX_KEY_SIZE;
1165 
1166 	for (index = 0; index < num_extracts; index++) {
1167 		if (index == num_extracts - 1)
1168 			item_size = last_extract_size;
1169 		else
1170 			item_size = DPAA2_FLOW_MAX_KEY_SIZE;
1171 		field = offset << DPAA2_FLOW_RAW_OFFSET_FIELD_SHIFT;
1172 		field |= item_size;
1173 
1174 		pos = dpaa2_flow_key_profile_advance(NET_PROT_PAYLOAD,
1175 				field, item_size, priv, dist_type,
1176 				tc_id, NULL);
1177 		if (pos < 0)
1178 			return pos;
1179 
1180 		dpkg->extracts[pos].type = DPKG_EXTRACT_FROM_DATA;
1181 		dpkg->extracts[pos].extract.from_data.size = item_size;
1182 		dpkg->extracts[pos].extract.from_data.offset = offset;
1183 
1184 		if (index == 0) {
1185 			key_profile->raw_extract_pos = pos;
1186 			key_profile->raw_extract_off =
1187 				key_profile->key_offset[pos];
1188 			key_profile->raw_region.raw_start = offset;
1189 		}
1190 		key_profile->raw_extract_num++;
1191 		key_profile->raw_region.raw_size +=
1192 			key_profile->key_size[pos];
1193 
1194 		offset += item_size;
1195 		dpkg->num_extracts++;
1196 	}
1197 
1198 	return 0;
1199 }
1200 
1201 static int
1202 dpaa2_flow_extract_add_raw(struct dpaa2_dev_priv *priv,
1203 	int offset, int size, enum dpaa2_flow_dist_type dist_type,
1204 	int tc_id, int *recfg)
1205 {
1206 	struct dpaa2_key_profile *key_profile;
1207 	struct dpaa2_raw_region *raw_region;
1208 	int end = offset + size, ret = 0, extract_extended, sz_extend;
1209 	int start_cmp, end_cmp, new_size, index, pos, end_pos;
1210 	int last_extract_size, item_size, num_extracts, bk_num = 0;
1211 	struct dpkg_extract extract_bk[DPKG_MAX_NUM_OF_EXTRACTS];
1212 	uint8_t key_offset_bk[DPKG_MAX_NUM_OF_EXTRACTS];
1213 	uint8_t key_size_bk[DPKG_MAX_NUM_OF_EXTRACTS];
1214 	struct key_prot_field prot_field_bk[DPKG_MAX_NUM_OF_EXTRACTS];
1215 	struct dpaa2_raw_region raw_hole;
1216 	struct dpkg_profile_cfg *dpkg;
1217 	enum net_prot prot;
1218 	uint32_t field;
1219 
1220 	if (dist_type == DPAA2_FLOW_QOS_TYPE) {
1221 		key_profile = &priv->extract.qos_key_extract.key_profile;
1222 		dpkg = &priv->extract.qos_key_extract.dpkg;
1223 	} else {
1224 		key_profile = &priv->extract.tc_key_extract[tc_id].key_profile;
1225 		dpkg = &priv->extract.tc_key_extract[tc_id].dpkg;
1226 	}
1227 
1228 	raw_region = &key_profile->raw_region;
1229 	if (!raw_region->raw_size) {
1230 		/* New RAW region*/
1231 		ret = dpaa2_flow_extract_new_raw(priv, offset, size,
1232 			dist_type, tc_id);
1233 		if (!ret && recfg)
1234 			(*recfg) |= dist_type;
1235 
1236 		return ret;
1237 	}
1238 	start_cmp = raw_region->raw_start;
1239 	end_cmp = raw_region->raw_start + raw_region->raw_size;
1240 
1241 	if (offset >= start_cmp && end <= end_cmp)
1242 		return 0;
1243 
1244 	sz_extend = 0;
1245 	new_size = raw_region->raw_size;
1246 	if (offset < start_cmp) {
1247 		sz_extend += start_cmp - offset;
1248 		new_size += (start_cmp - offset);
1249 	}
1250 	if (end > end_cmp) {
1251 		sz_extend += end - end_cmp;
1252 		new_size += (end - end_cmp);
1253 	}
1254 
1255 	last_extract_size = (new_size % DPAA2_FLOW_MAX_KEY_SIZE);
1256 	num_extracts = (new_size / DPAA2_FLOW_MAX_KEY_SIZE);
1257 	if (last_extract_size)
1258 		num_extracts++;
1259 	else
1260 		last_extract_size = DPAA2_FLOW_MAX_KEY_SIZE;
1261 
1262 	if ((key_profile->num + num_extracts -
1263 		key_profile->raw_extract_num) >=
1264 		DPKG_MAX_NUM_OF_EXTRACTS) {
1265 		DPAA2_PMD_ERR("%s Failed to expand raw extracts",
1266 			__func__);
1267 		return -EINVAL;
1268 	}
1269 
1270 	if (offset < start_cmp) {
1271 		raw_hole.raw_start = key_profile->raw_extract_off;
1272 		raw_hole.raw_size = start_cmp - offset;
1273 		raw_region->raw_start = offset;
1274 		raw_region->raw_size += start_cmp - offset;
1275 
1276 		if (dist_type & DPAA2_FLOW_QOS_TYPE) {
1277 			ret = dpaa2_flow_qos_rule_insert_hole(priv,
1278 					raw_hole.raw_start,
1279 					raw_hole.raw_size);
1280 			if (ret)
1281 				return ret;
1282 		}
1283 		if (dist_type & DPAA2_FLOW_FS_TYPE) {
1284 			ret = dpaa2_flow_fs_rule_insert_hole(priv,
1285 					raw_hole.raw_start,
1286 					raw_hole.raw_size, tc_id);
1287 			if (ret)
1288 				return ret;
1289 		}
1290 	}
1291 
1292 	if (end > end_cmp) {
1293 		raw_hole.raw_start =
1294 			key_profile->raw_extract_off +
1295 			raw_region->raw_size;
1296 		raw_hole.raw_size = end - end_cmp;
1297 		raw_region->raw_size += end - end_cmp;
1298 
1299 		if (dist_type & DPAA2_FLOW_QOS_TYPE) {
1300 			ret = dpaa2_flow_qos_rule_insert_hole(priv,
1301 					raw_hole.raw_start,
1302 					raw_hole.raw_size);
1303 			if (ret)
1304 				return ret;
1305 		}
1306 		if (dist_type & DPAA2_FLOW_FS_TYPE) {
1307 			ret = dpaa2_flow_fs_rule_insert_hole(priv,
1308 					raw_hole.raw_start,
1309 					raw_hole.raw_size, tc_id);
1310 			if (ret)
1311 				return ret;
1312 		}
1313 	}
1314 
1315 	end_pos = key_profile->raw_extract_pos +
1316 		key_profile->raw_extract_num;
1317 	if (key_profile->num > end_pos) {
1318 		bk_num = key_profile->num - end_pos;
1319 		memcpy(extract_bk, &dpkg->extracts[end_pos],
1320 			bk_num * sizeof(struct dpkg_extract));
1321 		memcpy(key_offset_bk, &key_profile->key_offset[end_pos],
1322 			bk_num * sizeof(uint8_t));
1323 		memcpy(key_size_bk, &key_profile->key_size[end_pos],
1324 			bk_num * sizeof(uint8_t));
1325 		memcpy(prot_field_bk, &key_profile->prot_field[end_pos],
1326 			bk_num * sizeof(struct key_prot_field));
1327 
1328 		for (index = 0; index < bk_num; index++) {
1329 			key_offset_bk[index] += sz_extend;
1330 			prot = prot_field_bk[index].prot;
1331 			field = prot_field_bk[index].key_field;
1332 			if (dpaa2_flow_l4_src_port_extract(prot,
1333 				field)) {
1334 				key_profile->l4_src_port_present = 1;
1335 				key_profile->l4_src_port_pos = end_pos + index;
1336 				key_profile->l4_src_port_offset =
1337 					key_offset_bk[index];
1338 			} else if (dpaa2_flow_l4_dst_port_extract(prot,
1339 				field)) {
1340 				key_profile->l4_dst_port_present = 1;
1341 				key_profile->l4_dst_port_pos = end_pos + index;
1342 				key_profile->l4_dst_port_offset =
1343 					key_offset_bk[index];
1344 			}
1345 		}
1346 	}
1347 
1348 	pos = key_profile->raw_extract_pos;
1349 
1350 	for (index = 0; index < num_extracts; index++) {
1351 		if (index == num_extracts - 1)
1352 			item_size = last_extract_size;
1353 		else
1354 			item_size = DPAA2_FLOW_MAX_KEY_SIZE;
1355 		field = offset << DPAA2_FLOW_RAW_OFFSET_FIELD_SHIFT;
1356 		field |= item_size;
1357 
1358 		if (pos > 0) {
1359 			key_profile->key_offset[pos] =
1360 				key_profile->key_offset[pos - 1] +
1361 				key_profile->key_size[pos - 1];
1362 		} else {
1363 			key_profile->key_offset[pos] = 0;
1364 		}
1365 		key_profile->key_size[pos] = item_size;
1366 		key_profile->prot_field[pos].type = DPAA2_NET_PROT_KEY;
1367 		key_profile->prot_field[pos].prot = NET_PROT_PAYLOAD;
1368 		key_profile->prot_field[pos].key_field = field;
1369 
1370 		dpkg->extracts[pos].type = DPKG_EXTRACT_FROM_DATA;
1371 		dpkg->extracts[pos].extract.from_data.size = item_size;
1372 		dpkg->extracts[pos].extract.from_data.offset = offset;
1373 		offset += item_size;
1374 		pos++;
1375 	}
1376 
1377 	if (bk_num) {
1378 		memcpy(&dpkg->extracts[pos], extract_bk,
1379 			bk_num * sizeof(struct dpkg_extract));
1380 		memcpy(&key_profile->key_offset[end_pos],
1381 			key_offset_bk, bk_num * sizeof(uint8_t));
1382 		memcpy(&key_profile->key_size[end_pos],
1383 			key_size_bk, bk_num * sizeof(uint8_t));
1384 		memcpy(&key_profile->prot_field[end_pos],
1385 			prot_field_bk, bk_num * sizeof(struct key_prot_field));
1386 	}
1387 
1388 	extract_extended = num_extracts - key_profile->raw_extract_num;
1389 	if (key_profile->ip_addr_type != IP_NONE_ADDR_EXTRACT) {
1390 		key_profile->ip_addr_extract_pos += extract_extended;
1391 		key_profile->ip_addr_extract_off += sz_extend;
1392 	}
1393 	key_profile->raw_extract_num = num_extracts;
1394 	key_profile->num += extract_extended;
1395 	key_profile->key_max_size += sz_extend;
1396 
1397 	dpkg->num_extracts += extract_extended;
1398 	if (!ret && recfg)
1399 		(*recfg) |= dist_type;
1400 
1401 	return ret;
1402 }
1403 
1404 static inline int
1405 dpaa2_flow_extract_search(struct dpaa2_key_profile *key_profile,
1406 	enum key_prot_type type, enum net_prot prot, uint32_t key_field)
1407 {
1408 	int pos;
1409 	struct key_prot_field *prot_field;
1410 
1411 	if (dpaa2_flow_ip_address_extract(prot, key_field)) {
1412 		DPAA2_PMD_ERR("%s only for none IP address extract",
1413 			__func__);
1414 		return -EINVAL;
1415 	}
1416 
1417 	prot_field = key_profile->prot_field;
1418 	for (pos = 0; pos < key_profile->num; pos++) {
1419 		if (type == DPAA2_NET_PROT_KEY &&
1420 			prot_field[pos].prot == prot &&
1421 			prot_field[pos].key_field == key_field &&
1422 			prot_field[pos].type == type)
1423 			return pos;
1424 		else if (type == DPAA2_FAF_KEY &&
1425 			prot_field[pos].key_field == key_field &&
1426 			prot_field[pos].type == type)
1427 			return pos;
1428 		else if (type == DPAA2_PR_KEY &&
1429 			prot_field[pos].key_field == key_field &&
1430 			prot_field[pos].type == type)
1431 			return pos;
1432 	}
1433 
1434 	if (type == DPAA2_NET_PROT_KEY &&
1435 		dpaa2_flow_l4_src_port_extract(prot, key_field)) {
1436 		if (key_profile->l4_src_port_present)
1437 			return key_profile->l4_src_port_pos;
1438 	} else if (type == DPAA2_NET_PROT_KEY &&
1439 		dpaa2_flow_l4_dst_port_extract(prot, key_field)) {
1440 		if (key_profile->l4_dst_port_present)
1441 			return key_profile->l4_dst_port_pos;
1442 	}
1443 
1444 	return -ENXIO;
1445 }
1446 
1447 static inline int
1448 dpaa2_flow_extract_key_offset(struct dpaa2_key_profile *key_profile,
1449 	enum key_prot_type type, enum net_prot prot, uint32_t key_field)
1450 {
1451 	int i;
1452 
1453 	i = dpaa2_flow_extract_search(key_profile, type, prot, key_field);
1454 	if (i >= 0)
1455 		return key_profile->key_offset[i];
1456 	else
1457 		return i;
1458 }
1459 
1460 static int
1461 dpaa2_flow_faf_add_rule(struct dpaa2_dev_priv *priv,
1462 	struct dpaa2_dev_flow *flow,
1463 	enum dpaa2_rx_faf_offset faf_bit_off,
1464 	int group,
1465 	enum dpaa2_flow_dist_type dist_type)
1466 {
1467 	int offset;
1468 	uint8_t *key_addr;
1469 	uint8_t *mask_addr;
1470 	struct dpaa2_key_extract *key_extract;
1471 	struct dpaa2_key_profile *key_profile;
1472 	uint8_t faf_byte = faf_bit_off / 8;
1473 	uint8_t faf_bit_in_byte = faf_bit_off % 8;
1474 
1475 	faf_bit_in_byte = 7 - faf_bit_in_byte;
1476 
1477 	if (dist_type & DPAA2_FLOW_QOS_TYPE) {
1478 		key_extract = &priv->extract.qos_key_extract;
1479 		key_profile = &key_extract->key_profile;
1480 
1481 		offset = dpaa2_flow_extract_key_offset(key_profile,
1482 				DPAA2_FAF_KEY, NET_PROT_NONE, faf_byte);
1483 		if (offset < 0) {
1484 			DPAA2_PMD_ERR("%s QoS key extract failed", __func__);
1485 			return -EINVAL;
1486 		}
1487 		key_addr = flow->qos_key_addr + offset;
1488 		mask_addr = flow->qos_mask_addr + offset;
1489 
1490 		if (!(*key_addr) &&
1491 			key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT)
1492 			flow->qos_rule_size++;
1493 
1494 		*key_addr |=  (1 << faf_bit_in_byte);
1495 		*mask_addr |=  (1 << faf_bit_in_byte);
1496 	}
1497 
1498 	if (dist_type & DPAA2_FLOW_FS_TYPE) {
1499 		key_extract = &priv->extract.tc_key_extract[group];
1500 		key_profile = &key_extract->key_profile;
1501 
1502 		offset = dpaa2_flow_extract_key_offset(key_profile,
1503 				DPAA2_FAF_KEY, NET_PROT_NONE, faf_byte);
1504 		if (offset < 0) {
1505 			DPAA2_PMD_ERR("%s TC[%d] key extract failed",
1506 				__func__, group);
1507 			return -EINVAL;
1508 		}
1509 		key_addr = flow->fs_key_addr + offset;
1510 		mask_addr = flow->fs_mask_addr + offset;
1511 
1512 		if (!(*key_addr) &&
1513 			key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT)
1514 			flow->fs_rule_size++;
1515 
1516 		*key_addr |=  (1 << faf_bit_in_byte);
1517 		*mask_addr |=  (1 << faf_bit_in_byte);
1518 	}
1519 
1520 	return 0;
1521 }
1522 
1523 static inline int
1524 dpaa2_flow_pr_rule_data_set(struct dpaa2_dev_flow *flow,
1525 	struct dpaa2_key_profile *key_profile,
1526 	uint32_t pr_offset, uint32_t pr_size,
1527 	const void *key, const void *mask,
1528 	enum dpaa2_flow_dist_type dist_type)
1529 {
1530 	int offset;
1531 	uint32_t pr_field = pr_offset << 16 | pr_size;
1532 
1533 	offset = dpaa2_flow_extract_key_offset(key_profile,
1534 			DPAA2_PR_KEY, NET_PROT_NONE, pr_field);
1535 	if (offset < 0) {
1536 		DPAA2_PMD_ERR("PR off(%d)/size(%d) does not exist!",
1537 			pr_offset, pr_size);
1538 		return -EINVAL;
1539 	}
1540 
1541 	if (dist_type & DPAA2_FLOW_QOS_TYPE) {
1542 		memcpy((flow->qos_key_addr + offset), key, pr_size);
1543 		memcpy((flow->qos_mask_addr + offset), mask, pr_size);
1544 		if (key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT)
1545 			flow->qos_rule_size = offset + pr_size;
1546 	}
1547 
1548 	if (dist_type & DPAA2_FLOW_FS_TYPE) {
1549 		memcpy((flow->fs_key_addr + offset), key, pr_size);
1550 		memcpy((flow->fs_mask_addr + offset), mask, pr_size);
1551 		if (key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT)
1552 			flow->fs_rule_size = offset + pr_size;
1553 	}
1554 
1555 	return 0;
1556 }
1557 
1558 static inline int
1559 dpaa2_flow_hdr_rule_data_set(struct dpaa2_dev_flow *flow,
1560 	struct dpaa2_key_profile *key_profile,
1561 	enum net_prot prot, uint32_t field, int size,
1562 	const void *key, const void *mask,
1563 	enum dpaa2_flow_dist_type dist_type)
1564 {
1565 	int offset;
1566 
1567 	if (dpaa2_flow_ip_address_extract(prot, field)) {
1568 		DPAA2_PMD_ERR("%s only for none IP address extract",
1569 			__func__);
1570 		return -EINVAL;
1571 	}
1572 
1573 	offset = dpaa2_flow_extract_key_offset(key_profile,
1574 			DPAA2_NET_PROT_KEY, prot, field);
1575 	if (offset < 0) {
1576 		DPAA2_PMD_ERR("P(%d)/F(%d) does not exist!",
1577 			prot, field);
1578 		return -EINVAL;
1579 	}
1580 
1581 	if (dist_type & DPAA2_FLOW_QOS_TYPE) {
1582 		memcpy((flow->qos_key_addr + offset), key, size);
1583 		memcpy((flow->qos_mask_addr + offset), mask, size);
1584 		if (key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT)
1585 			flow->qos_rule_size = offset + size;
1586 	}
1587 
1588 	if (dist_type & DPAA2_FLOW_FS_TYPE) {
1589 		memcpy((flow->fs_key_addr + offset), key, size);
1590 		memcpy((flow->fs_mask_addr + offset), mask, size);
1591 		if (key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT)
1592 			flow->fs_rule_size = offset + size;
1593 	}
1594 
1595 	return 0;
1596 }
1597 
1598 static inline int
1599 dpaa2_flow_raw_rule_data_set(struct dpaa2_dev_flow *flow,
1600 	struct dpaa2_key_profile *key_profile,
1601 	uint32_t extract_offset, int size,
1602 	const void *key, const void *mask,
1603 	enum dpaa2_flow_dist_type dist_type)
1604 {
1605 	int extract_size = size > DPAA2_FLOW_MAX_KEY_SIZE ?
1606 		DPAA2_FLOW_MAX_KEY_SIZE : size;
1607 	int offset, field;
1608 
1609 	field = extract_offset << DPAA2_FLOW_RAW_OFFSET_FIELD_SHIFT;
1610 	field |= extract_size;
1611 	offset = dpaa2_flow_extract_key_offset(key_profile,
1612 			DPAA2_NET_PROT_KEY, NET_PROT_PAYLOAD, field);
1613 	if (offset < 0) {
1614 		DPAA2_PMD_ERR("offset(%d)/size(%d) raw extract failed",
1615 			extract_offset, size);
1616 		return -EINVAL;
1617 	}
1618 
1619 	if (dist_type & DPAA2_FLOW_QOS_TYPE) {
1620 		memcpy((flow->qos_key_addr + offset), key, size);
1621 		memcpy((flow->qos_mask_addr + offset), mask, size);
1622 		flow->qos_rule_size = offset + size;
1623 	}
1624 
1625 	if (dist_type & DPAA2_FLOW_FS_TYPE) {
1626 		memcpy((flow->fs_key_addr + offset), key, size);
1627 		memcpy((flow->fs_mask_addr + offset), mask, size);
1628 		flow->fs_rule_size = offset + size;
1629 	}
1630 
1631 	return 0;
1632 }
1633 
1634 static int
1635 dpaa2_flow_extract_support(const uint8_t *mask_src,
1636 	enum rte_flow_item_type type)
1637 {
1638 	char mask[64];
1639 	int i, size = 0;
1640 	const char *mask_support = 0;
1641 
1642 	switch (type) {
1643 	case RTE_FLOW_ITEM_TYPE_ETH:
1644 		mask_support = (const char *)&dpaa2_flow_item_eth_mask;
1645 		size = sizeof(struct rte_flow_item_eth);
1646 		break;
1647 	case RTE_FLOW_ITEM_TYPE_VLAN:
1648 		mask_support = (const char *)&dpaa2_flow_item_vlan_mask;
1649 		size = sizeof(struct rte_flow_item_vlan);
1650 		break;
1651 	case RTE_FLOW_ITEM_TYPE_IPV4:
1652 		mask_support = (const char *)&dpaa2_flow_item_ipv4_mask;
1653 		size = sizeof(struct rte_flow_item_ipv4);
1654 		break;
1655 	case RTE_FLOW_ITEM_TYPE_IPV6:
1656 		mask_support = (const char *)&dpaa2_flow_item_ipv6_mask;
1657 		size = sizeof(struct rte_flow_item_ipv6);
1658 		break;
1659 	case RTE_FLOW_ITEM_TYPE_ICMP:
1660 		mask_support = (const char *)&dpaa2_flow_item_icmp_mask;
1661 		size = sizeof(struct rte_flow_item_icmp);
1662 		break;
1663 	case RTE_FLOW_ITEM_TYPE_UDP:
1664 		mask_support = (const char *)&dpaa2_flow_item_udp_mask;
1665 		size = sizeof(struct rte_flow_item_udp);
1666 		break;
1667 	case RTE_FLOW_ITEM_TYPE_TCP:
1668 		mask_support = (const char *)&dpaa2_flow_item_tcp_mask;
1669 		size = sizeof(struct rte_flow_item_tcp);
1670 		break;
1671 	case RTE_FLOW_ITEM_TYPE_ESP:
1672 		mask_support = (const char *)&dpaa2_flow_item_esp_mask;
1673 		size = sizeof(struct rte_flow_item_esp);
1674 		break;
1675 	case RTE_FLOW_ITEM_TYPE_AH:
1676 		mask_support = (const char *)&dpaa2_flow_item_ah_mask;
1677 		size = sizeof(struct rte_flow_item_ah);
1678 		break;
1679 	case RTE_FLOW_ITEM_TYPE_SCTP:
1680 		mask_support = (const char *)&dpaa2_flow_item_sctp_mask;
1681 		size = sizeof(struct rte_flow_item_sctp);
1682 		break;
1683 	case RTE_FLOW_ITEM_TYPE_GRE:
1684 		mask_support = (const char *)&dpaa2_flow_item_gre_mask;
1685 		size = sizeof(struct rte_flow_item_gre);
1686 		break;
1687 	case RTE_FLOW_ITEM_TYPE_VXLAN:
1688 		mask_support = (const char *)&dpaa2_flow_item_vxlan_mask;
1689 		size = sizeof(struct rte_flow_item_vxlan);
1690 		break;
1691 	case RTE_FLOW_ITEM_TYPE_ECPRI:
1692 		mask_support = (const char *)&dpaa2_flow_item_ecpri_mask;
1693 		size = sizeof(struct rte_flow_item_ecpri);
1694 		break;
1695 	case RTE_FLOW_ITEM_TYPE_GTP:
1696 		mask_support = (const char *)&dpaa2_flow_item_gtp_mask;
1697 		size = sizeof(struct rte_flow_item_gtp);
1698 		break;
1699 	default:
1700 		return -EINVAL;
1701 	}
1702 
1703 	memcpy(mask, mask_support, size);
1704 
1705 	for (i = 0; i < size; i++)
1706 		mask[i] = (mask[i] | mask_src[i]);
1707 
1708 	if (memcmp(mask, mask_support, size))
1709 		return -ENOTSUP;
1710 
1711 	return 0;
1712 }
1713 
1714 static int
1715 dpaa2_flow_identify_by_faf(struct dpaa2_dev_priv *priv,
1716 	struct dpaa2_dev_flow *flow,
1717 	enum dpaa2_rx_faf_offset faf_off,
1718 	enum dpaa2_flow_dist_type dist_type,
1719 	int group, int *recfg)
1720 {
1721 	int ret, index, local_cfg = 0;
1722 	struct dpaa2_key_extract *extract;
1723 	struct dpaa2_key_profile *key_profile;
1724 	uint8_t faf_byte = faf_off / 8;
1725 
1726 	if (dist_type & DPAA2_FLOW_QOS_TYPE) {
1727 		extract = &priv->extract.qos_key_extract;
1728 		key_profile = &extract->key_profile;
1729 
1730 		index = dpaa2_flow_extract_search(key_profile,
1731 				DPAA2_FAF_KEY, NET_PROT_NONE, faf_byte);
1732 		if (index < 0) {
1733 			ret = dpaa2_flow_faf_add_hdr(faf_byte,
1734 					priv, DPAA2_FLOW_QOS_TYPE, group,
1735 					NULL);
1736 			if (ret) {
1737 				DPAA2_PMD_ERR("QOS faf extract add failed");
1738 
1739 				return -EINVAL;
1740 			}
1741 			local_cfg |= DPAA2_FLOW_QOS_TYPE;
1742 		}
1743 
1744 		ret = dpaa2_flow_faf_add_rule(priv, flow, faf_off, group,
1745 				DPAA2_FLOW_QOS_TYPE);
1746 		if (ret) {
1747 			DPAA2_PMD_ERR("QoS faf rule set failed");
1748 			return -EINVAL;
1749 		}
1750 	}
1751 
1752 	if (dist_type & DPAA2_FLOW_FS_TYPE) {
1753 		extract = &priv->extract.tc_key_extract[group];
1754 		key_profile = &extract->key_profile;
1755 
1756 		index = dpaa2_flow_extract_search(key_profile,
1757 				DPAA2_FAF_KEY, NET_PROT_NONE, faf_byte);
1758 		if (index < 0) {
1759 			ret = dpaa2_flow_faf_add_hdr(faf_byte,
1760 					priv, DPAA2_FLOW_FS_TYPE, group,
1761 					NULL);
1762 			if (ret) {
1763 				DPAA2_PMD_ERR("FS[%d] faf extract add failed",
1764 					group);
1765 
1766 				return -EINVAL;
1767 			}
1768 			local_cfg |= DPAA2_FLOW_FS_TYPE;
1769 		}
1770 
1771 		ret = dpaa2_flow_faf_add_rule(priv, flow, faf_off, group,
1772 				DPAA2_FLOW_FS_TYPE);
1773 		if (ret) {
1774 			DPAA2_PMD_ERR("FS[%d] faf rule set failed",
1775 				group);
1776 			return -EINVAL;
1777 		}
1778 	}
1779 
1780 	if (recfg)
1781 		*recfg |= local_cfg;
1782 
1783 	return 0;
1784 }
1785 
1786 static int
1787 dpaa2_flow_add_pr_extract_rule(struct dpaa2_dev_flow *flow,
1788 	uint32_t pr_offset, uint32_t pr_size,
1789 	const void *key, const void *mask,
1790 	struct dpaa2_dev_priv *priv, int tc_id, int *recfg,
1791 	enum dpaa2_flow_dist_type dist_type)
1792 {
1793 	int index, ret, local_cfg = 0;
1794 	struct dpaa2_key_extract *key_extract;
1795 	struct dpaa2_key_profile *key_profile;
1796 	uint32_t pr_field = pr_offset << 16 | pr_size;
1797 
1798 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
1799 		key_extract = &priv->extract.qos_key_extract;
1800 	else
1801 		key_extract = &priv->extract.tc_key_extract[tc_id];
1802 
1803 	key_profile = &key_extract->key_profile;
1804 
1805 	index = dpaa2_flow_extract_search(key_profile,
1806 			DPAA2_PR_KEY, NET_PROT_NONE, pr_field);
1807 	if (index < 0) {
1808 		ret = dpaa2_flow_pr_add_hdr(pr_offset,
1809 				pr_size, priv,
1810 				dist_type, tc_id, NULL);
1811 		if (ret) {
1812 			DPAA2_PMD_ERR("PR add off(%d)/size(%d) failed",
1813 				pr_offset, pr_size);
1814 
1815 			return ret;
1816 		}
1817 		local_cfg |= dist_type;
1818 	}
1819 
1820 	ret = dpaa2_flow_pr_rule_data_set(flow, key_profile,
1821 			pr_offset, pr_size, key, mask, dist_type);
1822 	if (ret) {
1823 		DPAA2_PMD_ERR("PR off(%d)/size(%d) rule data set failed",
1824 			pr_offset, pr_size);
1825 
1826 		return ret;
1827 	}
1828 
1829 	if (recfg)
1830 		*recfg |= local_cfg;
1831 
1832 	return 0;
1833 }
1834 
1835 static int
1836 dpaa2_flow_add_hdr_extract_rule(struct dpaa2_dev_flow *flow,
1837 	enum net_prot prot, uint32_t field,
1838 	const void *key, const void *mask, int size,
1839 	struct dpaa2_dev_priv *priv, int tc_id, int *recfg,
1840 	enum dpaa2_flow_dist_type dist_type)
1841 {
1842 	int index, ret, local_cfg = 0;
1843 	struct dpaa2_key_extract *key_extract;
1844 	struct dpaa2_key_profile *key_profile;
1845 
1846 	if (dpaa2_flow_ip_address_extract(prot, field))
1847 		return -EINVAL;
1848 
1849 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
1850 		key_extract = &priv->extract.qos_key_extract;
1851 	else
1852 		key_extract = &priv->extract.tc_key_extract[tc_id];
1853 
1854 	key_profile = &key_extract->key_profile;
1855 
1856 	index = dpaa2_flow_extract_search(key_profile,
1857 			DPAA2_NET_PROT_KEY, prot, field);
1858 	if (index < 0) {
1859 		ret = dpaa2_flow_extract_add_hdr(prot,
1860 				field, size, priv,
1861 				dist_type, tc_id, NULL);
1862 		if (ret) {
1863 			DPAA2_PMD_ERR("QoS Extract P(%d)/F(%d) failed",
1864 				prot, field);
1865 
1866 			return ret;
1867 		}
1868 		local_cfg |= dist_type;
1869 	}
1870 
1871 	ret = dpaa2_flow_hdr_rule_data_set(flow, key_profile,
1872 			prot, field, size, key, mask, dist_type);
1873 	if (ret) {
1874 		DPAA2_PMD_ERR("QoS P(%d)/F(%d) rule data set failed",
1875 			prot, field);
1876 
1877 		return ret;
1878 	}
1879 
1880 	if (recfg)
1881 		*recfg |= local_cfg;
1882 
1883 	return 0;
1884 }
1885 
1886 static int
1887 dpaa2_flow_add_ipaddr_extract_rule(struct dpaa2_dev_flow *flow,
1888 	enum net_prot prot, uint32_t field,
1889 	const void *key, const void *mask, int size,
1890 	struct dpaa2_dev_priv *priv, int tc_id, int *recfg,
1891 	enum dpaa2_flow_dist_type dist_type)
1892 {
1893 	int local_cfg = 0, num, ipaddr_extract_len = 0;
1894 	struct dpaa2_key_extract *key_extract;
1895 	struct dpaa2_key_profile *key_profile;
1896 	struct dpkg_profile_cfg *dpkg;
1897 	uint8_t *key_addr, *mask_addr;
1898 	union ip_addr_extract_rule *ip_addr_data;
1899 	union ip_addr_extract_rule *ip_addr_mask;
1900 	enum net_prot orig_prot;
1901 	uint32_t orig_field;
1902 
1903 	if (prot != NET_PROT_IPV4 && prot != NET_PROT_IPV6)
1904 		return -EINVAL;
1905 
1906 	if (prot == NET_PROT_IPV4 && field != NH_FLD_IPV4_SRC_IP &&
1907 		field != NH_FLD_IPV4_DST_IP) {
1908 		return -EINVAL;
1909 	}
1910 
1911 	if (prot == NET_PROT_IPV6 && field != NH_FLD_IPV6_SRC_IP &&
1912 		field != NH_FLD_IPV6_DST_IP) {
1913 		return -EINVAL;
1914 	}
1915 
1916 	orig_prot = prot;
1917 	orig_field = field;
1918 
1919 	if (prot == NET_PROT_IPV4 &&
1920 		field == NH_FLD_IPV4_SRC_IP) {
1921 		prot = NET_PROT_IP;
1922 		field = NH_FLD_IP_SRC;
1923 	} else if (prot == NET_PROT_IPV4 &&
1924 		field == NH_FLD_IPV4_DST_IP) {
1925 		prot = NET_PROT_IP;
1926 		field = NH_FLD_IP_DST;
1927 	} else if (prot == NET_PROT_IPV6 &&
1928 		field == NH_FLD_IPV6_SRC_IP) {
1929 		prot = NET_PROT_IP;
1930 		field = NH_FLD_IP_SRC;
1931 	} else if (prot == NET_PROT_IPV6 &&
1932 		field == NH_FLD_IPV6_DST_IP) {
1933 		prot = NET_PROT_IP;
1934 		field = NH_FLD_IP_DST;
1935 	} else {
1936 		DPAA2_PMD_ERR("Inval P(%d)/F(%d) to extract ip address",
1937 			prot, field);
1938 		return -EINVAL;
1939 	}
1940 
1941 	if (dist_type == DPAA2_FLOW_QOS_TYPE) {
1942 		key_extract = &priv->extract.qos_key_extract;
1943 		key_profile = &key_extract->key_profile;
1944 		dpkg = &key_extract->dpkg;
1945 		num = key_profile->num;
1946 		key_addr = flow->qos_key_addr;
1947 		mask_addr = flow->qos_mask_addr;
1948 	} else {
1949 		key_extract = &priv->extract.tc_key_extract[tc_id];
1950 		key_profile = &key_extract->key_profile;
1951 		dpkg = &key_extract->dpkg;
1952 		num = key_profile->num;
1953 		key_addr = flow->fs_key_addr;
1954 		mask_addr = flow->fs_mask_addr;
1955 	}
1956 
1957 	if (num >= DPKG_MAX_NUM_OF_EXTRACTS) {
1958 		DPAA2_PMD_ERR("Number of extracts overflows");
1959 		return -EINVAL;
1960 	}
1961 
1962 	if (key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT) {
1963 		if (field == NH_FLD_IP_SRC)
1964 			key_profile->ip_addr_type = IP_SRC_EXTRACT;
1965 		else
1966 			key_profile->ip_addr_type = IP_DST_EXTRACT;
1967 		ipaddr_extract_len = size;
1968 
1969 		key_profile->ip_addr_extract_pos = num;
1970 		if (num > 0) {
1971 			key_profile->ip_addr_extract_off =
1972 				key_profile->key_offset[num - 1] +
1973 				key_profile->key_size[num - 1];
1974 		} else {
1975 			key_profile->ip_addr_extract_off = 0;
1976 		}
1977 		key_profile->key_max_size += NH_FLD_IPV6_ADDR_SIZE;
1978 	} else if (key_profile->ip_addr_type == IP_SRC_EXTRACT) {
1979 		if (field == NH_FLD_IP_SRC) {
1980 			ipaddr_extract_len = size;
1981 			goto rule_configure;
1982 		}
1983 		key_profile->ip_addr_type = IP_SRC_DST_EXTRACT;
1984 		ipaddr_extract_len = size * 2;
1985 		key_profile->key_max_size += NH_FLD_IPV6_ADDR_SIZE;
1986 	} else if (key_profile->ip_addr_type == IP_DST_EXTRACT) {
1987 		if (field == NH_FLD_IP_DST) {
1988 			ipaddr_extract_len = size;
1989 			goto rule_configure;
1990 		}
1991 		key_profile->ip_addr_type = IP_DST_SRC_EXTRACT;
1992 		ipaddr_extract_len = size * 2;
1993 		key_profile->key_max_size += NH_FLD_IPV6_ADDR_SIZE;
1994 	}
1995 	key_profile->num++;
1996 	key_profile->prot_field[num].type = DPAA2_NET_PROT_KEY;
1997 
1998 	dpkg->extracts[num].extract.from_hdr.prot = prot;
1999 	dpkg->extracts[num].extract.from_hdr.field = field;
2000 	dpkg->extracts[num].extract.from_hdr.type = DPKG_FULL_FIELD;
2001 	dpkg->num_extracts++;
2002 
2003 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
2004 		local_cfg = DPAA2_FLOW_QOS_TYPE;
2005 	else
2006 		local_cfg = DPAA2_FLOW_FS_TYPE;
2007 
2008 rule_configure:
2009 	key_addr += key_profile->ip_addr_extract_off;
2010 	ip_addr_data = (union ip_addr_extract_rule *)key_addr;
2011 	mask_addr += key_profile->ip_addr_extract_off;
2012 	ip_addr_mask = (union ip_addr_extract_rule *)mask_addr;
2013 
2014 	if (orig_prot == NET_PROT_IPV4 &&
2015 		orig_field == NH_FLD_IPV4_SRC_IP) {
2016 		if (key_profile->ip_addr_type == IP_SRC_EXTRACT ||
2017 			key_profile->ip_addr_type == IP_SRC_DST_EXTRACT) {
2018 			memcpy(&ip_addr_data->ipv4_sd_addr.ipv4_src,
2019 				key, size);
2020 			memcpy(&ip_addr_mask->ipv4_sd_addr.ipv4_src,
2021 				mask, size);
2022 		} else {
2023 			memcpy(&ip_addr_data->ipv4_ds_addr.ipv4_src,
2024 				key, size);
2025 			memcpy(&ip_addr_mask->ipv4_ds_addr.ipv4_src,
2026 				mask, size);
2027 		}
2028 	} else if (orig_prot == NET_PROT_IPV4 &&
2029 		orig_field == NH_FLD_IPV4_DST_IP) {
2030 		if (key_profile->ip_addr_type == IP_DST_EXTRACT ||
2031 			key_profile->ip_addr_type == IP_DST_SRC_EXTRACT) {
2032 			memcpy(&ip_addr_data->ipv4_ds_addr.ipv4_dst,
2033 				key, size);
2034 			memcpy(&ip_addr_mask->ipv4_ds_addr.ipv4_dst,
2035 				mask, size);
2036 		} else {
2037 			memcpy(&ip_addr_data->ipv4_sd_addr.ipv4_dst,
2038 				key, size);
2039 			memcpy(&ip_addr_mask->ipv4_sd_addr.ipv4_dst,
2040 				mask, size);
2041 		}
2042 	} else if (orig_prot == NET_PROT_IPV6 &&
2043 		orig_field == NH_FLD_IPV6_SRC_IP) {
2044 		if (key_profile->ip_addr_type == IP_SRC_EXTRACT ||
2045 			key_profile->ip_addr_type == IP_SRC_DST_EXTRACT) {
2046 			memcpy(ip_addr_data->ipv6_sd_addr.ipv6_src,
2047 				key, size);
2048 			memcpy(ip_addr_mask->ipv6_sd_addr.ipv6_src,
2049 				mask, size);
2050 		} else {
2051 			memcpy(ip_addr_data->ipv6_ds_addr.ipv6_src,
2052 				key, size);
2053 			memcpy(ip_addr_mask->ipv6_ds_addr.ipv6_src,
2054 				mask, size);
2055 		}
2056 	} else if (orig_prot == NET_PROT_IPV6 &&
2057 		orig_field == NH_FLD_IPV6_DST_IP) {
2058 		if (key_profile->ip_addr_type == IP_DST_EXTRACT ||
2059 			key_profile->ip_addr_type == IP_DST_SRC_EXTRACT) {
2060 			memcpy(ip_addr_data->ipv6_ds_addr.ipv6_dst,
2061 				key, size);
2062 			memcpy(ip_addr_mask->ipv6_ds_addr.ipv6_dst,
2063 				mask, size);
2064 		} else {
2065 			memcpy(ip_addr_data->ipv6_sd_addr.ipv6_dst,
2066 				key, size);
2067 			memcpy(ip_addr_mask->ipv6_sd_addr.ipv6_dst,
2068 				mask, size);
2069 		}
2070 	}
2071 
2072 	if (dist_type == DPAA2_FLOW_QOS_TYPE) {
2073 		flow->qos_rule_size =
2074 			key_profile->ip_addr_extract_off + ipaddr_extract_len;
2075 	} else {
2076 		flow->fs_rule_size =
2077 			key_profile->ip_addr_extract_off + ipaddr_extract_len;
2078 	}
2079 
2080 	if (recfg)
2081 		*recfg |= local_cfg;
2082 
2083 	return 0;
2084 }
2085 
2086 static int
2087 dpaa2_configure_flow_tunnel_eth(struct dpaa2_dev_flow *flow,
2088 	struct rte_eth_dev *dev,
2089 	const struct rte_flow_attr *attr,
2090 	const struct rte_flow_item *pattern,
2091 	int *device_configured)
2092 {
2093 	int ret, local_cfg = 0;
2094 	uint32_t group;
2095 	const struct rte_flow_item_eth *spec, *mask;
2096 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2097 	const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0};
2098 
2099 	group = attr->group;
2100 
2101 	/* Parse pattern list to get the matching parameters */
2102 	spec = pattern->spec;
2103 	mask = pattern->mask ?
2104 			pattern->mask : &dpaa2_flow_item_eth_mask;
2105 
2106 	/* Get traffic class index and flow id to be configured */
2107 	flow->tc_id = group;
2108 	flow->tc_index = attr->priority;
2109 
2110 	if (!spec)
2111 		return 0;
2112 
2113 	ret = dpaa2_flow_extract_support((const uint8_t *)mask,
2114 		RTE_FLOW_ITEM_TYPE_ETH);
2115 	if (ret) {
2116 		DPAA2_PMD_WARN("Extract field(s) of ethernet failed");
2117 
2118 		return ret;
2119 	}
2120 
2121 	if (memcmp((const char *)&mask->src,
2122 		zero_cmp, RTE_ETHER_ADDR_LEN)) {
2123 		/*SRC[0:1]*/
2124 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2125 			DPAA2_VXLAN_IN_SADDR0_OFFSET,
2126 			1, &spec->src.addr_bytes[0],
2127 			&mask->src.addr_bytes[0],
2128 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2129 		if (ret)
2130 			return ret;
2131 		/*SRC[1:2]*/
2132 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2133 			DPAA2_VXLAN_IN_SADDR1_OFFSET,
2134 			2, &spec->src.addr_bytes[1],
2135 			&mask->src.addr_bytes[1],
2136 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2137 		if (ret)
2138 			return ret;
2139 		/*SRC[3:1]*/
2140 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2141 			DPAA2_VXLAN_IN_SADDR3_OFFSET,
2142 			1, &spec->src.addr_bytes[3],
2143 			&mask->src.addr_bytes[3],
2144 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2145 		if (ret)
2146 			return ret;
2147 		/*SRC[4:2]*/
2148 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2149 			DPAA2_VXLAN_IN_SADDR4_OFFSET,
2150 			2, &spec->src.addr_bytes[4],
2151 			&mask->src.addr_bytes[4],
2152 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2153 		if (ret)
2154 			return ret;
2155 
2156 		/*SRC[0:1]*/
2157 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2158 			DPAA2_VXLAN_IN_SADDR0_OFFSET,
2159 			1, &spec->src.addr_bytes[0],
2160 			&mask->src.addr_bytes[0],
2161 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2162 		if (ret)
2163 			return ret;
2164 		/*SRC[1:2]*/
2165 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2166 			DPAA2_VXLAN_IN_SADDR1_OFFSET,
2167 			2, &spec->src.addr_bytes[1],
2168 			&mask->src.addr_bytes[1],
2169 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2170 		if (ret)
2171 			return ret;
2172 		/*SRC[3:1]*/
2173 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2174 			DPAA2_VXLAN_IN_SADDR3_OFFSET,
2175 			1, &spec->src.addr_bytes[3],
2176 			&mask->src.addr_bytes[3],
2177 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2178 		if (ret)
2179 			return ret;
2180 		/*SRC[4:2]*/
2181 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2182 			DPAA2_VXLAN_IN_SADDR4_OFFSET,
2183 			2, &spec->src.addr_bytes[4],
2184 			&mask->src.addr_bytes[4],
2185 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2186 		if (ret)
2187 			return ret;
2188 	}
2189 
2190 	if (memcmp((const char *)&mask->dst,
2191 		zero_cmp, RTE_ETHER_ADDR_LEN)) {
2192 		/*DST[0:1]*/
2193 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2194 			DPAA2_VXLAN_IN_DADDR0_OFFSET,
2195 			1, &spec->dst.addr_bytes[0],
2196 			&mask->dst.addr_bytes[0],
2197 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2198 		if (ret)
2199 			return ret;
2200 		/*DST[1:1]*/
2201 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2202 			DPAA2_VXLAN_IN_DADDR1_OFFSET,
2203 			1, &spec->dst.addr_bytes[1],
2204 			&mask->dst.addr_bytes[1],
2205 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2206 		if (ret)
2207 			return ret;
2208 		/*DST[2:3]*/
2209 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2210 			DPAA2_VXLAN_IN_DADDR2_OFFSET,
2211 			3, &spec->dst.addr_bytes[2],
2212 			&mask->dst.addr_bytes[2],
2213 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2214 		if (ret)
2215 			return ret;
2216 		/*DST[5:1]*/
2217 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2218 			DPAA2_VXLAN_IN_DADDR5_OFFSET,
2219 			1, &spec->dst.addr_bytes[5],
2220 			&mask->dst.addr_bytes[5],
2221 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2222 		if (ret)
2223 			return ret;
2224 
2225 		/*DST[0:1]*/
2226 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2227 			DPAA2_VXLAN_IN_DADDR0_OFFSET,
2228 			1, &spec->dst.addr_bytes[0],
2229 			&mask->dst.addr_bytes[0],
2230 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2231 		if (ret)
2232 			return ret;
2233 		/*DST[1:1]*/
2234 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2235 			DPAA2_VXLAN_IN_DADDR1_OFFSET,
2236 			1, &spec->dst.addr_bytes[1],
2237 			&mask->dst.addr_bytes[1],
2238 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2239 		if (ret)
2240 			return ret;
2241 		/*DST[2:3]*/
2242 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2243 			DPAA2_VXLAN_IN_DADDR2_OFFSET,
2244 			3, &spec->dst.addr_bytes[2],
2245 			&mask->dst.addr_bytes[2],
2246 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2247 		if (ret)
2248 			return ret;
2249 		/*DST[5:1]*/
2250 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2251 			DPAA2_VXLAN_IN_DADDR5_OFFSET,
2252 			1, &spec->dst.addr_bytes[5],
2253 			&mask->dst.addr_bytes[5],
2254 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2255 		if (ret)
2256 			return ret;
2257 	}
2258 
2259 	if (memcmp((const char *)&mask->type,
2260 		zero_cmp, sizeof(rte_be16_t))) {
2261 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2262 			DPAA2_VXLAN_IN_TYPE_OFFSET,
2263 			sizeof(rte_be16_t), &spec->type, &mask->type,
2264 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2265 		if (ret)
2266 			return ret;
2267 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2268 			DPAA2_VXLAN_IN_TYPE_OFFSET,
2269 			sizeof(rte_be16_t), &spec->type, &mask->type,
2270 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2271 		if (ret)
2272 			return ret;
2273 	}
2274 
2275 	(*device_configured) |= local_cfg;
2276 
2277 	return 0;
2278 }
2279 
2280 static int
2281 dpaa2_configure_flow_eth(struct dpaa2_dev_flow *flow,
2282 	struct rte_eth_dev *dev,
2283 	const struct rte_flow_attr *attr,
2284 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
2285 	const struct rte_flow_action actions[] __rte_unused,
2286 	struct rte_flow_error *error __rte_unused,
2287 	int *device_configured)
2288 {
2289 	int ret, local_cfg = 0;
2290 	uint32_t group;
2291 	const struct rte_flow_item_eth *spec, *mask;
2292 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2293 	const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0};
2294 	const struct rte_flow_item *pattern =
2295 		&dpaa2_pattern->generic_item;
2296 
2297 	if (dpaa2_pattern->in_tunnel) {
2298 		return dpaa2_configure_flow_tunnel_eth(flow,
2299 				dev, attr, pattern, device_configured);
2300 	}
2301 
2302 	group = attr->group;
2303 
2304 	/* Parse pattern list to get the matching parameters */
2305 	spec = pattern->spec;
2306 	mask = pattern->mask ?
2307 			pattern->mask : &dpaa2_flow_item_eth_mask;
2308 
2309 	/* Get traffic class index and flow id to be configured */
2310 	flow->tc_id = group;
2311 	flow->tc_index = attr->priority;
2312 
2313 	if (!spec) {
2314 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2315 				FAF_ETH_FRAM, DPAA2_FLOW_QOS_TYPE,
2316 				group, &local_cfg);
2317 		if (ret)
2318 			return ret;
2319 
2320 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2321 				FAF_ETH_FRAM, DPAA2_FLOW_FS_TYPE,
2322 				group, &local_cfg);
2323 		if (ret)
2324 			return ret;
2325 
2326 		(*device_configured) |= local_cfg;
2327 		return 0;
2328 	}
2329 
2330 	ret = dpaa2_flow_extract_support((const uint8_t *)mask,
2331 		RTE_FLOW_ITEM_TYPE_ETH);
2332 	if (ret) {
2333 		DPAA2_PMD_WARN("Extract field(s) of ethernet failed");
2334 
2335 		return ret;
2336 	}
2337 
2338 	if (memcmp((const char *)&mask->src,
2339 		zero_cmp, RTE_ETHER_ADDR_LEN)) {
2340 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH,
2341 			NH_FLD_ETH_SA, &spec->src.addr_bytes,
2342 			&mask->src.addr_bytes, RTE_ETHER_ADDR_LEN,
2343 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2344 		if (ret)
2345 			return ret;
2346 
2347 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH,
2348 			NH_FLD_ETH_SA, &spec->src.addr_bytes,
2349 			&mask->src.addr_bytes, RTE_ETHER_ADDR_LEN,
2350 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2351 		if (ret)
2352 			return ret;
2353 	}
2354 
2355 	if (memcmp((const char *)&mask->dst,
2356 		zero_cmp, RTE_ETHER_ADDR_LEN)) {
2357 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH,
2358 			NH_FLD_ETH_DA, &spec->dst.addr_bytes,
2359 			&mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN,
2360 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2361 		if (ret)
2362 			return ret;
2363 
2364 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH,
2365 			NH_FLD_ETH_DA, &spec->dst.addr_bytes,
2366 			&mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN,
2367 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2368 		if (ret)
2369 			return ret;
2370 	}
2371 
2372 	if (memcmp((const char *)&mask->type,
2373 		zero_cmp, sizeof(rte_be16_t))) {
2374 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH,
2375 			NH_FLD_ETH_TYPE, &spec->type,
2376 			&mask->type, sizeof(rte_be16_t),
2377 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2378 		if (ret)
2379 			return ret;
2380 
2381 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH,
2382 			NH_FLD_ETH_TYPE, &spec->type,
2383 			&mask->type, sizeof(rte_be16_t),
2384 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2385 		if (ret)
2386 			return ret;
2387 	}
2388 
2389 	(*device_configured) |= local_cfg;
2390 
2391 	return 0;
2392 }
2393 
2394 static int
2395 dpaa2_configure_flow_tunnel_vlan(struct dpaa2_dev_flow *flow,
2396 	struct rte_eth_dev *dev,
2397 	const struct rte_flow_attr *attr,
2398 	const struct rte_flow_item *pattern,
2399 	int *device_configured)
2400 {
2401 	int ret, local_cfg = 0;
2402 	uint32_t group;
2403 	const struct rte_flow_item_vlan *spec, *mask;
2404 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2405 
2406 	group = attr->group;
2407 
2408 	/* Parse pattern list to get the matching parameters */
2409 	spec = pattern->spec;
2410 	mask = pattern->mask ?
2411 		pattern->mask : &dpaa2_flow_item_vlan_mask;
2412 
2413 	/* Get traffic class index and flow id to be configured */
2414 	flow->tc_id = group;
2415 	flow->tc_index = attr->priority;
2416 
2417 	if (!spec) {
2418 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2419 				FAFE_VXLAN_IN_VLAN_FRAM,
2420 				DPAA2_FLOW_QOS_TYPE,
2421 				group, &local_cfg);
2422 		if (ret)
2423 			return ret;
2424 
2425 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2426 				FAFE_VXLAN_IN_VLAN_FRAM,
2427 				DPAA2_FLOW_FS_TYPE,
2428 				group, &local_cfg);
2429 		if (ret)
2430 			return ret;
2431 
2432 		(*device_configured) |= local_cfg;
2433 		return 0;
2434 	}
2435 
2436 	ret = dpaa2_flow_extract_support((const uint8_t *)mask,
2437 		RTE_FLOW_ITEM_TYPE_VLAN);
2438 	if (ret) {
2439 		DPAA2_PMD_WARN("Extract field(s) of vlan not support.");
2440 
2441 		return ret;
2442 	}
2443 
2444 	if (!mask->tci)
2445 		return 0;
2446 
2447 	ret = dpaa2_flow_add_pr_extract_rule(flow,
2448 			DPAA2_VXLAN_IN_TCI_OFFSET,
2449 			sizeof(rte_be16_t), &spec->tci, &mask->tci,
2450 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2451 	if (ret)
2452 		return ret;
2453 
2454 	ret = dpaa2_flow_add_pr_extract_rule(flow,
2455 			DPAA2_VXLAN_IN_TCI_OFFSET,
2456 			sizeof(rte_be16_t), &spec->tci, &mask->tci,
2457 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2458 	if (ret)
2459 		return ret;
2460 
2461 	(*device_configured) |= local_cfg;
2462 
2463 	return 0;
2464 }
2465 
2466 static int
2467 dpaa2_configure_flow_vlan(struct dpaa2_dev_flow *flow,
2468 	struct rte_eth_dev *dev,
2469 	const struct rte_flow_attr *attr,
2470 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
2471 	const struct rte_flow_action actions[] __rte_unused,
2472 	struct rte_flow_error *error __rte_unused,
2473 	int *device_configured)
2474 {
2475 	int ret, local_cfg = 0;
2476 	uint32_t group;
2477 	const struct rte_flow_item_vlan *spec, *mask;
2478 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2479 	const struct rte_flow_item *pattern =
2480 		&dpaa2_pattern->generic_item;
2481 
2482 	if (dpaa2_pattern->in_tunnel) {
2483 		return dpaa2_configure_flow_tunnel_vlan(flow,
2484 				dev, attr, pattern, device_configured);
2485 	}
2486 
2487 	group = attr->group;
2488 
2489 	/* Parse pattern list to get the matching parameters */
2490 	spec = pattern->spec;
2491 	mask = pattern->mask ? pattern->mask : &dpaa2_flow_item_vlan_mask;
2492 
2493 	/* Get traffic class index and flow id to be configured */
2494 	flow->tc_id = group;
2495 	flow->tc_index = attr->priority;
2496 
2497 	if (!spec) {
2498 		ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_VLAN_FRAM,
2499 				DPAA2_FLOW_QOS_TYPE, group,
2500 				&local_cfg);
2501 		if (ret)
2502 			return ret;
2503 
2504 		ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_VLAN_FRAM,
2505 				DPAA2_FLOW_FS_TYPE, group,
2506 				&local_cfg);
2507 		if (ret)
2508 			return ret;
2509 
2510 		(*device_configured) |= local_cfg;
2511 		return 0;
2512 	}
2513 
2514 	ret = dpaa2_flow_extract_support((const uint8_t *)mask,
2515 			RTE_FLOW_ITEM_TYPE_VLAN);
2516 	if (ret) {
2517 		DPAA2_PMD_WARN("Extract field(s) of vlan not support.");
2518 		return ret;
2519 	}
2520 
2521 	if (!mask->tci)
2522 		return 0;
2523 
2524 	ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_VLAN,
2525 			NH_FLD_VLAN_TCI, &spec->tci,
2526 			&mask->tci, sizeof(rte_be16_t),
2527 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2528 	if (ret)
2529 		return ret;
2530 
2531 	ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_VLAN,
2532 			NH_FLD_VLAN_TCI, &spec->tci,
2533 			&mask->tci, sizeof(rte_be16_t),
2534 			priv, group, &local_cfg,
2535 			DPAA2_FLOW_FS_TYPE);
2536 	if (ret)
2537 		return ret;
2538 
2539 	(*device_configured) |= local_cfg;
2540 	return 0;
2541 }
2542 
2543 static int
2544 dpaa2_configure_flow_ipv4(struct dpaa2_dev_flow *flow,
2545 	struct rte_eth_dev *dev,
2546 	const struct rte_flow_attr *attr,
2547 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
2548 	const struct rte_flow_action actions[] __rte_unused,
2549 	struct rte_flow_error *error __rte_unused,
2550 	int *device_configured)
2551 {
2552 	int ret, local_cfg = 0;
2553 	uint32_t group;
2554 	const struct rte_flow_item_ipv4 *spec_ipv4 = 0, *mask_ipv4 = 0;
2555 	const void *key, *mask;
2556 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2557 	int size;
2558 	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
2559 
2560 	group = attr->group;
2561 
2562 	/* Parse pattern list to get the matching parameters */
2563 	spec_ipv4 = pattern->spec;
2564 	mask_ipv4 = pattern->mask ?
2565 		    pattern->mask : &dpaa2_flow_item_ipv4_mask;
2566 
2567 	if (dpaa2_pattern->in_tunnel) {
2568 		if (spec_ipv4) {
2569 			DPAA2_PMD_ERR("Tunnel-IPv4 distribution not support");
2570 			return -ENOTSUP;
2571 		}
2572 
2573 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2574 				FAFE_VXLAN_IN_IPV4_FRAM,
2575 				DPAA2_FLOW_QOS_TYPE, group,
2576 				&local_cfg);
2577 		if (ret)
2578 			return ret;
2579 
2580 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2581 				FAFE_VXLAN_IN_IPV4_FRAM,
2582 				DPAA2_FLOW_FS_TYPE, group,
2583 				&local_cfg);
2584 		return ret;
2585 	}
2586 
2587 	/* Get traffic class index and flow id to be configured */
2588 	flow->tc_id = group;
2589 	flow->tc_index = attr->priority;
2590 
2591 	ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_IPV4_FRAM,
2592 			DPAA2_FLOW_QOS_TYPE, group,
2593 			&local_cfg);
2594 	if (ret)
2595 		return ret;
2596 
2597 	ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_IPV4_FRAM,
2598 			DPAA2_FLOW_FS_TYPE, group, &local_cfg);
2599 	if (ret)
2600 		return ret;
2601 
2602 	if (!spec_ipv4) {
2603 		(*device_configured) |= local_cfg;
2604 		return 0;
2605 	}
2606 
2607 	ret = dpaa2_flow_extract_support((const uint8_t *)mask_ipv4,
2608 			RTE_FLOW_ITEM_TYPE_IPV4);
2609 	if (ret) {
2610 		DPAA2_PMD_WARN("Extract field(s) of IPv4 not support.");
2611 		return ret;
2612 	}
2613 
2614 	if (mask_ipv4->hdr.src_addr) {
2615 		key = &spec_ipv4->hdr.src_addr;
2616 		mask = &mask_ipv4->hdr.src_addr;
2617 		size = sizeof(rte_be32_t);
2618 
2619 		ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV4,
2620 				NH_FLD_IPV4_SRC_IP,
2621 				key, mask, size, priv,
2622 				group, &local_cfg,
2623 				DPAA2_FLOW_QOS_TYPE);
2624 		if (ret)
2625 			return ret;
2626 
2627 		ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV4,
2628 				NH_FLD_IPV4_SRC_IP,
2629 				key, mask, size, priv,
2630 				group, &local_cfg,
2631 				DPAA2_FLOW_FS_TYPE);
2632 		if (ret)
2633 			return ret;
2634 	}
2635 
2636 	if (mask_ipv4->hdr.dst_addr) {
2637 		key = &spec_ipv4->hdr.dst_addr;
2638 		mask = &mask_ipv4->hdr.dst_addr;
2639 		size = sizeof(rte_be32_t);
2640 
2641 		ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV4,
2642 				NH_FLD_IPV4_DST_IP,
2643 				key, mask, size, priv,
2644 				group, &local_cfg,
2645 				DPAA2_FLOW_QOS_TYPE);
2646 		if (ret)
2647 			return ret;
2648 		ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV4,
2649 				NH_FLD_IPV4_DST_IP,
2650 				key, mask, size, priv,
2651 				group, &local_cfg,
2652 				DPAA2_FLOW_FS_TYPE);
2653 		if (ret)
2654 			return ret;
2655 	}
2656 
2657 	if (mask_ipv4->hdr.next_proto_id) {
2658 		key = &spec_ipv4->hdr.next_proto_id;
2659 		mask = &mask_ipv4->hdr.next_proto_id;
2660 		size = sizeof(uint8_t);
2661 
2662 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IP,
2663 				NH_FLD_IP_PROTO, key,
2664 				mask, size, priv, group,
2665 				&local_cfg,
2666 				DPAA2_FLOW_QOS_TYPE);
2667 		if (ret)
2668 			return ret;
2669 
2670 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IP,
2671 				NH_FLD_IP_PROTO, key,
2672 				mask, size, priv, group,
2673 				&local_cfg,
2674 				DPAA2_FLOW_FS_TYPE);
2675 		if (ret)
2676 			return ret;
2677 	}
2678 
2679 	(*device_configured) |= local_cfg;
2680 	return 0;
2681 }
2682 
2683 static int
2684 dpaa2_configure_flow_ipv6(struct dpaa2_dev_flow *flow,
2685 	struct rte_eth_dev *dev,
2686 	const struct rte_flow_attr *attr,
2687 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
2688 	const struct rte_flow_action actions[] __rte_unused,
2689 	struct rte_flow_error *error __rte_unused,
2690 	int *device_configured)
2691 {
2692 	int ret, local_cfg = 0;
2693 	uint32_t group;
2694 	const struct rte_flow_item_ipv6 *spec_ipv6 = 0, *mask_ipv6 = 0;
2695 	const void *key, *mask;
2696 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2697 	const char zero_cmp[NH_FLD_IPV6_ADDR_SIZE] = {0};
2698 	int size;
2699 	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
2700 
2701 	group = attr->group;
2702 
2703 	/* Parse pattern list to get the matching parameters */
2704 	spec_ipv6 = pattern->spec;
2705 	mask_ipv6 = pattern->mask ? pattern->mask : &dpaa2_flow_item_ipv6_mask;
2706 
2707 	/* Get traffic class index and flow id to be configured */
2708 	flow->tc_id = group;
2709 	flow->tc_index = attr->priority;
2710 
2711 	if (dpaa2_pattern->in_tunnel) {
2712 		if (spec_ipv6) {
2713 			DPAA2_PMD_ERR("Tunnel-IPv6 distribution not support");
2714 			return -ENOTSUP;
2715 		}
2716 
2717 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2718 				FAFE_VXLAN_IN_IPV6_FRAM,
2719 				DPAA2_FLOW_QOS_TYPE, group,
2720 				&local_cfg);
2721 		if (ret)
2722 			return ret;
2723 
2724 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2725 				FAFE_VXLAN_IN_IPV6_FRAM,
2726 				DPAA2_FLOW_FS_TYPE, group,
2727 				&local_cfg);
2728 		return ret;
2729 	}
2730 
2731 	ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_IPV6_FRAM,
2732 			DPAA2_FLOW_QOS_TYPE, group,
2733 			&local_cfg);
2734 	if (ret)
2735 		return ret;
2736 
2737 	ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_IPV6_FRAM,
2738 			DPAA2_FLOW_FS_TYPE, group, &local_cfg);
2739 	if (ret)
2740 		return ret;
2741 
2742 	if (!spec_ipv6) {
2743 		(*device_configured) |= local_cfg;
2744 		return 0;
2745 	}
2746 
2747 	ret = dpaa2_flow_extract_support((const uint8_t *)mask_ipv6,
2748 			RTE_FLOW_ITEM_TYPE_IPV6);
2749 	if (ret) {
2750 		DPAA2_PMD_WARN("Extract field(s) of IPv6 not support.");
2751 		return ret;
2752 	}
2753 
2754 	if (memcmp((const char *)&mask_ipv6->hdr.src_addr, zero_cmp, NH_FLD_IPV6_ADDR_SIZE)) {
2755 		key = &spec_ipv6->hdr.src_addr;
2756 		mask = &mask_ipv6->hdr.src_addr;
2757 		size = NH_FLD_IPV6_ADDR_SIZE;
2758 
2759 		ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV6,
2760 				NH_FLD_IPV6_SRC_IP,
2761 				key, mask, size, priv,
2762 				group, &local_cfg,
2763 				DPAA2_FLOW_QOS_TYPE);
2764 		if (ret)
2765 			return ret;
2766 
2767 		ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV6,
2768 				NH_FLD_IPV6_SRC_IP,
2769 				key, mask, size, priv,
2770 				group, &local_cfg,
2771 				DPAA2_FLOW_FS_TYPE);
2772 		if (ret)
2773 			return ret;
2774 	}
2775 
2776 	if (memcmp((const char *)&mask_ipv6->hdr.dst_addr, zero_cmp, NH_FLD_IPV6_ADDR_SIZE)) {
2777 		key = &spec_ipv6->hdr.dst_addr;
2778 		mask = &mask_ipv6->hdr.dst_addr;
2779 		size = NH_FLD_IPV6_ADDR_SIZE;
2780 
2781 		ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV6,
2782 				NH_FLD_IPV6_DST_IP,
2783 				key, mask, size, priv,
2784 				group, &local_cfg,
2785 				DPAA2_FLOW_QOS_TYPE);
2786 		if (ret)
2787 			return ret;
2788 
2789 		ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV6,
2790 				NH_FLD_IPV6_DST_IP,
2791 				key, mask, size, priv,
2792 				group, &local_cfg,
2793 				DPAA2_FLOW_FS_TYPE);
2794 		if (ret)
2795 			return ret;
2796 	}
2797 
2798 	if (mask_ipv6->hdr.proto) {
2799 		key = &spec_ipv6->hdr.proto;
2800 		mask = &mask_ipv6->hdr.proto;
2801 		size = sizeof(uint8_t);
2802 
2803 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IP,
2804 				NH_FLD_IP_PROTO, key,
2805 				mask, size, priv, group,
2806 				&local_cfg,
2807 				DPAA2_FLOW_QOS_TYPE);
2808 		if (ret)
2809 			return ret;
2810 
2811 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IP,
2812 				NH_FLD_IP_PROTO, key,
2813 				mask, size, priv, group,
2814 				&local_cfg,
2815 				DPAA2_FLOW_FS_TYPE);
2816 		if (ret)
2817 			return ret;
2818 	}
2819 
2820 	(*device_configured) |= local_cfg;
2821 	return 0;
2822 }
2823 
2824 static int
2825 dpaa2_configure_flow_icmp(struct dpaa2_dev_flow *flow,
2826 	struct rte_eth_dev *dev,
2827 	const struct rte_flow_attr *attr,
2828 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
2829 	const struct rte_flow_action actions[] __rte_unused,
2830 	struct rte_flow_error *error __rte_unused,
2831 	int *device_configured)
2832 {
2833 	int ret, local_cfg = 0;
2834 	uint32_t group;
2835 	const struct rte_flow_item_icmp *spec, *mask;
2836 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2837 	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
2838 
2839 	group = attr->group;
2840 
2841 	/* Parse pattern list to get the matching parameters */
2842 	spec = pattern->spec;
2843 	mask = pattern->mask ?
2844 		pattern->mask : &dpaa2_flow_item_icmp_mask;
2845 
2846 	/* Get traffic class index and flow id to be configured */
2847 	flow->tc_id = group;
2848 	flow->tc_index = attr->priority;
2849 
2850 	if (dpaa2_pattern->in_tunnel) {
2851 		DPAA2_PMD_ERR("Tunnel-ICMP distribution not support");
2852 		return -ENOTSUP;
2853 	}
2854 
2855 	if (!spec) {
2856 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2857 				FAF_ICMP_FRAM, DPAA2_FLOW_QOS_TYPE,
2858 				group, &local_cfg);
2859 		if (ret)
2860 			return ret;
2861 
2862 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2863 				FAF_ICMP_FRAM, DPAA2_FLOW_FS_TYPE,
2864 				group, &local_cfg);
2865 		if (ret)
2866 			return ret;
2867 
2868 		(*device_configured) |= local_cfg;
2869 		return 0;
2870 	}
2871 
2872 	ret = dpaa2_flow_extract_support((const uint8_t *)mask,
2873 		RTE_FLOW_ITEM_TYPE_ICMP);
2874 	if (ret) {
2875 		DPAA2_PMD_WARN("Extract field(s) of ICMP not support.");
2876 
2877 		return ret;
2878 	}
2879 
2880 	if (mask->hdr.icmp_type) {
2881 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ICMP,
2882 			NH_FLD_ICMP_TYPE, &spec->hdr.icmp_type,
2883 			&mask->hdr.icmp_type, sizeof(uint8_t),
2884 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2885 		if (ret)
2886 			return ret;
2887 
2888 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ICMP,
2889 			NH_FLD_ICMP_TYPE, &spec->hdr.icmp_type,
2890 			&mask->hdr.icmp_type, sizeof(uint8_t),
2891 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2892 		if (ret)
2893 			return ret;
2894 	}
2895 
2896 	if (mask->hdr.icmp_code) {
2897 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ICMP,
2898 			NH_FLD_ICMP_CODE, &spec->hdr.icmp_code,
2899 			&mask->hdr.icmp_code, sizeof(uint8_t),
2900 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2901 		if (ret)
2902 			return ret;
2903 
2904 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ICMP,
2905 			NH_FLD_ICMP_CODE, &spec->hdr.icmp_code,
2906 			&mask->hdr.icmp_code, sizeof(uint8_t),
2907 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2908 		if (ret)
2909 			return ret;
2910 	}
2911 
2912 	(*device_configured) |= local_cfg;
2913 
2914 	return 0;
2915 }
2916 
2917 static int
2918 dpaa2_configure_flow_udp(struct dpaa2_dev_flow *flow,
2919 	struct rte_eth_dev *dev,
2920 	const struct rte_flow_attr *attr,
2921 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
2922 	const struct rte_flow_action actions[] __rte_unused,
2923 	struct rte_flow_error *error __rte_unused,
2924 	int *device_configured)
2925 {
2926 	int ret, local_cfg = 0;
2927 	uint32_t group;
2928 	const struct rte_flow_item_udp *spec, *mask;
2929 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2930 	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
2931 
2932 	group = attr->group;
2933 
2934 	/* Parse pattern list to get the matching parameters */
2935 	spec = pattern->spec;
2936 	mask = pattern->mask ?
2937 		pattern->mask : &dpaa2_flow_item_udp_mask;
2938 
2939 	/* Get traffic class index and flow id to be configured */
2940 	flow->tc_id = group;
2941 	flow->tc_index = attr->priority;
2942 
2943 	if (dpaa2_pattern->in_tunnel) {
2944 		if (spec) {
2945 			DPAA2_PMD_ERR("Tunnel-UDP distribution not support");
2946 			return -ENOTSUP;
2947 		}
2948 
2949 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2950 				FAFE_VXLAN_IN_UDP_FRAM,
2951 				DPAA2_FLOW_QOS_TYPE, group,
2952 				&local_cfg);
2953 		if (ret)
2954 			return ret;
2955 
2956 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2957 				FAFE_VXLAN_IN_UDP_FRAM,
2958 				DPAA2_FLOW_FS_TYPE, group,
2959 				&local_cfg);
2960 		return ret;
2961 	}
2962 
2963 	ret = dpaa2_flow_identify_by_faf(priv, flow,
2964 			FAF_UDP_FRAM, DPAA2_FLOW_QOS_TYPE,
2965 			group, &local_cfg);
2966 	if (ret)
2967 		return ret;
2968 
2969 	ret = dpaa2_flow_identify_by_faf(priv, flow,
2970 			FAF_UDP_FRAM, DPAA2_FLOW_FS_TYPE,
2971 			group, &local_cfg);
2972 	if (ret)
2973 		return ret;
2974 
2975 	if (!spec) {
2976 		(*device_configured) |= local_cfg;
2977 		return 0;
2978 	}
2979 
2980 	ret = dpaa2_flow_extract_support((const uint8_t *)mask,
2981 		RTE_FLOW_ITEM_TYPE_UDP);
2982 	if (ret) {
2983 		DPAA2_PMD_WARN("Extract field(s) of UDP not support.");
2984 
2985 		return ret;
2986 	}
2987 
2988 	if (mask->hdr.src_port) {
2989 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_UDP,
2990 			NH_FLD_UDP_PORT_SRC, &spec->hdr.src_port,
2991 			&mask->hdr.src_port, sizeof(rte_be16_t),
2992 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2993 		if (ret)
2994 			return ret;
2995 
2996 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_UDP,
2997 			NH_FLD_UDP_PORT_SRC, &spec->hdr.src_port,
2998 			&mask->hdr.src_port, sizeof(rte_be16_t),
2999 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
3000 		if (ret)
3001 			return ret;
3002 	}
3003 
3004 	if (mask->hdr.dst_port) {
3005 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_UDP,
3006 			NH_FLD_UDP_PORT_DST, &spec->hdr.dst_port,
3007 			&mask->hdr.dst_port, sizeof(rte_be16_t),
3008 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
3009 		if (ret)
3010 			return ret;
3011 
3012 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_UDP,
3013 			NH_FLD_UDP_PORT_DST, &spec->hdr.dst_port,
3014 			&mask->hdr.dst_port, sizeof(rte_be16_t),
3015 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
3016 		if (ret)
3017 			return ret;
3018 	}
3019 
3020 	(*device_configured) |= local_cfg;
3021 
3022 	return 0;
3023 }
3024 
3025 static int
3026 dpaa2_configure_flow_tcp(struct dpaa2_dev_flow *flow,
3027 	struct rte_eth_dev *dev,
3028 	const struct rte_flow_attr *attr,
3029 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
3030 	const struct rte_flow_action actions[] __rte_unused,
3031 	struct rte_flow_error *error __rte_unused,
3032 	int *device_configured)
3033 {
3034 	int ret, local_cfg = 0;
3035 	uint32_t group;
3036 	const struct rte_flow_item_tcp *spec, *mask;
3037 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3038 	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
3039 
3040 	group = attr->group;
3041 
3042 	/* Parse pattern list to get the matching parameters */
3043 	spec = pattern->spec;
3044 	mask = pattern->mask ?
3045 		pattern->mask : &dpaa2_flow_item_tcp_mask;
3046 
3047 	/* Get traffic class index and flow id to be configured */
3048 	flow->tc_id = group;
3049 	flow->tc_index = attr->priority;
3050 
3051 	if (dpaa2_pattern->in_tunnel) {
3052 		if (spec) {
3053 			DPAA2_PMD_ERR("Tunnel-TCP distribution not support");
3054 			return -ENOTSUP;
3055 		}
3056 
3057 		ret = dpaa2_flow_identify_by_faf(priv, flow,
3058 				FAFE_VXLAN_IN_TCP_FRAM,
3059 				DPAA2_FLOW_QOS_TYPE, group,
3060 				&local_cfg);
3061 		if (ret)
3062 			return ret;
3063 
3064 		ret = dpaa2_flow_identify_by_faf(priv, flow,
3065 						 FAFE_VXLAN_IN_TCP_FRAM,
3066 						 DPAA2_FLOW_FS_TYPE, group,
3067 						 &local_cfg);
3068 		return ret;
3069 	}
3070 
3071 	ret = dpaa2_flow_identify_by_faf(priv, flow,
3072 			FAF_TCP_FRAM, DPAA2_FLOW_QOS_TYPE,
3073 			group, &local_cfg);
3074 	if (ret)
3075 		return ret;
3076 
3077 	ret = dpaa2_flow_identify_by_faf(priv, flow,
3078 			FAF_TCP_FRAM, DPAA2_FLOW_FS_TYPE,
3079 			group, &local_cfg);
3080 	if (ret)
3081 		return ret;
3082 
3083 	if (!spec) {
3084 		(*device_configured) |= local_cfg;
3085 		return 0;
3086 	}
3087 
3088 	ret = dpaa2_flow_extract_support((const uint8_t *)mask,
3089 		RTE_FLOW_ITEM_TYPE_TCP);
3090 	if (ret) {
3091 		DPAA2_PMD_WARN("Extract field(s) of TCP not support.");
3092 
3093 		return ret;
3094 	}
3095 
3096 	if (mask->hdr.src_port) {
3097 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_TCP,
3098 			NH_FLD_TCP_PORT_SRC, &spec->hdr.src_port,
3099 			&mask->hdr.src_port, sizeof(rte_be16_t),
3100 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
3101 		if (ret)
3102 			return ret;
3103 
3104 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_TCP,
3105 			NH_FLD_TCP_PORT_SRC, &spec->hdr.src_port,
3106 			&mask->hdr.src_port, sizeof(rte_be16_t),
3107 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
3108 		if (ret)
3109 			return ret;
3110 	}
3111 
3112 	if (mask->hdr.dst_port) {
3113 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_TCP,
3114 			NH_FLD_TCP_PORT_DST, &spec->hdr.dst_port,
3115 			&mask->hdr.dst_port, sizeof(rte_be16_t),
3116 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
3117 		if (ret)
3118 			return ret;
3119 
3120 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_TCP,
3121 			NH_FLD_TCP_PORT_DST, &spec->hdr.dst_port,
3122 			&mask->hdr.dst_port, sizeof(rte_be16_t),
3123 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
3124 		if (ret)
3125 			return ret;
3126 	}
3127 
3128 	(*device_configured) |= local_cfg;
3129 
3130 	return 0;
3131 }
3132 
3133 static int
3134 dpaa2_configure_flow_esp(struct dpaa2_dev_flow *flow,
3135 	struct rte_eth_dev *dev,
3136 	const struct rte_flow_attr *attr,
3137 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
3138 	const struct rte_flow_action actions[] __rte_unused,
3139 	struct rte_flow_error *error __rte_unused,
3140 	int *device_configured)
3141 {
3142 	int ret, local_cfg = 0;
3143 	uint32_t group;
3144 	const struct rte_flow_item_esp *spec, *mask;
3145 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3146 	const struct rte_flow_item *pattern =
3147 		&dpaa2_pattern->generic_item;
3148 
3149 	group = attr->group;
3150 
3151 	/* Parse pattern list to get the matching parameters */
3152 	spec = pattern->spec;
3153 	mask = pattern->mask ?
3154 		pattern->mask : &dpaa2_flow_item_esp_mask;
3155 
3156 	/* Get traffic class index and flow id to be configured */
3157 	flow->tc_id = group;
3158 	flow->tc_index = attr->priority;
3159 
3160 	if (dpaa2_pattern->in_tunnel) {
3161 		DPAA2_PMD_ERR("Tunnel-ESP distribution not support");
3162 		return -ENOTSUP;
3163 	}
3164 
3165 	ret = dpaa2_flow_identify_by_faf(priv, flow,
3166 			FAF_IPSEC_ESP_FRAM, DPAA2_FLOW_QOS_TYPE,
3167 			group, &local_cfg);
3168 	if (ret)
3169 		return ret;
3170 
3171 	ret = dpaa2_flow_identify_by_faf(priv, flow,
3172 			FAF_IPSEC_ESP_FRAM, DPAA2_FLOW_FS_TYPE,
3173 			group, &local_cfg);
3174 	if (ret)
3175 		return ret;
3176 
3177 	if (!spec) {
3178 		(*device_configured) |= local_cfg;
3179 		return 0;
3180 	}
3181 
3182 	ret = dpaa2_flow_extract_support((const uint8_t *)mask,
3183 		RTE_FLOW_ITEM_TYPE_ESP);
3184 	if (ret) {
3185 		DPAA2_PMD_WARN("Extract field(s) of ESP not support.");
3186 
3187 		return ret;
3188 	}
3189 
3190 	if (mask->hdr.spi) {
3191 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IPSEC_ESP,
3192 			NH_FLD_IPSEC_ESP_SPI, &spec->hdr.spi,
3193 			&mask->hdr.spi, sizeof(rte_be32_t),
3194 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
3195 		if (ret)
3196 			return ret;
3197 
3198 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IPSEC_ESP,
3199 			NH_FLD_IPSEC_ESP_SPI, &spec->hdr.spi,
3200 			&mask->hdr.spi, sizeof(rte_be32_t),
3201 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
3202 		if (ret)
3203 			return ret;
3204 	}
3205 
3206 	if (mask->hdr.seq) {
3207 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IPSEC_ESP,
3208 			NH_FLD_IPSEC_ESP_SEQUENCE_NUM, &spec->hdr.seq,
3209 			&mask->hdr.seq, sizeof(rte_be32_t),
3210 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
3211 		if (ret)
3212 			return ret;
3213 
3214 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IPSEC_ESP,
3215 			NH_FLD_IPSEC_ESP_SEQUENCE_NUM, &spec->hdr.seq,
3216 			&mask->hdr.seq, sizeof(rte_be32_t),
3217 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
3218 		if (ret)
3219 			return ret;
3220 	}
3221 
3222 	(*device_configured) |= local_cfg;
3223 
3224 	return 0;
3225 }
3226 
3227 static int
3228 dpaa2_configure_flow_ah(struct dpaa2_dev_flow *flow,
3229 	struct rte_eth_dev *dev,
3230 	const struct rte_flow_attr *attr,
3231 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
3232 	const struct rte_flow_action actions[] __rte_unused,
3233 	struct rte_flow_error *error __rte_unused,
3234 	int *device_configured)
3235 {
3236 	int ret, local_cfg = 0;
3237 	uint32_t group;
3238 	const struct rte_flow_item_ah *spec, *mask;
3239 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3240 	const struct rte_flow_item *pattern =
3241 		&dpaa2_pattern->generic_item;
3242 
3243 	group = attr->group;
3244 
3245 	/* Parse pattern list to get the matching parameters */
3246 	spec = pattern->spec;
3247 	mask = pattern->mask ?
3248 		pattern->mask : &dpaa2_flow_item_ah_mask;
3249 
3250 	/* Get traffic class index and flow id to be configured */
3251 	flow->tc_id = group;
3252 	flow->tc_index = attr->priority;
3253 
3254 	if (dpaa2_pattern->in_tunnel) {
3255 		DPAA2_PMD_ERR("Tunnel-AH distribution not support");
3256 		return -ENOTSUP;
3257 	}
3258 
3259 	ret = dpaa2_flow_identify_by_faf(priv, flow,
3260 			FAF_IPSEC_AH_FRAM, DPAA2_FLOW_QOS_TYPE,
3261 			group, &local_cfg);
3262 	if (ret)
3263 		return ret;
3264 
3265 	ret = dpaa2_flow_identify_by_faf(priv, flow,
3266 			FAF_IPSEC_AH_FRAM, DPAA2_FLOW_FS_TYPE,
3267 			group, &local_cfg);
3268 	if (ret)
3269 		return ret;
3270 
3271 	if (!spec) {
3272 		(*device_configured) |= local_cfg;
3273 		return 0;
3274 	}
3275 
3276 	ret = dpaa2_flow_extract_support((const uint8_t *)mask,
3277 		RTE_FLOW_ITEM_TYPE_AH);
3278 	if (ret) {
3279 		DPAA2_PMD_WARN("Extract field(s) of AH not support.");
3280 
3281 		return ret;
3282 	}
3283 
3284 	if (mask->spi) {
3285 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IPSEC_AH,
3286 			NH_FLD_IPSEC_AH_SPI, &spec->spi,
3287 			&mask->spi, sizeof(rte_be32_t),
3288 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
3289 		if (ret)
3290 			return ret;
3291 
3292 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IPSEC_AH,
3293 			NH_FLD_IPSEC_AH_SPI, &spec->spi,
3294 			&mask->spi, sizeof(rte_be32_t),
3295 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
3296 		if (ret)
3297 			return ret;
3298 	}
3299 
3300 	if (mask->seq_num) {
3301 		DPAA2_PMD_ERR("AH seq distribution not support");
3302 		return -ENOTSUP;
3303 	}
3304 
3305 	(*device_configured) |= local_cfg;
3306 
3307 	return 0;
3308 }
3309 
3310 static int
3311 dpaa2_configure_flow_sctp(struct dpaa2_dev_flow *flow,
3312 	struct rte_eth_dev *dev,
3313 	const struct rte_flow_attr *attr,
3314 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
3315 	const struct rte_flow_action actions[] __rte_unused,
3316 	struct rte_flow_error *error __rte_unused,
3317 	int *device_configured)
3318 {
3319 	int ret, local_cfg = 0;
3320 	uint32_t group;
3321 	const struct rte_flow_item_sctp *spec, *mask;
3322 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3323 	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
3324 
3325 	group = attr->group;
3326 
3327 	/* Parse pattern list to get the matching parameters */
3328 	spec = pattern->spec;
3329 	mask = pattern->mask ?
3330 		pattern->mask : &dpaa2_flow_item_sctp_mask;
3331 
3332 	/* Get traffic class index and flow id to be configured */
3333 	flow->tc_id = group;
3334 	flow->tc_index = attr->priority;
3335 
3336 	if (dpaa2_pattern->in_tunnel) {
3337 		DPAA2_PMD_ERR("Tunnel-SCTP distribution not support");
3338 		return -ENOTSUP;
3339 	}
3340 
3341 	ret = dpaa2_flow_identify_by_faf(priv, flow,
3342 			FAF_SCTP_FRAM, DPAA2_FLOW_QOS_TYPE,
3343 			group, &local_cfg);
3344 	if (ret)
3345 		return ret;
3346 
3347 	ret = dpaa2_flow_identify_by_faf(priv, flow,
3348 			FAF_SCTP_FRAM, DPAA2_FLOW_FS_TYPE,
3349 			group, &local_cfg);
3350 	if (ret)
3351 		return ret;
3352 
3353 	if (!spec) {
3354 		(*device_configured) |= local_cfg;
3355 		return 0;
3356 	}
3357 
3358 	ret = dpaa2_flow_extract_support((const uint8_t *)mask,
3359 		RTE_FLOW_ITEM_TYPE_SCTP);
3360 	if (ret) {
3361 		DPAA2_PMD_WARN("Extract field(s) of SCTP not support.");
3362 
3363 		return ret;
3364 	}
3365 
3366 	if (mask->hdr.src_port) {
3367 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_SCTP,
3368 			NH_FLD_SCTP_PORT_SRC, &spec->hdr.src_port,
3369 			&mask->hdr.src_port, sizeof(rte_be16_t),
3370 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
3371 		if (ret)
3372 			return ret;
3373 
3374 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_SCTP,
3375 			NH_FLD_SCTP_PORT_SRC, &spec->hdr.src_port,
3376 			&mask->hdr.src_port, sizeof(rte_be16_t),
3377 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
3378 		if (ret)
3379 			return ret;
3380 	}
3381 
3382 	if (mask->hdr.dst_port) {
3383 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_SCTP,
3384 			NH_FLD_SCTP_PORT_DST, &spec->hdr.dst_port,
3385 			&mask->hdr.dst_port, sizeof(rte_be16_t),
3386 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
3387 		if (ret)
3388 			return ret;
3389 
3390 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_SCTP,
3391 			NH_FLD_SCTP_PORT_DST, &spec->hdr.dst_port,
3392 			&mask->hdr.dst_port, sizeof(rte_be16_t),
3393 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
3394 		if (ret)
3395 			return ret;
3396 	}
3397 
3398 	(*device_configured) |= local_cfg;
3399 
3400 	return 0;
3401 }
3402 
3403 static int
3404 dpaa2_configure_flow_gre(struct dpaa2_dev_flow *flow,
3405 	struct rte_eth_dev *dev,
3406 	const struct rte_flow_attr *attr,
3407 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
3408 	const struct rte_flow_action actions[] __rte_unused,
3409 	struct rte_flow_error *error __rte_unused,
3410 	int *device_configured)
3411 {
3412 	int ret, local_cfg = 0;
3413 	uint32_t group;
3414 	const struct rte_flow_item_gre *spec, *mask;
3415 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3416 	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
3417 
3418 	group = attr->group;
3419 
3420 	/* Parse pattern list to get the matching parameters */
3421 	spec = pattern->spec;
3422 	mask = pattern->mask ?
3423 		pattern->mask : &dpaa2_flow_item_gre_mask;
3424 
3425 	/* Get traffic class index and flow id to be configured */
3426 	flow->tc_id = group;
3427 	flow->tc_index = attr->priority;
3428 
3429 	if (dpaa2_pattern->in_tunnel) {
3430 		DPAA2_PMD_ERR("Tunnel-GRE distribution not support");
3431 		return -ENOTSUP;
3432 	}
3433 
3434 	if (!spec) {
3435 		ret = dpaa2_flow_identify_by_faf(priv, flow,
3436 				FAF_GRE_FRAM, DPAA2_FLOW_QOS_TYPE,
3437 				group, &local_cfg);
3438 		if (ret)
3439 			return ret;
3440 
3441 		ret = dpaa2_flow_identify_by_faf(priv, flow,
3442 				FAF_GRE_FRAM, DPAA2_FLOW_FS_TYPE,
3443 				group, &local_cfg);
3444 		if (ret)
3445 			return ret;
3446 
3447 		(*device_configured) |= local_cfg;
3448 		return 0;
3449 	}
3450 
3451 	ret = dpaa2_flow_extract_support((const uint8_t *)mask,
3452 		RTE_FLOW_ITEM_TYPE_GRE);
3453 	if (ret) {
3454 		DPAA2_PMD_WARN("Extract field(s) of GRE not support.");
3455 
3456 		return ret;
3457 	}
3458 
3459 	if (!mask->protocol)
3460 		return 0;
3461 
3462 	ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_GRE,
3463 			NH_FLD_GRE_TYPE, &spec->protocol,
3464 			&mask->protocol, sizeof(rte_be16_t),
3465 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
3466 	if (ret)
3467 		return ret;
3468 
3469 	ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_GRE,
3470 			NH_FLD_GRE_TYPE, &spec->protocol,
3471 			&mask->protocol, sizeof(rte_be16_t),
3472 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
3473 	if (ret)
3474 		return ret;
3475 
3476 	(*device_configured) |= local_cfg;
3477 
3478 	return 0;
3479 }
3480 
3481 static int
3482 dpaa2_configure_flow_vxlan(struct dpaa2_dev_flow *flow,
3483 	struct rte_eth_dev *dev,
3484 	const struct rte_flow_attr *attr,
3485 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
3486 	const struct rte_flow_action actions[] __rte_unused,
3487 	struct rte_flow_error *error __rte_unused,
3488 	int *device_configured)
3489 {
3490 	int ret, local_cfg = 0;
3491 	uint32_t group;
3492 	const struct rte_flow_item_vxlan *spec, *mask;
3493 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3494 	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
3495 
3496 	group = attr->group;
3497 
3498 	/* Parse pattern list to get the matching parameters */
3499 	spec = pattern->spec;
3500 	mask = pattern->mask ?
3501 		pattern->mask : &dpaa2_flow_item_vxlan_mask;
3502 
3503 	/* Get traffic class index and flow id to be configured */
3504 	flow->tc_id = group;
3505 	flow->tc_index = attr->priority;
3506 
3507 	if (dpaa2_pattern->in_tunnel) {
3508 		DPAA2_PMD_ERR("Tunnel-VXLAN distribution not support");
3509 		return -ENOTSUP;
3510 	}
3511 
3512 	if (!spec) {
3513 		ret = dpaa2_flow_identify_by_faf(priv, flow,
3514 				FAF_VXLAN_FRAM, DPAA2_FLOW_QOS_TYPE,
3515 				group, &local_cfg);
3516 		if (ret)
3517 			return ret;
3518 
3519 		ret = dpaa2_flow_identify_by_faf(priv, flow,
3520 				FAF_VXLAN_FRAM, DPAA2_FLOW_FS_TYPE,
3521 				group, &local_cfg);
3522 		if (ret)
3523 			return ret;
3524 
3525 		(*device_configured) |= local_cfg;
3526 		return 0;
3527 	}
3528 
3529 	ret = dpaa2_flow_extract_support((const uint8_t *)mask,
3530 		RTE_FLOW_ITEM_TYPE_VXLAN);
3531 	if (ret) {
3532 		DPAA2_PMD_WARN("Extract field(s) of VXLAN not support.");
3533 
3534 		return ret;
3535 	}
3536 
3537 	if (mask->flags) {
3538 		if (spec->flags != VXLAN_HF_VNI) {
3539 			DPAA2_PMD_ERR("vxlan flag(0x%02x) must be 0x%02x.",
3540 				spec->flags, VXLAN_HF_VNI);
3541 			return -EINVAL;
3542 		}
3543 		if (mask->flags != 0xff) {
3544 			DPAA2_PMD_ERR("Not support to extract vxlan flag.");
3545 			return -EINVAL;
3546 		}
3547 	}
3548 
3549 	if (mask->vni[0] || mask->vni[1] || mask->vni[2]) {
3550 		ret = dpaa2_flow_add_pr_extract_rule(flow,
3551 			DPAA2_VXLAN_VNI_OFFSET,
3552 			sizeof(mask->vni), spec->vni,
3553 			mask->vni,
3554 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
3555 		if (ret)
3556 			return ret;
3557 
3558 		ret = dpaa2_flow_add_pr_extract_rule(flow,
3559 			DPAA2_VXLAN_VNI_OFFSET,
3560 			sizeof(mask->vni), spec->vni,
3561 			mask->vni,
3562 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
3563 		if (ret)
3564 			return ret;
3565 	}
3566 
3567 	(*device_configured) |= local_cfg;
3568 
3569 	return 0;
3570 }
3571 
3572 static int
3573 dpaa2_configure_flow_ecpri(struct dpaa2_dev_flow *flow,
3574 	struct rte_eth_dev *dev,
3575 	const struct rte_flow_attr *attr,
3576 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
3577 	const struct rte_flow_action actions[] __rte_unused,
3578 	struct rte_flow_error *error __rte_unused,
3579 	int *device_configured)
3580 {
3581 	int ret, local_cfg = 0;
3582 	uint32_t group;
3583 	const struct rte_flow_item_ecpri *spec, *mask;
3584 	struct rte_flow_item_ecpri local_mask;
3585 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3586 	const struct rte_flow_item *pattern =
3587 		&dpaa2_pattern->generic_item;
3588 	uint8_t extract_nb = 0, i;
3589 	uint64_t rule_data[DPAA2_ECPRI_MAX_EXTRACT_NB];
3590 	uint64_t mask_data[DPAA2_ECPRI_MAX_EXTRACT_NB];
3591 	uint8_t extract_size[DPAA2_ECPRI_MAX_EXTRACT_NB];
3592 	uint8_t extract_off[DPAA2_ECPRI_MAX_EXTRACT_NB];
3593 
3594 	group = attr->group;
3595 
3596 	/* Parse pattern list to get the matching parameters */
3597 	spec = pattern->spec;
3598 	if (pattern->mask) {
3599 		memcpy(&local_mask, pattern->mask,
3600 			sizeof(struct rte_flow_item_ecpri));
3601 		local_mask.hdr.common.u32 =
3602 			rte_be_to_cpu_32(local_mask.hdr.common.u32);
3603 		mask = &local_mask;
3604 	} else {
3605 		mask = &dpaa2_flow_item_ecpri_mask;
3606 	}
3607 
3608 	/* Get traffic class index and flow id to be configured */
3609 	flow->tc_id = group;
3610 	flow->tc_index = attr->priority;
3611 
3612 	if (dpaa2_pattern->in_tunnel) {
3613 		DPAA2_PMD_ERR("Tunnel-ECPRI distribution not support");
3614 		return -ENOTSUP;
3615 	}
3616 
3617 	if (!spec) {
3618 		ret = dpaa2_flow_identify_by_faf(priv, flow,
3619 			FAFE_ECPRI_FRAM, DPAA2_FLOW_QOS_TYPE,
3620 			group, &local_cfg);
3621 		if (ret)
3622 			return ret;
3623 
3624 		ret = dpaa2_flow_identify_by_faf(priv, flow,
3625 			FAFE_ECPRI_FRAM, DPAA2_FLOW_FS_TYPE,
3626 			group, &local_cfg);
3627 		if (ret)
3628 			return ret;
3629 
3630 		(*device_configured) |= local_cfg;
3631 		return 0;
3632 	}
3633 
3634 	ret = dpaa2_flow_extract_support((const uint8_t *)mask,
3635 		RTE_FLOW_ITEM_TYPE_ECPRI);
3636 	if (ret) {
3637 		DPAA2_PMD_WARN("Extract field(s) of ECPRI not support.");
3638 
3639 		return ret;
3640 	}
3641 
3642 	if (mask->hdr.common.type != 0xff) {
3643 		DPAA2_PMD_WARN("ECPRI header type not specified.");
3644 
3645 		return -EINVAL;
3646 	}
3647 
3648 	if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_IQ_DATA) {
3649 		rule_data[extract_nb] = ECPRI_FAFE_TYPE_0;
3650 		mask_data[extract_nb] = 0xff;
3651 		extract_size[extract_nb] = sizeof(uint8_t);
3652 		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
3653 		extract_nb++;
3654 
3655 		if (mask->hdr.type0.pc_id) {
3656 			rule_data[extract_nb] = spec->hdr.type0.pc_id;
3657 			mask_data[extract_nb] = mask->hdr.type0.pc_id;
3658 			extract_size[extract_nb] = sizeof(rte_be16_t);
3659 			extract_off[extract_nb] =
3660 				DPAA2_ECPRI_MSG_OFFSET +
3661 				offsetof(struct rte_ecpri_msg_iq_data, pc_id);
3662 			extract_nb++;
3663 		}
3664 		if (mask->hdr.type0.seq_id) {
3665 			rule_data[extract_nb] = spec->hdr.type0.seq_id;
3666 			mask_data[extract_nb] = mask->hdr.type0.seq_id;
3667 			extract_size[extract_nb] = sizeof(rte_be16_t);
3668 			extract_off[extract_nb] =
3669 				DPAA2_ECPRI_MSG_OFFSET +
3670 				offsetof(struct rte_ecpri_msg_iq_data, seq_id);
3671 			extract_nb++;
3672 		}
3673 	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_BIT_SEQ) {
3674 		rule_data[extract_nb] = ECPRI_FAFE_TYPE_1;
3675 		mask_data[extract_nb] = 0xff;
3676 		extract_size[extract_nb] = sizeof(uint8_t);
3677 		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
3678 		extract_nb++;
3679 
3680 		if (mask->hdr.type1.pc_id) {
3681 			rule_data[extract_nb] = spec->hdr.type1.pc_id;
3682 			mask_data[extract_nb] = mask->hdr.type1.pc_id;
3683 			extract_size[extract_nb] = sizeof(rte_be16_t);
3684 			extract_off[extract_nb] =
3685 				DPAA2_ECPRI_MSG_OFFSET +
3686 				offsetof(struct rte_ecpri_msg_bit_seq, pc_id);
3687 			extract_nb++;
3688 		}
3689 		if (mask->hdr.type1.seq_id) {
3690 			rule_data[extract_nb] = spec->hdr.type1.seq_id;
3691 			mask_data[extract_nb] = mask->hdr.type1.seq_id;
3692 			extract_size[extract_nb] = sizeof(rte_be16_t);
3693 			extract_off[extract_nb] =
3694 				DPAA2_ECPRI_MSG_OFFSET +
3695 				offsetof(struct rte_ecpri_msg_bit_seq, seq_id);
3696 			extract_nb++;
3697 		}
3698 	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_RTC_CTRL) {
3699 		rule_data[extract_nb] = ECPRI_FAFE_TYPE_2;
3700 		mask_data[extract_nb] = 0xff;
3701 		extract_size[extract_nb] = sizeof(uint8_t);
3702 		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
3703 		extract_nb++;
3704 
3705 		if (mask->hdr.type2.rtc_id) {
3706 			rule_data[extract_nb] = spec->hdr.type2.rtc_id;
3707 			mask_data[extract_nb] = mask->hdr.type2.rtc_id;
3708 			extract_size[extract_nb] = sizeof(rte_be16_t);
3709 			extract_off[extract_nb] =
3710 				DPAA2_ECPRI_MSG_OFFSET +
3711 				offsetof(struct rte_ecpri_msg_rtc_ctrl, rtc_id);
3712 			extract_nb++;
3713 		}
3714 		if (mask->hdr.type2.seq_id) {
3715 			rule_data[extract_nb] = spec->hdr.type2.seq_id;
3716 			mask_data[extract_nb] = mask->hdr.type2.seq_id;
3717 			extract_size[extract_nb] = sizeof(rte_be16_t);
3718 			extract_off[extract_nb] =
3719 				DPAA2_ECPRI_MSG_OFFSET +
3720 				offsetof(struct rte_ecpri_msg_rtc_ctrl, seq_id);
3721 			extract_nb++;
3722 		}
3723 	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_GEN_DATA) {
3724 		rule_data[extract_nb] = ECPRI_FAFE_TYPE_3;
3725 		mask_data[extract_nb] = 0xff;
3726 		extract_size[extract_nb] = sizeof(uint8_t);
3727 		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
3728 		extract_nb++;
3729 
3730 		if (mask->hdr.type3.pc_id || mask->hdr.type3.seq_id)
3731 			DPAA2_PMD_WARN("Extract type3 msg not support.");
3732 	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_RM_ACC) {
3733 		rule_data[extract_nb] = ECPRI_FAFE_TYPE_4;
3734 		mask_data[extract_nb] = 0xff;
3735 		extract_size[extract_nb] = sizeof(uint8_t);
3736 		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
3737 		extract_nb++;
3738 
3739 		if (mask->hdr.type4.rma_id) {
3740 			rule_data[extract_nb] = spec->hdr.type4.rma_id;
3741 			mask_data[extract_nb] = mask->hdr.type4.rma_id;
3742 			extract_size[extract_nb] = sizeof(uint8_t);
3743 			extract_off[extract_nb] =
3744 				DPAA2_ECPRI_MSG_OFFSET + 0;
3745 				/** Compiler not support to take address
3746 				 * of bit-field
3747 				 * offsetof(struct rte_ecpri_msg_rm_access,
3748 				 * rma_id);
3749 				 */
3750 			extract_nb++;
3751 		}
3752 		if (mask->hdr.type4.ele_id) {
3753 			rule_data[extract_nb] = spec->hdr.type4.ele_id;
3754 			mask_data[extract_nb] = mask->hdr.type4.ele_id;
3755 			extract_size[extract_nb] = sizeof(rte_be16_t);
3756 			extract_off[extract_nb] =
3757 				DPAA2_ECPRI_MSG_OFFSET + 2;
3758 				/** Compiler not support to take address
3759 				 * of bit-field
3760 				 * offsetof(struct rte_ecpri_msg_rm_access,
3761 				 * ele_id);
3762 				 */
3763 			extract_nb++;
3764 		}
3765 	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_DLY_MSR) {
3766 		rule_data[extract_nb] = ECPRI_FAFE_TYPE_5;
3767 		mask_data[extract_nb] = 0xff;
3768 		extract_size[extract_nb] = sizeof(uint8_t);
3769 		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
3770 		extract_nb++;
3771 
3772 		if (mask->hdr.type5.msr_id) {
3773 			rule_data[extract_nb] = spec->hdr.type5.msr_id;
3774 			mask_data[extract_nb] = mask->hdr.type5.msr_id;
3775 			extract_size[extract_nb] = sizeof(uint8_t);
3776 			extract_off[extract_nb] =
3777 				DPAA2_ECPRI_MSG_OFFSET +
3778 				offsetof(struct rte_ecpri_msg_delay_measure,
3779 					msr_id);
3780 			extract_nb++;
3781 		}
3782 		if (mask->hdr.type5.act_type) {
3783 			rule_data[extract_nb] = spec->hdr.type5.act_type;
3784 			mask_data[extract_nb] = mask->hdr.type5.act_type;
3785 			extract_size[extract_nb] = sizeof(uint8_t);
3786 			extract_off[extract_nb] =
3787 				DPAA2_ECPRI_MSG_OFFSET +
3788 				offsetof(struct rte_ecpri_msg_delay_measure,
3789 					act_type);
3790 			extract_nb++;
3791 		}
3792 	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_RMT_RST) {
3793 		rule_data[extract_nb] = ECPRI_FAFE_TYPE_6;
3794 		mask_data[extract_nb] = 0xff;
3795 		extract_size[extract_nb] = sizeof(uint8_t);
3796 		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
3797 		extract_nb++;
3798 
3799 		if (mask->hdr.type6.rst_id) {
3800 			rule_data[extract_nb] = spec->hdr.type6.rst_id;
3801 			mask_data[extract_nb] = mask->hdr.type6.rst_id;
3802 			extract_size[extract_nb] = sizeof(rte_be16_t);
3803 			extract_off[extract_nb] =
3804 				DPAA2_ECPRI_MSG_OFFSET +
3805 				offsetof(struct rte_ecpri_msg_remote_reset,
3806 					rst_id);
3807 			extract_nb++;
3808 		}
3809 		if (mask->hdr.type6.rst_op) {
3810 			rule_data[extract_nb] = spec->hdr.type6.rst_op;
3811 			mask_data[extract_nb] = mask->hdr.type6.rst_op;
3812 			extract_size[extract_nb] = sizeof(uint8_t);
3813 			extract_off[extract_nb] =
3814 				DPAA2_ECPRI_MSG_OFFSET +
3815 				offsetof(struct rte_ecpri_msg_remote_reset,
3816 					rst_op);
3817 			extract_nb++;
3818 		}
3819 	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_EVT_IND) {
3820 		rule_data[extract_nb] = ECPRI_FAFE_TYPE_7;
3821 		mask_data[extract_nb] = 0xff;
3822 		extract_size[extract_nb] = sizeof(uint8_t);
3823 		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
3824 		extract_nb++;
3825 
3826 		if (mask->hdr.type7.evt_id) {
3827 			rule_data[extract_nb] = spec->hdr.type7.evt_id;
3828 			mask_data[extract_nb] = mask->hdr.type7.evt_id;
3829 			extract_size[extract_nb] = sizeof(uint8_t);
3830 			extract_off[extract_nb] =
3831 				DPAA2_ECPRI_MSG_OFFSET +
3832 				offsetof(struct rte_ecpri_msg_event_ind,
3833 					evt_id);
3834 			extract_nb++;
3835 		}
3836 		if (mask->hdr.type7.evt_type) {
3837 			rule_data[extract_nb] = spec->hdr.type7.evt_type;
3838 			mask_data[extract_nb] = mask->hdr.type7.evt_type;
3839 			extract_size[extract_nb] = sizeof(uint8_t);
3840 			extract_off[extract_nb] =
3841 				DPAA2_ECPRI_MSG_OFFSET +
3842 				offsetof(struct rte_ecpri_msg_event_ind,
3843 					evt_type);
3844 			extract_nb++;
3845 		}
3846 		if (mask->hdr.type7.seq) {
3847 			rule_data[extract_nb] = spec->hdr.type7.seq;
3848 			mask_data[extract_nb] = mask->hdr.type7.seq;
3849 			extract_size[extract_nb] = sizeof(uint8_t);
3850 			extract_off[extract_nb] =
3851 				DPAA2_ECPRI_MSG_OFFSET +
3852 				offsetof(struct rte_ecpri_msg_event_ind,
3853 					seq);
3854 			extract_nb++;
3855 		}
3856 		if (mask->hdr.type7.number) {
3857 			rule_data[extract_nb] = spec->hdr.type7.number;
3858 			mask_data[extract_nb] = mask->hdr.type7.number;
3859 			extract_size[extract_nb] = sizeof(uint8_t);
3860 			extract_off[extract_nb] =
3861 				DPAA2_ECPRI_MSG_OFFSET +
3862 				offsetof(struct rte_ecpri_msg_event_ind,
3863 					number);
3864 			extract_nb++;
3865 		}
3866 	} else {
3867 		DPAA2_PMD_ERR("Invalid ecpri header type(%d)",
3868 				spec->hdr.common.type);
3869 		return -EINVAL;
3870 	}
3871 
3872 	for (i = 0; i < extract_nb; i++) {
3873 		ret = dpaa2_flow_add_pr_extract_rule(flow,
3874 			extract_off[i],
3875 			extract_size[i], &rule_data[i], &mask_data[i],
3876 			priv, group,
3877 			device_configured,
3878 			DPAA2_FLOW_QOS_TYPE);
3879 		if (ret)
3880 			return ret;
3881 
3882 		ret = dpaa2_flow_add_pr_extract_rule(flow,
3883 			extract_off[i],
3884 			extract_size[i], &rule_data[i], &mask_data[i],
3885 			priv, group,
3886 			device_configured,
3887 			DPAA2_FLOW_FS_TYPE);
3888 		if (ret)
3889 			return ret;
3890 	}
3891 
3892 	(*device_configured) |= local_cfg;
3893 
3894 	return 0;
3895 }
3896 
3897 static int
3898 dpaa2_configure_flow_gtp(struct dpaa2_dev_flow *flow,
3899 	struct rte_eth_dev *dev,
3900 	const struct rte_flow_attr *attr,
3901 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
3902 	const struct rte_flow_action actions[] __rte_unused,
3903 	struct rte_flow_error *error __rte_unused,
3904 	int *device_configured)
3905 {
3906 	int ret, local_cfg = 0;
3907 	uint32_t group;
3908 	const struct rte_flow_item_gtp *spec, *mask;
3909 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3910 	const struct rte_flow_item *pattern =
3911 		&dpaa2_pattern->generic_item;
3912 
3913 	group = attr->group;
3914 
3915 	/* Parse pattern list to get the matching parameters */
3916 	spec = pattern->spec;
3917 	mask = pattern->mask ?
3918 		pattern->mask : &dpaa2_flow_item_gtp_mask;
3919 
3920 	/* Get traffic class index and flow id to be configured */
3921 	flow->tc_id = group;
3922 	flow->tc_index = attr->priority;
3923 
3924 	if (dpaa2_pattern->in_tunnel) {
3925 		DPAA2_PMD_ERR("Tunnel-GTP distribution not support");
3926 		return -ENOTSUP;
3927 	}
3928 
3929 	if (!spec) {
3930 		ret = dpaa2_flow_identify_by_faf(priv, flow,
3931 				FAF_GTP_FRAM, DPAA2_FLOW_QOS_TYPE,
3932 				group, &local_cfg);
3933 		if (ret)
3934 			return ret;
3935 
3936 		ret = dpaa2_flow_identify_by_faf(priv, flow,
3937 				FAF_GTP_FRAM, DPAA2_FLOW_FS_TYPE,
3938 				group, &local_cfg);
3939 		if (ret)
3940 			return ret;
3941 
3942 		(*device_configured) |= local_cfg;
3943 		return 0;
3944 	}
3945 
3946 	ret = dpaa2_flow_extract_support((const uint8_t *)mask,
3947 		RTE_FLOW_ITEM_TYPE_GTP);
3948 	if (ret) {
3949 		DPAA2_PMD_WARN("Extract field(s) of GTP not support.");
3950 
3951 		return ret;
3952 	}
3953 
3954 	if (!mask->teid)
3955 		return 0;
3956 
3957 	ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_GTP,
3958 			NH_FLD_GTP_TEID, &spec->teid,
3959 			&mask->teid, sizeof(rte_be32_t),
3960 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
3961 	if (ret)
3962 		return ret;
3963 
3964 	ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_GTP,
3965 			NH_FLD_GTP_TEID, &spec->teid,
3966 			&mask->teid, sizeof(rte_be32_t),
3967 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
3968 	if (ret)
3969 		return ret;
3970 
3971 	(*device_configured) |= local_cfg;
3972 
3973 	return 0;
3974 }
3975 
3976 static int
3977 dpaa2_configure_flow_raw(struct dpaa2_dev_flow *flow,
3978 	struct rte_eth_dev *dev,
3979 	const struct rte_flow_attr *attr,
3980 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
3981 	const struct rte_flow_action actions[] __rte_unused,
3982 	struct rte_flow_error *error __rte_unused,
3983 	int *device_configured)
3984 {
3985 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3986 	int local_cfg = 0, ret;
3987 	uint32_t group;
3988 	struct dpaa2_key_extract *qos_key_extract;
3989 	struct dpaa2_key_extract *tc_key_extract;
3990 	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
3991 	const struct rte_flow_item_raw *spec = pattern->spec;
3992 	const struct rte_flow_item_raw *mask = pattern->mask;
3993 
3994 	/* Need both spec and mask */
3995 	if (!spec || !mask) {
3996 		DPAA2_PMD_ERR("spec or mask not present.");
3997 		return -EINVAL;
3998 	}
3999 
4000 	if (spec->relative) {
4001 		/* TBD: relative offset support.
4002 		 * To support relative offset of previous L3 protocol item,
4003 		 * extracts should be expanded to identify if the frame is:
4004 		 * vlan or none-vlan.
4005 		 *
4006 		 * To support relative offset of previous L4 protocol item,
4007 		 * extracts should be expanded to identify if the frame is:
4008 		 * vlan/IPv4 or vlan/IPv6 or none-vlan/IPv4 or none-vlan/IPv6.
4009 		 */
4010 		DPAA2_PMD_ERR("relative not supported.");
4011 		return -EINVAL;
4012 	}
4013 
4014 	if (spec->search) {
4015 		DPAA2_PMD_ERR("search not supported.");
4016 		return -EINVAL;
4017 	}
4018 
4019 	/* Spec len and mask len should be same */
4020 	if (spec->length != mask->length) {
4021 		DPAA2_PMD_ERR("Spec len and mask len mismatch.");
4022 		return -EINVAL;
4023 	}
4024 
4025 	/* Get traffic class index and flow id to be configured */
4026 	group = attr->group;
4027 	flow->tc_id = group;
4028 	flow->tc_index = attr->priority;
4029 
4030 	qos_key_extract = &priv->extract.qos_key_extract;
4031 	tc_key_extract = &priv->extract.tc_key_extract[group];
4032 
4033 	ret = dpaa2_flow_extract_add_raw(priv,
4034 			spec->offset, spec->length,
4035 			DPAA2_FLOW_QOS_TYPE, 0, &local_cfg);
4036 	if (ret) {
4037 		DPAA2_PMD_ERR("QoS Extract RAW add failed.");
4038 		return -EINVAL;
4039 	}
4040 
4041 	ret = dpaa2_flow_extract_add_raw(priv,
4042 			spec->offset, spec->length,
4043 			DPAA2_FLOW_FS_TYPE, group, &local_cfg);
4044 	if (ret) {
4045 		DPAA2_PMD_ERR("FS[%d] Extract RAW add failed.",
4046 			group);
4047 		return -EINVAL;
4048 	}
4049 
4050 	ret = dpaa2_flow_raw_rule_data_set(flow,
4051 			&qos_key_extract->key_profile,
4052 			spec->offset, spec->length,
4053 			spec->pattern, mask->pattern,
4054 			DPAA2_FLOW_QOS_TYPE);
4055 	if (ret) {
4056 		DPAA2_PMD_ERR("QoS RAW rule data set failed");
4057 		return -EINVAL;
4058 	}
4059 
4060 	ret = dpaa2_flow_raw_rule_data_set(flow,
4061 			&tc_key_extract->key_profile,
4062 			spec->offset, spec->length,
4063 			spec->pattern, mask->pattern,
4064 			DPAA2_FLOW_FS_TYPE);
4065 	if (ret) {
4066 		DPAA2_PMD_ERR("FS RAW rule data set failed");
4067 		return -EINVAL;
4068 	}
4069 
4070 	(*device_configured) |= local_cfg;
4071 
4072 	return 0;
4073 }
4074 
4075 static inline int
4076 dpaa2_flow_verify_attr(struct dpaa2_dev_priv *priv,
4077 	const struct rte_flow_attr *attr)
4078 {
4079 	struct dpaa2_dev_flow *curr = LIST_FIRST(&priv->flows);
4080 
4081 	while (curr) {
4082 		if (curr->tc_id == attr->group &&
4083 			curr->tc_index == attr->priority) {
4084 			DPAA2_PMD_ERR("Flow(TC[%d].entry[%d] exists",
4085 				attr->group, attr->priority);
4086 
4087 			return -EINVAL;
4088 		}
4089 		curr = LIST_NEXT(curr, next);
4090 	}
4091 
4092 	return 0;
4093 }
4094 
4095 static inline struct rte_eth_dev *
4096 dpaa2_flow_redirect_dev(struct dpaa2_dev_priv *priv,
4097 	const struct rte_flow_action *action)
4098 {
4099 	const struct rte_flow_action_port_id *port_id;
4100 	const struct rte_flow_action_ethdev *ethdev;
4101 	int idx = -1;
4102 	struct rte_eth_dev *dest_dev;
4103 
4104 	if (action->type == RTE_FLOW_ACTION_TYPE_PORT_ID) {
4105 		port_id = action->conf;
4106 		if (!port_id->original)
4107 			idx = port_id->id;
4108 	} else if (action->type == RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT) {
4109 		ethdev = action->conf;
4110 		idx = ethdev->port_id;
4111 	} else {
4112 		return NULL;
4113 	}
4114 
4115 	if (idx >= 0) {
4116 		if (!rte_eth_dev_is_valid_port(idx))
4117 			return NULL;
4118 		if (!rte_pmd_dpaa2_dev_is_dpaa2(idx))
4119 			return NULL;
4120 		dest_dev = &rte_eth_devices[idx];
4121 	} else {
4122 		dest_dev = priv->eth_dev;
4123 	}
4124 
4125 	return dest_dev;
4126 }
4127 
4128 static inline int
4129 dpaa2_flow_verify_action(struct dpaa2_dev_priv *priv,
4130 	const struct rte_flow_attr *attr,
4131 	const struct rte_flow_action actions[])
4132 {
4133 	int end_of_list = 0, i, j = 0;
4134 	const struct rte_flow_action_queue *dest_queue;
4135 	const struct rte_flow_action_rss *rss_conf;
4136 	struct dpaa2_queue *rxq;
4137 
4138 	while (!end_of_list) {
4139 		switch (actions[j].type) {
4140 		case RTE_FLOW_ACTION_TYPE_QUEUE:
4141 			dest_queue = actions[j].conf;
4142 			rxq = priv->rx_vq[dest_queue->index];
4143 			if (attr->group != rxq->tc_index) {
4144 				DPAA2_PMD_ERR("FSQ(%d.%d) not in TC[%d]",
4145 					rxq->tc_index, rxq->flow_id,
4146 					attr->group);
4147 
4148 				return -ENOTSUP;
4149 			}
4150 			break;
4151 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
4152 		case RTE_FLOW_ACTION_TYPE_PORT_ID:
4153 			if (!dpaa2_flow_redirect_dev(priv, &actions[j])) {
4154 				DPAA2_PMD_ERR("Invalid port id of action");
4155 				return -ENOTSUP;
4156 			}
4157 			break;
4158 		case RTE_FLOW_ACTION_TYPE_RSS:
4159 			rss_conf = (const struct rte_flow_action_rss *)
4160 					(actions[j].conf);
4161 			if (rss_conf->queue_num > priv->dist_queues) {
4162 				DPAA2_PMD_ERR("RSS number too large");
4163 				return -ENOTSUP;
4164 			}
4165 			for (i = 0; i < (int)rss_conf->queue_num; i++) {
4166 				if (rss_conf->queue[i] >= priv->nb_rx_queues) {
4167 					DPAA2_PMD_ERR("RSS queue not in range");
4168 					return -ENOTSUP;
4169 				}
4170 				rxq = priv->rx_vq[rss_conf->queue[i]];
4171 				if (rxq->tc_index != attr->group) {
4172 					DPAA2_PMD_ERR("RSS queue not in group");
4173 					return -ENOTSUP;
4174 				}
4175 			}
4176 
4177 			break;
4178 		case RTE_FLOW_ACTION_TYPE_PF:
4179 			/* Skip this action, have to add for vxlan */
4180 			break;
4181 		case RTE_FLOW_ACTION_TYPE_END:
4182 			end_of_list = 1;
4183 			break;
4184 		default:
4185 			DPAA2_PMD_ERR("Invalid action type");
4186 			return -ENOTSUP;
4187 		}
4188 		j++;
4189 	}
4190 
4191 	return 0;
4192 }
4193 
4194 static int
4195 dpaa2_configure_flow_fs_action(struct dpaa2_dev_priv *priv,
4196 	struct dpaa2_dev_flow *flow,
4197 	const struct rte_flow_action *rte_action)
4198 {
4199 	struct rte_eth_dev *dest_dev;
4200 	struct dpaa2_dev_priv *dest_priv;
4201 	const struct rte_flow_action_queue *dest_queue;
4202 	struct dpaa2_queue *dest_q;
4203 
4204 	memset(&flow->fs_action_cfg, 0,
4205 		sizeof(struct dpni_fs_action_cfg));
4206 	flow->action_type = rte_action->type;
4207 
4208 	if (flow->action_type == RTE_FLOW_ACTION_TYPE_QUEUE) {
4209 		dest_queue = rte_action->conf;
4210 		dest_q = priv->rx_vq[dest_queue->index];
4211 		flow->fs_action_cfg.flow_id = dest_q->flow_id;
4212 	} else if (flow->action_type == RTE_FLOW_ACTION_TYPE_PORT_ID ||
4213 		   flow->action_type == RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT) {
4214 		dest_dev = dpaa2_flow_redirect_dev(priv, rte_action);
4215 		if (!dest_dev) {
4216 			DPAA2_PMD_ERR("Invalid device to redirect");
4217 			return -EINVAL;
4218 		}
4219 
4220 		dest_priv = dest_dev->data->dev_private;
4221 		dest_q = dest_priv->tx_vq[0];
4222 		flow->fs_action_cfg.options =
4223 			DPNI_FS_OPT_REDIRECT_TO_DPNI_TX;
4224 		flow->fs_action_cfg.redirect_obj_token =
4225 			dest_priv->token;
4226 		flow->fs_action_cfg.flow_id = dest_q->flow_id;
4227 	}
4228 
4229 	return 0;
4230 }
4231 
4232 static inline uint16_t
4233 dpaa2_flow_entry_size(uint16_t key_max_size)
4234 {
4235 	if (key_max_size > DPAA2_FLOW_ENTRY_MAX_SIZE) {
4236 		DPAA2_PMD_ERR("Key size(%d) > max(%d)",
4237 			key_max_size,
4238 			DPAA2_FLOW_ENTRY_MAX_SIZE);
4239 
4240 		return 0;
4241 	}
4242 
4243 	if (key_max_size > DPAA2_FLOW_ENTRY_MIN_SIZE)
4244 		return DPAA2_FLOW_ENTRY_MAX_SIZE;
4245 
4246 	/* Current MC only support fixed entry size(56)*/
4247 	return DPAA2_FLOW_ENTRY_MAX_SIZE;
4248 }
4249 
4250 static inline int
4251 dpaa2_flow_clear_fs_table(struct dpaa2_dev_priv *priv,
4252 	uint8_t tc_id)
4253 {
4254 	struct dpaa2_dev_flow *curr = LIST_FIRST(&priv->flows);
4255 	int need_clear = 0, ret;
4256 	struct fsl_mc_io *dpni = priv->hw;
4257 
4258 	while (curr) {
4259 		if (curr->tc_id == tc_id) {
4260 			need_clear = 1;
4261 			break;
4262 		}
4263 		curr = LIST_NEXT(curr, next);
4264 	}
4265 
4266 	if (need_clear) {
4267 		ret = dpni_clear_fs_entries(dpni, CMD_PRI_LOW,
4268 				priv->token, tc_id);
4269 		if (ret) {
4270 			DPAA2_PMD_ERR("TC[%d] clear failed", tc_id);
4271 			return ret;
4272 		}
4273 	}
4274 
4275 	return 0;
4276 }
4277 
4278 static int
4279 dpaa2_configure_fs_rss_table(struct dpaa2_dev_priv *priv,
4280 	uint8_t tc_id, uint16_t dist_size, int rss_dist)
4281 {
4282 	struct dpaa2_key_extract *tc_extract;
4283 	uint8_t *key_cfg_buf;
4284 	uint64_t key_cfg_iova;
4285 	int ret;
4286 	struct dpni_rx_dist_cfg tc_cfg;
4287 	struct fsl_mc_io *dpni = priv->hw;
4288 	uint16_t entry_size;
4289 	uint16_t key_max_size;
4290 
4291 	ret = dpaa2_flow_clear_fs_table(priv, tc_id);
4292 	if (ret < 0) {
4293 		DPAA2_PMD_ERR("TC[%d] clear failed", tc_id);
4294 		return ret;
4295 	}
4296 
4297 	tc_extract = &priv->extract.tc_key_extract[tc_id];
4298 	key_cfg_buf = priv->extract.tc_extract_param[tc_id];
4299 	key_cfg_iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(key_cfg_buf,
4300 		DPAA2_EXTRACT_PARAM_MAX_SIZE);
4301 	if (key_cfg_iova == RTE_BAD_IOVA) {
4302 		DPAA2_PMD_ERR("%s: No IOMMU map for key cfg(%p)",
4303 			__func__, key_cfg_buf);
4304 
4305 		return -ENOBUFS;
4306 	}
4307 
4308 	key_max_size = tc_extract->key_profile.key_max_size;
4309 	entry_size = dpaa2_flow_entry_size(key_max_size);
4310 
4311 	dpaa2_flow_fs_extracts_log(priv, tc_id);
4312 	ret = dpkg_prepare_key_cfg(&tc_extract->dpkg,
4313 			key_cfg_buf);
4314 	if (ret < 0) {
4315 		DPAA2_PMD_ERR("TC[%d] prepare key failed", tc_id);
4316 		return ret;
4317 	}
4318 
4319 	memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));
4320 	tc_cfg.dist_size = dist_size;
4321 	tc_cfg.key_cfg_iova = key_cfg_iova;
4322 	if (rss_dist)
4323 		tc_cfg.enable = true;
4324 	else
4325 		tc_cfg.enable = false;
4326 	tc_cfg.tc = tc_id;
4327 	ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW,
4328 			priv->token, &tc_cfg);
4329 	if (ret < 0) {
4330 		if (rss_dist) {
4331 			DPAA2_PMD_ERR("RSS TC[%d] set failed",
4332 				tc_id);
4333 		} else {
4334 			DPAA2_PMD_ERR("FS TC[%d] hash disable failed",
4335 				tc_id);
4336 		}
4337 
4338 		return ret;
4339 	}
4340 
4341 	if (rss_dist)
4342 		return 0;
4343 
4344 	tc_cfg.enable = true;
4345 	tc_cfg.fs_miss_flow_id = dpaa2_flow_miss_flow_id;
4346 	ret = dpni_set_rx_fs_dist(dpni, CMD_PRI_LOW,
4347 			priv->token, &tc_cfg);
4348 	if (ret < 0) {
4349 		DPAA2_PMD_ERR("TC[%d] FS configured failed", tc_id);
4350 		return ret;
4351 	}
4352 
4353 	ret = dpaa2_flow_rule_add_all(priv, DPAA2_FLOW_FS_TYPE,
4354 			entry_size, tc_id);
4355 	if (ret)
4356 		return ret;
4357 
4358 	return 0;
4359 }
4360 
4361 static int
4362 dpaa2_configure_qos_table(struct dpaa2_dev_priv *priv,
4363 	int rss_dist)
4364 {
4365 	struct dpaa2_key_extract *qos_extract;
4366 	uint8_t *key_cfg_buf;
4367 	uint64_t key_cfg_iova;
4368 	int ret;
4369 	struct dpni_qos_tbl_cfg qos_cfg;
4370 	struct fsl_mc_io *dpni = priv->hw;
4371 	uint16_t entry_size;
4372 	uint16_t key_max_size;
4373 
4374 	if (!rss_dist && priv->num_rx_tc <= 1) {
4375 		/* QoS table is effecitive for FS multiple TCs or RSS.*/
4376 		return 0;
4377 	}
4378 
4379 	if (LIST_FIRST(&priv->flows)) {
4380 		ret = dpni_clear_qos_table(dpni, CMD_PRI_LOW,
4381 				priv->token);
4382 		if (ret < 0) {
4383 			DPAA2_PMD_ERR("QoS table clear failed");
4384 			return ret;
4385 		}
4386 	}
4387 
4388 	qos_extract = &priv->extract.qos_key_extract;
4389 	key_cfg_buf = priv->extract.qos_extract_param;
4390 	key_cfg_iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(key_cfg_buf,
4391 		DPAA2_EXTRACT_PARAM_MAX_SIZE);
4392 	if (key_cfg_iova == RTE_BAD_IOVA) {
4393 		DPAA2_PMD_ERR("%s: No IOMMU map for key cfg(%p)",
4394 			__func__, key_cfg_buf);
4395 
4396 		return -ENOBUFS;
4397 	}
4398 
4399 	key_max_size = qos_extract->key_profile.key_max_size;
4400 	entry_size = dpaa2_flow_entry_size(key_max_size);
4401 
4402 	dpaa2_flow_qos_extracts_log(priv);
4403 
4404 	ret = dpkg_prepare_key_cfg(&qos_extract->dpkg,
4405 			key_cfg_buf);
4406 	if (ret < 0) {
4407 		DPAA2_PMD_ERR("QoS prepare extract failed");
4408 		return ret;
4409 	}
4410 	memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
4411 	qos_cfg.keep_entries = true;
4412 	qos_cfg.key_cfg_iova = key_cfg_iova;
4413 	if (rss_dist) {
4414 		qos_cfg.discard_on_miss = true;
4415 	} else {
4416 		qos_cfg.discard_on_miss = false;
4417 		qos_cfg.default_tc = 0;
4418 	}
4419 
4420 	ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
4421 			priv->token, &qos_cfg);
4422 	if (ret < 0) {
4423 		DPAA2_PMD_ERR("QoS table set failed");
4424 		return ret;
4425 	}
4426 
4427 	ret = dpaa2_flow_rule_add_all(priv, DPAA2_FLOW_QOS_TYPE,
4428 			entry_size, 0);
4429 	if (ret)
4430 		return ret;
4431 
4432 	return 0;
4433 }
4434 
4435 static int
4436 dpaa2_flow_item_convert(const struct rte_flow_item pattern[],
4437 			struct rte_dpaa2_flow_item **dpaa2_pattern)
4438 {
4439 	struct rte_dpaa2_flow_item *new_pattern;
4440 	int num = 0, tunnel_start = 0;
4441 
4442 	while (1) {
4443 		num++;
4444 		if (pattern[num].type == RTE_FLOW_ITEM_TYPE_END)
4445 			break;
4446 	}
4447 
4448 	new_pattern = rte_malloc(NULL, sizeof(struct rte_dpaa2_flow_item) * num,
4449 				 RTE_CACHE_LINE_SIZE);
4450 	if (!new_pattern) {
4451 		DPAA2_PMD_ERR("Failed to alloc %d flow items", num);
4452 		return -ENOMEM;
4453 	}
4454 
4455 	num = 0;
4456 	while (pattern[num].type != RTE_FLOW_ITEM_TYPE_END) {
4457 		memcpy(&new_pattern[num].generic_item, &pattern[num],
4458 		       sizeof(struct rte_flow_item));
4459 		new_pattern[num].in_tunnel = 0;
4460 
4461 		if (pattern[num].type == RTE_FLOW_ITEM_TYPE_VXLAN)
4462 			tunnel_start = 1;
4463 		else if (tunnel_start)
4464 			new_pattern[num].in_tunnel = 1;
4465 		num++;
4466 	}
4467 
4468 	new_pattern[num].generic_item.type = RTE_FLOW_ITEM_TYPE_END;
4469 	*dpaa2_pattern = new_pattern;
4470 
4471 	return 0;
4472 }
4473 
4474 static int
4475 dpaa2_generic_flow_set(struct dpaa2_dev_flow *flow,
4476 	struct rte_eth_dev *dev,
4477 	const struct rte_flow_attr *attr,
4478 	const struct rte_flow_item pattern[],
4479 	const struct rte_flow_action actions[],
4480 	struct rte_flow_error *error)
4481 {
4482 	const struct rte_flow_action_rss *rss_conf;
4483 	int is_keycfg_configured = 0, end_of_list = 0;
4484 	int ret = 0, i = 0, j = 0;
4485 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4486 	struct dpaa2_dev_flow *curr = LIST_FIRST(&priv->flows);
4487 	uint16_t dist_size, key_size;
4488 	struct dpaa2_key_extract *qos_key_extract;
4489 	struct dpaa2_key_extract *tc_key_extract;
4490 	struct rte_dpaa2_flow_item *dpaa2_pattern = NULL;
4491 
4492 	ret = dpaa2_flow_verify_attr(priv, attr);
4493 	if (ret)
4494 		return ret;
4495 
4496 	ret = dpaa2_flow_verify_action(priv, attr, actions);
4497 	if (ret)
4498 		return ret;
4499 
4500 	ret = dpaa2_flow_item_convert(pattern, &dpaa2_pattern);
4501 	if (ret)
4502 		return ret;
4503 
4504 	/* Parse pattern list to get the matching parameters */
4505 	while (!end_of_list) {
4506 		switch (pattern[i].type) {
4507 		case RTE_FLOW_ITEM_TYPE_ETH:
4508 			ret = dpaa2_configure_flow_eth(flow, dev, attr,
4509 					&dpaa2_pattern[i],
4510 					actions, error,
4511 					&is_keycfg_configured);
4512 			if (ret) {
4513 				DPAA2_PMD_ERR("ETH flow config failed!");
4514 				goto end_flow_set;
4515 			}
4516 			break;
4517 		case RTE_FLOW_ITEM_TYPE_VLAN:
4518 			ret = dpaa2_configure_flow_vlan(flow, dev, attr,
4519 					&dpaa2_pattern[i],
4520 					actions, error,
4521 					&is_keycfg_configured);
4522 			if (ret) {
4523 				DPAA2_PMD_ERR("vLan flow config failed!");
4524 				goto end_flow_set;
4525 			}
4526 			break;
4527 		case RTE_FLOW_ITEM_TYPE_IPV4:
4528 			ret = dpaa2_configure_flow_ipv4(flow, dev, attr,
4529 					&dpaa2_pattern[i],
4530 					actions, error,
4531 					&is_keycfg_configured);
4532 			if (ret) {
4533 				DPAA2_PMD_ERR("IPV4 flow config failed!");
4534 				goto end_flow_set;
4535 			}
4536 			break;
4537 		case RTE_FLOW_ITEM_TYPE_IPV6:
4538 			ret = dpaa2_configure_flow_ipv6(flow, dev, attr,
4539 					&dpaa2_pattern[i],
4540 					actions, error,
4541 					&is_keycfg_configured);
4542 			if (ret) {
4543 				DPAA2_PMD_ERR("IPV6 flow config failed!");
4544 				goto end_flow_set;
4545 			}
4546 			break;
4547 		case RTE_FLOW_ITEM_TYPE_ICMP:
4548 			ret = dpaa2_configure_flow_icmp(flow, dev, attr,
4549 					&dpaa2_pattern[i],
4550 					actions, error,
4551 					&is_keycfg_configured);
4552 			if (ret) {
4553 				DPAA2_PMD_ERR("ICMP flow config failed!");
4554 				goto end_flow_set;
4555 			}
4556 			break;
4557 		case RTE_FLOW_ITEM_TYPE_UDP:
4558 			ret = dpaa2_configure_flow_udp(flow, dev, attr,
4559 					&dpaa2_pattern[i],
4560 					actions, error,
4561 					&is_keycfg_configured);
4562 			if (ret) {
4563 				DPAA2_PMD_ERR("UDP flow config failed!");
4564 				goto end_flow_set;
4565 			}
4566 			break;
4567 		case RTE_FLOW_ITEM_TYPE_TCP:
4568 			ret = dpaa2_configure_flow_tcp(flow, dev, attr,
4569 					&dpaa2_pattern[i],
4570 					actions, error,
4571 					&is_keycfg_configured);
4572 			if (ret) {
4573 				DPAA2_PMD_ERR("TCP flow config failed!");
4574 				goto end_flow_set;
4575 			}
4576 			break;
4577 		case RTE_FLOW_ITEM_TYPE_SCTP:
4578 			ret = dpaa2_configure_flow_sctp(flow, dev, attr,
4579 					&dpaa2_pattern[i],
4580 					actions, error,
4581 					&is_keycfg_configured);
4582 			if (ret) {
4583 				DPAA2_PMD_ERR("SCTP flow config failed!");
4584 				goto end_flow_set;
4585 			}
4586 			break;
4587 		case RTE_FLOW_ITEM_TYPE_ESP:
4588 			ret = dpaa2_configure_flow_esp(flow,
4589 					dev, attr, &dpaa2_pattern[i],
4590 					actions, error,
4591 					&is_keycfg_configured);
4592 			if (ret) {
4593 				DPAA2_PMD_ERR("ESP flow config failed!");
4594 				goto end_flow_set;
4595 			}
4596 			break;
4597 		case RTE_FLOW_ITEM_TYPE_AH:
4598 			ret = dpaa2_configure_flow_ah(flow,
4599 					dev, attr, &dpaa2_pattern[i],
4600 					actions, error,
4601 					&is_keycfg_configured);
4602 			if (ret) {
4603 				DPAA2_PMD_ERR("AH flow config failed!");
4604 				goto end_flow_set;
4605 			}
4606 			break;
4607 		case RTE_FLOW_ITEM_TYPE_GRE:
4608 			ret = dpaa2_configure_flow_gre(flow, dev, attr,
4609 					&dpaa2_pattern[i],
4610 					actions, error,
4611 					&is_keycfg_configured);
4612 			if (ret) {
4613 				DPAA2_PMD_ERR("GRE flow config failed!");
4614 				goto end_flow_set;
4615 			}
4616 			break;
4617 		case RTE_FLOW_ITEM_TYPE_VXLAN:
4618 			ret = dpaa2_configure_flow_vxlan(flow, dev, attr,
4619 					&dpaa2_pattern[i],
4620 					actions, error,
4621 					&is_keycfg_configured);
4622 			if (ret) {
4623 				DPAA2_PMD_ERR("VXLAN flow config failed!");
4624 				goto end_flow_set;
4625 			}
4626 			break;
4627 		case RTE_FLOW_ITEM_TYPE_ECPRI:
4628 			ret = dpaa2_configure_flow_ecpri(flow,
4629 					dev, attr, &dpaa2_pattern[i],
4630 					actions, error,
4631 					&is_keycfg_configured);
4632 			if (ret) {
4633 				DPAA2_PMD_ERR("ECPRI flow config failed!");
4634 				goto end_flow_set;
4635 			}
4636 			break;
4637 		case RTE_FLOW_ITEM_TYPE_GTP:
4638 			ret = dpaa2_configure_flow_gtp(flow,
4639 					dev, attr, &dpaa2_pattern[i],
4640 					actions, error,
4641 					&is_keycfg_configured);
4642 			if (ret) {
4643 				DPAA2_PMD_ERR("GTP flow config failed!");
4644 				goto end_flow_set;
4645 			}
4646 			break;
4647 		case RTE_FLOW_ITEM_TYPE_RAW:
4648 			ret = dpaa2_configure_flow_raw(flow, dev, attr,
4649 					&dpaa2_pattern[i],
4650 					actions, error,
4651 					&is_keycfg_configured);
4652 			if (ret) {
4653 				DPAA2_PMD_ERR("RAW flow config failed!");
4654 				goto end_flow_set;
4655 			}
4656 			break;
4657 		case RTE_FLOW_ITEM_TYPE_END:
4658 			end_of_list = 1;
4659 			break; /*End of List*/
4660 		default:
4661 			DPAA2_PMD_ERR("Invalid flow item[%d] type(%d)",
4662 				i, pattern[i].type);
4663 			ret = -ENOTSUP;
4664 			break;
4665 		}
4666 		i++;
4667 	}
4668 
4669 	qos_key_extract = &priv->extract.qos_key_extract;
4670 	key_size = qos_key_extract->key_profile.key_max_size;
4671 	flow->qos_rule.key_size = dpaa2_flow_entry_size(key_size);
4672 
4673 	tc_key_extract = &priv->extract.tc_key_extract[flow->tc_id];
4674 	key_size = tc_key_extract->key_profile.key_max_size;
4675 	flow->fs_rule.key_size = dpaa2_flow_entry_size(key_size);
4676 
4677 	/* Let's parse action on matching traffic */
4678 	end_of_list = 0;
4679 	while (!end_of_list) {
4680 		switch (actions[j].type) {
4681 		case RTE_FLOW_ACTION_TYPE_QUEUE:
4682 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
4683 		case RTE_FLOW_ACTION_TYPE_PORT_ID:
4684 			ret = dpaa2_configure_flow_fs_action(priv, flow,
4685 							     &actions[j]);
4686 			if (ret)
4687 				goto end_flow_set;
4688 
4689 			/* Configure FS table first*/
4690 			dist_size = priv->nb_rx_queues / priv->num_rx_tc;
4691 			if (is_keycfg_configured & DPAA2_FLOW_FS_TYPE) {
4692 				ret = dpaa2_configure_fs_rss_table(priv,
4693 								   flow->tc_id,
4694 								   dist_size,
4695 								   false);
4696 				if (ret)
4697 					goto end_flow_set;
4698 			}
4699 
4700 			/* Configure QoS table then.*/
4701 			if (is_keycfg_configured & DPAA2_FLOW_QOS_TYPE) {
4702 				ret = dpaa2_configure_qos_table(priv, false);
4703 				if (ret)
4704 					goto end_flow_set;
4705 			}
4706 
4707 			if (priv->num_rx_tc > 1) {
4708 				ret = dpaa2_flow_add_qos_rule(priv, flow);
4709 				if (ret)
4710 					goto end_flow_set;
4711 			}
4712 
4713 			if (flow->tc_index >= priv->fs_entries) {
4714 				DPAA2_PMD_ERR("FS table with %d entries full",
4715 					priv->fs_entries);
4716 				return -1;
4717 			}
4718 
4719 			ret = dpaa2_flow_add_fs_rule(priv, flow);
4720 			if (ret)
4721 				goto end_flow_set;
4722 
4723 			break;
4724 		case RTE_FLOW_ACTION_TYPE_RSS:
4725 			rss_conf = actions[j].conf;
4726 			flow->action_type = RTE_FLOW_ACTION_TYPE_RSS;
4727 
4728 			ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types,
4729 					&tc_key_extract->dpkg);
4730 			if (ret < 0) {
4731 				DPAA2_PMD_ERR("TC[%d] distset RSS failed",
4732 					      flow->tc_id);
4733 				goto end_flow_set;
4734 			}
4735 
4736 			dist_size = rss_conf->queue_num;
4737 			if (is_keycfg_configured & DPAA2_FLOW_FS_TYPE) {
4738 				ret = dpaa2_configure_fs_rss_table(priv,
4739 								   flow->tc_id,
4740 								   dist_size,
4741 								   true);
4742 				if (ret)
4743 					goto end_flow_set;
4744 			}
4745 
4746 			if (is_keycfg_configured & DPAA2_FLOW_QOS_TYPE) {
4747 				ret = dpaa2_configure_qos_table(priv, true);
4748 				if (ret)
4749 					goto end_flow_set;
4750 			}
4751 
4752 			ret = dpaa2_flow_add_qos_rule(priv, flow);
4753 			if (ret)
4754 				goto end_flow_set;
4755 
4756 			ret = dpaa2_flow_add_fs_rule(priv, flow);
4757 			if (ret)
4758 				goto end_flow_set;
4759 
4760 			break;
4761 		case RTE_FLOW_ACTION_TYPE_PF:
4762 			/* Skip this action, have to add for vxlan */
4763 			break;
4764 		case RTE_FLOW_ACTION_TYPE_END:
4765 			end_of_list = 1;
4766 			break;
4767 		default:
4768 			DPAA2_PMD_ERR("Invalid action type");
4769 			ret = -ENOTSUP;
4770 			break;
4771 		}
4772 		j++;
4773 	}
4774 
4775 end_flow_set:
4776 	if (!ret) {
4777 		/* New rules are inserted. */
4778 		if (!curr) {
4779 			LIST_INSERT_HEAD(&priv->flows, flow, next);
4780 		} else {
4781 			while (LIST_NEXT(curr, next))
4782 				curr = LIST_NEXT(curr, next);
4783 			LIST_INSERT_AFTER(curr, flow, next);
4784 		}
4785 	}
4786 
4787 	rte_free(dpaa2_pattern);
4788 
4789 	return ret;
4790 }
4791 
4792 static inline int
4793 dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr,
4794 	const struct rte_flow_attr *attr)
4795 {
4796 	int ret = 0;
4797 
4798 	if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) {
4799 		DPAA2_PMD_ERR("Group/TC(%d) is out of range(%d)",
4800 			attr->group, dpni_attr->num_rx_tcs);
4801 		ret = -ENOTSUP;
4802 	}
4803 	if (unlikely(attr->priority >= dpni_attr->fs_entries)) {
4804 		DPAA2_PMD_ERR("Priority(%d) within group is out of range(%d)",
4805 			attr->priority, dpni_attr->fs_entries);
4806 		ret = -ENOTSUP;
4807 	}
4808 	if (unlikely(attr->egress)) {
4809 		DPAA2_PMD_ERR("Egress flow configuration is not supported");
4810 		ret = -ENOTSUP;
4811 	}
4812 	if (unlikely(!attr->ingress)) {
4813 		DPAA2_PMD_ERR("Ingress flag must be configured");
4814 		ret = -EINVAL;
4815 	}
4816 	return ret;
4817 }
4818 
4819 static inline int
4820 dpaa2_dev_verify_patterns(const struct rte_flow_item pattern[])
4821 {
4822 	unsigned int i, j, is_found = 0;
4823 	int ret = 0;
4824 	const enum rte_flow_item_type *hp_supported;
4825 	const enum rte_flow_item_type *sp_supported;
4826 	uint64_t hp_supported_num, sp_supported_num;
4827 
4828 	hp_supported = dpaa2_hp_supported_pattern_type;
4829 	hp_supported_num = RTE_DIM(dpaa2_hp_supported_pattern_type);
4830 
4831 	sp_supported = dpaa2_sp_supported_pattern_type;
4832 	sp_supported_num = RTE_DIM(dpaa2_sp_supported_pattern_type);
4833 
4834 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
4835 		is_found = 0;
4836 		for (i = 0; i < hp_supported_num; i++) {
4837 			if (hp_supported[i] == pattern[j].type) {
4838 				is_found = 1;
4839 				break;
4840 			}
4841 		}
4842 		if (is_found)
4843 			continue;
4844 		if (dpaa2_sp_loaded > 0) {
4845 			for (i = 0; i < sp_supported_num; i++) {
4846 				if (sp_supported[i] == pattern[j].type) {
4847 					is_found = 1;
4848 					break;
4849 				}
4850 			}
4851 		}
4852 		if (!is_found) {
4853 			DPAA2_PMD_WARN("Flow type(%d) not supported",
4854 				pattern[j].type);
4855 			ret = -ENOTSUP;
4856 			break;
4857 		}
4858 	}
4859 
4860 	return ret;
4861 }
4862 
4863 static inline int
4864 dpaa2_dev_verify_actions(const struct rte_flow_action actions[])
4865 {
4866 	unsigned int i, j, is_found = 0;
4867 	int ret = 0;
4868 
4869 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
4870 		for (i = 0; i < RTE_DIM(dpaa2_supported_action_type); i++) {
4871 			if (dpaa2_supported_action_type[i] == actions[j].type) {
4872 				is_found = 1;
4873 				break;
4874 			}
4875 		}
4876 		if (!is_found) {
4877 			ret = -ENOTSUP;
4878 			break;
4879 		}
4880 	}
4881 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
4882 		if (actions[j].type != RTE_FLOW_ACTION_TYPE_DROP &&
4883 		    !actions[j].conf)
4884 			ret = -EINVAL;
4885 	}
4886 	return ret;
4887 }
4888 
4889 static int
4890 dpaa2_flow_validate(struct rte_eth_dev *dev,
4891 	const struct rte_flow_attr *flow_attr,
4892 	const struct rte_flow_item pattern[],
4893 	const struct rte_flow_action actions[],
4894 	struct rte_flow_error *error)
4895 {
4896 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4897 	struct dpni_attr dpni_attr;
4898 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
4899 	uint16_t token = priv->token;
4900 	int ret = 0;
4901 
4902 	memset(&dpni_attr, 0, sizeof(struct dpni_attr));
4903 	ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr);
4904 	if (ret < 0) {
4905 		DPAA2_PMD_ERR("Get dpni@%d attribute failed(%d)",
4906 			priv->hw_id, ret);
4907 		rte_flow_error_set(error, EPERM,
4908 			RTE_FLOW_ERROR_TYPE_ATTR,
4909 			flow_attr, "invalid");
4910 		return ret;
4911 	}
4912 
4913 	/* Verify input attributes */
4914 	ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr);
4915 	if (ret < 0) {
4916 		DPAA2_PMD_ERR("Invalid attributes are given");
4917 		rte_flow_error_set(error, EPERM,
4918 			RTE_FLOW_ERROR_TYPE_ATTR,
4919 			flow_attr, "invalid");
4920 		goto not_valid_params;
4921 	}
4922 	/* Verify input pattern list */
4923 	ret = dpaa2_dev_verify_patterns(pattern);
4924 	if (ret < 0) {
4925 		DPAA2_PMD_ERR("Invalid pattern list is given");
4926 		rte_flow_error_set(error, EPERM,
4927 			RTE_FLOW_ERROR_TYPE_ITEM,
4928 			pattern, "invalid");
4929 		goto not_valid_params;
4930 	}
4931 	/* Verify input action list */
4932 	ret = dpaa2_dev_verify_actions(actions);
4933 	if (ret < 0) {
4934 		DPAA2_PMD_ERR("Invalid action list is given");
4935 		rte_flow_error_set(error, EPERM,
4936 			RTE_FLOW_ERROR_TYPE_ACTION,
4937 			actions, "invalid");
4938 		goto not_valid_params;
4939 	}
4940 not_valid_params:
4941 	return ret;
4942 }
4943 
4944 static struct rte_flow *
4945 dpaa2_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
4946 		  const struct rte_flow_item pattern[],
4947 		  const struct rte_flow_action actions[],
4948 		  struct rte_flow_error *error)
4949 {
4950 	struct dpaa2_dev_flow *flow = NULL;
4951 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4952 	int ret;
4953 	uint64_t iova;
4954 
4955 	dpaa2_flow_control_log =
4956 		getenv("DPAA2_FLOW_CONTROL_LOG");
4957 
4958 	if (getenv("DPAA2_FLOW_CONTROL_MISS_FLOW")) {
4959 		dpaa2_flow_miss_flow_id =
4960 			(uint16_t)atoi(getenv("DPAA2_FLOW_CONTROL_MISS_FLOW"));
4961 		if (dpaa2_flow_miss_flow_id >= priv->dist_queues) {
4962 			DPAA2_PMD_ERR("Missed flow ID %d >= dist size(%d)",
4963 				      dpaa2_flow_miss_flow_id,
4964 				      priv->dist_queues);
4965 			return NULL;
4966 		}
4967 	}
4968 
4969 	flow = rte_zmalloc(NULL, sizeof(struct dpaa2_dev_flow),
4970 			   RTE_CACHE_LINE_SIZE);
4971 	if (!flow) {
4972 		DPAA2_PMD_ERR("Failure to allocate memory for flow");
4973 		goto mem_failure;
4974 	}
4975 
4976 	/* Allocate DMA'ble memory to write the qos rules */
4977 	flow->qos_key_addr = rte_zmalloc(NULL,
4978 		DPAA2_EXTRACT_ALLOC_KEY_MAX_SIZE, RTE_CACHE_LINE_SIZE);
4979 	if (!flow->qos_key_addr) {
4980 		DPAA2_PMD_ERR("Memory allocation failed");
4981 		goto mem_failure;
4982 	}
4983 	iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(flow->qos_key_addr,
4984 			DPAA2_EXTRACT_ALLOC_KEY_MAX_SIZE);
4985 	if (iova == RTE_BAD_IOVA) {
4986 		DPAA2_PMD_ERR("%s: No IOMMU map for qos key(%p)",
4987 			__func__, flow->qos_key_addr);
4988 		goto mem_failure;
4989 	}
4990 	flow->qos_rule.key_iova = iova;
4991 
4992 	flow->qos_mask_addr = rte_zmalloc(NULL,
4993 		DPAA2_EXTRACT_ALLOC_KEY_MAX_SIZE, RTE_CACHE_LINE_SIZE);
4994 	if (!flow->qos_mask_addr) {
4995 		DPAA2_PMD_ERR("Memory allocation failed");
4996 		goto mem_failure;
4997 	}
4998 	iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(flow->qos_mask_addr,
4999 			DPAA2_EXTRACT_ALLOC_KEY_MAX_SIZE);
5000 	if (iova == RTE_BAD_IOVA) {
5001 		DPAA2_PMD_ERR("%s: No IOMMU map for qos mask(%p)",
5002 			__func__, flow->qos_mask_addr);
5003 		goto mem_failure;
5004 	}
5005 	flow->qos_rule.mask_iova = iova;
5006 
5007 	/* Allocate DMA'ble memory to write the FS rules */
5008 	flow->fs_key_addr = rte_zmalloc(NULL,
5009 		DPAA2_EXTRACT_ALLOC_KEY_MAX_SIZE, RTE_CACHE_LINE_SIZE);
5010 	if (!flow->fs_key_addr) {
5011 		DPAA2_PMD_ERR("Memory allocation failed");
5012 		goto mem_failure;
5013 	}
5014 	iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(flow->fs_key_addr,
5015 			DPAA2_EXTRACT_ALLOC_KEY_MAX_SIZE);
5016 	if (iova == RTE_BAD_IOVA) {
5017 		DPAA2_PMD_ERR("%s: No IOMMU map for fs key(%p)",
5018 			__func__, flow->fs_key_addr);
5019 		goto mem_failure;
5020 	}
5021 	flow->fs_rule.key_iova = iova;
5022 
5023 	flow->fs_mask_addr = rte_zmalloc(NULL,
5024 		DPAA2_EXTRACT_ALLOC_KEY_MAX_SIZE, RTE_CACHE_LINE_SIZE);
5025 	if (!flow->fs_mask_addr) {
5026 		DPAA2_PMD_ERR("Memory allocation failed");
5027 		goto mem_failure;
5028 	}
5029 	iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(flow->fs_mask_addr,
5030 		DPAA2_EXTRACT_ALLOC_KEY_MAX_SIZE);
5031 	if (iova == RTE_BAD_IOVA) {
5032 		DPAA2_PMD_ERR("%s: No IOMMU map for fs mask(%p)",
5033 			__func__, flow->fs_mask_addr);
5034 		goto mem_failure;
5035 	}
5036 	flow->fs_rule.mask_iova = iova;
5037 
5038 	priv->curr = flow;
5039 
5040 	ret = dpaa2_generic_flow_set(flow, dev, attr, pattern, actions, error);
5041 	if (ret < 0) {
5042 		if (error && error->type > RTE_FLOW_ERROR_TYPE_ACTION)
5043 			rte_flow_error_set(error, EPERM,
5044 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5045 					   attr, "unknown");
5046 		DPAA2_PMD_ERR("Create flow failed (%d)", ret);
5047 		goto creation_error;
5048 	}
5049 
5050 	priv->curr = NULL;
5051 	return (struct rte_flow *)flow;
5052 
5053 mem_failure:
5054 	rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5055 			   "memory alloc");
5056 
5057 creation_error:
5058 	if (flow) {
5059 		rte_free(flow->qos_key_addr);
5060 		rte_free(flow->qos_mask_addr);
5061 		rte_free(flow->fs_key_addr);
5062 		rte_free(flow->fs_mask_addr);
5063 		rte_free(flow);
5064 	}
5065 	priv->curr = NULL;
5066 
5067 	return NULL;
5068 }
5069 
5070 static int
5071 dpaa2_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *_flow,
5072 		   struct rte_flow_error *error)
5073 {
5074 	int ret = 0;
5075 	struct dpaa2_dev_flow *flow;
5076 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
5077 	struct fsl_mc_io *dpni = priv->hw;
5078 
5079 	flow = (struct dpaa2_dev_flow *)_flow;
5080 
5081 	switch (flow->action_type) {
5082 	case RTE_FLOW_ACTION_TYPE_QUEUE:
5083 	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
5084 	case RTE_FLOW_ACTION_TYPE_PORT_ID:
5085 		if (priv->num_rx_tc > 1) {
5086 			/* Remove entry from QoS table first */
5087 			ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW,
5088 						    priv->token,
5089 						    &flow->qos_rule);
5090 			if (ret < 0) {
5091 				DPAA2_PMD_ERR("Remove FS QoS entry failed");
5092 				dpaa2_flow_qos_entry_log("Delete failed", flow,
5093 							 -1);
5094 				abort();
5095 				goto error;
5096 			}
5097 		}
5098 
5099 		/* Then remove entry from FS table */
5100 		ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token,
5101 					   flow->tc_id, &flow->fs_rule);
5102 		if (ret < 0) {
5103 			DPAA2_PMD_ERR("Remove entry from FS[%d] failed",
5104 				      flow->tc_id);
5105 			goto error;
5106 		}
5107 		break;
5108 	case RTE_FLOW_ACTION_TYPE_RSS:
5109 		if (priv->num_rx_tc > 1) {
5110 			ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW,
5111 						    priv->token,
5112 						    &flow->qos_rule);
5113 			if (ret < 0) {
5114 				DPAA2_PMD_ERR("Remove RSS QoS entry failed");
5115 				goto error;
5116 			}
5117 		}
5118 		break;
5119 	default:
5120 		DPAA2_PMD_ERR("Action(%d) not supported", flow->action_type);
5121 		ret = -ENOTSUP;
5122 		break;
5123 	}
5124 
5125 	LIST_REMOVE(flow, next);
5126 	rte_free(flow->qos_key_addr);
5127 	rte_free(flow->qos_mask_addr);
5128 	rte_free(flow->fs_key_addr);
5129 	rte_free(flow->fs_mask_addr);
5130 	/* Now free the flow */
5131 	rte_free(flow);
5132 
5133 error:
5134 	if (ret)
5135 		rte_flow_error_set(error, EPERM,
5136 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5137 				   NULL, "unknown");
5138 	return ret;
5139 }
5140 
5141 /**
5142  * Destroy user-configured flow rules.
5143  *
5144  * This function skips internal flows rules.
5145  *
5146  * @see rte_flow_flush()
5147  * @see rte_flow_ops
5148  */
5149 static int
5150 dpaa2_flow_flush(struct rte_eth_dev *dev,
5151 		struct rte_flow_error *error)
5152 {
5153 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
5154 	struct dpaa2_dev_flow *flow = LIST_FIRST(&priv->flows);
5155 
5156 	while (flow) {
5157 		struct dpaa2_dev_flow *next = LIST_NEXT(flow, next);
5158 
5159 		dpaa2_flow_destroy(dev, (struct rte_flow *)flow, error);
5160 		flow = next;
5161 	}
5162 	return 0;
5163 }
5164 
5165 static int
5166 dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused,
5167 	struct rte_flow *_flow __rte_unused,
5168 	const struct rte_flow_action *actions __rte_unused,
5169 	void *data __rte_unused,
5170 	struct rte_flow_error *error __rte_unused)
5171 {
5172 	return 0;
5173 }
5174 
5175 /**
5176  * Clean up all flow rules.
5177  *
5178  * Unlike dpaa2_flow_flush(), this function takes care of all remaining flow
5179  * rules regardless of whether they are internal or user-configured.
5180  *
5181  * @param priv
5182  *   Pointer to private structure.
5183  */
5184 void
5185 dpaa2_flow_clean(struct rte_eth_dev *dev)
5186 {
5187 	struct dpaa2_dev_flow *flow;
5188 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
5189 
5190 	while ((flow = LIST_FIRST(&priv->flows)))
5191 		dpaa2_flow_destroy(dev, (struct rte_flow *)flow, NULL);
5192 }
5193 
5194 const struct rte_flow_ops dpaa2_flow_ops = {
5195 	.create	= dpaa2_flow_create,
5196 	.validate = dpaa2_flow_validate,
5197 	.destroy = dpaa2_flow_destroy,
5198 	.flush	= dpaa2_flow_flush,
5199 	.query	= dpaa2_flow_query,
5200 };
5201