xref: /dpdk/drivers/net/dpaa2/dpaa2_flow.c (revision 1cf6d181b58f83bd54c444499ec95380eb4b1c38)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2022 NXP
3  */
4 
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <sys/mman.h>
13 
14 #include <rte_ethdev.h>
15 #include <rte_log.h>
16 #include <rte_malloc.h>
17 #include <rte_flow_driver.h>
18 #include <rte_tailq.h>
19 
20 #include <fsl_dpni.h>
21 #include <fsl_dpkg.h>
22 
23 #include <dpaa2_ethdev.h>
24 #include <dpaa2_pmd_logs.h>
25 
26 static char *dpaa2_flow_control_log;
27 static uint16_t dpaa2_flow_miss_flow_id; /* Default miss flow id is 0. */
28 static int dpaa2_sp_loaded = -1;
29 
30 enum dpaa2_flow_entry_size {
31 	DPAA2_FLOW_ENTRY_MIN_SIZE = (DPNI_MAX_KEY_SIZE / 2),
32 	DPAA2_FLOW_ENTRY_MAX_SIZE = DPNI_MAX_KEY_SIZE
33 };
34 
35 enum dpaa2_flow_dist_type {
36 	DPAA2_FLOW_QOS_TYPE = 1 << 0,
37 	DPAA2_FLOW_FS_TYPE = 1 << 1
38 };
39 
40 #define DPAA2_FLOW_RAW_OFFSET_FIELD_SHIFT	16
41 #define DPAA2_FLOW_MAX_KEY_SIZE			16
42 #define DPAA2_PROT_FIELD_STRING_SIZE		16
43 #define VXLAN_HF_VNI 0x08
44 
45 struct dpaa2_dev_flow {
46 	LIST_ENTRY(dpaa2_dev_flow) next;
47 	struct dpni_rule_cfg qos_rule;
48 	uint8_t *qos_key_addr;
49 	uint8_t *qos_mask_addr;
50 	uint16_t qos_rule_size;
51 	struct dpni_rule_cfg fs_rule;
52 	uint8_t qos_real_key_size;
53 	uint8_t fs_real_key_size;
54 	uint8_t *fs_key_addr;
55 	uint8_t *fs_mask_addr;
56 	uint16_t fs_rule_size;
57 	uint8_t tc_id; /** Traffic Class ID. */
58 	uint8_t tc_index; /** index within this Traffic Class. */
59 	enum rte_flow_action_type action_type;
60 	struct dpni_fs_action_cfg fs_action_cfg;
61 };
62 
63 struct rte_dpaa2_flow_item {
64 	struct rte_flow_item generic_item;
65 	int in_tunnel;
66 };
67 
68 static const
69 enum rte_flow_item_type dpaa2_hp_supported_pattern_type[] = {
70 	RTE_FLOW_ITEM_TYPE_END,
71 	RTE_FLOW_ITEM_TYPE_ETH,
72 	RTE_FLOW_ITEM_TYPE_VLAN,
73 	RTE_FLOW_ITEM_TYPE_IPV4,
74 	RTE_FLOW_ITEM_TYPE_IPV6,
75 	RTE_FLOW_ITEM_TYPE_ICMP,
76 	RTE_FLOW_ITEM_TYPE_UDP,
77 	RTE_FLOW_ITEM_TYPE_TCP,
78 	RTE_FLOW_ITEM_TYPE_SCTP,
79 	RTE_FLOW_ITEM_TYPE_GRE,
80 	RTE_FLOW_ITEM_TYPE_GTP,
81 	RTE_FLOW_ITEM_TYPE_RAW
82 };
83 
84 static const
85 enum rte_flow_item_type dpaa2_sp_supported_pattern_type[] = {
86 	RTE_FLOW_ITEM_TYPE_VXLAN,
87 	RTE_FLOW_ITEM_TYPE_ECPRI
88 };
89 
90 static const
91 enum rte_flow_action_type dpaa2_supported_action_type[] = {
92 	RTE_FLOW_ACTION_TYPE_END,
93 	RTE_FLOW_ACTION_TYPE_QUEUE,
94 	RTE_FLOW_ACTION_TYPE_PORT_ID,
95 	RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
96 	RTE_FLOW_ACTION_TYPE_RSS
97 };
98 
99 #ifndef __cplusplus
100 static const struct rte_flow_item_eth dpaa2_flow_item_eth_mask = {
101 	.hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
102 	.hdr.src_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
103 	.hdr.ether_type = RTE_BE16(0xffff),
104 };
105 
106 static const struct rte_flow_item_vlan dpaa2_flow_item_vlan_mask = {
107 	.hdr.vlan_tci = RTE_BE16(0xffff),
108 };
109 
110 static const struct rte_flow_item_ipv4 dpaa2_flow_item_ipv4_mask = {
111 	.hdr.src_addr = RTE_BE32(0xffffffff),
112 	.hdr.dst_addr = RTE_BE32(0xffffffff),
113 	.hdr.next_proto_id = 0xff,
114 };
115 
116 static const struct rte_flow_item_ipv6 dpaa2_flow_item_ipv6_mask = {
117 	.hdr = {
118 		.src_addr = RTE_IPV6_MASK_FULL,
119 		.dst_addr = RTE_IPV6_MASK_FULL,
120 		.proto = 0xff
121 	},
122 };
123 
124 static const struct rte_flow_item_icmp dpaa2_flow_item_icmp_mask = {
125 	.hdr.icmp_type = 0xff,
126 	.hdr.icmp_code = 0xff,
127 };
128 
129 static const struct rte_flow_item_udp dpaa2_flow_item_udp_mask = {
130 	.hdr = {
131 		.src_port = RTE_BE16(0xffff),
132 		.dst_port = RTE_BE16(0xffff),
133 	},
134 };
135 
136 static const struct rte_flow_item_tcp dpaa2_flow_item_tcp_mask = {
137 	.hdr = {
138 		.src_port = RTE_BE16(0xffff),
139 		.dst_port = RTE_BE16(0xffff),
140 	},
141 };
142 
143 static const struct rte_flow_item_sctp dpaa2_flow_item_sctp_mask = {
144 	.hdr = {
145 		.src_port = RTE_BE16(0xffff),
146 		.dst_port = RTE_BE16(0xffff),
147 	},
148 };
149 
150 static const struct rte_flow_item_gre dpaa2_flow_item_gre_mask = {
151 	.protocol = RTE_BE16(0xffff),
152 };
153 
154 static const struct rte_flow_item_vxlan dpaa2_flow_item_vxlan_mask = {
155 	.flags = 0xff,
156 	.vni = "\xff\xff\xff",
157 };
158 
159 static const struct rte_flow_item_ecpri dpaa2_flow_item_ecpri_mask = {
160 	.hdr.common.type = 0xff,
161 	.hdr.dummy[0] = RTE_BE32(0xffffffff),
162 	.hdr.dummy[1] = RTE_BE32(0xffffffff),
163 	.hdr.dummy[2] = RTE_BE32(0xffffffff),
164 };
165 
166 static const struct rte_flow_item_gtp dpaa2_flow_item_gtp_mask = {
167 	.teid = RTE_BE32(0xffffffff),
168 };
169 
170 #endif
171 
172 #define DPAA2_FLOW_DUMP printf
173 
174 static inline void
175 dpaa2_prot_field_string(uint32_t prot, uint32_t field,
176 	char *string)
177 {
178 	if (!dpaa2_flow_control_log)
179 		return;
180 
181 	if (prot == NET_PROT_ETH) {
182 		strcpy(string, "eth");
183 		if (field == NH_FLD_ETH_DA)
184 			strcat(string, ".dst");
185 		else if (field == NH_FLD_ETH_SA)
186 			strcat(string, ".src");
187 		else if (field == NH_FLD_ETH_TYPE)
188 			strcat(string, ".type");
189 		else
190 			strcat(string, ".unknown field");
191 	} else if (prot == NET_PROT_VLAN) {
192 		strcpy(string, "vlan");
193 		if (field == NH_FLD_VLAN_TCI)
194 			strcat(string, ".tci");
195 		else
196 			strcat(string, ".unknown field");
197 	} else if (prot == NET_PROT_IP) {
198 		strcpy(string, "ip");
199 		if (field == NH_FLD_IP_SRC)
200 			strcat(string, ".src");
201 		else if (field == NH_FLD_IP_DST)
202 			strcat(string, ".dst");
203 		else if (field == NH_FLD_IP_PROTO)
204 			strcat(string, ".proto");
205 		else
206 			strcat(string, ".unknown field");
207 	} else if (prot == NET_PROT_TCP) {
208 		strcpy(string, "tcp");
209 		if (field == NH_FLD_TCP_PORT_SRC)
210 			strcat(string, ".src");
211 		else if (field == NH_FLD_TCP_PORT_DST)
212 			strcat(string, ".dst");
213 		else
214 			strcat(string, ".unknown field");
215 	} else if (prot == NET_PROT_UDP) {
216 		strcpy(string, "udp");
217 		if (field == NH_FLD_UDP_PORT_SRC)
218 			strcat(string, ".src");
219 		else if (field == NH_FLD_UDP_PORT_DST)
220 			strcat(string, ".dst");
221 		else
222 			strcat(string, ".unknown field");
223 	} else if (prot == NET_PROT_ICMP) {
224 		strcpy(string, "icmp");
225 		if (field == NH_FLD_ICMP_TYPE)
226 			strcat(string, ".type");
227 		else if (field == NH_FLD_ICMP_CODE)
228 			strcat(string, ".code");
229 		else
230 			strcat(string, ".unknown field");
231 	} else if (prot == NET_PROT_SCTP) {
232 		strcpy(string, "sctp");
233 		if (field == NH_FLD_SCTP_PORT_SRC)
234 			strcat(string, ".src");
235 		else if (field == NH_FLD_SCTP_PORT_DST)
236 			strcat(string, ".dst");
237 		else
238 			strcat(string, ".unknown field");
239 	} else if (prot == NET_PROT_GRE) {
240 		strcpy(string, "gre");
241 		if (field == NH_FLD_GRE_TYPE)
242 			strcat(string, ".type");
243 		else
244 			strcat(string, ".unknown field");
245 	} else if (prot == NET_PROT_GTP) {
246 		rte_strscpy(string, "gtp", DPAA2_PROT_FIELD_STRING_SIZE);
247 		if (field == NH_FLD_GTP_TEID)
248 			strcat(string, ".teid");
249 		else
250 			strcat(string, ".unknown field");
251 	} else {
252 		strcpy(string, "unknown protocol");
253 	}
254 }
255 
256 static inline void
257 dpaa2_flow_qos_extracts_log(const struct dpaa2_dev_priv *priv)
258 {
259 	int idx;
260 	char string[32];
261 	const struct dpkg_profile_cfg *dpkg =
262 		&priv->extract.qos_key_extract.dpkg;
263 	const struct dpkg_extract *extract;
264 	enum dpkg_extract_type type;
265 	enum net_prot prot;
266 	uint32_t field;
267 
268 	if (!dpaa2_flow_control_log)
269 		return;
270 
271 	DPAA2_FLOW_DUMP("QoS table: %d extracts\r\n",
272 		dpkg->num_extracts);
273 	for (idx = 0; idx < dpkg->num_extracts; idx++) {
274 		extract = &dpkg->extracts[idx];
275 		type = extract->type;
276 		if (type == DPKG_EXTRACT_FROM_HDR) {
277 			prot = extract->extract.from_hdr.prot;
278 			field = extract->extract.from_hdr.field;
279 			dpaa2_prot_field_string(prot, field,
280 				string);
281 		} else if (type == DPKG_EXTRACT_FROM_DATA) {
282 			sprintf(string, "raw offset/len: %d/%d",
283 				extract->extract.from_data.offset,
284 				extract->extract.from_data.size);
285 		} else if (type == DPKG_EXTRACT_FROM_PARSE) {
286 			sprintf(string, "parse offset/len: %d/%d",
287 				extract->extract.from_parse.offset,
288 				extract->extract.from_parse.size);
289 		}
290 		DPAA2_FLOW_DUMP("%s", string);
291 		if ((idx + 1) < dpkg->num_extracts)
292 			DPAA2_FLOW_DUMP(" / ");
293 	}
294 	DPAA2_FLOW_DUMP("\r\n");
295 }
296 
297 static inline void
298 dpaa2_flow_fs_extracts_log(const struct dpaa2_dev_priv *priv,
299 	int tc_id)
300 {
301 	int idx;
302 	char string[32];
303 	const struct dpkg_profile_cfg *dpkg =
304 		&priv->extract.tc_key_extract[tc_id].dpkg;
305 	const struct dpkg_extract *extract;
306 	enum dpkg_extract_type type;
307 	enum net_prot prot;
308 	uint32_t field;
309 
310 	if (!dpaa2_flow_control_log)
311 		return;
312 
313 	DPAA2_FLOW_DUMP("FS table: %d extracts in TC[%d]\r\n",
314 		dpkg->num_extracts, tc_id);
315 	for (idx = 0; idx < dpkg->num_extracts; idx++) {
316 		extract = &dpkg->extracts[idx];
317 		type = extract->type;
318 		if (type == DPKG_EXTRACT_FROM_HDR) {
319 			prot = extract->extract.from_hdr.prot;
320 			field = extract->extract.from_hdr.field;
321 			dpaa2_prot_field_string(prot, field,
322 				string);
323 		} else if (type == DPKG_EXTRACT_FROM_DATA) {
324 			sprintf(string, "raw offset/len: %d/%d",
325 				extract->extract.from_data.offset,
326 				extract->extract.from_data.size);
327 		} else if (type == DPKG_EXTRACT_FROM_PARSE) {
328 			sprintf(string, "parse offset/len: %d/%d",
329 				extract->extract.from_parse.offset,
330 				extract->extract.from_parse.size);
331 		}
332 		DPAA2_FLOW_DUMP("%s", string);
333 		if ((idx + 1) < dpkg->num_extracts)
334 			DPAA2_FLOW_DUMP(" / ");
335 	}
336 	DPAA2_FLOW_DUMP("\r\n");
337 }
338 
339 static inline void
340 dpaa2_flow_qos_entry_log(const char *log_info,
341 	const struct dpaa2_dev_flow *flow, int qos_index)
342 {
343 	int idx;
344 	uint8_t *key, *mask;
345 
346 	if (!dpaa2_flow_control_log)
347 		return;
348 
349 	if (qos_index >= 0) {
350 		DPAA2_FLOW_DUMP("%s QoS entry[%d](size %d/%d) for TC[%d]\r\n",
351 			log_info, qos_index, flow->qos_rule_size,
352 			flow->qos_rule.key_size,
353 			flow->tc_id);
354 	} else {
355 		DPAA2_FLOW_DUMP("%s QoS entry(size %d/%d) for TC[%d]\r\n",
356 			log_info, flow->qos_rule_size,
357 			flow->qos_rule.key_size,
358 			flow->tc_id);
359 	}
360 
361 	key = flow->qos_key_addr;
362 	mask = flow->qos_mask_addr;
363 
364 	DPAA2_FLOW_DUMP("key:\r\n");
365 	for (idx = 0; idx < flow->qos_rule_size; idx++)
366 		DPAA2_FLOW_DUMP("%02x ", key[idx]);
367 
368 	DPAA2_FLOW_DUMP("\r\nmask:\r\n");
369 	for (idx = 0; idx < flow->qos_rule_size; idx++)
370 		DPAA2_FLOW_DUMP("%02x ", mask[idx]);
371 	DPAA2_FLOW_DUMP("\r\n");
372 }
373 
374 static inline void
375 dpaa2_flow_fs_entry_log(const char *log_info,
376 	const struct dpaa2_dev_flow *flow)
377 {
378 	int idx;
379 	uint8_t *key, *mask;
380 
381 	if (!dpaa2_flow_control_log)
382 		return;
383 
384 	DPAA2_FLOW_DUMP("%s FS/TC entry[%d](size %d/%d) of TC[%d]\r\n",
385 		log_info, flow->tc_index,
386 		flow->fs_rule_size, flow->fs_rule.key_size,
387 		flow->tc_id);
388 
389 	key = flow->fs_key_addr;
390 	mask = flow->fs_mask_addr;
391 
392 	DPAA2_FLOW_DUMP("key:\r\n");
393 	for (idx = 0; idx < flow->fs_rule_size; idx++)
394 		DPAA2_FLOW_DUMP("%02x ", key[idx]);
395 
396 	DPAA2_FLOW_DUMP("\r\nmask:\r\n");
397 	for (idx = 0; idx < flow->fs_rule_size; idx++)
398 		DPAA2_FLOW_DUMP("%02x ", mask[idx]);
399 	DPAA2_FLOW_DUMP("\r\n");
400 }
401 
402 /** For LX2160A, LS2088A and LS1088A*/
403 #define WRIOP_CCSR_BASE 0x8b80000
404 #define WRIOP_CCSR_CTLU_OFFSET 0
405 #define WRIOP_CCSR_CTLU_PARSER_OFFSET 0
406 #define WRIOP_CCSR_CTLU_PARSER_INGRESS_OFFSET 0
407 
408 #define WRIOP_INGRESS_PARSER_PHY \
409 	(WRIOP_CCSR_BASE + WRIOP_CCSR_CTLU_OFFSET + \
410 	WRIOP_CCSR_CTLU_PARSER_OFFSET + \
411 	WRIOP_CCSR_CTLU_PARSER_INGRESS_OFFSET)
412 
413 struct dpaa2_parser_ccsr {
414 	uint32_t psr_cfg;
415 	uint32_t psr_idle;
416 	uint32_t psr_pclm;
417 	uint8_t psr_ver_min;
418 	uint8_t psr_ver_maj;
419 	uint8_t psr_id1_l;
420 	uint8_t psr_id1_h;
421 	uint32_t psr_rev2;
422 	uint8_t rsv[0x2c];
423 	uint8_t sp_ins[4032];
424 };
425 
426 int
427 dpaa2_soft_parser_loaded(void)
428 {
429 	int fd, i, ret = 0;
430 	struct dpaa2_parser_ccsr *parser_ccsr = NULL;
431 
432 	dpaa2_flow_control_log = getenv("DPAA2_FLOW_CONTROL_LOG");
433 
434 	if (dpaa2_sp_loaded >= 0)
435 		return dpaa2_sp_loaded;
436 
437 	fd = open("/dev/mem", O_RDWR | O_SYNC);
438 	if (fd < 0) {
439 		DPAA2_PMD_ERR("open \"/dev/mem\" ERROR(%d)", fd);
440 		ret = fd;
441 		goto exit;
442 	}
443 
444 	parser_ccsr = mmap(NULL, sizeof(struct dpaa2_parser_ccsr),
445 		PROT_READ | PROT_WRITE, MAP_SHARED, fd,
446 		WRIOP_INGRESS_PARSER_PHY);
447 	if (!parser_ccsr) {
448 		DPAA2_PMD_ERR("Map 0x%" PRIx64 "(size=0x%x) failed",
449 			(uint64_t)WRIOP_INGRESS_PARSER_PHY,
450 			(uint32_t)sizeof(struct dpaa2_parser_ccsr));
451 		ret = -ENOBUFS;
452 		goto exit;
453 	}
454 
455 	DPAA2_PMD_INFO("Parser ID:0x%02x%02x, Rev:major(%02x), minor(%02x)",
456 		parser_ccsr->psr_id1_h, parser_ccsr->psr_id1_l,
457 		parser_ccsr->psr_ver_maj, parser_ccsr->psr_ver_min);
458 
459 	if (dpaa2_flow_control_log) {
460 		for (i = 0; i < 64; i++) {
461 			DPAA2_FLOW_DUMP("%02x ",
462 				parser_ccsr->sp_ins[i]);
463 			if (!((i + 1) % 16))
464 				DPAA2_FLOW_DUMP("\r\n");
465 		}
466 	}
467 
468 	for (i = 0; i < 16; i++) {
469 		if (parser_ccsr->sp_ins[i]) {
470 			dpaa2_sp_loaded = 1;
471 			break;
472 		}
473 	}
474 	if (dpaa2_sp_loaded < 0)
475 		dpaa2_sp_loaded = 0;
476 
477 	ret = dpaa2_sp_loaded;
478 
479 exit:
480 	if (parser_ccsr)
481 		munmap(parser_ccsr, sizeof(struct dpaa2_parser_ccsr));
482 	if (fd >= 0)
483 		close(fd);
484 
485 	return ret;
486 }
487 
488 static int
489 dpaa2_flow_ip_address_extract(enum net_prot prot,
490 	uint32_t field)
491 {
492 	if (prot == NET_PROT_IPV4 &&
493 		(field == NH_FLD_IPV4_SRC_IP ||
494 		field == NH_FLD_IPV4_DST_IP))
495 		return true;
496 	else if (prot == NET_PROT_IPV6 &&
497 		(field == NH_FLD_IPV6_SRC_IP ||
498 		field == NH_FLD_IPV6_DST_IP))
499 		return true;
500 	else if (prot == NET_PROT_IP &&
501 		(field == NH_FLD_IP_SRC ||
502 		field == NH_FLD_IP_DST))
503 		return true;
504 
505 	return false;
506 }
507 
508 static int
509 dpaa2_flow_l4_src_port_extract(enum net_prot prot,
510 	uint32_t field)
511 {
512 	if (prot == NET_PROT_TCP &&
513 		field == NH_FLD_TCP_PORT_SRC)
514 		return true;
515 	else if (prot == NET_PROT_UDP &&
516 		field == NH_FLD_UDP_PORT_SRC)
517 		return true;
518 	else if (prot == NET_PROT_SCTP &&
519 		field == NH_FLD_SCTP_PORT_SRC)
520 		return true;
521 
522 	return false;
523 }
524 
525 static int
526 dpaa2_flow_l4_dst_port_extract(enum net_prot prot,
527 	uint32_t field)
528 {
529 	if (prot == NET_PROT_TCP &&
530 		field == NH_FLD_TCP_PORT_DST)
531 		return true;
532 	else if (prot == NET_PROT_UDP &&
533 		field == NH_FLD_UDP_PORT_DST)
534 		return true;
535 	else if (prot == NET_PROT_SCTP &&
536 		field == NH_FLD_SCTP_PORT_DST)
537 		return true;
538 
539 	return false;
540 }
541 
542 static int
543 dpaa2_flow_add_qos_rule(struct dpaa2_dev_priv *priv,
544 	struct dpaa2_dev_flow *flow)
545 {
546 	uint16_t qos_index;
547 	int ret;
548 	struct fsl_mc_io *dpni = priv->hw;
549 
550 	if (priv->num_rx_tc <= 1 &&
551 		flow->action_type != RTE_FLOW_ACTION_TYPE_RSS) {
552 		DPAA2_PMD_WARN("No QoS Table for FS");
553 		return -EINVAL;
554 	}
555 
556 	/* QoS entry added is only effective for multiple TCs.*/
557 	qos_index = flow->tc_id * priv->fs_entries + flow->tc_index;
558 	if (qos_index >= priv->qos_entries) {
559 		DPAA2_PMD_ERR("QoS table full(%d >= %d)",
560 			qos_index, priv->qos_entries);
561 		return -EINVAL;
562 	}
563 
564 	dpaa2_flow_qos_entry_log("Start add", flow, qos_index);
565 
566 	ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
567 			priv->token, &flow->qos_rule,
568 			flow->tc_id, qos_index,
569 			0, 0);
570 	if (ret < 0) {
571 		DPAA2_PMD_ERR("Add entry(%d) to table(%d) failed",
572 			qos_index, flow->tc_id);
573 		return ret;
574 	}
575 
576 	return 0;
577 }
578 
579 static int
580 dpaa2_flow_add_fs_rule(struct dpaa2_dev_priv *priv,
581 	struct dpaa2_dev_flow *flow)
582 {
583 	int ret;
584 	struct fsl_mc_io *dpni = priv->hw;
585 
586 	if (flow->tc_index >= priv->fs_entries) {
587 		DPAA2_PMD_ERR("FS table full(%d >= %d)",
588 			flow->tc_index, priv->fs_entries);
589 		return -EINVAL;
590 	}
591 
592 	dpaa2_flow_fs_entry_log("Start add", flow);
593 
594 	ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW,
595 			priv->token, flow->tc_id,
596 			flow->tc_index, &flow->fs_rule,
597 			&flow->fs_action_cfg);
598 	if (ret < 0) {
599 		DPAA2_PMD_ERR("Add rule(%d) to FS table(%d) failed",
600 			flow->tc_index, flow->tc_id);
601 		return ret;
602 	}
603 
604 	return 0;
605 }
606 
607 static int
608 dpaa2_flow_rule_insert_hole(struct dpaa2_dev_flow *flow,
609 	int offset, int size,
610 	enum dpaa2_flow_dist_type dist_type)
611 {
612 	int end;
613 
614 	if (dist_type & DPAA2_FLOW_QOS_TYPE) {
615 		end = flow->qos_rule_size;
616 		if (end > offset) {
617 			memmove(flow->qos_key_addr + offset + size,
618 					flow->qos_key_addr + offset,
619 					end - offset);
620 			memset(flow->qos_key_addr + offset,
621 					0, size);
622 
623 			memmove(flow->qos_mask_addr + offset + size,
624 					flow->qos_mask_addr + offset,
625 					end - offset);
626 			memset(flow->qos_mask_addr + offset,
627 					0, size);
628 		}
629 		flow->qos_rule_size += size;
630 	}
631 
632 	if (dist_type & DPAA2_FLOW_FS_TYPE) {
633 		end = flow->fs_rule_size;
634 		if (end > offset) {
635 			memmove(flow->fs_key_addr + offset + size,
636 					flow->fs_key_addr + offset,
637 					end - offset);
638 			memset(flow->fs_key_addr + offset,
639 					0, size);
640 
641 			memmove(flow->fs_mask_addr + offset + size,
642 					flow->fs_mask_addr + offset,
643 					end - offset);
644 			memset(flow->fs_mask_addr + offset,
645 					0, size);
646 		}
647 		flow->fs_rule_size += size;
648 	}
649 
650 	return 0;
651 }
652 
653 static int
654 dpaa2_flow_rule_add_all(struct dpaa2_dev_priv *priv,
655 	enum dpaa2_flow_dist_type dist_type,
656 	uint16_t entry_size, uint8_t tc_id)
657 {
658 	struct dpaa2_dev_flow *curr = LIST_FIRST(&priv->flows);
659 	int ret;
660 
661 	while (curr) {
662 		if (dist_type & DPAA2_FLOW_QOS_TYPE) {
663 			if (priv->num_rx_tc > 1 ||
664 				curr->action_type ==
665 				RTE_FLOW_ACTION_TYPE_RSS) {
666 				curr->qos_rule.key_size = entry_size;
667 				ret = dpaa2_flow_add_qos_rule(priv, curr);
668 				if (ret)
669 					return ret;
670 			}
671 		}
672 		if (dist_type & DPAA2_FLOW_FS_TYPE &&
673 			curr->tc_id == tc_id) {
674 			curr->fs_rule.key_size = entry_size;
675 			ret = dpaa2_flow_add_fs_rule(priv, curr);
676 			if (ret)
677 				return ret;
678 		}
679 		curr = LIST_NEXT(curr, next);
680 	}
681 
682 	return 0;
683 }
684 
685 static int
686 dpaa2_flow_qos_rule_insert_hole(struct dpaa2_dev_priv *priv,
687 	int offset, int size)
688 {
689 	struct dpaa2_dev_flow *curr;
690 	int ret;
691 
692 	curr = priv->curr;
693 	if (!curr) {
694 		DPAA2_PMD_ERR("Current qos flow insert hole failed.");
695 		return -EINVAL;
696 	} else {
697 		ret = dpaa2_flow_rule_insert_hole(curr, offset, size,
698 				DPAA2_FLOW_QOS_TYPE);
699 		if (ret)
700 			return ret;
701 	}
702 
703 	curr = LIST_FIRST(&priv->flows);
704 	while (curr) {
705 		ret = dpaa2_flow_rule_insert_hole(curr, offset, size,
706 				DPAA2_FLOW_QOS_TYPE);
707 		if (ret)
708 			return ret;
709 		curr = LIST_NEXT(curr, next);
710 	}
711 
712 	return 0;
713 }
714 
715 static int
716 dpaa2_flow_fs_rule_insert_hole(struct dpaa2_dev_priv *priv,
717 	int offset, int size, int tc_id)
718 {
719 	struct dpaa2_dev_flow *curr;
720 	int ret;
721 
722 	curr = priv->curr;
723 	if (!curr || curr->tc_id != tc_id) {
724 		DPAA2_PMD_ERR("Current flow insert hole failed.");
725 		return -EINVAL;
726 	} else {
727 		ret = dpaa2_flow_rule_insert_hole(curr, offset, size,
728 				DPAA2_FLOW_FS_TYPE);
729 		if (ret)
730 			return ret;
731 	}
732 
733 	curr = LIST_FIRST(&priv->flows);
734 
735 	while (curr) {
736 		if (curr->tc_id != tc_id) {
737 			curr = LIST_NEXT(curr, next);
738 			continue;
739 		}
740 		ret = dpaa2_flow_rule_insert_hole(curr, offset, size,
741 				DPAA2_FLOW_FS_TYPE);
742 		if (ret)
743 			return ret;
744 		curr = LIST_NEXT(curr, next);
745 	}
746 
747 	return 0;
748 }
749 
750 static int
751 dpaa2_flow_faf_advance(struct dpaa2_dev_priv *priv,
752 	int faf_byte, enum dpaa2_flow_dist_type dist_type, int tc_id,
753 	int *insert_offset)
754 {
755 	int offset, ret;
756 	struct dpaa2_key_profile *key_profile;
757 	int num, pos;
758 
759 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
760 		key_profile = &priv->extract.qos_key_extract.key_profile;
761 	else
762 		key_profile = &priv->extract.tc_key_extract[tc_id].key_profile;
763 
764 	num = key_profile->num;
765 
766 	if (num >= DPKG_MAX_NUM_OF_EXTRACTS) {
767 		DPAA2_PMD_ERR("Number of extracts overflows");
768 		return -EINVAL;
769 	}
770 
771 	if (key_profile->ip_addr_type != IP_NONE_ADDR_EXTRACT) {
772 		offset = key_profile->ip_addr_extract_off;
773 		pos = key_profile->ip_addr_extract_pos;
774 		key_profile->ip_addr_extract_pos++;
775 		key_profile->ip_addr_extract_off++;
776 		if (dist_type == DPAA2_FLOW_QOS_TYPE) {
777 			ret = dpaa2_flow_qos_rule_insert_hole(priv,
778 					offset, 1);
779 		} else {
780 			ret = dpaa2_flow_fs_rule_insert_hole(priv,
781 				offset, 1, tc_id);
782 		}
783 		if (ret)
784 			return ret;
785 	} else {
786 		pos = num;
787 	}
788 
789 	if (pos > 0) {
790 		key_profile->key_offset[pos] =
791 			key_profile->key_offset[pos - 1] +
792 			key_profile->key_size[pos - 1];
793 	} else {
794 		key_profile->key_offset[pos] = 0;
795 	}
796 
797 	key_profile->key_size[pos] = 1;
798 	key_profile->prot_field[pos].type = DPAA2_FAF_KEY;
799 	key_profile->prot_field[pos].key_field = faf_byte;
800 	key_profile->num++;
801 
802 	if (insert_offset)
803 		*insert_offset = key_profile->key_offset[pos];
804 
805 	key_profile->key_max_size++;
806 
807 	return pos;
808 }
809 
810 static int
811 dpaa2_flow_pr_advance(struct dpaa2_dev_priv *priv,
812 	uint32_t pr_offset, uint32_t pr_size,
813 	enum dpaa2_flow_dist_type dist_type, int tc_id,
814 	int *insert_offset)
815 {
816 	int offset, ret;
817 	struct dpaa2_key_profile *key_profile;
818 	int num, pos;
819 
820 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
821 		key_profile = &priv->extract.qos_key_extract.key_profile;
822 	else
823 		key_profile = &priv->extract.tc_key_extract[tc_id].key_profile;
824 
825 	num = key_profile->num;
826 
827 	if (num >= DPKG_MAX_NUM_OF_EXTRACTS) {
828 		DPAA2_PMD_ERR("Number of extracts overflows");
829 		return -EINVAL;
830 	}
831 
832 	if (key_profile->ip_addr_type != IP_NONE_ADDR_EXTRACT) {
833 		offset = key_profile->ip_addr_extract_off;
834 		pos = key_profile->ip_addr_extract_pos;
835 		key_profile->ip_addr_extract_pos++;
836 		key_profile->ip_addr_extract_off += pr_size;
837 		if (dist_type == DPAA2_FLOW_QOS_TYPE) {
838 			ret = dpaa2_flow_qos_rule_insert_hole(priv,
839 					offset, pr_size);
840 		} else {
841 			ret = dpaa2_flow_fs_rule_insert_hole(priv,
842 				offset, pr_size, tc_id);
843 		}
844 		if (ret)
845 			return ret;
846 	} else {
847 		pos = num;
848 	}
849 
850 	if (pos > 0) {
851 		key_profile->key_offset[pos] =
852 			key_profile->key_offset[pos - 1] +
853 			key_profile->key_size[pos - 1];
854 	} else {
855 		key_profile->key_offset[pos] = 0;
856 	}
857 
858 	key_profile->key_size[pos] = pr_size;
859 	key_profile->prot_field[pos].type = DPAA2_PR_KEY;
860 	key_profile->prot_field[pos].key_field =
861 		(pr_offset << 16) | pr_size;
862 	key_profile->num++;
863 
864 	if (insert_offset)
865 		*insert_offset = key_profile->key_offset[pos];
866 
867 	key_profile->key_max_size += pr_size;
868 
869 	return pos;
870 }
871 
872 /* Move IPv4/IPv6 addresses to fill new extract previous IP address.
873  * Current MC/WRIOP only support generic IP extract but IP address
874  * is not fixed, so we have to put them at end of extracts, otherwise,
875  * the extracts position following them can't be identified.
876  */
877 static int
878 dpaa2_flow_key_profile_advance(enum net_prot prot,
879 	uint32_t field, uint8_t field_size,
880 	struct dpaa2_dev_priv *priv,
881 	enum dpaa2_flow_dist_type dist_type, int tc_id,
882 	int *insert_offset)
883 {
884 	int offset, ret;
885 	struct dpaa2_key_profile *key_profile;
886 	int num, pos;
887 
888 	if (dpaa2_flow_ip_address_extract(prot, field)) {
889 		DPAA2_PMD_ERR("%s only for none IP address extract",
890 			__func__);
891 		return -EINVAL;
892 	}
893 
894 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
895 		key_profile = &priv->extract.qos_key_extract.key_profile;
896 	else
897 		key_profile = &priv->extract.tc_key_extract[tc_id].key_profile;
898 
899 	num = key_profile->num;
900 
901 	if (num >= DPKG_MAX_NUM_OF_EXTRACTS) {
902 		DPAA2_PMD_ERR("Number of extracts overflows");
903 		return -EINVAL;
904 	}
905 
906 	if (key_profile->ip_addr_type != IP_NONE_ADDR_EXTRACT) {
907 		offset = key_profile->ip_addr_extract_off;
908 		pos = key_profile->ip_addr_extract_pos;
909 		key_profile->ip_addr_extract_pos++;
910 		key_profile->ip_addr_extract_off += field_size;
911 		if (dist_type == DPAA2_FLOW_QOS_TYPE) {
912 			ret = dpaa2_flow_qos_rule_insert_hole(priv,
913 					offset, field_size);
914 		} else {
915 			ret = dpaa2_flow_fs_rule_insert_hole(priv,
916 				offset, field_size, tc_id);
917 		}
918 		if (ret)
919 			return ret;
920 	} else {
921 		pos = num;
922 	}
923 
924 	if (pos > 0) {
925 		key_profile->key_offset[pos] =
926 			key_profile->key_offset[pos - 1] +
927 			key_profile->key_size[pos - 1];
928 	} else {
929 		key_profile->key_offset[pos] = 0;
930 	}
931 
932 	key_profile->key_size[pos] = field_size;
933 	key_profile->prot_field[pos].type = DPAA2_NET_PROT_KEY;
934 	key_profile->prot_field[pos].prot = prot;
935 	key_profile->prot_field[pos].key_field = field;
936 	key_profile->num++;
937 
938 	if (insert_offset)
939 		*insert_offset = key_profile->key_offset[pos];
940 
941 	if (dpaa2_flow_l4_src_port_extract(prot, field)) {
942 		key_profile->l4_src_port_present = 1;
943 		key_profile->l4_src_port_pos = pos;
944 		key_profile->l4_src_port_offset =
945 			key_profile->key_offset[pos];
946 	} else if (dpaa2_flow_l4_dst_port_extract(prot, field)) {
947 		key_profile->l4_dst_port_present = 1;
948 		key_profile->l4_dst_port_pos = pos;
949 		key_profile->l4_dst_port_offset =
950 			key_profile->key_offset[pos];
951 	}
952 	key_profile->key_max_size += field_size;
953 
954 	return pos;
955 }
956 
957 static int
958 dpaa2_flow_faf_add_hdr(int faf_byte,
959 	struct dpaa2_dev_priv *priv,
960 	enum dpaa2_flow_dist_type dist_type, int tc_id,
961 	int *insert_offset)
962 {
963 	int pos, i, offset;
964 	struct dpaa2_key_extract *key_extract;
965 	struct dpkg_profile_cfg *dpkg;
966 	struct dpkg_extract *extracts;
967 
968 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
969 		key_extract = &priv->extract.qos_key_extract;
970 	else
971 		key_extract = &priv->extract.tc_key_extract[tc_id];
972 
973 	dpkg = &key_extract->dpkg;
974 	extracts = dpkg->extracts;
975 
976 	if (dpkg->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
977 		DPAA2_PMD_ERR("Number of extracts overflows");
978 		return -EINVAL;
979 	}
980 
981 	pos = dpaa2_flow_faf_advance(priv,
982 			faf_byte, dist_type, tc_id,
983 			insert_offset);
984 	if (pos < 0)
985 		return pos;
986 
987 	if (pos != dpkg->num_extracts) {
988 		/* Not the last pos, must have IP address extract.*/
989 		for (i = dpkg->num_extracts - 1; i >= pos; i--) {
990 			memcpy(&extracts[i + 1],
991 				&extracts[i], sizeof(struct dpkg_extract));
992 		}
993 	}
994 
995 	offset = DPAA2_FAFE_PSR_OFFSET + faf_byte;
996 
997 	extracts[pos].type = DPKG_EXTRACT_FROM_PARSE;
998 	extracts[pos].extract.from_parse.offset = offset;
999 	extracts[pos].extract.from_parse.size = 1;
1000 
1001 	dpkg->num_extracts++;
1002 
1003 	return 0;
1004 }
1005 
1006 static int
1007 dpaa2_flow_pr_add_hdr(uint32_t pr_offset,
1008 	uint32_t pr_size, struct dpaa2_dev_priv *priv,
1009 	enum dpaa2_flow_dist_type dist_type, int tc_id,
1010 	int *insert_offset)
1011 {
1012 	int pos, i;
1013 	struct dpaa2_key_extract *key_extract;
1014 	struct dpkg_profile_cfg *dpkg;
1015 	struct dpkg_extract *extracts;
1016 
1017 	if ((pr_offset + pr_size) > DPAA2_FAPR_SIZE) {
1018 		DPAA2_PMD_ERR("PR extracts(%d:%d) overflow",
1019 			pr_offset, pr_size);
1020 		return -EINVAL;
1021 	}
1022 
1023 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
1024 		key_extract = &priv->extract.qos_key_extract;
1025 	else
1026 		key_extract = &priv->extract.tc_key_extract[tc_id];
1027 
1028 	dpkg = &key_extract->dpkg;
1029 	extracts = dpkg->extracts;
1030 
1031 	if (dpkg->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
1032 		DPAA2_PMD_ERR("Number of extracts overflows");
1033 		return -EINVAL;
1034 	}
1035 
1036 	pos = dpaa2_flow_pr_advance(priv,
1037 			pr_offset, pr_size, dist_type, tc_id,
1038 			insert_offset);
1039 	if (pos < 0)
1040 		return pos;
1041 
1042 	if (pos != dpkg->num_extracts) {
1043 		/* Not the last pos, must have IP address extract.*/
1044 		for (i = dpkg->num_extracts - 1; i >= pos; i--) {
1045 			memcpy(&extracts[i + 1],
1046 				&extracts[i], sizeof(struct dpkg_extract));
1047 		}
1048 	}
1049 
1050 	extracts[pos].type = DPKG_EXTRACT_FROM_PARSE;
1051 	extracts[pos].extract.from_parse.offset = pr_offset;
1052 	extracts[pos].extract.from_parse.size = pr_size;
1053 
1054 	dpkg->num_extracts++;
1055 
1056 	return 0;
1057 }
1058 
1059 static int
1060 dpaa2_flow_extract_add_hdr(enum net_prot prot,
1061 	uint32_t field, uint8_t field_size,
1062 	struct dpaa2_dev_priv *priv,
1063 	enum dpaa2_flow_dist_type dist_type, int tc_id,
1064 	int *insert_offset)
1065 {
1066 	int pos, i;
1067 	struct dpaa2_key_extract *key_extract;
1068 	struct dpkg_profile_cfg *dpkg;
1069 	struct dpkg_extract *extracts;
1070 
1071 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
1072 		key_extract = &priv->extract.qos_key_extract;
1073 	else
1074 		key_extract = &priv->extract.tc_key_extract[tc_id];
1075 
1076 	dpkg = &key_extract->dpkg;
1077 	extracts = dpkg->extracts;
1078 
1079 	if (dpaa2_flow_ip_address_extract(prot, field)) {
1080 		DPAA2_PMD_ERR("%s only for none IP address extract",
1081 			__func__);
1082 		return -EINVAL;
1083 	}
1084 
1085 	if (dpkg->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
1086 		DPAA2_PMD_ERR("Number of extracts overflows");
1087 		return -EINVAL;
1088 	}
1089 
1090 	pos = dpaa2_flow_key_profile_advance(prot,
1091 			field, field_size, priv,
1092 			dist_type, tc_id,
1093 			insert_offset);
1094 	if (pos < 0)
1095 		return pos;
1096 
1097 	if (pos != dpkg->num_extracts) {
1098 		/* Not the last pos, must have IP address extract.*/
1099 		for (i = dpkg->num_extracts - 1; i >= pos; i--) {
1100 			memcpy(&extracts[i + 1],
1101 				&extracts[i], sizeof(struct dpkg_extract));
1102 		}
1103 	}
1104 
1105 	extracts[pos].type = DPKG_EXTRACT_FROM_HDR;
1106 	extracts[pos].extract.from_hdr.prot = prot;
1107 	extracts[pos].extract.from_hdr.type = DPKG_FULL_FIELD;
1108 	extracts[pos].extract.from_hdr.field = field;
1109 
1110 	dpkg->num_extracts++;
1111 
1112 	return 0;
1113 }
1114 
1115 static int
1116 dpaa2_flow_extract_new_raw(struct dpaa2_dev_priv *priv,
1117 	int offset, int size,
1118 	enum dpaa2_flow_dist_type dist_type, int tc_id)
1119 {
1120 	struct dpaa2_key_extract *key_extract;
1121 	struct dpkg_profile_cfg *dpkg;
1122 	struct dpaa2_key_profile *key_profile;
1123 	int last_extract_size, index, pos, item_size;
1124 	uint8_t num_extracts;
1125 	uint32_t field;
1126 
1127 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
1128 		key_extract = &priv->extract.qos_key_extract;
1129 	else
1130 		key_extract = &priv->extract.tc_key_extract[tc_id];
1131 
1132 	dpkg = &key_extract->dpkg;
1133 	key_profile = &key_extract->key_profile;
1134 
1135 	key_profile->raw_region.raw_start = 0;
1136 	key_profile->raw_region.raw_size = 0;
1137 
1138 	last_extract_size = (size % DPAA2_FLOW_MAX_KEY_SIZE);
1139 	num_extracts = (size / DPAA2_FLOW_MAX_KEY_SIZE);
1140 	if (last_extract_size)
1141 		num_extracts++;
1142 	else
1143 		last_extract_size = DPAA2_FLOW_MAX_KEY_SIZE;
1144 
1145 	for (index = 0; index < num_extracts; index++) {
1146 		if (index == num_extracts - 1)
1147 			item_size = last_extract_size;
1148 		else
1149 			item_size = DPAA2_FLOW_MAX_KEY_SIZE;
1150 		field = offset << DPAA2_FLOW_RAW_OFFSET_FIELD_SHIFT;
1151 		field |= item_size;
1152 
1153 		pos = dpaa2_flow_key_profile_advance(NET_PROT_PAYLOAD,
1154 				field, item_size, priv, dist_type,
1155 				tc_id, NULL);
1156 		if (pos < 0)
1157 			return pos;
1158 
1159 		dpkg->extracts[pos].type = DPKG_EXTRACT_FROM_DATA;
1160 		dpkg->extracts[pos].extract.from_data.size = item_size;
1161 		dpkg->extracts[pos].extract.from_data.offset = offset;
1162 
1163 		if (index == 0) {
1164 			key_profile->raw_extract_pos = pos;
1165 			key_profile->raw_extract_off =
1166 				key_profile->key_offset[pos];
1167 			key_profile->raw_region.raw_start = offset;
1168 		}
1169 		key_profile->raw_extract_num++;
1170 		key_profile->raw_region.raw_size +=
1171 			key_profile->key_size[pos];
1172 
1173 		offset += item_size;
1174 		dpkg->num_extracts++;
1175 	}
1176 
1177 	return 0;
1178 }
1179 
1180 static int
1181 dpaa2_flow_extract_add_raw(struct dpaa2_dev_priv *priv,
1182 	int offset, int size, enum dpaa2_flow_dist_type dist_type,
1183 	int tc_id, int *recfg)
1184 {
1185 	struct dpaa2_key_profile *key_profile;
1186 	struct dpaa2_raw_region *raw_region;
1187 	int end = offset + size, ret = 0, extract_extended, sz_extend;
1188 	int start_cmp, end_cmp, new_size, index, pos, end_pos;
1189 	int last_extract_size, item_size, num_extracts, bk_num = 0;
1190 	struct dpkg_extract extract_bk[DPKG_MAX_NUM_OF_EXTRACTS];
1191 	uint8_t key_offset_bk[DPKG_MAX_NUM_OF_EXTRACTS];
1192 	uint8_t key_size_bk[DPKG_MAX_NUM_OF_EXTRACTS];
1193 	struct key_prot_field prot_field_bk[DPKG_MAX_NUM_OF_EXTRACTS];
1194 	struct dpaa2_raw_region raw_hole;
1195 	struct dpkg_profile_cfg *dpkg;
1196 	enum net_prot prot;
1197 	uint32_t field;
1198 
1199 	if (dist_type == DPAA2_FLOW_QOS_TYPE) {
1200 		key_profile = &priv->extract.qos_key_extract.key_profile;
1201 		dpkg = &priv->extract.qos_key_extract.dpkg;
1202 	} else {
1203 		key_profile = &priv->extract.tc_key_extract[tc_id].key_profile;
1204 		dpkg = &priv->extract.tc_key_extract[tc_id].dpkg;
1205 	}
1206 
1207 	raw_region = &key_profile->raw_region;
1208 	if (!raw_region->raw_size) {
1209 		/* New RAW region*/
1210 		ret = dpaa2_flow_extract_new_raw(priv, offset, size,
1211 			dist_type, tc_id);
1212 		if (!ret && recfg)
1213 			(*recfg) |= dist_type;
1214 
1215 		return ret;
1216 	}
1217 	start_cmp = raw_region->raw_start;
1218 	end_cmp = raw_region->raw_start + raw_region->raw_size;
1219 
1220 	if (offset >= start_cmp && end <= end_cmp)
1221 		return 0;
1222 
1223 	sz_extend = 0;
1224 	new_size = raw_region->raw_size;
1225 	if (offset < start_cmp) {
1226 		sz_extend += start_cmp - offset;
1227 		new_size += (start_cmp - offset);
1228 	}
1229 	if (end > end_cmp) {
1230 		sz_extend += end - end_cmp;
1231 		new_size += (end - end_cmp);
1232 	}
1233 
1234 	last_extract_size = (new_size % DPAA2_FLOW_MAX_KEY_SIZE);
1235 	num_extracts = (new_size / DPAA2_FLOW_MAX_KEY_SIZE);
1236 	if (last_extract_size)
1237 		num_extracts++;
1238 	else
1239 		last_extract_size = DPAA2_FLOW_MAX_KEY_SIZE;
1240 
1241 	if ((key_profile->num + num_extracts -
1242 		key_profile->raw_extract_num) >=
1243 		DPKG_MAX_NUM_OF_EXTRACTS) {
1244 		DPAA2_PMD_ERR("%s Failed to expand raw extracts",
1245 			__func__);
1246 		return -EINVAL;
1247 	}
1248 
1249 	if (offset < start_cmp) {
1250 		raw_hole.raw_start = key_profile->raw_extract_off;
1251 		raw_hole.raw_size = start_cmp - offset;
1252 		raw_region->raw_start = offset;
1253 		raw_region->raw_size += start_cmp - offset;
1254 
1255 		if (dist_type & DPAA2_FLOW_QOS_TYPE) {
1256 			ret = dpaa2_flow_qos_rule_insert_hole(priv,
1257 					raw_hole.raw_start,
1258 					raw_hole.raw_size);
1259 			if (ret)
1260 				return ret;
1261 		}
1262 		if (dist_type & DPAA2_FLOW_FS_TYPE) {
1263 			ret = dpaa2_flow_fs_rule_insert_hole(priv,
1264 					raw_hole.raw_start,
1265 					raw_hole.raw_size, tc_id);
1266 			if (ret)
1267 				return ret;
1268 		}
1269 	}
1270 
1271 	if (end > end_cmp) {
1272 		raw_hole.raw_start =
1273 			key_profile->raw_extract_off +
1274 			raw_region->raw_size;
1275 		raw_hole.raw_size = end - end_cmp;
1276 		raw_region->raw_size += end - end_cmp;
1277 
1278 		if (dist_type & DPAA2_FLOW_QOS_TYPE) {
1279 			ret = dpaa2_flow_qos_rule_insert_hole(priv,
1280 					raw_hole.raw_start,
1281 					raw_hole.raw_size);
1282 			if (ret)
1283 				return ret;
1284 		}
1285 		if (dist_type & DPAA2_FLOW_FS_TYPE) {
1286 			ret = dpaa2_flow_fs_rule_insert_hole(priv,
1287 					raw_hole.raw_start,
1288 					raw_hole.raw_size, tc_id);
1289 			if (ret)
1290 				return ret;
1291 		}
1292 	}
1293 
1294 	end_pos = key_profile->raw_extract_pos +
1295 		key_profile->raw_extract_num;
1296 	if (key_profile->num > end_pos) {
1297 		bk_num = key_profile->num - end_pos;
1298 		memcpy(extract_bk, &dpkg->extracts[end_pos],
1299 			bk_num * sizeof(struct dpkg_extract));
1300 		memcpy(key_offset_bk, &key_profile->key_offset[end_pos],
1301 			bk_num * sizeof(uint8_t));
1302 		memcpy(key_size_bk, &key_profile->key_size[end_pos],
1303 			bk_num * sizeof(uint8_t));
1304 		memcpy(prot_field_bk, &key_profile->prot_field[end_pos],
1305 			bk_num * sizeof(struct key_prot_field));
1306 
1307 		for (index = 0; index < bk_num; index++) {
1308 			key_offset_bk[index] += sz_extend;
1309 			prot = prot_field_bk[index].prot;
1310 			field = prot_field_bk[index].key_field;
1311 			if (dpaa2_flow_l4_src_port_extract(prot,
1312 				field)) {
1313 				key_profile->l4_src_port_present = 1;
1314 				key_profile->l4_src_port_pos = end_pos + index;
1315 				key_profile->l4_src_port_offset =
1316 					key_offset_bk[index];
1317 			} else if (dpaa2_flow_l4_dst_port_extract(prot,
1318 				field)) {
1319 				key_profile->l4_dst_port_present = 1;
1320 				key_profile->l4_dst_port_pos = end_pos + index;
1321 				key_profile->l4_dst_port_offset =
1322 					key_offset_bk[index];
1323 			}
1324 		}
1325 	}
1326 
1327 	pos = key_profile->raw_extract_pos;
1328 
1329 	for (index = 0; index < num_extracts; index++) {
1330 		if (index == num_extracts - 1)
1331 			item_size = last_extract_size;
1332 		else
1333 			item_size = DPAA2_FLOW_MAX_KEY_SIZE;
1334 		field = offset << DPAA2_FLOW_RAW_OFFSET_FIELD_SHIFT;
1335 		field |= item_size;
1336 
1337 		if (pos > 0) {
1338 			key_profile->key_offset[pos] =
1339 				key_profile->key_offset[pos - 1] +
1340 				key_profile->key_size[pos - 1];
1341 		} else {
1342 			key_profile->key_offset[pos] = 0;
1343 		}
1344 		key_profile->key_size[pos] = item_size;
1345 		key_profile->prot_field[pos].type = DPAA2_NET_PROT_KEY;
1346 		key_profile->prot_field[pos].prot = NET_PROT_PAYLOAD;
1347 		key_profile->prot_field[pos].key_field = field;
1348 
1349 		dpkg->extracts[pos].type = DPKG_EXTRACT_FROM_DATA;
1350 		dpkg->extracts[pos].extract.from_data.size = item_size;
1351 		dpkg->extracts[pos].extract.from_data.offset = offset;
1352 		offset += item_size;
1353 		pos++;
1354 	}
1355 
1356 	if (bk_num) {
1357 		memcpy(&dpkg->extracts[pos], extract_bk,
1358 			bk_num * sizeof(struct dpkg_extract));
1359 		memcpy(&key_profile->key_offset[end_pos],
1360 			key_offset_bk, bk_num * sizeof(uint8_t));
1361 		memcpy(&key_profile->key_size[end_pos],
1362 			key_size_bk, bk_num * sizeof(uint8_t));
1363 		memcpy(&key_profile->prot_field[end_pos],
1364 			prot_field_bk, bk_num * sizeof(struct key_prot_field));
1365 	}
1366 
1367 	extract_extended = num_extracts - key_profile->raw_extract_num;
1368 	if (key_profile->ip_addr_type != IP_NONE_ADDR_EXTRACT) {
1369 		key_profile->ip_addr_extract_pos += extract_extended;
1370 		key_profile->ip_addr_extract_off += sz_extend;
1371 	}
1372 	key_profile->raw_extract_num = num_extracts;
1373 	key_profile->num += extract_extended;
1374 	key_profile->key_max_size += sz_extend;
1375 
1376 	dpkg->num_extracts += extract_extended;
1377 	if (!ret && recfg)
1378 		(*recfg) |= dist_type;
1379 
1380 	return ret;
1381 }
1382 
1383 static inline int
1384 dpaa2_flow_extract_search(struct dpaa2_key_profile *key_profile,
1385 	enum key_prot_type type, enum net_prot prot, uint32_t key_field)
1386 {
1387 	int pos;
1388 	struct key_prot_field *prot_field;
1389 
1390 	if (dpaa2_flow_ip_address_extract(prot, key_field)) {
1391 		DPAA2_PMD_ERR("%s only for none IP address extract",
1392 			__func__);
1393 		return -EINVAL;
1394 	}
1395 
1396 	prot_field = key_profile->prot_field;
1397 	for (pos = 0; pos < key_profile->num; pos++) {
1398 		if (type == DPAA2_NET_PROT_KEY &&
1399 			prot_field[pos].prot == prot &&
1400 			prot_field[pos].key_field == key_field &&
1401 			prot_field[pos].type == type)
1402 			return pos;
1403 		else if (type == DPAA2_FAF_KEY &&
1404 			prot_field[pos].key_field == key_field &&
1405 			prot_field[pos].type == type)
1406 			return pos;
1407 		else if (type == DPAA2_PR_KEY &&
1408 			prot_field[pos].key_field == key_field &&
1409 			prot_field[pos].type == type)
1410 			return pos;
1411 	}
1412 
1413 	if (type == DPAA2_NET_PROT_KEY &&
1414 		dpaa2_flow_l4_src_port_extract(prot, key_field)) {
1415 		if (key_profile->l4_src_port_present)
1416 			return key_profile->l4_src_port_pos;
1417 	} else if (type == DPAA2_NET_PROT_KEY &&
1418 		dpaa2_flow_l4_dst_port_extract(prot, key_field)) {
1419 		if (key_profile->l4_dst_port_present)
1420 			return key_profile->l4_dst_port_pos;
1421 	}
1422 
1423 	return -ENXIO;
1424 }
1425 
1426 static inline int
1427 dpaa2_flow_extract_key_offset(struct dpaa2_key_profile *key_profile,
1428 	enum key_prot_type type, enum net_prot prot, uint32_t key_field)
1429 {
1430 	int i;
1431 
1432 	i = dpaa2_flow_extract_search(key_profile, type, prot, key_field);
1433 	if (i >= 0)
1434 		return key_profile->key_offset[i];
1435 	else
1436 		return i;
1437 }
1438 
1439 static int
1440 dpaa2_flow_faf_add_rule(struct dpaa2_dev_priv *priv,
1441 	struct dpaa2_dev_flow *flow,
1442 	enum dpaa2_rx_faf_offset faf_bit_off,
1443 	int group,
1444 	enum dpaa2_flow_dist_type dist_type)
1445 {
1446 	int offset;
1447 	uint8_t *key_addr;
1448 	uint8_t *mask_addr;
1449 	struct dpaa2_key_extract *key_extract;
1450 	struct dpaa2_key_profile *key_profile;
1451 	uint8_t faf_byte = faf_bit_off / 8;
1452 	uint8_t faf_bit_in_byte = faf_bit_off % 8;
1453 
1454 	faf_bit_in_byte = 7 - faf_bit_in_byte;
1455 
1456 	if (dist_type & DPAA2_FLOW_QOS_TYPE) {
1457 		key_extract = &priv->extract.qos_key_extract;
1458 		key_profile = &key_extract->key_profile;
1459 
1460 		offset = dpaa2_flow_extract_key_offset(key_profile,
1461 				DPAA2_FAF_KEY, NET_PROT_NONE, faf_byte);
1462 		if (offset < 0) {
1463 			DPAA2_PMD_ERR("%s QoS key extract failed", __func__);
1464 			return -EINVAL;
1465 		}
1466 		key_addr = flow->qos_key_addr + offset;
1467 		mask_addr = flow->qos_mask_addr + offset;
1468 
1469 		if (!(*key_addr) &&
1470 			key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT)
1471 			flow->qos_rule_size++;
1472 
1473 		*key_addr |=  (1 << faf_bit_in_byte);
1474 		*mask_addr |=  (1 << faf_bit_in_byte);
1475 	}
1476 
1477 	if (dist_type & DPAA2_FLOW_FS_TYPE) {
1478 		key_extract = &priv->extract.tc_key_extract[group];
1479 		key_profile = &key_extract->key_profile;
1480 
1481 		offset = dpaa2_flow_extract_key_offset(key_profile,
1482 				DPAA2_FAF_KEY, NET_PROT_NONE, faf_byte);
1483 		if (offset < 0) {
1484 			DPAA2_PMD_ERR("%s TC[%d] key extract failed",
1485 				__func__, group);
1486 			return -EINVAL;
1487 		}
1488 		key_addr = flow->fs_key_addr + offset;
1489 		mask_addr = flow->fs_mask_addr + offset;
1490 
1491 		if (!(*key_addr) &&
1492 			key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT)
1493 			flow->fs_rule_size++;
1494 
1495 		*key_addr |=  (1 << faf_bit_in_byte);
1496 		*mask_addr |=  (1 << faf_bit_in_byte);
1497 	}
1498 
1499 	return 0;
1500 }
1501 
1502 static inline int
1503 dpaa2_flow_pr_rule_data_set(struct dpaa2_dev_flow *flow,
1504 	struct dpaa2_key_profile *key_profile,
1505 	uint32_t pr_offset, uint32_t pr_size,
1506 	const void *key, const void *mask,
1507 	enum dpaa2_flow_dist_type dist_type)
1508 {
1509 	int offset;
1510 	uint32_t pr_field = pr_offset << 16 | pr_size;
1511 
1512 	offset = dpaa2_flow_extract_key_offset(key_profile,
1513 			DPAA2_PR_KEY, NET_PROT_NONE, pr_field);
1514 	if (offset < 0) {
1515 		DPAA2_PMD_ERR("PR off(%d)/size(%d) does not exist!",
1516 			pr_offset, pr_size);
1517 		return -EINVAL;
1518 	}
1519 
1520 	if (dist_type & DPAA2_FLOW_QOS_TYPE) {
1521 		memcpy((flow->qos_key_addr + offset), key, pr_size);
1522 		memcpy((flow->qos_mask_addr + offset), mask, pr_size);
1523 		if (key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT)
1524 			flow->qos_rule_size = offset + pr_size;
1525 	}
1526 
1527 	if (dist_type & DPAA2_FLOW_FS_TYPE) {
1528 		memcpy((flow->fs_key_addr + offset), key, pr_size);
1529 		memcpy((flow->fs_mask_addr + offset), mask, pr_size);
1530 		if (key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT)
1531 			flow->fs_rule_size = offset + pr_size;
1532 	}
1533 
1534 	return 0;
1535 }
1536 
1537 static inline int
1538 dpaa2_flow_hdr_rule_data_set(struct dpaa2_dev_flow *flow,
1539 	struct dpaa2_key_profile *key_profile,
1540 	enum net_prot prot, uint32_t field, int size,
1541 	const void *key, const void *mask,
1542 	enum dpaa2_flow_dist_type dist_type)
1543 {
1544 	int offset;
1545 
1546 	if (dpaa2_flow_ip_address_extract(prot, field)) {
1547 		DPAA2_PMD_ERR("%s only for none IP address extract",
1548 			__func__);
1549 		return -EINVAL;
1550 	}
1551 
1552 	offset = dpaa2_flow_extract_key_offset(key_profile,
1553 			DPAA2_NET_PROT_KEY, prot, field);
1554 	if (offset < 0) {
1555 		DPAA2_PMD_ERR("P(%d)/F(%d) does not exist!",
1556 			prot, field);
1557 		return -EINVAL;
1558 	}
1559 
1560 	if (dist_type & DPAA2_FLOW_QOS_TYPE) {
1561 		memcpy((flow->qos_key_addr + offset), key, size);
1562 		memcpy((flow->qos_mask_addr + offset), mask, size);
1563 		if (key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT)
1564 			flow->qos_rule_size = offset + size;
1565 	}
1566 
1567 	if (dist_type & DPAA2_FLOW_FS_TYPE) {
1568 		memcpy((flow->fs_key_addr + offset), key, size);
1569 		memcpy((flow->fs_mask_addr + offset), mask, size);
1570 		if (key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT)
1571 			flow->fs_rule_size = offset + size;
1572 	}
1573 
1574 	return 0;
1575 }
1576 
1577 static inline int
1578 dpaa2_flow_raw_rule_data_set(struct dpaa2_dev_flow *flow,
1579 	struct dpaa2_key_profile *key_profile,
1580 	uint32_t extract_offset, int size,
1581 	const void *key, const void *mask,
1582 	enum dpaa2_flow_dist_type dist_type)
1583 {
1584 	int extract_size = size > DPAA2_FLOW_MAX_KEY_SIZE ?
1585 		DPAA2_FLOW_MAX_KEY_SIZE : size;
1586 	int offset, field;
1587 
1588 	field = extract_offset << DPAA2_FLOW_RAW_OFFSET_FIELD_SHIFT;
1589 	field |= extract_size;
1590 	offset = dpaa2_flow_extract_key_offset(key_profile,
1591 			DPAA2_NET_PROT_KEY, NET_PROT_PAYLOAD, field);
1592 	if (offset < 0) {
1593 		DPAA2_PMD_ERR("offset(%d)/size(%d) raw extract failed",
1594 			extract_offset, size);
1595 		return -EINVAL;
1596 	}
1597 
1598 	if (dist_type & DPAA2_FLOW_QOS_TYPE) {
1599 		memcpy((flow->qos_key_addr + offset), key, size);
1600 		memcpy((flow->qos_mask_addr + offset), mask, size);
1601 		flow->qos_rule_size = offset + size;
1602 	}
1603 
1604 	if (dist_type & DPAA2_FLOW_FS_TYPE) {
1605 		memcpy((flow->fs_key_addr + offset), key, size);
1606 		memcpy((flow->fs_mask_addr + offset), mask, size);
1607 		flow->fs_rule_size = offset + size;
1608 	}
1609 
1610 	return 0;
1611 }
1612 
1613 static int
1614 dpaa2_flow_extract_support(const uint8_t *mask_src,
1615 	enum rte_flow_item_type type)
1616 {
1617 	char mask[64];
1618 	int i, size = 0;
1619 	const char *mask_support = 0;
1620 
1621 	switch (type) {
1622 	case RTE_FLOW_ITEM_TYPE_ETH:
1623 		mask_support = (const char *)&dpaa2_flow_item_eth_mask;
1624 		size = sizeof(struct rte_flow_item_eth);
1625 		break;
1626 	case RTE_FLOW_ITEM_TYPE_VLAN:
1627 		mask_support = (const char *)&dpaa2_flow_item_vlan_mask;
1628 		size = sizeof(struct rte_flow_item_vlan);
1629 		break;
1630 	case RTE_FLOW_ITEM_TYPE_IPV4:
1631 		mask_support = (const char *)&dpaa2_flow_item_ipv4_mask;
1632 		size = sizeof(struct rte_flow_item_ipv4);
1633 		break;
1634 	case RTE_FLOW_ITEM_TYPE_IPV6:
1635 		mask_support = (const char *)&dpaa2_flow_item_ipv6_mask;
1636 		size = sizeof(struct rte_flow_item_ipv6);
1637 		break;
1638 	case RTE_FLOW_ITEM_TYPE_ICMP:
1639 		mask_support = (const char *)&dpaa2_flow_item_icmp_mask;
1640 		size = sizeof(struct rte_flow_item_icmp);
1641 		break;
1642 	case RTE_FLOW_ITEM_TYPE_UDP:
1643 		mask_support = (const char *)&dpaa2_flow_item_udp_mask;
1644 		size = sizeof(struct rte_flow_item_udp);
1645 		break;
1646 	case RTE_FLOW_ITEM_TYPE_TCP:
1647 		mask_support = (const char *)&dpaa2_flow_item_tcp_mask;
1648 		size = sizeof(struct rte_flow_item_tcp);
1649 		break;
1650 	case RTE_FLOW_ITEM_TYPE_SCTP:
1651 		mask_support = (const char *)&dpaa2_flow_item_sctp_mask;
1652 		size = sizeof(struct rte_flow_item_sctp);
1653 		break;
1654 	case RTE_FLOW_ITEM_TYPE_GRE:
1655 		mask_support = (const char *)&dpaa2_flow_item_gre_mask;
1656 		size = sizeof(struct rte_flow_item_gre);
1657 		break;
1658 	case RTE_FLOW_ITEM_TYPE_VXLAN:
1659 		mask_support = (const char *)&dpaa2_flow_item_vxlan_mask;
1660 		size = sizeof(struct rte_flow_item_vxlan);
1661 		break;
1662 	case RTE_FLOW_ITEM_TYPE_ECPRI:
1663 		mask_support = (const char *)&dpaa2_flow_item_ecpri_mask;
1664 		size = sizeof(struct rte_flow_item_ecpri);
1665 		break;
1666 	case RTE_FLOW_ITEM_TYPE_GTP:
1667 		mask_support = (const char *)&dpaa2_flow_item_gtp_mask;
1668 		size = sizeof(struct rte_flow_item_gtp);
1669 		break;
1670 	default:
1671 		return -EINVAL;
1672 	}
1673 
1674 	memcpy(mask, mask_support, size);
1675 
1676 	for (i = 0; i < size; i++)
1677 		mask[i] = (mask[i] | mask_src[i]);
1678 
1679 	if (memcmp(mask, mask_support, size))
1680 		return -1;
1681 
1682 	return 0;
1683 }
1684 
1685 static int
1686 dpaa2_flow_identify_by_faf(struct dpaa2_dev_priv *priv,
1687 	struct dpaa2_dev_flow *flow,
1688 	enum dpaa2_rx_faf_offset faf_off,
1689 	enum dpaa2_flow_dist_type dist_type,
1690 	int group, int *recfg)
1691 {
1692 	int ret, index, local_cfg = 0;
1693 	struct dpaa2_key_extract *extract;
1694 	struct dpaa2_key_profile *key_profile;
1695 	uint8_t faf_byte = faf_off / 8;
1696 
1697 	if (dist_type & DPAA2_FLOW_QOS_TYPE) {
1698 		extract = &priv->extract.qos_key_extract;
1699 		key_profile = &extract->key_profile;
1700 
1701 		index = dpaa2_flow_extract_search(key_profile,
1702 				DPAA2_FAF_KEY, NET_PROT_NONE, faf_byte);
1703 		if (index < 0) {
1704 			ret = dpaa2_flow_faf_add_hdr(faf_byte,
1705 					priv, DPAA2_FLOW_QOS_TYPE, group,
1706 					NULL);
1707 			if (ret) {
1708 				DPAA2_PMD_ERR("QOS faf extract add failed");
1709 
1710 				return -EINVAL;
1711 			}
1712 			local_cfg |= DPAA2_FLOW_QOS_TYPE;
1713 		}
1714 
1715 		ret = dpaa2_flow_faf_add_rule(priv, flow, faf_off, group,
1716 				DPAA2_FLOW_QOS_TYPE);
1717 		if (ret) {
1718 			DPAA2_PMD_ERR("QoS faf rule set failed");
1719 			return -EINVAL;
1720 		}
1721 	}
1722 
1723 	if (dist_type & DPAA2_FLOW_FS_TYPE) {
1724 		extract = &priv->extract.tc_key_extract[group];
1725 		key_profile = &extract->key_profile;
1726 
1727 		index = dpaa2_flow_extract_search(key_profile,
1728 				DPAA2_FAF_KEY, NET_PROT_NONE, faf_byte);
1729 		if (index < 0) {
1730 			ret = dpaa2_flow_faf_add_hdr(faf_byte,
1731 					priv, DPAA2_FLOW_FS_TYPE, group,
1732 					NULL);
1733 			if (ret) {
1734 				DPAA2_PMD_ERR("FS[%d] faf extract add failed",
1735 					group);
1736 
1737 				return -EINVAL;
1738 			}
1739 			local_cfg |= DPAA2_FLOW_FS_TYPE;
1740 		}
1741 
1742 		ret = dpaa2_flow_faf_add_rule(priv, flow, faf_off, group,
1743 				DPAA2_FLOW_FS_TYPE);
1744 		if (ret) {
1745 			DPAA2_PMD_ERR("FS[%d] faf rule set failed",
1746 				group);
1747 			return -EINVAL;
1748 		}
1749 	}
1750 
1751 	if (recfg)
1752 		*recfg |= local_cfg;
1753 
1754 	return 0;
1755 }
1756 
1757 static int
1758 dpaa2_flow_add_pr_extract_rule(struct dpaa2_dev_flow *flow,
1759 	uint32_t pr_offset, uint32_t pr_size,
1760 	const void *key, const void *mask,
1761 	struct dpaa2_dev_priv *priv, int tc_id, int *recfg,
1762 	enum dpaa2_flow_dist_type dist_type)
1763 {
1764 	int index, ret, local_cfg = 0;
1765 	struct dpaa2_key_extract *key_extract;
1766 	struct dpaa2_key_profile *key_profile;
1767 	uint32_t pr_field = pr_offset << 16 | pr_size;
1768 
1769 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
1770 		key_extract = &priv->extract.qos_key_extract;
1771 	else
1772 		key_extract = &priv->extract.tc_key_extract[tc_id];
1773 
1774 	key_profile = &key_extract->key_profile;
1775 
1776 	index = dpaa2_flow_extract_search(key_profile,
1777 			DPAA2_PR_KEY, NET_PROT_NONE, pr_field);
1778 	if (index < 0) {
1779 		ret = dpaa2_flow_pr_add_hdr(pr_offset,
1780 				pr_size, priv,
1781 				dist_type, tc_id, NULL);
1782 		if (ret) {
1783 			DPAA2_PMD_ERR("PR add off(%d)/size(%d) failed",
1784 				pr_offset, pr_size);
1785 
1786 			return ret;
1787 		}
1788 		local_cfg |= dist_type;
1789 	}
1790 
1791 	ret = dpaa2_flow_pr_rule_data_set(flow, key_profile,
1792 			pr_offset, pr_size, key, mask, dist_type);
1793 	if (ret) {
1794 		DPAA2_PMD_ERR("PR off(%d)/size(%d) rule data set failed",
1795 			pr_offset, pr_size);
1796 
1797 		return ret;
1798 	}
1799 
1800 	if (recfg)
1801 		*recfg |= local_cfg;
1802 
1803 	return 0;
1804 }
1805 
1806 static int
1807 dpaa2_flow_add_hdr_extract_rule(struct dpaa2_dev_flow *flow,
1808 	enum net_prot prot, uint32_t field,
1809 	const void *key, const void *mask, int size,
1810 	struct dpaa2_dev_priv *priv, int tc_id, int *recfg,
1811 	enum dpaa2_flow_dist_type dist_type)
1812 {
1813 	int index, ret, local_cfg = 0;
1814 	struct dpaa2_key_extract *key_extract;
1815 	struct dpaa2_key_profile *key_profile;
1816 
1817 	if (dpaa2_flow_ip_address_extract(prot, field))
1818 		return -EINVAL;
1819 
1820 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
1821 		key_extract = &priv->extract.qos_key_extract;
1822 	else
1823 		key_extract = &priv->extract.tc_key_extract[tc_id];
1824 
1825 	key_profile = &key_extract->key_profile;
1826 
1827 	index = dpaa2_flow_extract_search(key_profile,
1828 			DPAA2_NET_PROT_KEY, prot, field);
1829 	if (index < 0) {
1830 		ret = dpaa2_flow_extract_add_hdr(prot,
1831 				field, size, priv,
1832 				dist_type, tc_id, NULL);
1833 		if (ret) {
1834 			DPAA2_PMD_ERR("QoS Extract P(%d)/F(%d) failed",
1835 				prot, field);
1836 
1837 			return ret;
1838 		}
1839 		local_cfg |= dist_type;
1840 	}
1841 
1842 	ret = dpaa2_flow_hdr_rule_data_set(flow, key_profile,
1843 			prot, field, size, key, mask, dist_type);
1844 	if (ret) {
1845 		DPAA2_PMD_ERR("QoS P(%d)/F(%d) rule data set failed",
1846 			prot, field);
1847 
1848 		return ret;
1849 	}
1850 
1851 	if (recfg)
1852 		*recfg |= local_cfg;
1853 
1854 	return 0;
1855 }
1856 
1857 static int
1858 dpaa2_flow_add_ipaddr_extract_rule(struct dpaa2_dev_flow *flow,
1859 	enum net_prot prot, uint32_t field,
1860 	const void *key, const void *mask, int size,
1861 	struct dpaa2_dev_priv *priv, int tc_id, int *recfg,
1862 	enum dpaa2_flow_dist_type dist_type)
1863 {
1864 	int local_cfg = 0, num, ipaddr_extract_len = 0;
1865 	struct dpaa2_key_extract *key_extract;
1866 	struct dpaa2_key_profile *key_profile;
1867 	struct dpkg_profile_cfg *dpkg;
1868 	uint8_t *key_addr, *mask_addr;
1869 	union ip_addr_extract_rule *ip_addr_data;
1870 	union ip_addr_extract_rule *ip_addr_mask;
1871 	enum net_prot orig_prot;
1872 	uint32_t orig_field;
1873 
1874 	if (prot != NET_PROT_IPV4 && prot != NET_PROT_IPV6)
1875 		return -EINVAL;
1876 
1877 	if (prot == NET_PROT_IPV4 && field != NH_FLD_IPV4_SRC_IP &&
1878 		field != NH_FLD_IPV4_DST_IP) {
1879 		return -EINVAL;
1880 	}
1881 
1882 	if (prot == NET_PROT_IPV6 && field != NH_FLD_IPV6_SRC_IP &&
1883 		field != NH_FLD_IPV6_DST_IP) {
1884 		return -EINVAL;
1885 	}
1886 
1887 	orig_prot = prot;
1888 	orig_field = field;
1889 
1890 	if (prot == NET_PROT_IPV4 &&
1891 		field == NH_FLD_IPV4_SRC_IP) {
1892 		prot = NET_PROT_IP;
1893 		field = NH_FLD_IP_SRC;
1894 	} else if (prot == NET_PROT_IPV4 &&
1895 		field == NH_FLD_IPV4_DST_IP) {
1896 		prot = NET_PROT_IP;
1897 		field = NH_FLD_IP_DST;
1898 	} else if (prot == NET_PROT_IPV6 &&
1899 		field == NH_FLD_IPV6_SRC_IP) {
1900 		prot = NET_PROT_IP;
1901 		field = NH_FLD_IP_SRC;
1902 	} else if (prot == NET_PROT_IPV6 &&
1903 		field == NH_FLD_IPV6_DST_IP) {
1904 		prot = NET_PROT_IP;
1905 		field = NH_FLD_IP_DST;
1906 	} else {
1907 		DPAA2_PMD_ERR("Inval P(%d)/F(%d) to extract ip address",
1908 			prot, field);
1909 		return -EINVAL;
1910 	}
1911 
1912 	if (dist_type == DPAA2_FLOW_QOS_TYPE) {
1913 		key_extract = &priv->extract.qos_key_extract;
1914 		key_profile = &key_extract->key_profile;
1915 		dpkg = &key_extract->dpkg;
1916 		num = key_profile->num;
1917 		key_addr = flow->qos_key_addr;
1918 		mask_addr = flow->qos_mask_addr;
1919 	} else {
1920 		key_extract = &priv->extract.tc_key_extract[tc_id];
1921 		key_profile = &key_extract->key_profile;
1922 		dpkg = &key_extract->dpkg;
1923 		num = key_profile->num;
1924 		key_addr = flow->fs_key_addr;
1925 		mask_addr = flow->fs_mask_addr;
1926 	}
1927 
1928 	if (num >= DPKG_MAX_NUM_OF_EXTRACTS) {
1929 		DPAA2_PMD_ERR("Number of extracts overflows");
1930 		return -EINVAL;
1931 	}
1932 
1933 	if (key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT) {
1934 		if (field == NH_FLD_IP_SRC)
1935 			key_profile->ip_addr_type = IP_SRC_EXTRACT;
1936 		else
1937 			key_profile->ip_addr_type = IP_DST_EXTRACT;
1938 		ipaddr_extract_len = size;
1939 
1940 		key_profile->ip_addr_extract_pos = num;
1941 		if (num > 0) {
1942 			key_profile->ip_addr_extract_off =
1943 				key_profile->key_offset[num - 1] +
1944 				key_profile->key_size[num - 1];
1945 		} else {
1946 			key_profile->ip_addr_extract_off = 0;
1947 		}
1948 		key_profile->key_max_size += NH_FLD_IPV6_ADDR_SIZE;
1949 	} else if (key_profile->ip_addr_type == IP_SRC_EXTRACT) {
1950 		if (field == NH_FLD_IP_SRC) {
1951 			ipaddr_extract_len = size;
1952 			goto rule_configure;
1953 		}
1954 		key_profile->ip_addr_type = IP_SRC_DST_EXTRACT;
1955 		ipaddr_extract_len = size * 2;
1956 		key_profile->key_max_size += NH_FLD_IPV6_ADDR_SIZE;
1957 	} else if (key_profile->ip_addr_type == IP_DST_EXTRACT) {
1958 		if (field == NH_FLD_IP_DST) {
1959 			ipaddr_extract_len = size;
1960 			goto rule_configure;
1961 		}
1962 		key_profile->ip_addr_type = IP_DST_SRC_EXTRACT;
1963 		ipaddr_extract_len = size * 2;
1964 		key_profile->key_max_size += NH_FLD_IPV6_ADDR_SIZE;
1965 	}
1966 	key_profile->num++;
1967 	key_profile->prot_field[num].type = DPAA2_NET_PROT_KEY;
1968 
1969 	dpkg->extracts[num].extract.from_hdr.prot = prot;
1970 	dpkg->extracts[num].extract.from_hdr.field = field;
1971 	dpkg->extracts[num].extract.from_hdr.type = DPKG_FULL_FIELD;
1972 	dpkg->num_extracts++;
1973 
1974 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
1975 		local_cfg = DPAA2_FLOW_QOS_TYPE;
1976 	else
1977 		local_cfg = DPAA2_FLOW_FS_TYPE;
1978 
1979 rule_configure:
1980 	key_addr += key_profile->ip_addr_extract_off;
1981 	ip_addr_data = (union ip_addr_extract_rule *)key_addr;
1982 	mask_addr += key_profile->ip_addr_extract_off;
1983 	ip_addr_mask = (union ip_addr_extract_rule *)mask_addr;
1984 
1985 	if (orig_prot == NET_PROT_IPV4 &&
1986 		orig_field == NH_FLD_IPV4_SRC_IP) {
1987 		if (key_profile->ip_addr_type == IP_SRC_EXTRACT ||
1988 			key_profile->ip_addr_type == IP_SRC_DST_EXTRACT) {
1989 			memcpy(&ip_addr_data->ipv4_sd_addr.ipv4_src,
1990 				key, size);
1991 			memcpy(&ip_addr_mask->ipv4_sd_addr.ipv4_src,
1992 				mask, size);
1993 		} else {
1994 			memcpy(&ip_addr_data->ipv4_ds_addr.ipv4_src,
1995 				key, size);
1996 			memcpy(&ip_addr_mask->ipv4_ds_addr.ipv4_src,
1997 				mask, size);
1998 		}
1999 	} else if (orig_prot == NET_PROT_IPV4 &&
2000 		orig_field == NH_FLD_IPV4_DST_IP) {
2001 		if (key_profile->ip_addr_type == IP_DST_EXTRACT ||
2002 			key_profile->ip_addr_type == IP_DST_SRC_EXTRACT) {
2003 			memcpy(&ip_addr_data->ipv4_ds_addr.ipv4_dst,
2004 				key, size);
2005 			memcpy(&ip_addr_mask->ipv4_ds_addr.ipv4_dst,
2006 				mask, size);
2007 		} else {
2008 			memcpy(&ip_addr_data->ipv4_sd_addr.ipv4_dst,
2009 				key, size);
2010 			memcpy(&ip_addr_mask->ipv4_sd_addr.ipv4_dst,
2011 				mask, size);
2012 		}
2013 	} else if (orig_prot == NET_PROT_IPV6 &&
2014 		orig_field == NH_FLD_IPV6_SRC_IP) {
2015 		if (key_profile->ip_addr_type == IP_SRC_EXTRACT ||
2016 			key_profile->ip_addr_type == IP_SRC_DST_EXTRACT) {
2017 			memcpy(ip_addr_data->ipv6_sd_addr.ipv6_src,
2018 				key, size);
2019 			memcpy(ip_addr_mask->ipv6_sd_addr.ipv6_src,
2020 				mask, size);
2021 		} else {
2022 			memcpy(ip_addr_data->ipv6_ds_addr.ipv6_src,
2023 				key, size);
2024 			memcpy(ip_addr_mask->ipv6_ds_addr.ipv6_src,
2025 				mask, size);
2026 		}
2027 	} else if (orig_prot == NET_PROT_IPV6 &&
2028 		orig_field == NH_FLD_IPV6_DST_IP) {
2029 		if (key_profile->ip_addr_type == IP_DST_EXTRACT ||
2030 			key_profile->ip_addr_type == IP_DST_SRC_EXTRACT) {
2031 			memcpy(ip_addr_data->ipv6_ds_addr.ipv6_dst,
2032 				key, size);
2033 			memcpy(ip_addr_mask->ipv6_ds_addr.ipv6_dst,
2034 				mask, size);
2035 		} else {
2036 			memcpy(ip_addr_data->ipv6_sd_addr.ipv6_dst,
2037 				key, size);
2038 			memcpy(ip_addr_mask->ipv6_sd_addr.ipv6_dst,
2039 				mask, size);
2040 		}
2041 	}
2042 
2043 	if (dist_type == DPAA2_FLOW_QOS_TYPE) {
2044 		flow->qos_rule_size =
2045 			key_profile->ip_addr_extract_off + ipaddr_extract_len;
2046 	} else {
2047 		flow->fs_rule_size =
2048 			key_profile->ip_addr_extract_off + ipaddr_extract_len;
2049 	}
2050 
2051 	if (recfg)
2052 		*recfg |= local_cfg;
2053 
2054 	return 0;
2055 }
2056 
2057 static int
2058 dpaa2_configure_flow_tunnel_eth(struct dpaa2_dev_flow *flow,
2059 	struct rte_eth_dev *dev,
2060 	const struct rte_flow_attr *attr,
2061 	const struct rte_flow_item *pattern,
2062 	int *device_configured)
2063 {
2064 	int ret, local_cfg = 0;
2065 	uint32_t group;
2066 	const struct rte_flow_item_eth *spec, *mask;
2067 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2068 	const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0};
2069 
2070 	group = attr->group;
2071 
2072 	/* Parse pattern list to get the matching parameters */
2073 	spec = pattern->spec;
2074 	mask = pattern->mask ?
2075 			pattern->mask : &dpaa2_flow_item_eth_mask;
2076 
2077 	/* Get traffic class index and flow id to be configured */
2078 	flow->tc_id = group;
2079 	flow->tc_index = attr->priority;
2080 
2081 	if (!spec)
2082 		return 0;
2083 
2084 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2085 		RTE_FLOW_ITEM_TYPE_ETH)) {
2086 		DPAA2_PMD_WARN("Extract field(s) of ethernet failed");
2087 
2088 		return -EINVAL;
2089 	}
2090 
2091 	if (memcmp((const char *)&mask->src,
2092 		zero_cmp, RTE_ETHER_ADDR_LEN)) {
2093 		/*SRC[0:1]*/
2094 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2095 			DPAA2_VXLAN_IN_SADDR0_OFFSET,
2096 			1, &spec->src.addr_bytes[0],
2097 			&mask->src.addr_bytes[0],
2098 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2099 		if (ret)
2100 			return ret;
2101 		/*SRC[1:2]*/
2102 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2103 			DPAA2_VXLAN_IN_SADDR1_OFFSET,
2104 			2, &spec->src.addr_bytes[1],
2105 			&mask->src.addr_bytes[1],
2106 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2107 		if (ret)
2108 			return ret;
2109 		/*SRC[3:1]*/
2110 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2111 			DPAA2_VXLAN_IN_SADDR3_OFFSET,
2112 			1, &spec->src.addr_bytes[3],
2113 			&mask->src.addr_bytes[3],
2114 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2115 		if (ret)
2116 			return ret;
2117 		/*SRC[4:2]*/
2118 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2119 			DPAA2_VXLAN_IN_SADDR4_OFFSET,
2120 			2, &spec->src.addr_bytes[4],
2121 			&mask->src.addr_bytes[4],
2122 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2123 		if (ret)
2124 			return ret;
2125 
2126 		/*SRC[0:1]*/
2127 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2128 			DPAA2_VXLAN_IN_SADDR0_OFFSET,
2129 			1, &spec->src.addr_bytes[0],
2130 			&mask->src.addr_bytes[0],
2131 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2132 		if (ret)
2133 			return ret;
2134 		/*SRC[1:2]*/
2135 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2136 			DPAA2_VXLAN_IN_SADDR1_OFFSET,
2137 			2, &spec->src.addr_bytes[1],
2138 			&mask->src.addr_bytes[1],
2139 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2140 		if (ret)
2141 			return ret;
2142 		/*SRC[3:1]*/
2143 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2144 			DPAA2_VXLAN_IN_SADDR3_OFFSET,
2145 			1, &spec->src.addr_bytes[3],
2146 			&mask->src.addr_bytes[3],
2147 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2148 		if (ret)
2149 			return ret;
2150 		/*SRC[4:2]*/
2151 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2152 			DPAA2_VXLAN_IN_SADDR4_OFFSET,
2153 			2, &spec->src.addr_bytes[4],
2154 			&mask->src.addr_bytes[4],
2155 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2156 		if (ret)
2157 			return ret;
2158 	}
2159 
2160 	if (memcmp((const char *)&mask->dst,
2161 		zero_cmp, RTE_ETHER_ADDR_LEN)) {
2162 		/*DST[0:1]*/
2163 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2164 			DPAA2_VXLAN_IN_DADDR0_OFFSET,
2165 			1, &spec->dst.addr_bytes[0],
2166 			&mask->dst.addr_bytes[0],
2167 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2168 		if (ret)
2169 			return ret;
2170 		/*DST[1:1]*/
2171 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2172 			DPAA2_VXLAN_IN_DADDR1_OFFSET,
2173 			1, &spec->dst.addr_bytes[1],
2174 			&mask->dst.addr_bytes[1],
2175 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2176 		if (ret)
2177 			return ret;
2178 		/*DST[2:3]*/
2179 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2180 			DPAA2_VXLAN_IN_DADDR2_OFFSET,
2181 			3, &spec->dst.addr_bytes[2],
2182 			&mask->dst.addr_bytes[2],
2183 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2184 		if (ret)
2185 			return ret;
2186 		/*DST[5:1]*/
2187 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2188 			DPAA2_VXLAN_IN_DADDR5_OFFSET,
2189 			1, &spec->dst.addr_bytes[5],
2190 			&mask->dst.addr_bytes[5],
2191 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2192 		if (ret)
2193 			return ret;
2194 
2195 		/*DST[0:1]*/
2196 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2197 			DPAA2_VXLAN_IN_DADDR0_OFFSET,
2198 			1, &spec->dst.addr_bytes[0],
2199 			&mask->dst.addr_bytes[0],
2200 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2201 		if (ret)
2202 			return ret;
2203 		/*DST[1:1]*/
2204 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2205 			DPAA2_VXLAN_IN_DADDR1_OFFSET,
2206 			1, &spec->dst.addr_bytes[1],
2207 			&mask->dst.addr_bytes[1],
2208 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2209 		if (ret)
2210 			return ret;
2211 		/*DST[2:3]*/
2212 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2213 			DPAA2_VXLAN_IN_DADDR2_OFFSET,
2214 			3, &spec->dst.addr_bytes[2],
2215 			&mask->dst.addr_bytes[2],
2216 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2217 		if (ret)
2218 			return ret;
2219 		/*DST[5:1]*/
2220 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2221 			DPAA2_VXLAN_IN_DADDR5_OFFSET,
2222 			1, &spec->dst.addr_bytes[5],
2223 			&mask->dst.addr_bytes[5],
2224 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2225 		if (ret)
2226 			return ret;
2227 	}
2228 
2229 	if (memcmp((const char *)&mask->type,
2230 		zero_cmp, sizeof(rte_be16_t))) {
2231 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2232 			DPAA2_VXLAN_IN_TYPE_OFFSET,
2233 			sizeof(rte_be16_t), &spec->type, &mask->type,
2234 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2235 		if (ret)
2236 			return ret;
2237 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2238 			DPAA2_VXLAN_IN_TYPE_OFFSET,
2239 			sizeof(rte_be16_t), &spec->type, &mask->type,
2240 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2241 		if (ret)
2242 			return ret;
2243 	}
2244 
2245 	(*device_configured) |= local_cfg;
2246 
2247 	return 0;
2248 }
2249 
2250 static int
2251 dpaa2_configure_flow_eth(struct dpaa2_dev_flow *flow,
2252 	struct rte_eth_dev *dev,
2253 	const struct rte_flow_attr *attr,
2254 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
2255 	const struct rte_flow_action actions[] __rte_unused,
2256 	struct rte_flow_error *error __rte_unused,
2257 	int *device_configured)
2258 {
2259 	int ret, local_cfg = 0;
2260 	uint32_t group;
2261 	const struct rte_flow_item_eth *spec, *mask;
2262 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2263 	const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0};
2264 	const struct rte_flow_item *pattern =
2265 		&dpaa2_pattern->generic_item;
2266 
2267 	if (dpaa2_pattern->in_tunnel) {
2268 		return dpaa2_configure_flow_tunnel_eth(flow,
2269 				dev, attr, pattern, device_configured);
2270 	}
2271 
2272 	group = attr->group;
2273 
2274 	/* Parse pattern list to get the matching parameters */
2275 	spec = pattern->spec;
2276 	mask = pattern->mask ?
2277 			pattern->mask : &dpaa2_flow_item_eth_mask;
2278 
2279 	/* Get traffic class index and flow id to be configured */
2280 	flow->tc_id = group;
2281 	flow->tc_index = attr->priority;
2282 
2283 	if (!spec) {
2284 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2285 				FAF_ETH_FRAM, DPAA2_FLOW_QOS_TYPE,
2286 				group, &local_cfg);
2287 		if (ret)
2288 			return ret;
2289 
2290 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2291 				FAF_ETH_FRAM, DPAA2_FLOW_FS_TYPE,
2292 				group, &local_cfg);
2293 		if (ret)
2294 			return ret;
2295 
2296 		(*device_configured) |= local_cfg;
2297 		return 0;
2298 	}
2299 
2300 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2301 		RTE_FLOW_ITEM_TYPE_ETH)) {
2302 		DPAA2_PMD_WARN("Extract field(s) of ethernet failed");
2303 
2304 		return -EINVAL;
2305 	}
2306 
2307 	if (memcmp((const char *)&mask->src,
2308 		zero_cmp, RTE_ETHER_ADDR_LEN)) {
2309 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH,
2310 			NH_FLD_ETH_SA, &spec->src.addr_bytes,
2311 			&mask->src.addr_bytes, RTE_ETHER_ADDR_LEN,
2312 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2313 		if (ret)
2314 			return ret;
2315 
2316 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH,
2317 			NH_FLD_ETH_SA, &spec->src.addr_bytes,
2318 			&mask->src.addr_bytes, RTE_ETHER_ADDR_LEN,
2319 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2320 		if (ret)
2321 			return ret;
2322 	}
2323 
2324 	if (memcmp((const char *)&mask->dst,
2325 		zero_cmp, RTE_ETHER_ADDR_LEN)) {
2326 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH,
2327 			NH_FLD_ETH_DA, &spec->dst.addr_bytes,
2328 			&mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN,
2329 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2330 		if (ret)
2331 			return ret;
2332 
2333 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH,
2334 			NH_FLD_ETH_DA, &spec->dst.addr_bytes,
2335 			&mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN,
2336 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2337 		if (ret)
2338 			return ret;
2339 	}
2340 
2341 	if (memcmp((const char *)&mask->type,
2342 		zero_cmp, sizeof(rte_be16_t))) {
2343 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH,
2344 			NH_FLD_ETH_TYPE, &spec->type,
2345 			&mask->type, sizeof(rte_be16_t),
2346 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2347 		if (ret)
2348 			return ret;
2349 
2350 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH,
2351 			NH_FLD_ETH_TYPE, &spec->type,
2352 			&mask->type, sizeof(rte_be16_t),
2353 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2354 		if (ret)
2355 			return ret;
2356 	}
2357 
2358 	(*device_configured) |= local_cfg;
2359 
2360 	return 0;
2361 }
2362 
2363 static int
2364 dpaa2_configure_flow_tunnel_vlan(struct dpaa2_dev_flow *flow,
2365 	struct rte_eth_dev *dev,
2366 	const struct rte_flow_attr *attr,
2367 	const struct rte_flow_item *pattern,
2368 	int *device_configured)
2369 {
2370 	int ret, local_cfg = 0;
2371 	uint32_t group;
2372 	const struct rte_flow_item_vlan *spec, *mask;
2373 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2374 
2375 	group = attr->group;
2376 
2377 	/* Parse pattern list to get the matching parameters */
2378 	spec = pattern->spec;
2379 	mask = pattern->mask ?
2380 		pattern->mask : &dpaa2_flow_item_vlan_mask;
2381 
2382 	/* Get traffic class index and flow id to be configured */
2383 	flow->tc_id = group;
2384 	flow->tc_index = attr->priority;
2385 
2386 	if (!spec) {
2387 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2388 				FAFE_VXLAN_IN_VLAN_FRAM,
2389 				DPAA2_FLOW_QOS_TYPE,
2390 				group, &local_cfg);
2391 		if (ret)
2392 			return ret;
2393 
2394 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2395 				FAFE_VXLAN_IN_VLAN_FRAM,
2396 				DPAA2_FLOW_FS_TYPE,
2397 				group, &local_cfg);
2398 		if (ret)
2399 			return ret;
2400 
2401 		(*device_configured) |= local_cfg;
2402 		return 0;
2403 	}
2404 
2405 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2406 		RTE_FLOW_ITEM_TYPE_VLAN)) {
2407 		DPAA2_PMD_WARN("Extract field(s) of vlan not support.");
2408 
2409 		return -EINVAL;
2410 	}
2411 
2412 	if (!mask->tci)
2413 		return 0;
2414 
2415 	ret = dpaa2_flow_add_pr_extract_rule(flow,
2416 			DPAA2_VXLAN_IN_TCI_OFFSET,
2417 			sizeof(rte_be16_t), &spec->tci, &mask->tci,
2418 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2419 	if (ret)
2420 		return ret;
2421 
2422 	ret = dpaa2_flow_add_pr_extract_rule(flow,
2423 			DPAA2_VXLAN_IN_TCI_OFFSET,
2424 			sizeof(rte_be16_t), &spec->tci, &mask->tci,
2425 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2426 	if (ret)
2427 		return ret;
2428 
2429 	(*device_configured) |= local_cfg;
2430 
2431 	return 0;
2432 }
2433 
2434 static int
2435 dpaa2_configure_flow_vlan(struct dpaa2_dev_flow *flow,
2436 	struct rte_eth_dev *dev,
2437 	const struct rte_flow_attr *attr,
2438 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
2439 	const struct rte_flow_action actions[] __rte_unused,
2440 	struct rte_flow_error *error __rte_unused,
2441 	int *device_configured)
2442 {
2443 	int ret, local_cfg = 0;
2444 	uint32_t group;
2445 	const struct rte_flow_item_vlan *spec, *mask;
2446 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2447 	const struct rte_flow_item *pattern =
2448 		&dpaa2_pattern->generic_item;
2449 
2450 	if (dpaa2_pattern->in_tunnel) {
2451 		return dpaa2_configure_flow_tunnel_vlan(flow,
2452 				dev, attr, pattern, device_configured);
2453 	}
2454 
2455 	group = attr->group;
2456 
2457 	/* Parse pattern list to get the matching parameters */
2458 	spec = pattern->spec;
2459 	mask = pattern->mask ? pattern->mask : &dpaa2_flow_item_vlan_mask;
2460 
2461 	/* Get traffic class index and flow id to be configured */
2462 	flow->tc_id = group;
2463 	flow->tc_index = attr->priority;
2464 
2465 	if (!spec) {
2466 		ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_VLAN_FRAM,
2467 						 DPAA2_FLOW_QOS_TYPE, group,
2468 						 &local_cfg);
2469 		if (ret)
2470 			return ret;
2471 
2472 		ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_VLAN_FRAM,
2473 						 DPAA2_FLOW_FS_TYPE, group,
2474 						 &local_cfg);
2475 		if (ret)
2476 			return ret;
2477 
2478 		(*device_configured) |= local_cfg;
2479 		return 0;
2480 	}
2481 
2482 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2483 				       RTE_FLOW_ITEM_TYPE_VLAN)) {
2484 		DPAA2_PMD_WARN("Extract field(s) of vlan not support.");
2485 		return -EINVAL;
2486 	}
2487 
2488 	if (!mask->tci)
2489 		return 0;
2490 
2491 	ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_VLAN,
2492 					      NH_FLD_VLAN_TCI, &spec->tci,
2493 					      &mask->tci, sizeof(rte_be16_t),
2494 					      priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2495 	if (ret)
2496 		return ret;
2497 
2498 	ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_VLAN,
2499 					      NH_FLD_VLAN_TCI, &spec->tci,
2500 					      &mask->tci, sizeof(rte_be16_t),
2501 					      priv, group, &local_cfg,
2502 					      DPAA2_FLOW_FS_TYPE);
2503 	if (ret)
2504 		return ret;
2505 
2506 	(*device_configured) |= local_cfg;
2507 	return 0;
2508 }
2509 
2510 static int
2511 dpaa2_configure_flow_ipv4(struct dpaa2_dev_flow *flow, struct rte_eth_dev *dev,
2512 			  const struct rte_flow_attr *attr,
2513 			  const struct rte_dpaa2_flow_item *dpaa2_pattern,
2514 			  const struct rte_flow_action actions[] __rte_unused,
2515 			  struct rte_flow_error *error __rte_unused,
2516 			  int *device_configured)
2517 {
2518 	int ret, local_cfg = 0;
2519 	uint32_t group;
2520 	const struct rte_flow_item_ipv4 *spec_ipv4 = 0, *mask_ipv4 = 0;
2521 	const void *key, *mask;
2522 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2523 	int size;
2524 	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
2525 
2526 	group = attr->group;
2527 
2528 	/* Parse pattern list to get the matching parameters */
2529 	spec_ipv4 = pattern->spec;
2530 	mask_ipv4 = pattern->mask ?
2531 		    pattern->mask : &dpaa2_flow_item_ipv4_mask;
2532 
2533 	if (dpaa2_pattern->in_tunnel) {
2534 		if (spec_ipv4) {
2535 			DPAA2_PMD_ERR("Tunnel-IPv4 distribution not support");
2536 			return -ENOTSUP;
2537 		}
2538 
2539 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2540 						 FAFE_VXLAN_IN_IPV4_FRAM,
2541 						 DPAA2_FLOW_QOS_TYPE, group,
2542 						 &local_cfg);
2543 		if (ret)
2544 			return ret;
2545 
2546 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2547 						 FAFE_VXLAN_IN_IPV4_FRAM,
2548 						 DPAA2_FLOW_FS_TYPE, group,
2549 						 &local_cfg);
2550 		return ret;
2551 	}
2552 
2553 	/* Get traffic class index and flow id to be configured */
2554 	flow->tc_id = group;
2555 	flow->tc_index = attr->priority;
2556 
2557 	ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_IPV4_FRAM,
2558 					 DPAA2_FLOW_QOS_TYPE, group,
2559 					 &local_cfg);
2560 	if (ret)
2561 		return ret;
2562 
2563 	ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_IPV4_FRAM,
2564 					 DPAA2_FLOW_FS_TYPE, group, &local_cfg);
2565 	if (ret)
2566 		return ret;
2567 
2568 	if (!spec_ipv4) {
2569 		(*device_configured) |= local_cfg;
2570 		return 0;
2571 	}
2572 
2573 	if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv4,
2574 				       RTE_FLOW_ITEM_TYPE_IPV4)) {
2575 		DPAA2_PMD_WARN("Extract field(s) of IPv4 not support.");
2576 		return -EINVAL;
2577 	}
2578 
2579 	if (mask_ipv4->hdr.src_addr) {
2580 		key = &spec_ipv4->hdr.src_addr;
2581 		mask = &mask_ipv4->hdr.src_addr;
2582 		size = sizeof(rte_be32_t);
2583 
2584 		ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV4,
2585 							 NH_FLD_IPV4_SRC_IP,
2586 							 key, mask, size, priv,
2587 							 group, &local_cfg,
2588 							 DPAA2_FLOW_QOS_TYPE);
2589 		if (ret)
2590 			return ret;
2591 
2592 		ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV4,
2593 							 NH_FLD_IPV4_SRC_IP,
2594 							 key, mask, size, priv,
2595 							 group, &local_cfg,
2596 							 DPAA2_FLOW_FS_TYPE);
2597 		if (ret)
2598 			return ret;
2599 	}
2600 
2601 	if (mask_ipv4->hdr.dst_addr) {
2602 		key = &spec_ipv4->hdr.dst_addr;
2603 		mask = &mask_ipv4->hdr.dst_addr;
2604 		size = sizeof(rte_be32_t);
2605 
2606 		ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV4,
2607 							 NH_FLD_IPV4_DST_IP,
2608 							 key, mask, size, priv,
2609 							 group, &local_cfg,
2610 							 DPAA2_FLOW_QOS_TYPE);
2611 		if (ret)
2612 			return ret;
2613 		ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV4,
2614 							 NH_FLD_IPV4_DST_IP,
2615 							 key, mask, size, priv,
2616 							 group, &local_cfg,
2617 							 DPAA2_FLOW_FS_TYPE);
2618 		if (ret)
2619 			return ret;
2620 	}
2621 
2622 	if (mask_ipv4->hdr.next_proto_id) {
2623 		key = &spec_ipv4->hdr.next_proto_id;
2624 		mask = &mask_ipv4->hdr.next_proto_id;
2625 		size = sizeof(uint8_t);
2626 
2627 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IP,
2628 						      NH_FLD_IP_PROTO, key,
2629 						      mask, size, priv, group,
2630 						      &local_cfg,
2631 						      DPAA2_FLOW_QOS_TYPE);
2632 		if (ret)
2633 			return ret;
2634 
2635 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IP,
2636 						      NH_FLD_IP_PROTO, key,
2637 						      mask, size, priv, group,
2638 						      &local_cfg,
2639 						      DPAA2_FLOW_FS_TYPE);
2640 		if (ret)
2641 			return ret;
2642 	}
2643 
2644 	(*device_configured) |= local_cfg;
2645 	return 0;
2646 }
2647 
2648 static int
2649 dpaa2_configure_flow_ipv6(struct dpaa2_dev_flow *flow, struct rte_eth_dev *dev,
2650 			  const struct rte_flow_attr *attr,
2651 			  const struct rte_dpaa2_flow_item *dpaa2_pattern,
2652 			  const struct rte_flow_action actions[] __rte_unused,
2653 			  struct rte_flow_error *error __rte_unused,
2654 			  int *device_configured)
2655 {
2656 	int ret, local_cfg = 0;
2657 	uint32_t group;
2658 	const struct rte_flow_item_ipv6 *spec_ipv6 = 0, *mask_ipv6 = 0;
2659 	const void *key, *mask;
2660 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2661 	const char zero_cmp[NH_FLD_IPV6_ADDR_SIZE] = {0};
2662 	int size;
2663 	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
2664 
2665 	group = attr->group;
2666 
2667 	/* Parse pattern list to get the matching parameters */
2668 	spec_ipv6 = pattern->spec;
2669 	mask_ipv6 = pattern->mask ? pattern->mask : &dpaa2_flow_item_ipv6_mask;
2670 
2671 	/* Get traffic class index and flow id to be configured */
2672 	flow->tc_id = group;
2673 	flow->tc_index = attr->priority;
2674 
2675 	if (dpaa2_pattern->in_tunnel) {
2676 		if (spec_ipv6) {
2677 			DPAA2_PMD_ERR("Tunnel-IPv6 distribution not support");
2678 			return -ENOTSUP;
2679 		}
2680 
2681 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2682 						 FAFE_VXLAN_IN_IPV6_FRAM,
2683 						 DPAA2_FLOW_QOS_TYPE, group,
2684 						 &local_cfg);
2685 		if (ret)
2686 			return ret;
2687 
2688 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2689 						 FAFE_VXLAN_IN_IPV6_FRAM,
2690 						 DPAA2_FLOW_FS_TYPE, group,
2691 						 &local_cfg);
2692 		return ret;
2693 	}
2694 
2695 	ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_IPV6_FRAM,
2696 					 DPAA2_FLOW_QOS_TYPE, group,
2697 					 &local_cfg);
2698 	if (ret)
2699 		return ret;
2700 
2701 	ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_IPV6_FRAM,
2702 					 DPAA2_FLOW_FS_TYPE, group, &local_cfg);
2703 	if (ret)
2704 		return ret;
2705 
2706 	if (!spec_ipv6) {
2707 		(*device_configured) |= local_cfg;
2708 		return 0;
2709 	}
2710 
2711 	if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv6,
2712 				       RTE_FLOW_ITEM_TYPE_IPV6)) {
2713 		DPAA2_PMD_WARN("Extract field(s) of IPv6 not support.");
2714 		return -EINVAL;
2715 	}
2716 
2717 	if (memcmp((const char *)&mask_ipv6->hdr.src_addr, zero_cmp, NH_FLD_IPV6_ADDR_SIZE)) {
2718 		key = &spec_ipv6->hdr.src_addr;
2719 		mask = &mask_ipv6->hdr.src_addr;
2720 		size = NH_FLD_IPV6_ADDR_SIZE;
2721 
2722 		ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV6,
2723 							 NH_FLD_IPV6_SRC_IP,
2724 							 key, mask, size, priv,
2725 							 group, &local_cfg,
2726 							 DPAA2_FLOW_QOS_TYPE);
2727 		if (ret)
2728 			return ret;
2729 
2730 		ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV6,
2731 							 NH_FLD_IPV6_SRC_IP,
2732 							 key, mask, size, priv,
2733 							 group, &local_cfg,
2734 							 DPAA2_FLOW_FS_TYPE);
2735 		if (ret)
2736 			return ret;
2737 	}
2738 
2739 	if (memcmp((const char *)&mask_ipv6->hdr.dst_addr, zero_cmp, NH_FLD_IPV6_ADDR_SIZE)) {
2740 		key = &spec_ipv6->hdr.dst_addr;
2741 		mask = &mask_ipv6->hdr.dst_addr;
2742 		size = NH_FLD_IPV6_ADDR_SIZE;
2743 
2744 		ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV6,
2745 							 NH_FLD_IPV6_DST_IP,
2746 							 key, mask, size, priv,
2747 							 group, &local_cfg,
2748 							 DPAA2_FLOW_QOS_TYPE);
2749 		if (ret)
2750 			return ret;
2751 
2752 		ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV6,
2753 							 NH_FLD_IPV6_DST_IP,
2754 							 key, mask, size, priv,
2755 							 group, &local_cfg,
2756 							 DPAA2_FLOW_FS_TYPE);
2757 		if (ret)
2758 			return ret;
2759 	}
2760 
2761 	if (mask_ipv6->hdr.proto) {
2762 		key = &spec_ipv6->hdr.proto;
2763 		mask = &mask_ipv6->hdr.proto;
2764 		size = sizeof(uint8_t);
2765 
2766 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IP,
2767 						      NH_FLD_IP_PROTO, key,
2768 						      mask, size, priv, group,
2769 						      &local_cfg,
2770 						      DPAA2_FLOW_QOS_TYPE);
2771 		if (ret)
2772 			return ret;
2773 
2774 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IP,
2775 						      NH_FLD_IP_PROTO, key,
2776 						      mask, size, priv, group,
2777 						      &local_cfg,
2778 						      DPAA2_FLOW_FS_TYPE);
2779 		if (ret)
2780 			return ret;
2781 	}
2782 
2783 	(*device_configured) |= local_cfg;
2784 	return 0;
2785 }
2786 
2787 static int
2788 dpaa2_configure_flow_icmp(struct dpaa2_dev_flow *flow,
2789 	struct rte_eth_dev *dev,
2790 	const struct rte_flow_attr *attr,
2791 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
2792 	const struct rte_flow_action actions[] __rte_unused,
2793 	struct rte_flow_error *error __rte_unused,
2794 	int *device_configured)
2795 {
2796 	int ret, local_cfg = 0;
2797 	uint32_t group;
2798 	const struct rte_flow_item_icmp *spec, *mask;
2799 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2800 	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
2801 
2802 	group = attr->group;
2803 
2804 	/* Parse pattern list to get the matching parameters */
2805 	spec = pattern->spec;
2806 	mask = pattern->mask ?
2807 		pattern->mask : &dpaa2_flow_item_icmp_mask;
2808 
2809 	/* Get traffic class index and flow id to be configured */
2810 	flow->tc_id = group;
2811 	flow->tc_index = attr->priority;
2812 
2813 	if (dpaa2_pattern->in_tunnel) {
2814 		DPAA2_PMD_ERR("Tunnel-ICMP distribution not support");
2815 		return -ENOTSUP;
2816 	}
2817 
2818 	if (!spec) {
2819 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2820 				FAF_ICMP_FRAM, DPAA2_FLOW_QOS_TYPE,
2821 				group, &local_cfg);
2822 		if (ret)
2823 			return ret;
2824 
2825 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2826 				FAF_ICMP_FRAM, DPAA2_FLOW_FS_TYPE,
2827 				group, &local_cfg);
2828 		if (ret)
2829 			return ret;
2830 
2831 		(*device_configured) |= local_cfg;
2832 		return 0;
2833 	}
2834 
2835 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2836 		RTE_FLOW_ITEM_TYPE_ICMP)) {
2837 		DPAA2_PMD_WARN("Extract field(s) of ICMP not support.");
2838 
2839 		return -EINVAL;
2840 	}
2841 
2842 	if (mask->hdr.icmp_type) {
2843 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ICMP,
2844 			NH_FLD_ICMP_TYPE, &spec->hdr.icmp_type,
2845 			&mask->hdr.icmp_type, sizeof(uint8_t),
2846 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2847 		if (ret)
2848 			return ret;
2849 
2850 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ICMP,
2851 			NH_FLD_ICMP_TYPE, &spec->hdr.icmp_type,
2852 			&mask->hdr.icmp_type, sizeof(uint8_t),
2853 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2854 		if (ret)
2855 			return ret;
2856 	}
2857 
2858 	if (mask->hdr.icmp_code) {
2859 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ICMP,
2860 			NH_FLD_ICMP_CODE, &spec->hdr.icmp_code,
2861 			&mask->hdr.icmp_code, sizeof(uint8_t),
2862 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2863 		if (ret)
2864 			return ret;
2865 
2866 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ICMP,
2867 			NH_FLD_ICMP_CODE, &spec->hdr.icmp_code,
2868 			&mask->hdr.icmp_code, sizeof(uint8_t),
2869 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2870 		if (ret)
2871 			return ret;
2872 	}
2873 
2874 	(*device_configured) |= local_cfg;
2875 
2876 	return 0;
2877 }
2878 
2879 static int
2880 dpaa2_configure_flow_udp(struct dpaa2_dev_flow *flow,
2881 	struct rte_eth_dev *dev,
2882 	const struct rte_flow_attr *attr,
2883 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
2884 	const struct rte_flow_action actions[] __rte_unused,
2885 	struct rte_flow_error *error __rte_unused,
2886 	int *device_configured)
2887 {
2888 	int ret, local_cfg = 0;
2889 	uint32_t group;
2890 	const struct rte_flow_item_udp *spec, *mask;
2891 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2892 	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
2893 
2894 	group = attr->group;
2895 
2896 	/* Parse pattern list to get the matching parameters */
2897 	spec = pattern->spec;
2898 	mask = pattern->mask ?
2899 		pattern->mask : &dpaa2_flow_item_udp_mask;
2900 
2901 	/* Get traffic class index and flow id to be configured */
2902 	flow->tc_id = group;
2903 	flow->tc_index = attr->priority;
2904 
2905 	if (dpaa2_pattern->in_tunnel) {
2906 		if (spec) {
2907 			DPAA2_PMD_ERR("Tunnel-UDP distribution not support");
2908 			return -ENOTSUP;
2909 		}
2910 
2911 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2912 						 FAFE_VXLAN_IN_UDP_FRAM,
2913 						 DPAA2_FLOW_QOS_TYPE, group,
2914 						 &local_cfg);
2915 		if (ret)
2916 			return ret;
2917 
2918 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2919 						 FAFE_VXLAN_IN_UDP_FRAM,
2920 						 DPAA2_FLOW_FS_TYPE, group,
2921 						 &local_cfg);
2922 		return ret;
2923 	}
2924 
2925 	ret = dpaa2_flow_identify_by_faf(priv, flow,
2926 			FAF_UDP_FRAM, DPAA2_FLOW_QOS_TYPE,
2927 			group, &local_cfg);
2928 	if (ret)
2929 		return ret;
2930 
2931 	ret = dpaa2_flow_identify_by_faf(priv, flow,
2932 			FAF_UDP_FRAM, DPAA2_FLOW_FS_TYPE,
2933 			group, &local_cfg);
2934 	if (ret)
2935 		return ret;
2936 
2937 	if (!spec) {
2938 		(*device_configured) |= local_cfg;
2939 		return 0;
2940 	}
2941 
2942 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2943 		RTE_FLOW_ITEM_TYPE_UDP)) {
2944 		DPAA2_PMD_WARN("Extract field(s) of UDP not support.");
2945 
2946 		return -EINVAL;
2947 	}
2948 
2949 	if (mask->hdr.src_port) {
2950 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_UDP,
2951 			NH_FLD_UDP_PORT_SRC, &spec->hdr.src_port,
2952 			&mask->hdr.src_port, sizeof(rte_be16_t),
2953 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2954 		if (ret)
2955 			return ret;
2956 
2957 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_UDP,
2958 			NH_FLD_UDP_PORT_SRC, &spec->hdr.src_port,
2959 			&mask->hdr.src_port, sizeof(rte_be16_t),
2960 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2961 		if (ret)
2962 			return ret;
2963 	}
2964 
2965 	if (mask->hdr.dst_port) {
2966 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_UDP,
2967 			NH_FLD_UDP_PORT_DST, &spec->hdr.dst_port,
2968 			&mask->hdr.dst_port, sizeof(rte_be16_t),
2969 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2970 		if (ret)
2971 			return ret;
2972 
2973 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_UDP,
2974 			NH_FLD_UDP_PORT_DST, &spec->hdr.dst_port,
2975 			&mask->hdr.dst_port, sizeof(rte_be16_t),
2976 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2977 		if (ret)
2978 			return ret;
2979 	}
2980 
2981 	(*device_configured) |= local_cfg;
2982 
2983 	return 0;
2984 }
2985 
2986 static int
2987 dpaa2_configure_flow_tcp(struct dpaa2_dev_flow *flow,
2988 	struct rte_eth_dev *dev,
2989 	const struct rte_flow_attr *attr,
2990 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
2991 	const struct rte_flow_action actions[] __rte_unused,
2992 	struct rte_flow_error *error __rte_unused,
2993 	int *device_configured)
2994 {
2995 	int ret, local_cfg = 0;
2996 	uint32_t group;
2997 	const struct rte_flow_item_tcp *spec, *mask;
2998 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2999 	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
3000 
3001 	group = attr->group;
3002 
3003 	/* Parse pattern list to get the matching parameters */
3004 	spec = pattern->spec;
3005 	mask = pattern->mask ?
3006 		pattern->mask : &dpaa2_flow_item_tcp_mask;
3007 
3008 	/* Get traffic class index and flow id to be configured */
3009 	flow->tc_id = group;
3010 	flow->tc_index = attr->priority;
3011 
3012 	if (dpaa2_pattern->in_tunnel) {
3013 		if (spec) {
3014 			DPAA2_PMD_ERR("Tunnel-TCP distribution not support");
3015 			return -ENOTSUP;
3016 		}
3017 
3018 		ret = dpaa2_flow_identify_by_faf(priv, flow,
3019 						 FAFE_VXLAN_IN_TCP_FRAM,
3020 						 DPAA2_FLOW_QOS_TYPE, group,
3021 						 &local_cfg);
3022 		if (ret)
3023 			return ret;
3024 
3025 		ret = dpaa2_flow_identify_by_faf(priv, flow,
3026 						 FAFE_VXLAN_IN_TCP_FRAM,
3027 						 DPAA2_FLOW_FS_TYPE, group,
3028 						 &local_cfg);
3029 		return ret;
3030 	}
3031 
3032 	ret = dpaa2_flow_identify_by_faf(priv, flow,
3033 			FAF_TCP_FRAM, DPAA2_FLOW_QOS_TYPE,
3034 			group, &local_cfg);
3035 	if (ret)
3036 		return ret;
3037 
3038 	ret = dpaa2_flow_identify_by_faf(priv, flow,
3039 			FAF_TCP_FRAM, DPAA2_FLOW_FS_TYPE,
3040 			group, &local_cfg);
3041 	if (ret)
3042 		return ret;
3043 
3044 	if (!spec) {
3045 		(*device_configured) |= local_cfg;
3046 		return 0;
3047 	}
3048 
3049 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
3050 		RTE_FLOW_ITEM_TYPE_TCP)) {
3051 		DPAA2_PMD_WARN("Extract field(s) of TCP not support.");
3052 
3053 		return -EINVAL;
3054 	}
3055 
3056 	if (mask->hdr.src_port) {
3057 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_TCP,
3058 			NH_FLD_TCP_PORT_SRC, &spec->hdr.src_port,
3059 			&mask->hdr.src_port, sizeof(rte_be16_t),
3060 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
3061 		if (ret)
3062 			return ret;
3063 
3064 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_TCP,
3065 			NH_FLD_TCP_PORT_SRC, &spec->hdr.src_port,
3066 			&mask->hdr.src_port, sizeof(rte_be16_t),
3067 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
3068 		if (ret)
3069 			return ret;
3070 	}
3071 
3072 	if (mask->hdr.dst_port) {
3073 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_TCP,
3074 			NH_FLD_TCP_PORT_DST, &spec->hdr.dst_port,
3075 			&mask->hdr.dst_port, sizeof(rte_be16_t),
3076 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
3077 		if (ret)
3078 			return ret;
3079 
3080 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_TCP,
3081 			NH_FLD_TCP_PORT_DST, &spec->hdr.dst_port,
3082 			&mask->hdr.dst_port, sizeof(rte_be16_t),
3083 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
3084 		if (ret)
3085 			return ret;
3086 	}
3087 
3088 	(*device_configured) |= local_cfg;
3089 
3090 	return 0;
3091 }
3092 
3093 static int
3094 dpaa2_configure_flow_sctp(struct dpaa2_dev_flow *flow,
3095 	struct rte_eth_dev *dev,
3096 	const struct rte_flow_attr *attr,
3097 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
3098 	const struct rte_flow_action actions[] __rte_unused,
3099 	struct rte_flow_error *error __rte_unused,
3100 	int *device_configured)
3101 {
3102 	int ret, local_cfg = 0;
3103 	uint32_t group;
3104 	const struct rte_flow_item_sctp *spec, *mask;
3105 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3106 	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
3107 
3108 	group = attr->group;
3109 
3110 	/* Parse pattern list to get the matching parameters */
3111 	spec = pattern->spec;
3112 	mask = pattern->mask ?
3113 		pattern->mask : &dpaa2_flow_item_sctp_mask;
3114 
3115 	/* Get traffic class index and flow id to be configured */
3116 	flow->tc_id = group;
3117 	flow->tc_index = attr->priority;
3118 
3119 	if (dpaa2_pattern->in_tunnel) {
3120 		DPAA2_PMD_ERR("Tunnel-SCTP distribution not support");
3121 		return -ENOTSUP;
3122 	}
3123 
3124 	ret = dpaa2_flow_identify_by_faf(priv, flow,
3125 			FAF_SCTP_FRAM, DPAA2_FLOW_QOS_TYPE,
3126 			group, &local_cfg);
3127 	if (ret)
3128 		return ret;
3129 
3130 	ret = dpaa2_flow_identify_by_faf(priv, flow,
3131 			FAF_SCTP_FRAM, DPAA2_FLOW_FS_TYPE,
3132 			group, &local_cfg);
3133 	if (ret)
3134 		return ret;
3135 
3136 	if (!spec) {
3137 		(*device_configured) |= local_cfg;
3138 		return 0;
3139 	}
3140 
3141 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
3142 		RTE_FLOW_ITEM_TYPE_SCTP)) {
3143 		DPAA2_PMD_WARN("Extract field(s) of SCTP not support.");
3144 
3145 		return -1;
3146 	}
3147 
3148 	if (mask->hdr.src_port) {
3149 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_SCTP,
3150 			NH_FLD_SCTP_PORT_SRC, &spec->hdr.src_port,
3151 			&mask->hdr.src_port, sizeof(rte_be16_t),
3152 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
3153 		if (ret)
3154 			return ret;
3155 
3156 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_SCTP,
3157 			NH_FLD_SCTP_PORT_SRC, &spec->hdr.src_port,
3158 			&mask->hdr.src_port, sizeof(rte_be16_t),
3159 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
3160 		if (ret)
3161 			return ret;
3162 	}
3163 
3164 	if (mask->hdr.dst_port) {
3165 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_SCTP,
3166 			NH_FLD_SCTP_PORT_DST, &spec->hdr.dst_port,
3167 			&mask->hdr.dst_port, sizeof(rte_be16_t),
3168 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
3169 		if (ret)
3170 			return ret;
3171 
3172 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_SCTP,
3173 			NH_FLD_SCTP_PORT_DST, &spec->hdr.dst_port,
3174 			&mask->hdr.dst_port, sizeof(rte_be16_t),
3175 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
3176 		if (ret)
3177 			return ret;
3178 	}
3179 
3180 	(*device_configured) |= local_cfg;
3181 
3182 	return 0;
3183 }
3184 
3185 static int
3186 dpaa2_configure_flow_gre(struct dpaa2_dev_flow *flow,
3187 	struct rte_eth_dev *dev,
3188 	const struct rte_flow_attr *attr,
3189 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
3190 	const struct rte_flow_action actions[] __rte_unused,
3191 	struct rte_flow_error *error __rte_unused,
3192 	int *device_configured)
3193 {
3194 	int ret, local_cfg = 0;
3195 	uint32_t group;
3196 	const struct rte_flow_item_gre *spec, *mask;
3197 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3198 	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
3199 
3200 	group = attr->group;
3201 
3202 	/* Parse pattern list to get the matching parameters */
3203 	spec = pattern->spec;
3204 	mask = pattern->mask ?
3205 		pattern->mask : &dpaa2_flow_item_gre_mask;
3206 
3207 	/* Get traffic class index and flow id to be configured */
3208 	flow->tc_id = group;
3209 	flow->tc_index = attr->priority;
3210 
3211 	if (dpaa2_pattern->in_tunnel) {
3212 		DPAA2_PMD_ERR("Tunnel-GRE distribution not support");
3213 		return -ENOTSUP;
3214 	}
3215 
3216 	if (!spec) {
3217 		ret = dpaa2_flow_identify_by_faf(priv, flow,
3218 				FAF_GRE_FRAM, DPAA2_FLOW_QOS_TYPE,
3219 				group, &local_cfg);
3220 		if (ret)
3221 			return ret;
3222 
3223 		ret = dpaa2_flow_identify_by_faf(priv, flow,
3224 				FAF_GRE_FRAM, DPAA2_FLOW_FS_TYPE,
3225 				group, &local_cfg);
3226 		if (ret)
3227 			return ret;
3228 
3229 		(*device_configured) |= local_cfg;
3230 		return 0;
3231 	}
3232 
3233 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
3234 		RTE_FLOW_ITEM_TYPE_GRE)) {
3235 		DPAA2_PMD_WARN("Extract field(s) of GRE not support.");
3236 
3237 		return -1;
3238 	}
3239 
3240 	if (!mask->protocol)
3241 		return 0;
3242 
3243 	ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_GRE,
3244 			NH_FLD_GRE_TYPE, &spec->protocol,
3245 			&mask->protocol, sizeof(rte_be16_t),
3246 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
3247 	if (ret)
3248 		return ret;
3249 
3250 	ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_GRE,
3251 			NH_FLD_GRE_TYPE, &spec->protocol,
3252 			&mask->protocol, sizeof(rte_be16_t),
3253 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
3254 	if (ret)
3255 		return ret;
3256 
3257 	(*device_configured) |= local_cfg;
3258 
3259 	return 0;
3260 }
3261 
3262 static int
3263 dpaa2_configure_flow_vxlan(struct dpaa2_dev_flow *flow,
3264 	struct rte_eth_dev *dev,
3265 	const struct rte_flow_attr *attr,
3266 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
3267 	const struct rte_flow_action actions[] __rte_unused,
3268 	struct rte_flow_error *error __rte_unused,
3269 	int *device_configured)
3270 {
3271 	int ret, local_cfg = 0;
3272 	uint32_t group;
3273 	const struct rte_flow_item_vxlan *spec, *mask;
3274 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3275 	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
3276 
3277 	group = attr->group;
3278 
3279 	/* Parse pattern list to get the matching parameters */
3280 	spec = pattern->spec;
3281 	mask = pattern->mask ?
3282 		pattern->mask : &dpaa2_flow_item_vxlan_mask;
3283 
3284 	/* Get traffic class index and flow id to be configured */
3285 	flow->tc_id = group;
3286 	flow->tc_index = attr->priority;
3287 
3288 	if (dpaa2_pattern->in_tunnel) {
3289 		DPAA2_PMD_ERR("Tunnel-VXLAN distribution not support");
3290 		return -ENOTSUP;
3291 	}
3292 
3293 	if (!spec) {
3294 		ret = dpaa2_flow_identify_by_faf(priv, flow,
3295 				FAF_VXLAN_FRAM, DPAA2_FLOW_QOS_TYPE,
3296 				group, &local_cfg);
3297 		if (ret)
3298 			return ret;
3299 
3300 		ret = dpaa2_flow_identify_by_faf(priv, flow,
3301 				FAF_VXLAN_FRAM, DPAA2_FLOW_FS_TYPE,
3302 				group, &local_cfg);
3303 		if (ret)
3304 			return ret;
3305 
3306 		(*device_configured) |= local_cfg;
3307 		return 0;
3308 	}
3309 
3310 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
3311 		RTE_FLOW_ITEM_TYPE_VXLAN)) {
3312 		DPAA2_PMD_WARN("Extract field(s) of VXLAN not support.");
3313 
3314 		return -1;
3315 	}
3316 
3317 	if (mask->flags) {
3318 		if (spec->flags != VXLAN_HF_VNI) {
3319 			DPAA2_PMD_ERR("vxlan flag(0x%02x) must be 0x%02x.",
3320 				spec->flags, VXLAN_HF_VNI);
3321 			return -EINVAL;
3322 		}
3323 		if (mask->flags != 0xff) {
3324 			DPAA2_PMD_ERR("Not support to extract vxlan flag.");
3325 			return -EINVAL;
3326 		}
3327 	}
3328 
3329 	if (mask->vni[0] || mask->vni[1] || mask->vni[2]) {
3330 		ret = dpaa2_flow_add_pr_extract_rule(flow,
3331 			DPAA2_VXLAN_VNI_OFFSET,
3332 			sizeof(mask->vni), spec->vni,
3333 			mask->vni,
3334 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
3335 		if (ret)
3336 			return ret;
3337 
3338 		ret = dpaa2_flow_add_pr_extract_rule(flow,
3339 			DPAA2_VXLAN_VNI_OFFSET,
3340 			sizeof(mask->vni), spec->vni,
3341 			mask->vni,
3342 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
3343 		if (ret)
3344 			return ret;
3345 	}
3346 
3347 	(*device_configured) |= local_cfg;
3348 
3349 	return 0;
3350 }
3351 
3352 static int
3353 dpaa2_configure_flow_ecpri(struct dpaa2_dev_flow *flow,
3354 	struct rte_eth_dev *dev,
3355 	const struct rte_flow_attr *attr,
3356 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
3357 	const struct rte_flow_action actions[] __rte_unused,
3358 	struct rte_flow_error *error __rte_unused,
3359 	int *device_configured)
3360 {
3361 	int ret, local_cfg = 0;
3362 	uint32_t group;
3363 	const struct rte_flow_item_ecpri *spec, *mask;
3364 	struct rte_flow_item_ecpri local_mask;
3365 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3366 	const struct rte_flow_item *pattern =
3367 		&dpaa2_pattern->generic_item;
3368 	uint8_t extract_nb = 0, i;
3369 	uint64_t rule_data[DPAA2_ECPRI_MAX_EXTRACT_NB];
3370 	uint64_t mask_data[DPAA2_ECPRI_MAX_EXTRACT_NB];
3371 	uint8_t extract_size[DPAA2_ECPRI_MAX_EXTRACT_NB];
3372 	uint8_t extract_off[DPAA2_ECPRI_MAX_EXTRACT_NB];
3373 
3374 	group = attr->group;
3375 
3376 	/* Parse pattern list to get the matching parameters */
3377 	spec = pattern->spec;
3378 	if (pattern->mask) {
3379 		memcpy(&local_mask, pattern->mask,
3380 			sizeof(struct rte_flow_item_ecpri));
3381 		local_mask.hdr.common.u32 =
3382 			rte_be_to_cpu_32(local_mask.hdr.common.u32);
3383 		mask = &local_mask;
3384 	} else {
3385 		mask = &dpaa2_flow_item_ecpri_mask;
3386 	}
3387 
3388 	/* Get traffic class index and flow id to be configured */
3389 	flow->tc_id = group;
3390 	flow->tc_index = attr->priority;
3391 
3392 	if (dpaa2_pattern->in_tunnel) {
3393 		DPAA2_PMD_ERR("Tunnel-ECPRI distribution not support");
3394 		return -ENOTSUP;
3395 	}
3396 
3397 	if (!spec) {
3398 		ret = dpaa2_flow_identify_by_faf(priv, flow,
3399 			FAFE_ECPRI_FRAM, DPAA2_FLOW_QOS_TYPE,
3400 			group, &local_cfg);
3401 		if (ret)
3402 			return ret;
3403 
3404 		ret = dpaa2_flow_identify_by_faf(priv, flow,
3405 			FAFE_ECPRI_FRAM, DPAA2_FLOW_FS_TYPE,
3406 			group, &local_cfg);
3407 		if (ret)
3408 			return ret;
3409 
3410 		(*device_configured) |= local_cfg;
3411 		return 0;
3412 	}
3413 
3414 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
3415 		RTE_FLOW_ITEM_TYPE_ECPRI)) {
3416 		DPAA2_PMD_WARN("Extract field(s) of ECPRI not support.");
3417 
3418 		return -1;
3419 	}
3420 
3421 	if (mask->hdr.common.type != 0xff) {
3422 		DPAA2_PMD_WARN("ECPRI header type not specified.");
3423 
3424 		return -1;
3425 	}
3426 
3427 	if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_IQ_DATA) {
3428 		rule_data[extract_nb] = ECPRI_FAFE_TYPE_0;
3429 		mask_data[extract_nb] = 0xff;
3430 		extract_size[extract_nb] = sizeof(uint8_t);
3431 		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
3432 		extract_nb++;
3433 
3434 		if (mask->hdr.type0.pc_id) {
3435 			rule_data[extract_nb] = spec->hdr.type0.pc_id;
3436 			mask_data[extract_nb] = mask->hdr.type0.pc_id;
3437 			extract_size[extract_nb] = sizeof(rte_be16_t);
3438 			extract_off[extract_nb] =
3439 				DPAA2_ECPRI_MSG_OFFSET +
3440 				offsetof(struct rte_ecpri_msg_iq_data, pc_id);
3441 			extract_nb++;
3442 		}
3443 		if (mask->hdr.type0.seq_id) {
3444 			rule_data[extract_nb] = spec->hdr.type0.seq_id;
3445 			mask_data[extract_nb] = mask->hdr.type0.seq_id;
3446 			extract_size[extract_nb] = sizeof(rte_be16_t);
3447 			extract_off[extract_nb] =
3448 				DPAA2_ECPRI_MSG_OFFSET +
3449 				offsetof(struct rte_ecpri_msg_iq_data, seq_id);
3450 			extract_nb++;
3451 		}
3452 	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_BIT_SEQ) {
3453 		rule_data[extract_nb] = ECPRI_FAFE_TYPE_1;
3454 		mask_data[extract_nb] = 0xff;
3455 		extract_size[extract_nb] = sizeof(uint8_t);
3456 		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
3457 		extract_nb++;
3458 
3459 		if (mask->hdr.type1.pc_id) {
3460 			rule_data[extract_nb] = spec->hdr.type1.pc_id;
3461 			mask_data[extract_nb] = mask->hdr.type1.pc_id;
3462 			extract_size[extract_nb] = sizeof(rte_be16_t);
3463 			extract_off[extract_nb] =
3464 				DPAA2_ECPRI_MSG_OFFSET +
3465 				offsetof(struct rte_ecpri_msg_bit_seq, pc_id);
3466 			extract_nb++;
3467 		}
3468 		if (mask->hdr.type1.seq_id) {
3469 			rule_data[extract_nb] = spec->hdr.type1.seq_id;
3470 			mask_data[extract_nb] = mask->hdr.type1.seq_id;
3471 			extract_size[extract_nb] = sizeof(rte_be16_t);
3472 			extract_off[extract_nb] =
3473 				DPAA2_ECPRI_MSG_OFFSET +
3474 				offsetof(struct rte_ecpri_msg_bit_seq, seq_id);
3475 			extract_nb++;
3476 		}
3477 	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_RTC_CTRL) {
3478 		rule_data[extract_nb] = ECPRI_FAFE_TYPE_2;
3479 		mask_data[extract_nb] = 0xff;
3480 		extract_size[extract_nb] = sizeof(uint8_t);
3481 		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
3482 		extract_nb++;
3483 
3484 		if (mask->hdr.type2.rtc_id) {
3485 			rule_data[extract_nb] = spec->hdr.type2.rtc_id;
3486 			mask_data[extract_nb] = mask->hdr.type2.rtc_id;
3487 			extract_size[extract_nb] = sizeof(rte_be16_t);
3488 			extract_off[extract_nb] =
3489 				DPAA2_ECPRI_MSG_OFFSET +
3490 				offsetof(struct rte_ecpri_msg_rtc_ctrl, rtc_id);
3491 			extract_nb++;
3492 		}
3493 		if (mask->hdr.type2.seq_id) {
3494 			rule_data[extract_nb] = spec->hdr.type2.seq_id;
3495 			mask_data[extract_nb] = mask->hdr.type2.seq_id;
3496 			extract_size[extract_nb] = sizeof(rte_be16_t);
3497 			extract_off[extract_nb] =
3498 				DPAA2_ECPRI_MSG_OFFSET +
3499 				offsetof(struct rte_ecpri_msg_rtc_ctrl, seq_id);
3500 			extract_nb++;
3501 		}
3502 	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_GEN_DATA) {
3503 		rule_data[extract_nb] = ECPRI_FAFE_TYPE_3;
3504 		mask_data[extract_nb] = 0xff;
3505 		extract_size[extract_nb] = sizeof(uint8_t);
3506 		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
3507 		extract_nb++;
3508 
3509 		if (mask->hdr.type3.pc_id || mask->hdr.type3.seq_id)
3510 			DPAA2_PMD_WARN("Extract type3 msg not support.");
3511 	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_RM_ACC) {
3512 		rule_data[extract_nb] = ECPRI_FAFE_TYPE_4;
3513 		mask_data[extract_nb] = 0xff;
3514 		extract_size[extract_nb] = sizeof(uint8_t);
3515 		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
3516 		extract_nb++;
3517 
3518 		if (mask->hdr.type4.rma_id) {
3519 			rule_data[extract_nb] = spec->hdr.type4.rma_id;
3520 			mask_data[extract_nb] = mask->hdr.type4.rma_id;
3521 			extract_size[extract_nb] = sizeof(uint8_t);
3522 			extract_off[extract_nb] =
3523 				DPAA2_ECPRI_MSG_OFFSET + 0;
3524 				/** Compiler not support to take address
3525 				 * of bit-field
3526 				 * offsetof(struct rte_ecpri_msg_rm_access,
3527 				 * rma_id);
3528 				 */
3529 			extract_nb++;
3530 		}
3531 		if (mask->hdr.type4.ele_id) {
3532 			rule_data[extract_nb] = spec->hdr.type4.ele_id;
3533 			mask_data[extract_nb] = mask->hdr.type4.ele_id;
3534 			extract_size[extract_nb] = sizeof(rte_be16_t);
3535 			extract_off[extract_nb] =
3536 				DPAA2_ECPRI_MSG_OFFSET + 2;
3537 				/** Compiler not support to take address
3538 				 * of bit-field
3539 				 * offsetof(struct rte_ecpri_msg_rm_access,
3540 				 * ele_id);
3541 				 */
3542 			extract_nb++;
3543 		}
3544 	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_DLY_MSR) {
3545 		rule_data[extract_nb] = ECPRI_FAFE_TYPE_5;
3546 		mask_data[extract_nb] = 0xff;
3547 		extract_size[extract_nb] = sizeof(uint8_t);
3548 		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
3549 		extract_nb++;
3550 
3551 		if (mask->hdr.type5.msr_id) {
3552 			rule_data[extract_nb] = spec->hdr.type5.msr_id;
3553 			mask_data[extract_nb] = mask->hdr.type5.msr_id;
3554 			extract_size[extract_nb] = sizeof(uint8_t);
3555 			extract_off[extract_nb] =
3556 				DPAA2_ECPRI_MSG_OFFSET +
3557 				offsetof(struct rte_ecpri_msg_delay_measure,
3558 					msr_id);
3559 			extract_nb++;
3560 		}
3561 		if (mask->hdr.type5.act_type) {
3562 			rule_data[extract_nb] = spec->hdr.type5.act_type;
3563 			mask_data[extract_nb] = mask->hdr.type5.act_type;
3564 			extract_size[extract_nb] = sizeof(uint8_t);
3565 			extract_off[extract_nb] =
3566 				DPAA2_ECPRI_MSG_OFFSET +
3567 				offsetof(struct rte_ecpri_msg_delay_measure,
3568 					act_type);
3569 			extract_nb++;
3570 		}
3571 	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_RMT_RST) {
3572 		rule_data[extract_nb] = ECPRI_FAFE_TYPE_6;
3573 		mask_data[extract_nb] = 0xff;
3574 		extract_size[extract_nb] = sizeof(uint8_t);
3575 		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
3576 		extract_nb++;
3577 
3578 		if (mask->hdr.type6.rst_id) {
3579 			rule_data[extract_nb] = spec->hdr.type6.rst_id;
3580 			mask_data[extract_nb] = mask->hdr.type6.rst_id;
3581 			extract_size[extract_nb] = sizeof(rte_be16_t);
3582 			extract_off[extract_nb] =
3583 				DPAA2_ECPRI_MSG_OFFSET +
3584 				offsetof(struct rte_ecpri_msg_remote_reset,
3585 					rst_id);
3586 			extract_nb++;
3587 		}
3588 		if (mask->hdr.type6.rst_op) {
3589 			rule_data[extract_nb] = spec->hdr.type6.rst_op;
3590 			mask_data[extract_nb] = mask->hdr.type6.rst_op;
3591 			extract_size[extract_nb] = sizeof(uint8_t);
3592 			extract_off[extract_nb] =
3593 				DPAA2_ECPRI_MSG_OFFSET +
3594 				offsetof(struct rte_ecpri_msg_remote_reset,
3595 					rst_op);
3596 			extract_nb++;
3597 		}
3598 	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_EVT_IND) {
3599 		rule_data[extract_nb] = ECPRI_FAFE_TYPE_7;
3600 		mask_data[extract_nb] = 0xff;
3601 		extract_size[extract_nb] = sizeof(uint8_t);
3602 		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
3603 		extract_nb++;
3604 
3605 		if (mask->hdr.type7.evt_id) {
3606 			rule_data[extract_nb] = spec->hdr.type7.evt_id;
3607 			mask_data[extract_nb] = mask->hdr.type7.evt_id;
3608 			extract_size[extract_nb] = sizeof(uint8_t);
3609 			extract_off[extract_nb] =
3610 				DPAA2_ECPRI_MSG_OFFSET +
3611 				offsetof(struct rte_ecpri_msg_event_ind,
3612 					evt_id);
3613 			extract_nb++;
3614 		}
3615 		if (mask->hdr.type7.evt_type) {
3616 			rule_data[extract_nb] = spec->hdr.type7.evt_type;
3617 			mask_data[extract_nb] = mask->hdr.type7.evt_type;
3618 			extract_size[extract_nb] = sizeof(uint8_t);
3619 			extract_off[extract_nb] =
3620 				DPAA2_ECPRI_MSG_OFFSET +
3621 				offsetof(struct rte_ecpri_msg_event_ind,
3622 					evt_type);
3623 			extract_nb++;
3624 		}
3625 		if (mask->hdr.type7.seq) {
3626 			rule_data[extract_nb] = spec->hdr.type7.seq;
3627 			mask_data[extract_nb] = mask->hdr.type7.seq;
3628 			extract_size[extract_nb] = sizeof(uint8_t);
3629 			extract_off[extract_nb] =
3630 				DPAA2_ECPRI_MSG_OFFSET +
3631 				offsetof(struct rte_ecpri_msg_event_ind,
3632 					seq);
3633 			extract_nb++;
3634 		}
3635 		if (mask->hdr.type7.number) {
3636 			rule_data[extract_nb] = spec->hdr.type7.number;
3637 			mask_data[extract_nb] = mask->hdr.type7.number;
3638 			extract_size[extract_nb] = sizeof(uint8_t);
3639 			extract_off[extract_nb] =
3640 				DPAA2_ECPRI_MSG_OFFSET +
3641 				offsetof(struct rte_ecpri_msg_event_ind,
3642 					number);
3643 			extract_nb++;
3644 		}
3645 	} else {
3646 		DPAA2_PMD_ERR("Invalid ecpri header type(%d)",
3647 				spec->hdr.common.type);
3648 		return -EINVAL;
3649 	}
3650 
3651 	for (i = 0; i < extract_nb; i++) {
3652 		ret = dpaa2_flow_add_pr_extract_rule(flow,
3653 			extract_off[i],
3654 			extract_size[i], &rule_data[i], &mask_data[i],
3655 			priv, group,
3656 			device_configured,
3657 			DPAA2_FLOW_QOS_TYPE);
3658 		if (ret)
3659 			return ret;
3660 
3661 		ret = dpaa2_flow_add_pr_extract_rule(flow,
3662 			extract_off[i],
3663 			extract_size[i], &rule_data[i], &mask_data[i],
3664 			priv, group,
3665 			device_configured,
3666 			DPAA2_FLOW_FS_TYPE);
3667 		if (ret)
3668 			return ret;
3669 	}
3670 
3671 	(*device_configured) |= local_cfg;
3672 
3673 	return 0;
3674 }
3675 
3676 static int
3677 dpaa2_configure_flow_gtp(struct dpaa2_dev_flow *flow,
3678 	struct rte_eth_dev *dev,
3679 	const struct rte_flow_attr *attr,
3680 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
3681 	const struct rte_flow_action actions[] __rte_unused,
3682 	struct rte_flow_error *error __rte_unused,
3683 	int *device_configured)
3684 {
3685 	int ret, local_cfg = 0;
3686 	uint32_t group;
3687 	const struct rte_flow_item_gtp *spec, *mask;
3688 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3689 	const struct rte_flow_item *pattern =
3690 		&dpaa2_pattern->generic_item;
3691 
3692 	group = attr->group;
3693 
3694 	/* Parse pattern list to get the matching parameters */
3695 	spec = pattern->spec;
3696 	mask = pattern->mask ?
3697 		pattern->mask : &dpaa2_flow_item_gtp_mask;
3698 
3699 	/* Get traffic class index and flow id to be configured */
3700 	flow->tc_id = group;
3701 	flow->tc_index = attr->priority;
3702 
3703 	if (dpaa2_pattern->in_tunnel) {
3704 		DPAA2_PMD_ERR("Tunnel-GTP distribution not support");
3705 		return -ENOTSUP;
3706 	}
3707 
3708 	if (!spec) {
3709 		ret = dpaa2_flow_identify_by_faf(priv, flow,
3710 				FAF_GTP_FRAM, DPAA2_FLOW_QOS_TYPE,
3711 				group, &local_cfg);
3712 		if (ret)
3713 			return ret;
3714 
3715 		ret = dpaa2_flow_identify_by_faf(priv, flow,
3716 				FAF_GTP_FRAM, DPAA2_FLOW_FS_TYPE,
3717 				group, &local_cfg);
3718 		if (ret)
3719 			return ret;
3720 
3721 		(*device_configured) |= local_cfg;
3722 		return 0;
3723 	}
3724 
3725 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
3726 		RTE_FLOW_ITEM_TYPE_GTP)) {
3727 		DPAA2_PMD_WARN("Extract field(s) of GTP not support.");
3728 
3729 		return -1;
3730 	}
3731 
3732 	if (!mask->teid)
3733 		return 0;
3734 
3735 	ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_GTP,
3736 			NH_FLD_GTP_TEID, &spec->teid,
3737 			&mask->teid, sizeof(rte_be32_t),
3738 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
3739 	if (ret)
3740 		return ret;
3741 
3742 	ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_GTP,
3743 			NH_FLD_GTP_TEID, &spec->teid,
3744 			&mask->teid, sizeof(rte_be32_t),
3745 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
3746 	if (ret)
3747 		return ret;
3748 
3749 	(*device_configured) |= local_cfg;
3750 
3751 	return 0;
3752 }
3753 
3754 static int
3755 dpaa2_configure_flow_raw(struct dpaa2_dev_flow *flow,
3756 	struct rte_eth_dev *dev,
3757 	const struct rte_flow_attr *attr,
3758 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
3759 	const struct rte_flow_action actions[] __rte_unused,
3760 	struct rte_flow_error *error __rte_unused,
3761 	int *device_configured)
3762 {
3763 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3764 	int local_cfg = 0, ret;
3765 	uint32_t group;
3766 	struct dpaa2_key_extract *qos_key_extract;
3767 	struct dpaa2_key_extract *tc_key_extract;
3768 	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
3769 	const struct rte_flow_item_raw *spec = pattern->spec;
3770 	const struct rte_flow_item_raw *mask = pattern->mask;
3771 
3772 	/* Need both spec and mask */
3773 	if (!spec || !mask) {
3774 		DPAA2_PMD_ERR("spec or mask not present.");
3775 		return -EINVAL;
3776 	}
3777 
3778 	if (spec->relative) {
3779 		/* TBD: relative offset support.
3780 		 * To support relative offset of previous L3 protocol item,
3781 		 * extracts should be expanded to identify if the frame is:
3782 		 * vlan or none-vlan.
3783 		 *
3784 		 * To support relative offset of previous L4 protocol item,
3785 		 * extracts should be expanded to identify if the frame is:
3786 		 * vlan/IPv4 or vlan/IPv6 or none-vlan/IPv4 or none-vlan/IPv6.
3787 		 */
3788 		DPAA2_PMD_ERR("relative not supported.");
3789 		return -EINVAL;
3790 	}
3791 
3792 	if (spec->search) {
3793 		DPAA2_PMD_ERR("search not supported.");
3794 		return -EINVAL;
3795 	}
3796 
3797 	/* Spec len and mask len should be same */
3798 	if (spec->length != mask->length) {
3799 		DPAA2_PMD_ERR("Spec len and mask len mismatch.");
3800 		return -EINVAL;
3801 	}
3802 
3803 	/* Get traffic class index and flow id to be configured */
3804 	group = attr->group;
3805 	flow->tc_id = group;
3806 	flow->tc_index = attr->priority;
3807 
3808 	qos_key_extract = &priv->extract.qos_key_extract;
3809 	tc_key_extract = &priv->extract.tc_key_extract[group];
3810 
3811 	ret = dpaa2_flow_extract_add_raw(priv,
3812 			spec->offset, spec->length,
3813 			DPAA2_FLOW_QOS_TYPE, 0, &local_cfg);
3814 	if (ret) {
3815 		DPAA2_PMD_ERR("QoS Extract RAW add failed.");
3816 		return -EINVAL;
3817 	}
3818 
3819 	ret = dpaa2_flow_extract_add_raw(priv,
3820 			spec->offset, spec->length,
3821 			DPAA2_FLOW_FS_TYPE, group, &local_cfg);
3822 	if (ret) {
3823 		DPAA2_PMD_ERR("FS[%d] Extract RAW add failed.",
3824 			group);
3825 		return -EINVAL;
3826 	}
3827 
3828 	ret = dpaa2_flow_raw_rule_data_set(flow,
3829 			&qos_key_extract->key_profile,
3830 			spec->offset, spec->length,
3831 			spec->pattern, mask->pattern,
3832 			DPAA2_FLOW_QOS_TYPE);
3833 	if (ret) {
3834 		DPAA2_PMD_ERR("QoS RAW rule data set failed");
3835 		return -EINVAL;
3836 	}
3837 
3838 	ret = dpaa2_flow_raw_rule_data_set(flow,
3839 			&tc_key_extract->key_profile,
3840 			spec->offset, spec->length,
3841 			spec->pattern, mask->pattern,
3842 			DPAA2_FLOW_FS_TYPE);
3843 	if (ret) {
3844 		DPAA2_PMD_ERR("FS RAW rule data set failed");
3845 		return -EINVAL;
3846 	}
3847 
3848 	(*device_configured) |= local_cfg;
3849 
3850 	return 0;
3851 }
3852 
3853 static inline int
3854 dpaa2_flow_verify_attr(struct dpaa2_dev_priv *priv,
3855 	const struct rte_flow_attr *attr)
3856 {
3857 	struct dpaa2_dev_flow *curr = LIST_FIRST(&priv->flows);
3858 
3859 	while (curr) {
3860 		if (curr->tc_id == attr->group &&
3861 			curr->tc_index == attr->priority) {
3862 			DPAA2_PMD_ERR("Flow(TC[%d].entry[%d] exists",
3863 				attr->group, attr->priority);
3864 
3865 			return -EINVAL;
3866 		}
3867 		curr = LIST_NEXT(curr, next);
3868 	}
3869 
3870 	return 0;
3871 }
3872 
3873 static inline struct rte_eth_dev *
3874 dpaa2_flow_redirect_dev(struct dpaa2_dev_priv *priv,
3875 	const struct rte_flow_action *action)
3876 {
3877 	const struct rte_flow_action_port_id *port_id;
3878 	const struct rte_flow_action_ethdev *ethdev;
3879 	int idx = -1;
3880 	struct rte_eth_dev *dest_dev;
3881 
3882 	if (action->type == RTE_FLOW_ACTION_TYPE_PORT_ID) {
3883 		port_id = action->conf;
3884 		if (!port_id->original)
3885 			idx = port_id->id;
3886 	} else if (action->type == RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT) {
3887 		ethdev = action->conf;
3888 		idx = ethdev->port_id;
3889 	} else {
3890 		return NULL;
3891 	}
3892 
3893 	if (idx >= 0) {
3894 		if (!rte_eth_dev_is_valid_port(idx))
3895 			return NULL;
3896 		if (!rte_pmd_dpaa2_dev_is_dpaa2(idx))
3897 			return NULL;
3898 		dest_dev = &rte_eth_devices[idx];
3899 	} else {
3900 		dest_dev = priv->eth_dev;
3901 	}
3902 
3903 	return dest_dev;
3904 }
3905 
3906 static inline int
3907 dpaa2_flow_verify_action(struct dpaa2_dev_priv *priv,
3908 	const struct rte_flow_attr *attr,
3909 	const struct rte_flow_action actions[])
3910 {
3911 	int end_of_list = 0, i, j = 0;
3912 	const struct rte_flow_action_queue *dest_queue;
3913 	const struct rte_flow_action_rss *rss_conf;
3914 	struct dpaa2_queue *rxq;
3915 
3916 	while (!end_of_list) {
3917 		switch (actions[j].type) {
3918 		case RTE_FLOW_ACTION_TYPE_QUEUE:
3919 			dest_queue = actions[j].conf;
3920 			rxq = priv->rx_vq[dest_queue->index];
3921 			if (attr->group != rxq->tc_index) {
3922 				DPAA2_PMD_ERR("FSQ(%d.%d) not in TC[%d]",
3923 					rxq->tc_index, rxq->flow_id,
3924 					attr->group);
3925 
3926 				return -ENOTSUP;
3927 			}
3928 			break;
3929 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
3930 		case RTE_FLOW_ACTION_TYPE_PORT_ID:
3931 			if (!dpaa2_flow_redirect_dev(priv, &actions[j])) {
3932 				DPAA2_PMD_ERR("Invalid port id of action");
3933 				return -ENOTSUP;
3934 			}
3935 			break;
3936 		case RTE_FLOW_ACTION_TYPE_RSS:
3937 			rss_conf = (const struct rte_flow_action_rss *)
3938 					(actions[j].conf);
3939 			if (rss_conf->queue_num > priv->dist_queues) {
3940 				DPAA2_PMD_ERR("RSS number too large");
3941 				return -ENOTSUP;
3942 			}
3943 			for (i = 0; i < (int)rss_conf->queue_num; i++) {
3944 				if (rss_conf->queue[i] >= priv->nb_rx_queues) {
3945 					DPAA2_PMD_ERR("RSS queue not in range");
3946 					return -ENOTSUP;
3947 				}
3948 				rxq = priv->rx_vq[rss_conf->queue[i]];
3949 				if (rxq->tc_index != attr->group) {
3950 					DPAA2_PMD_ERR("RSS queue not in group");
3951 					return -ENOTSUP;
3952 				}
3953 			}
3954 
3955 			break;
3956 		case RTE_FLOW_ACTION_TYPE_PF:
3957 			/* Skip this action, have to add for vxlan */
3958 			break;
3959 		case RTE_FLOW_ACTION_TYPE_END:
3960 			end_of_list = 1;
3961 			break;
3962 		default:
3963 			DPAA2_PMD_ERR("Invalid action type");
3964 			return -ENOTSUP;
3965 		}
3966 		j++;
3967 	}
3968 
3969 	return 0;
3970 }
3971 
3972 static int
3973 dpaa2_configure_flow_fs_action(struct dpaa2_dev_priv *priv,
3974 	struct dpaa2_dev_flow *flow,
3975 	const struct rte_flow_action *rte_action)
3976 {
3977 	struct rte_eth_dev *dest_dev;
3978 	struct dpaa2_dev_priv *dest_priv;
3979 	const struct rte_flow_action_queue *dest_queue;
3980 	struct dpaa2_queue *dest_q;
3981 
3982 	memset(&flow->fs_action_cfg, 0,
3983 		sizeof(struct dpni_fs_action_cfg));
3984 	flow->action_type = rte_action->type;
3985 
3986 	if (flow->action_type == RTE_FLOW_ACTION_TYPE_QUEUE) {
3987 		dest_queue = rte_action->conf;
3988 		dest_q = priv->rx_vq[dest_queue->index];
3989 		flow->fs_action_cfg.flow_id = dest_q->flow_id;
3990 	} else if (flow->action_type == RTE_FLOW_ACTION_TYPE_PORT_ID ||
3991 		   flow->action_type == RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT) {
3992 		dest_dev = dpaa2_flow_redirect_dev(priv, rte_action);
3993 		if (!dest_dev) {
3994 			DPAA2_PMD_ERR("Invalid device to redirect");
3995 			return -EINVAL;
3996 		}
3997 
3998 		dest_priv = dest_dev->data->dev_private;
3999 		dest_q = dest_priv->tx_vq[0];
4000 		flow->fs_action_cfg.options =
4001 			DPNI_FS_OPT_REDIRECT_TO_DPNI_TX;
4002 		flow->fs_action_cfg.redirect_obj_token =
4003 			dest_priv->token;
4004 		flow->fs_action_cfg.flow_id = dest_q->flow_id;
4005 	}
4006 
4007 	return 0;
4008 }
4009 
4010 static inline uint16_t
4011 dpaa2_flow_entry_size(uint16_t key_max_size)
4012 {
4013 	if (key_max_size > DPAA2_FLOW_ENTRY_MAX_SIZE) {
4014 		DPAA2_PMD_ERR("Key size(%d) > max(%d)",
4015 			key_max_size,
4016 			DPAA2_FLOW_ENTRY_MAX_SIZE);
4017 
4018 		return 0;
4019 	}
4020 
4021 	if (key_max_size > DPAA2_FLOW_ENTRY_MIN_SIZE)
4022 		return DPAA2_FLOW_ENTRY_MAX_SIZE;
4023 
4024 	/* Current MC only support fixed entry size(56)*/
4025 	return DPAA2_FLOW_ENTRY_MAX_SIZE;
4026 }
4027 
4028 static inline int
4029 dpaa2_flow_clear_fs_table(struct dpaa2_dev_priv *priv,
4030 	uint8_t tc_id)
4031 {
4032 	struct dpaa2_dev_flow *curr = LIST_FIRST(&priv->flows);
4033 	int need_clear = 0, ret;
4034 	struct fsl_mc_io *dpni = priv->hw;
4035 
4036 	while (curr) {
4037 		if (curr->tc_id == tc_id) {
4038 			need_clear = 1;
4039 			break;
4040 		}
4041 		curr = LIST_NEXT(curr, next);
4042 	}
4043 
4044 	if (need_clear) {
4045 		ret = dpni_clear_fs_entries(dpni, CMD_PRI_LOW,
4046 				priv->token, tc_id);
4047 		if (ret) {
4048 			DPAA2_PMD_ERR("TC[%d] clear failed", tc_id);
4049 			return ret;
4050 		}
4051 	}
4052 
4053 	return 0;
4054 }
4055 
4056 static int
4057 dpaa2_configure_fs_rss_table(struct dpaa2_dev_priv *priv,
4058 	uint8_t tc_id, uint16_t dist_size, int rss_dist)
4059 {
4060 	struct dpaa2_key_extract *tc_extract;
4061 	uint8_t *key_cfg_buf;
4062 	uint64_t key_cfg_iova;
4063 	int ret;
4064 	struct dpni_rx_dist_cfg tc_cfg;
4065 	struct fsl_mc_io *dpni = priv->hw;
4066 	uint16_t entry_size;
4067 	uint16_t key_max_size;
4068 
4069 	ret = dpaa2_flow_clear_fs_table(priv, tc_id);
4070 	if (ret < 0) {
4071 		DPAA2_PMD_ERR("TC[%d] clear failed", tc_id);
4072 		return ret;
4073 	}
4074 
4075 	tc_extract = &priv->extract.tc_key_extract[tc_id];
4076 	key_cfg_buf = priv->extract.tc_extract_param[tc_id];
4077 	key_cfg_iova = DPAA2_VADDR_TO_IOVA(key_cfg_buf);
4078 
4079 	key_max_size = tc_extract->key_profile.key_max_size;
4080 	entry_size = dpaa2_flow_entry_size(key_max_size);
4081 
4082 	dpaa2_flow_fs_extracts_log(priv, tc_id);
4083 	ret = dpkg_prepare_key_cfg(&tc_extract->dpkg,
4084 			key_cfg_buf);
4085 	if (ret < 0) {
4086 		DPAA2_PMD_ERR("TC[%d] prepare key failed", tc_id);
4087 		return ret;
4088 	}
4089 
4090 	memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));
4091 	tc_cfg.dist_size = dist_size;
4092 	tc_cfg.key_cfg_iova = key_cfg_iova;
4093 	if (rss_dist)
4094 		tc_cfg.enable = true;
4095 	else
4096 		tc_cfg.enable = false;
4097 	tc_cfg.tc = tc_id;
4098 	ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW,
4099 			priv->token, &tc_cfg);
4100 	if (ret < 0) {
4101 		if (rss_dist) {
4102 			DPAA2_PMD_ERR("RSS TC[%d] set failed",
4103 				tc_id);
4104 		} else {
4105 			DPAA2_PMD_ERR("FS TC[%d] hash disable failed",
4106 				tc_id);
4107 		}
4108 
4109 		return ret;
4110 	}
4111 
4112 	if (rss_dist)
4113 		return 0;
4114 
4115 	tc_cfg.enable = true;
4116 	tc_cfg.fs_miss_flow_id = dpaa2_flow_miss_flow_id;
4117 	ret = dpni_set_rx_fs_dist(dpni, CMD_PRI_LOW,
4118 			priv->token, &tc_cfg);
4119 	if (ret < 0) {
4120 		DPAA2_PMD_ERR("TC[%d] FS configured failed", tc_id);
4121 		return ret;
4122 	}
4123 
4124 	ret = dpaa2_flow_rule_add_all(priv, DPAA2_FLOW_FS_TYPE,
4125 			entry_size, tc_id);
4126 	if (ret)
4127 		return ret;
4128 
4129 	return 0;
4130 }
4131 
4132 static int
4133 dpaa2_configure_qos_table(struct dpaa2_dev_priv *priv,
4134 	int rss_dist)
4135 {
4136 	struct dpaa2_key_extract *qos_extract;
4137 	uint8_t *key_cfg_buf;
4138 	uint64_t key_cfg_iova;
4139 	int ret;
4140 	struct dpni_qos_tbl_cfg qos_cfg;
4141 	struct fsl_mc_io *dpni = priv->hw;
4142 	uint16_t entry_size;
4143 	uint16_t key_max_size;
4144 
4145 	if (!rss_dist && priv->num_rx_tc <= 1) {
4146 		/* QoS table is effecitive for FS multiple TCs or RSS.*/
4147 		return 0;
4148 	}
4149 
4150 	if (LIST_FIRST(&priv->flows)) {
4151 		ret = dpni_clear_qos_table(dpni, CMD_PRI_LOW,
4152 				priv->token);
4153 		if (ret < 0) {
4154 			DPAA2_PMD_ERR("QoS table clear failed");
4155 			return ret;
4156 		}
4157 	}
4158 
4159 	qos_extract = &priv->extract.qos_key_extract;
4160 	key_cfg_buf = priv->extract.qos_extract_param;
4161 	key_cfg_iova = DPAA2_VADDR_TO_IOVA(key_cfg_buf);
4162 
4163 	key_max_size = qos_extract->key_profile.key_max_size;
4164 	entry_size = dpaa2_flow_entry_size(key_max_size);
4165 
4166 	dpaa2_flow_qos_extracts_log(priv);
4167 
4168 	ret = dpkg_prepare_key_cfg(&qos_extract->dpkg,
4169 			key_cfg_buf);
4170 	if (ret < 0) {
4171 		DPAA2_PMD_ERR("QoS prepare extract failed");
4172 		return ret;
4173 	}
4174 	memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
4175 	qos_cfg.keep_entries = true;
4176 	qos_cfg.key_cfg_iova = key_cfg_iova;
4177 	if (rss_dist) {
4178 		qos_cfg.discard_on_miss = true;
4179 	} else {
4180 		qos_cfg.discard_on_miss = false;
4181 		qos_cfg.default_tc = 0;
4182 	}
4183 
4184 	ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
4185 			priv->token, &qos_cfg);
4186 	if (ret < 0) {
4187 		DPAA2_PMD_ERR("QoS table set failed");
4188 		return ret;
4189 	}
4190 
4191 	ret = dpaa2_flow_rule_add_all(priv, DPAA2_FLOW_QOS_TYPE,
4192 			entry_size, 0);
4193 	if (ret)
4194 		return ret;
4195 
4196 	return 0;
4197 }
4198 
4199 static int
4200 dpaa2_flow_item_convert(const struct rte_flow_item pattern[],
4201 			struct rte_dpaa2_flow_item **dpaa2_pattern)
4202 {
4203 	struct rte_dpaa2_flow_item *new_pattern;
4204 	int num = 0, tunnel_start = 0;
4205 
4206 	while (1) {
4207 		num++;
4208 		if (pattern[num].type == RTE_FLOW_ITEM_TYPE_END)
4209 			break;
4210 	}
4211 
4212 	new_pattern = rte_malloc(NULL, sizeof(struct rte_dpaa2_flow_item) * num,
4213 				 RTE_CACHE_LINE_SIZE);
4214 	if (!new_pattern) {
4215 		DPAA2_PMD_ERR("Failed to alloc %d flow items", num);
4216 		return -ENOMEM;
4217 	}
4218 
4219 	num = 0;
4220 	while (pattern[num].type != RTE_FLOW_ITEM_TYPE_END) {
4221 		memcpy(&new_pattern[num].generic_item, &pattern[num],
4222 		       sizeof(struct rte_flow_item));
4223 		new_pattern[num].in_tunnel = 0;
4224 
4225 		if (pattern[num].type == RTE_FLOW_ITEM_TYPE_VXLAN)
4226 			tunnel_start = 1;
4227 		else if (tunnel_start)
4228 			new_pattern[num].in_tunnel = 1;
4229 		num++;
4230 	}
4231 
4232 	new_pattern[num].generic_item.type = RTE_FLOW_ITEM_TYPE_END;
4233 	*dpaa2_pattern = new_pattern;
4234 
4235 	return 0;
4236 }
4237 
4238 static int
4239 dpaa2_generic_flow_set(struct dpaa2_dev_flow *flow,
4240 	struct rte_eth_dev *dev,
4241 	const struct rte_flow_attr *attr,
4242 	const struct rte_flow_item pattern[],
4243 	const struct rte_flow_action actions[],
4244 	struct rte_flow_error *error)
4245 {
4246 	const struct rte_flow_action_rss *rss_conf;
4247 	int is_keycfg_configured = 0, end_of_list = 0;
4248 	int ret = 0, i = 0, j = 0;
4249 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4250 	struct dpaa2_dev_flow *curr = LIST_FIRST(&priv->flows);
4251 	uint16_t dist_size, key_size;
4252 	struct dpaa2_key_extract *qos_key_extract;
4253 	struct dpaa2_key_extract *tc_key_extract;
4254 	struct rte_dpaa2_flow_item *dpaa2_pattern = NULL;
4255 
4256 	ret = dpaa2_flow_verify_attr(priv, attr);
4257 	if (ret)
4258 		return ret;
4259 
4260 	ret = dpaa2_flow_verify_action(priv, attr, actions);
4261 	if (ret)
4262 		return ret;
4263 
4264 	ret = dpaa2_flow_item_convert(pattern, &dpaa2_pattern);
4265 	if (ret)
4266 		return ret;
4267 
4268 	/* Parse pattern list to get the matching parameters */
4269 	while (!end_of_list) {
4270 		switch (pattern[i].type) {
4271 		case RTE_FLOW_ITEM_TYPE_ETH:
4272 			ret = dpaa2_configure_flow_eth(flow, dev, attr,
4273 					&dpaa2_pattern[i],
4274 					actions, error,
4275 					&is_keycfg_configured);
4276 			if (ret) {
4277 				DPAA2_PMD_ERR("ETH flow config failed!");
4278 				goto end_flow_set;
4279 			}
4280 			break;
4281 		case RTE_FLOW_ITEM_TYPE_VLAN:
4282 			ret = dpaa2_configure_flow_vlan(flow, dev, attr,
4283 					&dpaa2_pattern[i],
4284 					actions, error,
4285 					&is_keycfg_configured);
4286 			if (ret) {
4287 				DPAA2_PMD_ERR("vLan flow config failed!");
4288 				goto end_flow_set;
4289 			}
4290 			break;
4291 		case RTE_FLOW_ITEM_TYPE_IPV4:
4292 			ret = dpaa2_configure_flow_ipv4(flow, dev, attr,
4293 					&dpaa2_pattern[i],
4294 					actions, error,
4295 					&is_keycfg_configured);
4296 			if (ret) {
4297 				DPAA2_PMD_ERR("IPV4 flow config failed!");
4298 				goto end_flow_set;
4299 			}
4300 			break;
4301 		case RTE_FLOW_ITEM_TYPE_IPV6:
4302 			ret = dpaa2_configure_flow_ipv6(flow, dev, attr,
4303 					&dpaa2_pattern[i],
4304 					actions, error,
4305 					&is_keycfg_configured);
4306 			if (ret) {
4307 				DPAA2_PMD_ERR("IPV6 flow config failed!");
4308 				goto end_flow_set;
4309 			}
4310 			break;
4311 		case RTE_FLOW_ITEM_TYPE_ICMP:
4312 			ret = dpaa2_configure_flow_icmp(flow, dev, attr,
4313 					&dpaa2_pattern[i],
4314 					actions, error,
4315 					&is_keycfg_configured);
4316 			if (ret) {
4317 				DPAA2_PMD_ERR("ICMP flow config failed!");
4318 				goto end_flow_set;
4319 			}
4320 			break;
4321 		case RTE_FLOW_ITEM_TYPE_UDP:
4322 			ret = dpaa2_configure_flow_udp(flow, dev, attr,
4323 					&dpaa2_pattern[i],
4324 					actions, error,
4325 					&is_keycfg_configured);
4326 			if (ret) {
4327 				DPAA2_PMD_ERR("UDP flow config failed!");
4328 				goto end_flow_set;
4329 			}
4330 			break;
4331 		case RTE_FLOW_ITEM_TYPE_TCP:
4332 			ret = dpaa2_configure_flow_tcp(flow, dev, attr,
4333 					&dpaa2_pattern[i],
4334 					actions, error,
4335 					&is_keycfg_configured);
4336 			if (ret) {
4337 				DPAA2_PMD_ERR("TCP flow config failed!");
4338 				goto end_flow_set;
4339 			}
4340 			break;
4341 		case RTE_FLOW_ITEM_TYPE_SCTP:
4342 			ret = dpaa2_configure_flow_sctp(flow, dev, attr,
4343 					&dpaa2_pattern[i],
4344 					actions, error,
4345 					&is_keycfg_configured);
4346 			if (ret) {
4347 				DPAA2_PMD_ERR("SCTP flow config failed!");
4348 				goto end_flow_set;
4349 			}
4350 			break;
4351 		case RTE_FLOW_ITEM_TYPE_GRE:
4352 			ret = dpaa2_configure_flow_gre(flow, dev, attr,
4353 					&dpaa2_pattern[i],
4354 					actions, error,
4355 					&is_keycfg_configured);
4356 			if (ret) {
4357 				DPAA2_PMD_ERR("GRE flow config failed!");
4358 				goto end_flow_set;
4359 			}
4360 			break;
4361 		case RTE_FLOW_ITEM_TYPE_VXLAN:
4362 			ret = dpaa2_configure_flow_vxlan(flow, dev, attr,
4363 					&dpaa2_pattern[i],
4364 					actions, error,
4365 					&is_keycfg_configured);
4366 			if (ret) {
4367 				DPAA2_PMD_ERR("VXLAN flow config failed!");
4368 				goto end_flow_set;
4369 			}
4370 			break;
4371 		case RTE_FLOW_ITEM_TYPE_ECPRI:
4372 			ret = dpaa2_configure_flow_ecpri(flow,
4373 					dev, attr, &dpaa2_pattern[i],
4374 					actions, error,
4375 					&is_keycfg_configured);
4376 			if (ret) {
4377 				DPAA2_PMD_ERR("ECPRI flow config failed!");
4378 				goto end_flow_set;
4379 			}
4380 			break;
4381 		case RTE_FLOW_ITEM_TYPE_GTP:
4382 			ret = dpaa2_configure_flow_gtp(flow,
4383 					dev, attr, &dpaa2_pattern[i],
4384 					actions, error,
4385 					&is_keycfg_configured);
4386 			if (ret) {
4387 				DPAA2_PMD_ERR("GTP flow config failed!");
4388 				goto end_flow_set;
4389 			}
4390 			break;
4391 		case RTE_FLOW_ITEM_TYPE_RAW:
4392 			ret = dpaa2_configure_flow_raw(flow, dev, attr,
4393 					&dpaa2_pattern[i],
4394 					actions, error,
4395 					&is_keycfg_configured);
4396 			if (ret) {
4397 				DPAA2_PMD_ERR("RAW flow config failed!");
4398 				goto end_flow_set;
4399 			}
4400 			break;
4401 		case RTE_FLOW_ITEM_TYPE_END:
4402 			end_of_list = 1;
4403 			break; /*End of List*/
4404 		default:
4405 			DPAA2_PMD_ERR("Invalid flow item[%d] type(%d)",
4406 				i, pattern[i].type);
4407 			ret = -ENOTSUP;
4408 			break;
4409 		}
4410 		i++;
4411 	}
4412 
4413 	qos_key_extract = &priv->extract.qos_key_extract;
4414 	key_size = qos_key_extract->key_profile.key_max_size;
4415 	flow->qos_rule.key_size = dpaa2_flow_entry_size(key_size);
4416 
4417 	tc_key_extract = &priv->extract.tc_key_extract[flow->tc_id];
4418 	key_size = tc_key_extract->key_profile.key_max_size;
4419 	flow->fs_rule.key_size = dpaa2_flow_entry_size(key_size);
4420 
4421 	/* Let's parse action on matching traffic */
4422 	end_of_list = 0;
4423 	while (!end_of_list) {
4424 		switch (actions[j].type) {
4425 		case RTE_FLOW_ACTION_TYPE_QUEUE:
4426 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
4427 		case RTE_FLOW_ACTION_TYPE_PORT_ID:
4428 			ret = dpaa2_configure_flow_fs_action(priv, flow,
4429 							     &actions[j]);
4430 			if (ret)
4431 				goto end_flow_set;
4432 
4433 			/* Configure FS table first*/
4434 			dist_size = priv->nb_rx_queues / priv->num_rx_tc;
4435 			if (is_keycfg_configured & DPAA2_FLOW_FS_TYPE) {
4436 				ret = dpaa2_configure_fs_rss_table(priv,
4437 								   flow->tc_id,
4438 								   dist_size,
4439 								   false);
4440 				if (ret)
4441 					goto end_flow_set;
4442 			}
4443 
4444 			/* Configure QoS table then.*/
4445 			if (is_keycfg_configured & DPAA2_FLOW_QOS_TYPE) {
4446 				ret = dpaa2_configure_qos_table(priv, false);
4447 				if (ret)
4448 					goto end_flow_set;
4449 			}
4450 
4451 			if (priv->num_rx_tc > 1) {
4452 				ret = dpaa2_flow_add_qos_rule(priv, flow);
4453 				if (ret)
4454 					goto end_flow_set;
4455 			}
4456 
4457 			if (flow->tc_index >= priv->fs_entries) {
4458 				DPAA2_PMD_ERR("FS table with %d entries full",
4459 					priv->fs_entries);
4460 				return -1;
4461 			}
4462 
4463 			ret = dpaa2_flow_add_fs_rule(priv, flow);
4464 			if (ret)
4465 				goto end_flow_set;
4466 
4467 			break;
4468 		case RTE_FLOW_ACTION_TYPE_RSS:
4469 			rss_conf = actions[j].conf;
4470 			flow->action_type = RTE_FLOW_ACTION_TYPE_RSS;
4471 
4472 			ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types,
4473 					&tc_key_extract->dpkg);
4474 			if (ret < 0) {
4475 				DPAA2_PMD_ERR("TC[%d] distset RSS failed",
4476 					      flow->tc_id);
4477 				goto end_flow_set;
4478 			}
4479 
4480 			dist_size = rss_conf->queue_num;
4481 			if (is_keycfg_configured & DPAA2_FLOW_FS_TYPE) {
4482 				ret = dpaa2_configure_fs_rss_table(priv,
4483 								   flow->tc_id,
4484 								   dist_size,
4485 								   true);
4486 				if (ret)
4487 					goto end_flow_set;
4488 			}
4489 
4490 			if (is_keycfg_configured & DPAA2_FLOW_QOS_TYPE) {
4491 				ret = dpaa2_configure_qos_table(priv, true);
4492 				if (ret)
4493 					goto end_flow_set;
4494 			}
4495 
4496 			ret = dpaa2_flow_add_qos_rule(priv, flow);
4497 			if (ret)
4498 				goto end_flow_set;
4499 
4500 			ret = dpaa2_flow_add_fs_rule(priv, flow);
4501 			if (ret)
4502 				goto end_flow_set;
4503 
4504 			break;
4505 		case RTE_FLOW_ACTION_TYPE_PF:
4506 			/* Skip this action, have to add for vxlan */
4507 			break;
4508 		case RTE_FLOW_ACTION_TYPE_END:
4509 			end_of_list = 1;
4510 			break;
4511 		default:
4512 			DPAA2_PMD_ERR("Invalid action type");
4513 			ret = -ENOTSUP;
4514 			break;
4515 		}
4516 		j++;
4517 	}
4518 
4519 end_flow_set:
4520 	if (!ret) {
4521 		/* New rules are inserted. */
4522 		if (!curr) {
4523 			LIST_INSERT_HEAD(&priv->flows, flow, next);
4524 		} else {
4525 			while (LIST_NEXT(curr, next))
4526 				curr = LIST_NEXT(curr, next);
4527 			LIST_INSERT_AFTER(curr, flow, next);
4528 		}
4529 	}
4530 
4531 	if (dpaa2_pattern)
4532 		rte_free(dpaa2_pattern);
4533 
4534 	return ret;
4535 }
4536 
4537 static inline int
4538 dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr,
4539 	const struct rte_flow_attr *attr)
4540 {
4541 	int ret = 0;
4542 
4543 	if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) {
4544 		DPAA2_PMD_ERR("Group/TC(%d) is out of range(%d)",
4545 			attr->group, dpni_attr->num_rx_tcs);
4546 		ret = -ENOTSUP;
4547 	}
4548 	if (unlikely(attr->priority >= dpni_attr->fs_entries)) {
4549 		DPAA2_PMD_ERR("Priority(%d) within group is out of range(%d)",
4550 			attr->priority, dpni_attr->fs_entries);
4551 		ret = -ENOTSUP;
4552 	}
4553 	if (unlikely(attr->egress)) {
4554 		DPAA2_PMD_ERR("Egress flow configuration is not supported");
4555 		ret = -ENOTSUP;
4556 	}
4557 	if (unlikely(!attr->ingress)) {
4558 		DPAA2_PMD_ERR("Ingress flag must be configured");
4559 		ret = -EINVAL;
4560 	}
4561 	return ret;
4562 }
4563 
4564 static inline int
4565 dpaa2_dev_verify_patterns(const struct rte_flow_item pattern[])
4566 {
4567 	unsigned int i, j, is_found = 0;
4568 	int ret = 0;
4569 	const enum rte_flow_item_type *hp_supported;
4570 	const enum rte_flow_item_type *sp_supported;
4571 	uint64_t hp_supported_num, sp_supported_num;
4572 
4573 	hp_supported = dpaa2_hp_supported_pattern_type;
4574 	hp_supported_num = RTE_DIM(dpaa2_hp_supported_pattern_type);
4575 
4576 	sp_supported = dpaa2_sp_supported_pattern_type;
4577 	sp_supported_num = RTE_DIM(dpaa2_sp_supported_pattern_type);
4578 
4579 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
4580 		is_found = 0;
4581 		for (i = 0; i < hp_supported_num; i++) {
4582 			if (hp_supported[i] == pattern[j].type) {
4583 				is_found = 1;
4584 				break;
4585 			}
4586 		}
4587 		if (is_found)
4588 			continue;
4589 		if (dpaa2_sp_loaded > 0) {
4590 			for (i = 0; i < sp_supported_num; i++) {
4591 				if (sp_supported[i] == pattern[j].type) {
4592 					is_found = 1;
4593 					break;
4594 				}
4595 			}
4596 		}
4597 		if (!is_found) {
4598 			DPAA2_PMD_WARN("Flow type(%d) not supported",
4599 				pattern[j].type);
4600 			ret = -ENOTSUP;
4601 			break;
4602 		}
4603 	}
4604 
4605 	return ret;
4606 }
4607 
4608 static inline int
4609 dpaa2_dev_verify_actions(const struct rte_flow_action actions[])
4610 {
4611 	unsigned int i, j, is_found = 0;
4612 	int ret = 0;
4613 
4614 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
4615 		for (i = 0; i < RTE_DIM(dpaa2_supported_action_type); i++) {
4616 			if (dpaa2_supported_action_type[i] == actions[j].type) {
4617 				is_found = 1;
4618 				break;
4619 			}
4620 		}
4621 		if (!is_found) {
4622 			ret = -ENOTSUP;
4623 			break;
4624 		}
4625 	}
4626 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
4627 		if (actions[j].type != RTE_FLOW_ACTION_TYPE_DROP &&
4628 		    !actions[j].conf)
4629 			ret = -EINVAL;
4630 	}
4631 	return ret;
4632 }
4633 
4634 static int
4635 dpaa2_flow_validate(struct rte_eth_dev *dev,
4636 	const struct rte_flow_attr *flow_attr,
4637 	const struct rte_flow_item pattern[],
4638 	const struct rte_flow_action actions[],
4639 	struct rte_flow_error *error)
4640 {
4641 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4642 	struct dpni_attr dpni_attr;
4643 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
4644 	uint16_t token = priv->token;
4645 	int ret = 0;
4646 
4647 	memset(&dpni_attr, 0, sizeof(struct dpni_attr));
4648 	ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr);
4649 	if (ret < 0) {
4650 		DPAA2_PMD_ERR("Get dpni@%d attribute failed(%d)",
4651 			priv->hw_id, ret);
4652 		rte_flow_error_set(error, EPERM,
4653 			RTE_FLOW_ERROR_TYPE_ATTR,
4654 			flow_attr, "invalid");
4655 		return ret;
4656 	}
4657 
4658 	/* Verify input attributes */
4659 	ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr);
4660 	if (ret < 0) {
4661 		DPAA2_PMD_ERR("Invalid attributes are given");
4662 		rte_flow_error_set(error, EPERM,
4663 			RTE_FLOW_ERROR_TYPE_ATTR,
4664 			flow_attr, "invalid");
4665 		goto not_valid_params;
4666 	}
4667 	/* Verify input pattern list */
4668 	ret = dpaa2_dev_verify_patterns(pattern);
4669 	if (ret < 0) {
4670 		DPAA2_PMD_ERR("Invalid pattern list is given");
4671 		rte_flow_error_set(error, EPERM,
4672 			RTE_FLOW_ERROR_TYPE_ITEM,
4673 			pattern, "invalid");
4674 		goto not_valid_params;
4675 	}
4676 	/* Verify input action list */
4677 	ret = dpaa2_dev_verify_actions(actions);
4678 	if (ret < 0) {
4679 		DPAA2_PMD_ERR("Invalid action list is given");
4680 		rte_flow_error_set(error, EPERM,
4681 			RTE_FLOW_ERROR_TYPE_ACTION,
4682 			actions, "invalid");
4683 		goto not_valid_params;
4684 	}
4685 not_valid_params:
4686 	return ret;
4687 }
4688 
4689 static struct rte_flow *
4690 dpaa2_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
4691 		  const struct rte_flow_item pattern[],
4692 		  const struct rte_flow_action actions[],
4693 		  struct rte_flow_error *error)
4694 {
4695 	struct dpaa2_dev_flow *flow = NULL;
4696 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4697 	int ret;
4698 
4699 	dpaa2_flow_control_log =
4700 		getenv("DPAA2_FLOW_CONTROL_LOG");
4701 
4702 	if (getenv("DPAA2_FLOW_CONTROL_MISS_FLOW")) {
4703 		dpaa2_flow_miss_flow_id =
4704 			(uint16_t)atoi(getenv("DPAA2_FLOW_CONTROL_MISS_FLOW"));
4705 		if (dpaa2_flow_miss_flow_id >= priv->dist_queues) {
4706 			DPAA2_PMD_ERR("Missed flow ID %d >= dist size(%d)",
4707 				      dpaa2_flow_miss_flow_id,
4708 				      priv->dist_queues);
4709 			return NULL;
4710 		}
4711 	}
4712 
4713 	flow = rte_zmalloc(NULL, sizeof(struct dpaa2_dev_flow),
4714 			   RTE_CACHE_LINE_SIZE);
4715 	if (!flow) {
4716 		DPAA2_PMD_ERR("Failure to allocate memory for flow");
4717 		goto mem_failure;
4718 	}
4719 
4720 	/* Allocate DMA'ble memory to write the qos rules */
4721 	flow->qos_key_addr = rte_zmalloc(NULL, 256, 64);
4722 	if (!flow->qos_key_addr) {
4723 		DPAA2_PMD_ERR("Memory allocation failed");
4724 		goto mem_failure;
4725 	}
4726 	flow->qos_rule.key_iova = DPAA2_VADDR_TO_IOVA(flow->qos_key_addr);
4727 
4728 	flow->qos_mask_addr = rte_zmalloc(NULL, 256, 64);
4729 	if (!flow->qos_mask_addr) {
4730 		DPAA2_PMD_ERR("Memory allocation failed");
4731 		goto mem_failure;
4732 	}
4733 	flow->qos_rule.mask_iova = DPAA2_VADDR_TO_IOVA(flow->qos_mask_addr);
4734 
4735 	/* Allocate DMA'ble memory to write the FS rules */
4736 	flow->fs_key_addr = rte_zmalloc(NULL, 256, 64);
4737 	if (!flow->fs_key_addr) {
4738 		DPAA2_PMD_ERR("Memory allocation failed");
4739 		goto mem_failure;
4740 	}
4741 	flow->fs_rule.key_iova = DPAA2_VADDR_TO_IOVA(flow->fs_key_addr);
4742 
4743 	flow->fs_mask_addr = rte_zmalloc(NULL, 256, 64);
4744 	if (!flow->fs_mask_addr) {
4745 		DPAA2_PMD_ERR("Memory allocation failed");
4746 		goto mem_failure;
4747 	}
4748 	flow->fs_rule.mask_iova = DPAA2_VADDR_TO_IOVA(flow->fs_mask_addr);
4749 
4750 	priv->curr = flow;
4751 
4752 	ret = dpaa2_generic_flow_set(flow, dev, attr, pattern, actions, error);
4753 	if (ret < 0) {
4754 		if (error && error->type > RTE_FLOW_ERROR_TYPE_ACTION)
4755 			rte_flow_error_set(error, EPERM,
4756 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4757 					   attr, "unknown");
4758 		DPAA2_PMD_ERR("Create flow failed (%d)", ret);
4759 		goto creation_error;
4760 	}
4761 
4762 	priv->curr = NULL;
4763 	return (struct rte_flow *)flow;
4764 
4765 mem_failure:
4766 	rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4767 			   "memory alloc");
4768 
4769 creation_error:
4770 	if (flow) {
4771 		if (flow->qos_key_addr)
4772 			rte_free(flow->qos_key_addr);
4773 		if (flow->qos_mask_addr)
4774 			rte_free(flow->qos_mask_addr);
4775 		if (flow->fs_key_addr)
4776 			rte_free(flow->fs_key_addr);
4777 		if (flow->fs_mask_addr)
4778 			rte_free(flow->fs_mask_addr);
4779 		rte_free(flow);
4780 	}
4781 	priv->curr = NULL;
4782 
4783 	return NULL;
4784 }
4785 
4786 static int
4787 dpaa2_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *_flow,
4788 		   struct rte_flow_error *error)
4789 {
4790 	int ret = 0;
4791 	struct dpaa2_dev_flow *flow;
4792 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4793 	struct fsl_mc_io *dpni = priv->hw;
4794 
4795 	flow = (struct dpaa2_dev_flow *)_flow;
4796 
4797 	switch (flow->action_type) {
4798 	case RTE_FLOW_ACTION_TYPE_QUEUE:
4799 	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
4800 	case RTE_FLOW_ACTION_TYPE_PORT_ID:
4801 		if (priv->num_rx_tc > 1) {
4802 			/* Remove entry from QoS table first */
4803 			ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW,
4804 						    priv->token,
4805 						    &flow->qos_rule);
4806 			if (ret < 0) {
4807 				DPAA2_PMD_ERR("Remove FS QoS entry failed");
4808 				dpaa2_flow_qos_entry_log("Delete failed", flow,
4809 							 -1);
4810 				abort();
4811 				goto error;
4812 			}
4813 		}
4814 
4815 		/* Then remove entry from FS table */
4816 		ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token,
4817 					   flow->tc_id, &flow->fs_rule);
4818 		if (ret < 0) {
4819 			DPAA2_PMD_ERR("Remove entry from FS[%d] failed",
4820 				      flow->tc_id);
4821 			goto error;
4822 		}
4823 		break;
4824 	case RTE_FLOW_ACTION_TYPE_RSS:
4825 		if (priv->num_rx_tc > 1) {
4826 			ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW,
4827 						    priv->token,
4828 						    &flow->qos_rule);
4829 			if (ret < 0) {
4830 				DPAA2_PMD_ERR("Remove RSS QoS entry failed");
4831 				goto error;
4832 			}
4833 		}
4834 		break;
4835 	default:
4836 		DPAA2_PMD_ERR("Action(%d) not supported", flow->action_type);
4837 		ret = -ENOTSUP;
4838 		break;
4839 	}
4840 
4841 	LIST_REMOVE(flow, next);
4842 	if (flow->qos_key_addr)
4843 		rte_free(flow->qos_key_addr);
4844 	if (flow->qos_mask_addr)
4845 		rte_free(flow->qos_mask_addr);
4846 	if (flow->fs_key_addr)
4847 		rte_free(flow->fs_key_addr);
4848 	if (flow->fs_mask_addr)
4849 		rte_free(flow->fs_mask_addr);
4850 	/* Now free the flow */
4851 	rte_free(flow);
4852 
4853 error:
4854 	if (ret)
4855 		rte_flow_error_set(error, EPERM,
4856 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4857 				   NULL, "unknown");
4858 	return ret;
4859 }
4860 
4861 /**
4862  * Destroy user-configured flow rules.
4863  *
4864  * This function skips internal flows rules.
4865  *
4866  * @see rte_flow_flush()
4867  * @see rte_flow_ops
4868  */
4869 static int
4870 dpaa2_flow_flush(struct rte_eth_dev *dev,
4871 		struct rte_flow_error *error)
4872 {
4873 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4874 	struct dpaa2_dev_flow *flow = LIST_FIRST(&priv->flows);
4875 
4876 	while (flow) {
4877 		struct dpaa2_dev_flow *next = LIST_NEXT(flow, next);
4878 
4879 		dpaa2_flow_destroy(dev, (struct rte_flow *)flow, error);
4880 		flow = next;
4881 	}
4882 	return 0;
4883 }
4884 
4885 static int
4886 dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused,
4887 	struct rte_flow *_flow __rte_unused,
4888 	const struct rte_flow_action *actions __rte_unused,
4889 	void *data __rte_unused,
4890 	struct rte_flow_error *error __rte_unused)
4891 {
4892 	return 0;
4893 }
4894 
4895 /**
4896  * Clean up all flow rules.
4897  *
4898  * Unlike dpaa2_flow_flush(), this function takes care of all remaining flow
4899  * rules regardless of whether they are internal or user-configured.
4900  *
4901  * @param priv
4902  *   Pointer to private structure.
4903  */
4904 void
4905 dpaa2_flow_clean(struct rte_eth_dev *dev)
4906 {
4907 	struct dpaa2_dev_flow *flow;
4908 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4909 
4910 	while ((flow = LIST_FIRST(&priv->flows)))
4911 		dpaa2_flow_destroy(dev, (struct rte_flow *)flow, NULL);
4912 }
4913 
4914 const struct rte_flow_ops dpaa2_flow_ops = {
4915 	.create	= dpaa2_flow_create,
4916 	.validate = dpaa2_flow_validate,
4917 	.destroy = dpaa2_flow_destroy,
4918 	.flush	= dpaa2_flow_flush,
4919 	.query	= dpaa2_flow_query,
4920 };
4921