xref: /dpdk/drivers/net/dpaa2/dpaa2_flow.c (revision bac4a29663fd387e86c2033254da51db304c1c71)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2020 NXP
3  */
4 
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 
13 #include <rte_ethdev.h>
14 #include <rte_log.h>
15 #include <rte_malloc.h>
16 #include <rte_flow_driver.h>
17 #include <rte_tailq.h>
18 
19 #include <fsl_dpni.h>
20 #include <fsl_dpkg.h>
21 
22 #include <dpaa2_ethdev.h>
23 #include <dpaa2_pmd_logs.h>
24 
25 /* Workaround to discriminate UDP/TCP/SCTP by the
26  * next-protocol field of L3, since MC/WRIOP are not
27  * able to identify the L4 protocol from the
28  * L4 ports alone.
29  */
30 int mc_l4_port_identification;
31 
32 static char *dpaa2_flow_control_log;
33 
34 #define FIXED_ENTRY_SIZE 54
35 
36 enum flow_rule_ipaddr_type {
37 	FLOW_NONE_IPADDR,
38 	FLOW_IPV4_ADDR,
39 	FLOW_IPV6_ADDR
40 };
41 
42 struct flow_rule_ipaddr {
43 	enum flow_rule_ipaddr_type ipaddr_type;
44 	int qos_ipsrc_offset;
45 	int qos_ipdst_offset;
46 	int fs_ipsrc_offset;
47 	int fs_ipdst_offset;
48 };
49 
50 struct rte_flow {
51 	LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
52 	struct dpni_rule_cfg qos_rule;
53 	struct dpni_rule_cfg fs_rule;
54 	uint8_t qos_real_key_size;
55 	uint8_t fs_real_key_size;
56 	uint8_t tc_id; /**< Traffic Class ID. */
57 	uint8_t tc_index; /**< Index within this Traffic Class. */
58 	enum rte_flow_action_type action;
59 	/* Special for IP address to specify the offset
60 	 * in key/mask.
61 	 */
62 	struct flow_rule_ipaddr ipaddr_rule;
63 	struct dpni_fs_action_cfg action_cfg;
64 };
65 
66 static const
67 enum rte_flow_item_type dpaa2_supported_pattern_type[] = {
68 	RTE_FLOW_ITEM_TYPE_END,
69 	RTE_FLOW_ITEM_TYPE_ETH,
70 	RTE_FLOW_ITEM_TYPE_VLAN,
71 	RTE_FLOW_ITEM_TYPE_IPV4,
72 	RTE_FLOW_ITEM_TYPE_IPV6,
73 	RTE_FLOW_ITEM_TYPE_ICMP,
74 	RTE_FLOW_ITEM_TYPE_UDP,
75 	RTE_FLOW_ITEM_TYPE_TCP,
76 	RTE_FLOW_ITEM_TYPE_SCTP,
77 	RTE_FLOW_ITEM_TYPE_GRE,
78 };
79 
80 static const
81 enum rte_flow_action_type dpaa2_supported_action_type[] = {
82 	RTE_FLOW_ACTION_TYPE_END,
83 	RTE_FLOW_ACTION_TYPE_QUEUE,
84 	RTE_FLOW_ACTION_TYPE_RSS
85 };
86 
87 /* One beyond the max of enum rte_flow_item_type, used for both IPv4 and IPv6 */
88 #define DPAA2_FLOW_ITEM_TYPE_GENERIC_IP (RTE_FLOW_ITEM_TYPE_META + 1)
89 
90 enum rte_filter_type dpaa2_filter_type = RTE_ETH_FILTER_NONE;
91 
92 #ifndef __cplusplus
93 static const struct rte_flow_item_eth dpaa2_flow_item_eth_mask = {
94 	.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
95 	.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
96 	.type = RTE_BE16(0xffff),
97 };
98 
99 static const struct rte_flow_item_vlan dpaa2_flow_item_vlan_mask = {
100 	.tci = RTE_BE16(0xffff),
101 };
102 
103 static const struct rte_flow_item_ipv4 dpaa2_flow_item_ipv4_mask = {
104 	.hdr.src_addr = RTE_BE32(0xffffffff),
105 	.hdr.dst_addr = RTE_BE32(0xffffffff),
106 	.hdr.next_proto_id = 0xff,
107 };
108 
109 static const struct rte_flow_item_ipv6 dpaa2_flow_item_ipv6_mask = {
110 	.hdr = {
111 		.src_addr =
112 			"\xff\xff\xff\xff\xff\xff\xff\xff"
113 			"\xff\xff\xff\xff\xff\xff\xff\xff",
114 		.dst_addr =
115 			"\xff\xff\xff\xff\xff\xff\xff\xff"
116 			"\xff\xff\xff\xff\xff\xff\xff\xff",
117 		.proto = 0xff
118 	},
119 };
120 
121 static const struct rte_flow_item_icmp dpaa2_flow_item_icmp_mask = {
122 	.hdr.icmp_type = 0xff,
123 	.hdr.icmp_code = 0xff,
124 };
125 
126 static const struct rte_flow_item_udp dpaa2_flow_item_udp_mask = {
127 	.hdr = {
128 		.src_port = RTE_BE16(0xffff),
129 		.dst_port = RTE_BE16(0xffff),
130 	},
131 };
132 
133 static const struct rte_flow_item_tcp dpaa2_flow_item_tcp_mask = {
134 	.hdr = {
135 		.src_port = RTE_BE16(0xffff),
136 		.dst_port = RTE_BE16(0xffff),
137 	},
138 };
139 
140 static const struct rte_flow_item_sctp dpaa2_flow_item_sctp_mask = {
141 	.hdr = {
142 		.src_port = RTE_BE16(0xffff),
143 		.dst_port = RTE_BE16(0xffff),
144 	},
145 };
146 
147 static const struct rte_flow_item_gre dpaa2_flow_item_gre_mask = {
148 	.protocol = RTE_BE16(0xffff),
149 };
150 
151 #endif
152 
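/* Render "prot.field" as a human-readable string for flow-control logs. */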
153 static inline void dpaa2_prot_field_string(
154 	enum net_prot prot, uint32_t field,
155 	char *string)
156 {
157 	if (!dpaa2_flow_control_log)
158 		return;
159 
160 	if (prot == NET_PROT_ETH) {
161 		strcpy(string, "eth");
162 		if (field == NH_FLD_ETH_DA)
163 			strcat(string, ".dst");
164 		else if (field == NH_FLD_ETH_SA)
165 			strcat(string, ".src");
166 		else if (field == NH_FLD_ETH_TYPE)
167 			strcat(string, ".type");
168 		else
169 			strcat(string, ".unknown field");
170 	} else if (prot == NET_PROT_VLAN) {
171 		strcpy(string, "vlan");
172 		if (field == NH_FLD_VLAN_TCI)
173 			strcat(string, ".tci");
174 		else
175 			strcat(string, ".unknown field");
176 	} else if (prot == NET_PROT_IP) {
177 		strcpy(string, "ip");
178 		if (field == NH_FLD_IP_SRC)
179 			strcat(string, ".src");
180 		else if (field == NH_FLD_IP_DST)
181 			strcat(string, ".dst");
182 		else if (field == NH_FLD_IP_PROTO)
183 			strcat(string, ".proto");
184 		else
185 			strcat(string, ".unknown field");
186 	} else if (prot == NET_PROT_TCP) {
187 		strcpy(string, "tcp");
188 		if (field == NH_FLD_TCP_PORT_SRC)
189 			strcat(string, ".src");
190 		else if (field == NH_FLD_TCP_PORT_DST)
191 			strcat(string, ".dst");
192 		else
193 			strcat(string, ".unknown field");
194 	} else if (prot == NET_PROT_UDP) {
195 		strcpy(string, "udp");
196 		if (field == NH_FLD_UDP_PORT_SRC)
197 			strcat(string, ".src");
198 		else if (field == NH_FLD_UDP_PORT_DST)
199 			strcat(string, ".dst");
200 		else
201 			strcat(string, ".unknown field");
202 	} else if (prot == NET_PROT_ICMP) {
203 		strcpy(string, "icmp");
204 		if (field == NH_FLD_ICMP_TYPE)
205 			strcat(string, ".type");
206 		else if (field == NH_FLD_ICMP_CODE)
207 			strcat(string, ".code");
208 		else
209 			strcat(string, ".unknown field");
210 	} else if (prot == NET_PROT_SCTP) {
211 		strcpy(string, "sctp");
212 		if (field == NH_FLD_SCTP_PORT_SRC)
213 			strcat(string, ".src");
214 		else if (field == NH_FLD_SCTP_PORT_DST)
215 			strcat(string, ".dst");
216 		else
217 			strcat(string, ".unknown field");
218 	} else if (prot == NET_PROT_GRE) {
219 		strcpy(string, "gre");
220 		if (field == NH_FLD_GRE_TYPE)
221 			strcat(string, ".type");
222 		else
223 			strcat(string, ".unknown field");
224 	} else {
225 		strcpy(string, "unknown protocol");
226 	}
227 }
228 
229 static inline void dpaa2_flow_qos_table_extracts_log(
230 	const struct dpaa2_dev_priv *priv)
231 {
232 	int idx;
233 	char string[32];
234 
235 	if (!dpaa2_flow_control_log)
236 		return;
237 
238 	printf("Setup QoS table: number of extracts: %d\r\n",
239 			priv->extract.qos_key_extract.dpkg.num_extracts);
240 	for (idx = 0; idx < priv->extract.qos_key_extract.dpkg.num_extracts;
241 		idx++) {
242 		dpaa2_prot_field_string(priv->extract.qos_key_extract.dpkg
243 			.extracts[idx].extract.from_hdr.prot,
244 			priv->extract.qos_key_extract.dpkg.extracts[idx]
245 			.extract.from_hdr.field,
246 			string);
247 		printf("%s", string);
248 		if ((idx + 1) < priv->extract.qos_key_extract.dpkg.num_extracts)
249 			printf(" / ");
250 	}
251 	printf("\r\n");
252 }
253 
254 static inline void dpaa2_flow_fs_table_extracts_log(
255 	const struct dpaa2_dev_priv *priv, int tc_id)
256 {
257 	int idx;
258 	char string[32];
259 
260 	if (!dpaa2_flow_control_log)
261 		return;
262 
263 	printf("Setup FS table: number of extracts of TC[%d]: %d\r\n",
264 			tc_id, priv->extract.tc_key_extract[tc_id]
265 			.dpkg.num_extracts);
266 	for (idx = 0; idx < priv->extract.tc_key_extract[tc_id]
267 		.dpkg.num_extracts; idx++) {
268 		dpaa2_prot_field_string(priv->extract.tc_key_extract[tc_id]
269 			.dpkg.extracts[idx].extract.from_hdr.prot,
270 			priv->extract.tc_key_extract[tc_id].dpkg.extracts[idx]
271 			.extract.from_hdr.field,
272 			string);
273 		printf("%s", string);
274 		if ((idx + 1) < priv->extract.tc_key_extract[tc_id]
275 			.dpkg.num_extracts)
276 			printf(" / ");
277 	}
278 	printf("\r\n");
279 }
280 
281 static inline void dpaa2_flow_qos_entry_log(
282 	const char *log_info, const struct rte_flow *flow, int qos_index)
283 {
284 	int idx;
285 	uint8_t *key, *mask;
286 
287 	if (!dpaa2_flow_control_log)
288 		return;
289 
290 	printf("\r\n%s QoS entry[%d] for TC[%d], extracts size is %d\r\n",
291 		log_info, qos_index, flow->tc_id, flow->qos_real_key_size);
292 
293 	key = (uint8_t *)(size_t)flow->qos_rule.key_iova;
294 	mask = (uint8_t *)(size_t)flow->qos_rule.mask_iova;
295 
296 	printf("key:\r\n");
297 	for (idx = 0; idx < flow->qos_real_key_size; idx++)
298 		printf("%02x ", key[idx]);
299 
300 	printf("\r\nmask:\r\n");
301 	for (idx = 0; idx < flow->qos_real_key_size; idx++)
302 		printf("%02x ", mask[idx]);
303 
304 	printf("\r\n%s QoS ipsrc: %d, ipdst: %d\r\n", log_info,
305 		flow->ipaddr_rule.qos_ipsrc_offset,
306 		flow->ipaddr_rule.qos_ipdst_offset);
307 }
308 
309 static inline void dpaa2_flow_fs_entry_log(
310 	const char *log_info, const struct rte_flow *flow)
311 {
312 	int idx;
313 	uint8_t *key, *mask;
314 
315 	if (!dpaa2_flow_control_log)
316 		return;
317 
318 	printf("\r\n%s FS/TC entry[%d] of TC[%d], extracts size is %d\r\n",
319 		log_info, flow->tc_index, flow->tc_id, flow->fs_real_key_size);
320 
321 	key = (uint8_t *)(size_t)flow->fs_rule.key_iova;
322 	mask = (uint8_t *)(size_t)flow->fs_rule.mask_iova;
323 
324 	printf("key:\r\n");
325 	for (idx = 0; idx < flow->fs_real_key_size; idx++)
326 		printf("%02x ", key[idx]);
327 
328 	printf("\r\nmask:\r\n");
329 	for (idx = 0; idx < flow->fs_real_key_size; idx++)
330 		printf("%02x ", mask[idx]);
331 
332 	printf("\r\n%s FS ipsrc: %d, ipdst: %d\r\n", log_info,
333 		flow->ipaddr_rule.fs_ipsrc_offset,
334 		flow->ipaddr_rule.fs_ipdst_offset);
335 }
336 
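/* Record the size and byte offset of extract @index in the key layout
 * and grow the accumulated key size accordingly.
 */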
337 static inline void dpaa2_flow_extract_key_set(
338 	struct dpaa2_key_info *key_info, int index, uint8_t size)
339 {
340 	key_info->key_size[index] = size;
341 	if (index > 0) {
342 		key_info->key_offset[index] =
343 			key_info->key_offset[index - 1] +
344 			key_info->key_size[index - 1];
345 	} else {
346 		key_info->key_offset[index] = 0;
347 	}
348 	key_info->key_total_size += size;
349 }
350 
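/* Append a full-field header extract for (prot, field) to the profile.
 * IP SRC/DST extracts are kept at the tail, so any other extract added
 * later is inserted just before them and the IP SRC/DST entries (and
 * their key offsets) are shifted back by one slot.
 */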
351 static int dpaa2_flow_extract_add(
352 	struct dpaa2_key_extract *key_extract,
353 	enum net_prot prot,
354 	uint32_t field, uint8_t field_size)
355 {
356 	int index, ip_src = -1, ip_dst = -1;
357 	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
358 	struct dpaa2_key_info *key_info = &key_extract->key_info;
359 
360 	if (dpkg->num_extracts >=
361 		DPKG_MAX_NUM_OF_EXTRACTS) {
362 		DPAA2_PMD_WARN("Number of extracts overflows");
363 		return -1;
364 	}
365 	/* Before reordering, the IP SRC and IP DST extracts (if present)
366 	 * are already the last extract(s).
367 	 */
368 	for (index = 0; index < dpkg->num_extracts; index++) {
369 		if (dpkg->extracts[index].extract.from_hdr.prot ==
370 			NET_PROT_IP) {
371 			if (dpkg->extracts[index].extract.from_hdr.field ==
372 				NH_FLD_IP_SRC) {
373 				ip_src = index;
374 			}
375 			if (dpkg->extracts[index].extract.from_hdr.field ==
376 				NH_FLD_IP_DST) {
377 				ip_dst = index;
378 			}
379 		}
380 	}
381 
382 	if (ip_src >= 0)
383 		RTE_ASSERT((ip_src + 2) >= dpkg->num_extracts);
384 
385 	if (ip_dst >= 0)
386 		RTE_ASSERT((ip_dst + 2) >= dpkg->num_extracts);
387 
388 	if (prot == NET_PROT_IP &&
389 		(field == NH_FLD_IP_SRC ||
390 		field == NH_FLD_IP_DST)) {
391 		index = dpkg->num_extracts;
392 	} else {
393 		if (ip_src >= 0 && ip_dst >= 0)
394 			index = dpkg->num_extracts - 2;
395 		else if (ip_src >= 0 || ip_dst >= 0)
396 			index = dpkg->num_extracts - 1;
397 		else
398 			index = dpkg->num_extracts;
399 	}
400 
401 	dpkg->extracts[index].type = DPKG_EXTRACT_FROM_HDR;
402 	dpkg->extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
403 	dpkg->extracts[index].extract.from_hdr.prot = prot;
404 	dpkg->extracts[index].extract.from_hdr.field = field;
405 	if (prot == NET_PROT_IP &&
406 		(field == NH_FLD_IP_SRC ||
407 		field == NH_FLD_IP_DST)) {
408 		dpaa2_flow_extract_key_set(key_info, index, 0);
409 	} else {
410 		dpaa2_flow_extract_key_set(key_info, index, field_size);
411 	}
412 
413 	if (prot == NET_PROT_IP) {
414 		if (field == NH_FLD_IP_SRC) {
415 			if (key_info->ipv4_dst_offset >= 0) {
416 				key_info->ipv4_src_offset =
417 					key_info->ipv4_dst_offset +
418 					NH_FLD_IPV4_ADDR_SIZE;
419 			} else {
420 				key_info->ipv4_src_offset =
421 					key_info->key_offset[index - 1] +
422 						key_info->key_size[index - 1];
423 			}
424 			if (key_info->ipv6_dst_offset >= 0) {
425 				key_info->ipv6_src_offset =
426 					key_info->ipv6_dst_offset +
427 					NH_FLD_IPV6_ADDR_SIZE;
428 			} else {
429 				key_info->ipv6_src_offset =
430 					key_info->key_offset[index - 1] +
431 						key_info->key_size[index - 1];
432 			}
433 		} else if (field == NH_FLD_IP_DST) {
434 			if (key_info->ipv4_src_offset >= 0) {
435 				key_info->ipv4_dst_offset =
436 					key_info->ipv4_src_offset +
437 					NH_FLD_IPV4_ADDR_SIZE;
438 			} else {
439 				key_info->ipv4_dst_offset =
440 					key_info->key_offset[index - 1] +
441 						key_info->key_size[index - 1];
442 			}
443 			if (key_info->ipv6_src_offset >= 0) {
444 				key_info->ipv6_dst_offset =
445 					key_info->ipv6_src_offset +
446 					NH_FLD_IPV6_ADDR_SIZE;
447 			} else {
448 				key_info->ipv6_dst_offset =
449 					key_info->key_offset[index - 1] +
450 						key_info->key_size[index - 1];
451 			}
452 		}
453 	}
454 
455 	if (index == dpkg->num_extracts) {
456 		dpkg->num_extracts++;
457 		return 0;
458 	}
459 
460 	if (ip_src >= 0) {
461 		ip_src++;
462 		dpkg->extracts[ip_src].type =
463 			DPKG_EXTRACT_FROM_HDR;
464 		dpkg->extracts[ip_src].extract.from_hdr.type =
465 			DPKG_FULL_FIELD;
466 		dpkg->extracts[ip_src].extract.from_hdr.prot =
467 			NET_PROT_IP;
468 		dpkg->extracts[ip_src].extract.from_hdr.field =
469 			NH_FLD_IP_SRC;
470 		dpaa2_flow_extract_key_set(key_info, ip_src, 0);
471 		key_info->ipv4_src_offset += field_size;
472 		key_info->ipv6_src_offset += field_size;
473 	}
474 	if (ip_dst >= 0) {
475 		ip_dst++;
476 		dpkg->extracts[ip_dst].type =
477 			DPKG_EXTRACT_FROM_HDR;
478 		dpkg->extracts[ip_dst].extract.from_hdr.type =
479 			DPKG_FULL_FIELD;
480 		dpkg->extracts[ip_dst].extract.from_hdr.prot =
481 			NET_PROT_IP;
482 		dpkg->extracts[ip_dst].extract.from_hdr.field =
483 			NH_FLD_IP_DST;
484 		dpaa2_flow_extract_key_set(key_info, ip_dst, 0);
485 		key_info->ipv4_dst_offset += field_size;
486 		key_info->ipv6_dst_offset += field_size;
487 	}
488 
489 	dpkg->num_extracts++;
490 
491 	return 0;
492 }
493 
494 /* Protocol discrimination.
495  * Discriminate IPv4/IPv6/VLAN by the Eth type.
496  * Discriminate UDP/TCP/ICMP by the next-protocol field of IP.
497  */
498 static inline int
499 dpaa2_flow_proto_discrimination_extract(
500 	struct dpaa2_key_extract *key_extract,
501 	enum rte_flow_item_type type)
502 {
503 	if (type == RTE_FLOW_ITEM_TYPE_ETH) {
504 		return dpaa2_flow_extract_add(
505 				key_extract, NET_PROT_ETH,
506 				NH_FLD_ETH_TYPE,
507 				sizeof(rte_be16_t));
508 	} else if (type == (enum rte_flow_item_type)
509 		DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
510 		return dpaa2_flow_extract_add(
511 				key_extract, NET_PROT_IP,
512 				NH_FLD_IP_PROTO,
513 				NH_FLD_IP_PROTO_SIZE);
514 	}
515 
516 	return -1;
517 }
518 
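/* Return the index of the extract matching (prot, field), or -1 if the
 * field is not part of the profile.
 */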
519 static inline int dpaa2_flow_extract_search(
520 	struct dpkg_profile_cfg *dpkg,
521 	enum net_prot prot, uint32_t field)
522 {
523 	int i;
524 
525 	for (i = 0; i < dpkg->num_extracts; i++) {
526 		if (dpkg->extracts[i].extract.from_hdr.prot == prot &&
527 			dpkg->extracts[i].extract.from_hdr.field == field) {
528 			return i;
529 		}
530 	}
531 
532 	return -1;
533 }
534 
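/* Return the byte offset of (prot, field) within the rule key, using the
 * dedicated IPv4/IPv6 address offsets where applicable, or -1 if the
 * field has not been extracted.
 */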
535 static inline int dpaa2_flow_extract_key_offset(
536 	struct dpaa2_key_extract *key_extract,
537 	enum net_prot prot, uint32_t field)
538 {
539 	int i;
540 	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
541 	struct dpaa2_key_info *key_info = &key_extract->key_info;
542 
543 	if (prot == NET_PROT_IPV4 ||
544 		prot == NET_PROT_IPV6)
545 		i = dpaa2_flow_extract_search(dpkg, NET_PROT_IP, field);
546 	else
547 		i = dpaa2_flow_extract_search(dpkg, prot, field);
548 
549 	if (i >= 0) {
550 		if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_SRC)
551 			return key_info->ipv4_src_offset;
552 		else if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_DST)
553 			return key_info->ipv4_dst_offset;
554 		else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_SRC)
555 			return key_info->ipv6_src_offset;
556 		else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_DST)
557 			return key_info->ipv6_dst_offset;
558 		else
559 			return key_info->key_offset[i];
560 	} else {
561 		return -1;
562 	}
563 }
564 
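/* Discrimination value for the next protocol: either an Eth type or an
 * IP protocol number.
 */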
565 struct proto_discrimination {
566 	enum rte_flow_item_type type;
567 	union {
568 		rte_be16_t eth_type;
569 		uint8_t ip_proto;
570 	};
571 };
572 
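/* Write the Eth-type or IP-protocol discrimination value, with an
 * all-ones mask, into both the QoS rule and the FS rule of the flow.
 */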
573 static int
574 dpaa2_flow_proto_discrimination_rule(
575 	struct dpaa2_dev_priv *priv, struct rte_flow *flow,
576 	struct proto_discrimination proto, int group)
577 {
578 	enum net_prot prot;
579 	uint32_t field;
580 	int offset;
581 	size_t key_iova;
582 	size_t mask_iova;
583 	rte_be16_t eth_type;
584 	uint8_t ip_proto;
585 
586 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
587 		prot = NET_PROT_ETH;
588 		field = NH_FLD_ETH_TYPE;
589 	} else if (proto.type == DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
590 		prot = NET_PROT_IP;
591 		field = NH_FLD_IP_PROTO;
592 	} else {
593 		DPAA2_PMD_ERR(
594 			"Only Eth and IP are supported to discriminate the next proto.");
595 		return -1;
596 	}
597 
598 	offset = dpaa2_flow_extract_key_offset(&priv->extract.qos_key_extract,
599 			prot, field);
600 	if (offset < 0) {
601 		DPAA2_PMD_ERR("QoS prot %d field %d extract failed",
602 				prot, field);
603 		return -1;
604 	}
605 	key_iova = flow->qos_rule.key_iova + offset;
606 	mask_iova = flow->qos_rule.mask_iova + offset;
607 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
608 		eth_type = proto.eth_type;
609 		memcpy((void *)key_iova, (const void *)(&eth_type),
610 			sizeof(rte_be16_t));
611 		eth_type = 0xffff;
612 		memcpy((void *)mask_iova, (const void *)(&eth_type),
613 			sizeof(rte_be16_t));
614 	} else {
615 		ip_proto = proto.ip_proto;
616 		memcpy((void *)key_iova, (const void *)(&ip_proto),
617 			sizeof(uint8_t));
618 		ip_proto = 0xff;
619 		memcpy((void *)mask_iova, (const void *)(&ip_proto),
620 			sizeof(uint8_t));
621 	}
622 
623 	offset = dpaa2_flow_extract_key_offset(
624 			&priv->extract.tc_key_extract[group],
625 			prot, field);
626 	if (offset < 0) {
627 		DPAA2_PMD_ERR("FS prot %d field %d extract failed",
628 				prot, field);
629 		return -1;
630 	}
631 	key_iova = flow->fs_rule.key_iova + offset;
632 	mask_iova = flow->fs_rule.mask_iova + offset;
633 
634 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
635 		eth_type = proto.eth_type;
636 		memcpy((void *)key_iova, (const void *)(&eth_type),
637 			sizeof(rte_be16_t));
638 		eth_type = 0xffff;
639 		memcpy((void *)mask_iova, (const void *)(&eth_type),
640 			sizeof(rte_be16_t));
641 	} else {
642 		ip_proto = proto.ip_proto;
643 		memcpy((void *)key_iova, (const void *)(&ip_proto),
644 			sizeof(uint8_t));
645 		ip_proto = 0xff;
646 		memcpy((void *)mask_iova, (const void *)(&ip_proto),
647 			sizeof(uint8_t));
648 	}
649 
650 	return 0;
651 }
652 
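/* Copy key and mask data of (prot, field) into the rule at the offset
 * resolved from the extract profile.
 */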
653 static inline int
654 dpaa2_flow_rule_data_set(
655 	struct dpaa2_key_extract *key_extract,
656 	struct dpni_rule_cfg *rule,
657 	enum net_prot prot, uint32_t field,
658 	const void *key, const void *mask, int size)
659 {
660 	int offset = dpaa2_flow_extract_key_offset(key_extract,
661 				prot, field);
662 
663 	if (offset < 0) {
664 		DPAA2_PMD_ERR("prot %d, field %d extract failed",
665 			prot, field);
666 		return -1;
667 	}
668 
669 	memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
670 	memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
671 
672 	return 0;
673 }
674 
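/* Move the IP SRC/DST key and mask bytes of one rule from src_offset to
 * the offset currently assigned to that field, clearing the old location.
 */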
675 static inline int
676 _dpaa2_flow_rule_move_ipaddr_tail(
677 	struct dpaa2_key_extract *key_extract,
678 	struct dpni_rule_cfg *rule, int src_offset,
679 	uint32_t field, bool ipv4)
680 {
681 	size_t key_src;
682 	size_t mask_src;
683 	size_t key_dst;
684 	size_t mask_dst;
685 	int dst_offset, len;
686 	enum net_prot prot;
687 	char tmp[NH_FLD_IPV6_ADDR_SIZE];
688 
689 	if (field != NH_FLD_IP_SRC &&
690 		field != NH_FLD_IP_DST) {
691 		DPAA2_PMD_ERR("Field for IP address reordering must be IP SRC/DST");
692 		return -1;
693 	}
694 	if (ipv4)
695 		prot = NET_PROT_IPV4;
696 	else
697 		prot = NET_PROT_IPV6;
698 	dst_offset = dpaa2_flow_extract_key_offset(key_extract,
699 				prot, field);
700 	if (dst_offset < 0) {
701 		DPAA2_PMD_ERR("Field %d reorder extract failed", field);
702 		return -1;
703 	}
704 	key_src = rule->key_iova + src_offset;
705 	mask_src = rule->mask_iova + src_offset;
706 	key_dst = rule->key_iova + dst_offset;
707 	mask_dst = rule->mask_iova + dst_offset;
708 	if (ipv4)
709 		len = sizeof(rte_be32_t);
710 	else
711 		len = NH_FLD_IPV6_ADDR_SIZE;
712 
713 	memcpy(tmp, (char *)key_src, len);
714 	memset((char *)key_src, 0, len);
715 	memcpy((char *)key_dst, tmp, len);
716 
717 	memcpy(tmp, (char *)mask_src, len);
718 	memset((char *)mask_src, 0, len);
719 	memcpy((char *)mask_dst, tmp, len);
720 
721 	return 0;
722 }
723 
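/* After new extracts have been added, move the IP SRC/DST data of both
 * the QoS and FS rules to the tail of their keys and refresh the
 * recorded offsets.
 */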
724 static inline int
725 dpaa2_flow_rule_move_ipaddr_tail(
726 	struct rte_flow *flow, struct dpaa2_dev_priv *priv,
727 	int fs_group)
728 {
729 	int ret;
730 	enum net_prot prot;
731 
732 	if (flow->ipaddr_rule.ipaddr_type == FLOW_NONE_IPADDR)
733 		return 0;
734 
735 	if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR)
736 		prot = NET_PROT_IPV4;
737 	else
738 		prot = NET_PROT_IPV6;
739 
740 	if (flow->ipaddr_rule.qos_ipsrc_offset >= 0) {
741 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
742 				&priv->extract.qos_key_extract,
743 				&flow->qos_rule,
744 				flow->ipaddr_rule.qos_ipsrc_offset,
745 				NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
746 		if (ret) {
747 			DPAA2_PMD_ERR("QoS src address reorder failed");
748 			return -1;
749 		}
750 		flow->ipaddr_rule.qos_ipsrc_offset =
751 			dpaa2_flow_extract_key_offset(
752 				&priv->extract.qos_key_extract,
753 				prot, NH_FLD_IP_SRC);
754 	}
755 
756 	if (flow->ipaddr_rule.qos_ipdst_offset >= 0) {
757 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
758 				&priv->extract.qos_key_extract,
759 				&flow->qos_rule,
760 				flow->ipaddr_rule.qos_ipdst_offset,
761 				NH_FLD_IP_DST, prot == NET_PROT_IPV4);
762 		if (ret) {
763 			DPAA2_PMD_ERR("QoS dst address reorder failed");
764 			return -1;
765 		}
766 		flow->ipaddr_rule.qos_ipdst_offset =
767 			dpaa2_flow_extract_key_offset(
768 				&priv->extract.qos_key_extract,
769 				prot, NH_FLD_IP_DST);
770 	}
771 
772 	if (flow->ipaddr_rule.fs_ipsrc_offset >= 0) {
773 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
774 				&priv->extract.tc_key_extract[fs_group],
775 				&flow->fs_rule,
776 				flow->ipaddr_rule.fs_ipsrc_offset,
777 				NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
778 		if (ret) {
779 			DPAA2_PMD_ERR("FS src address reorder failed");
780 			return -1;
781 		}
782 		flow->ipaddr_rule.fs_ipsrc_offset =
783 			dpaa2_flow_extract_key_offset(
784 				&priv->extract.tc_key_extract[fs_group],
785 				prot, NH_FLD_IP_SRC);
786 	}
787 	if (flow->ipaddr_rule.fs_ipdst_offset >= 0) {
788 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
789 				&priv->extract.tc_key_extract[fs_group],
790 				&flow->fs_rule,
791 				flow->ipaddr_rule.fs_ipdst_offset,
792 				NH_FLD_IP_DST, prot == NET_PROT_IPV4);
793 		if (ret) {
794 			DPAA2_PMD_ERR("FS dst address reorder failed");
795 			return -1;
796 		}
797 		flow->ipaddr_rule.fs_ipdst_offset =
798 			dpaa2_flow_extract_key_offset(
799 				&priv->extract.tc_key_extract[fs_group],
800 				prot, NH_FLD_IP_DST);
801 	}
802 
803 	return 0;
804 }
805 
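/* Check that every bit set in mask_src is covered by the mask supported
 * for this item type; return 0 on success, -1 otherwise.
 */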
806 static int
807 dpaa2_flow_extract_support(
808 	const uint8_t *mask_src,
809 	enum rte_flow_item_type type)
810 {
811 	char mask[64];
812 	int i, size = 0;
813 	const char *mask_support = 0;
814 
815 	switch (type) {
816 	case RTE_FLOW_ITEM_TYPE_ETH:
817 		mask_support = (const char *)&dpaa2_flow_item_eth_mask;
818 		size = sizeof(struct rte_flow_item_eth);
819 		break;
820 	case RTE_FLOW_ITEM_TYPE_VLAN:
821 		mask_support = (const char *)&dpaa2_flow_item_vlan_mask;
822 		size = sizeof(struct rte_flow_item_vlan);
823 		break;
824 	case RTE_FLOW_ITEM_TYPE_IPV4:
825 		mask_support = (const char *)&dpaa2_flow_item_ipv4_mask;
826 		size = sizeof(struct rte_flow_item_ipv4);
827 		break;
828 	case RTE_FLOW_ITEM_TYPE_IPV6:
829 		mask_support = (const char *)&dpaa2_flow_item_ipv6_mask;
830 		size = sizeof(struct rte_flow_item_ipv6);
831 		break;
832 	case RTE_FLOW_ITEM_TYPE_ICMP:
833 		mask_support = (const char *)&dpaa2_flow_item_icmp_mask;
834 		size = sizeof(struct rte_flow_item_icmp);
835 		break;
836 	case RTE_FLOW_ITEM_TYPE_UDP:
837 		mask_support = (const char *)&dpaa2_flow_item_udp_mask;
838 		size = sizeof(struct rte_flow_item_udp);
839 		break;
840 	case RTE_FLOW_ITEM_TYPE_TCP:
841 		mask_support = (const char *)&dpaa2_flow_item_tcp_mask;
842 		size = sizeof(struct rte_flow_item_tcp);
843 		break;
844 	case RTE_FLOW_ITEM_TYPE_SCTP:
845 		mask_support = (const char *)&dpaa2_flow_item_sctp_mask;
846 		size = sizeof(struct rte_flow_item_sctp);
847 		break;
848 	case RTE_FLOW_ITEM_TYPE_GRE:
849 		mask_support = (const char *)&dpaa2_flow_item_gre_mask;
850 		size = sizeof(struct rte_flow_item_gre);
851 		break;
852 	default:
853 		return -1;
854 	}
855 
856 	memcpy(mask, mask_support, size);
857 
858 	for (i = 0; i < size; i++)
859 		mask[i] = (mask[i] | mask_src[i]);
860 
861 	if (memcmp(mask, mask_support, size))
862 		return -1;
863 
864 	return 0;
865 }
866 
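/* Build QoS and FS rule entries for an ETH pattern item: source MAC,
 * destination MAC and Eth type, adding the needed extracts on demand.
 */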
867 static int
868 dpaa2_configure_flow_eth(struct rte_flow *flow,
869 			 struct rte_eth_dev *dev,
870 			 const struct rte_flow_attr *attr,
871 			 const struct rte_flow_item *pattern,
872 			 const struct rte_flow_action actions[] __rte_unused,
873 			 struct rte_flow_error *error __rte_unused,
874 			 int *device_configured)
875 {
876 	int index, ret;
877 	int local_cfg = 0;
878 	uint32_t group;
879 	const struct rte_flow_item_eth *spec, *mask;
880 
881 	/* TODO: The upper bound of the range parameter is currently not implemented */
882 	const struct rte_flow_item_eth *last __rte_unused;
883 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
884 	const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0};
885 
886 	group = attr->group;
887 
888 	/* Parse pattern list to get the matching parameters */
889 	spec    = (const struct rte_flow_item_eth *)pattern->spec;
890 	last    = (const struct rte_flow_item_eth *)pattern->last;
891 	mask    = (const struct rte_flow_item_eth *)
892 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_eth_mask);
893 	if (!spec) {
894 		/* No specific field of the eth header matters here;
895 		 * only the eth protocol itself is of interest.
896 		 */
897 		DPAA2_PMD_WARN("No pattern spec for Eth flow, skipping");
898 		return 0;
899 	}
900 
901 	/* Get traffic class index and flow id to be configured */
902 	flow->tc_id = group;
903 	flow->tc_index = attr->priority;
904 
905 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
906 		RTE_FLOW_ITEM_TYPE_ETH)) {
907 		DPAA2_PMD_WARN("Extract field(s) of Ethernet not supported.");
908 
909 		return -1;
910 	}
911 
912 	if (memcmp((const char *)&mask->src, zero_cmp, RTE_ETHER_ADDR_LEN)) {
913 		index = dpaa2_flow_extract_search(
914 				&priv->extract.qos_key_extract.dpkg,
915 				NET_PROT_ETH, NH_FLD_ETH_SA);
916 		if (index < 0) {
917 			ret = dpaa2_flow_extract_add(
918 					&priv->extract.qos_key_extract,
919 					NET_PROT_ETH, NH_FLD_ETH_SA,
920 					RTE_ETHER_ADDR_LEN);
921 			if (ret) {
922 				DPAA2_PMD_ERR("QoS Extract add ETH_SA failed.");
923 
924 				return -1;
925 			}
926 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
927 		}
928 		index = dpaa2_flow_extract_search(
929 				&priv->extract.tc_key_extract[group].dpkg,
930 				NET_PROT_ETH, NH_FLD_ETH_SA);
931 		if (index < 0) {
932 			ret = dpaa2_flow_extract_add(
933 					&priv->extract.tc_key_extract[group],
934 					NET_PROT_ETH, NH_FLD_ETH_SA,
935 					RTE_ETHER_ADDR_LEN);
936 			if (ret) {
937 				DPAA2_PMD_ERR("FS Extract add ETH_SA failed.");
938 				return -1;
939 			}
940 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
941 		}
942 
943 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
944 		if (ret) {
945 			DPAA2_PMD_ERR(
946 				"Move ipaddr before ETH_SA rule set failed");
947 			return -1;
948 		}
949 
950 		ret = dpaa2_flow_rule_data_set(
951 				&priv->extract.qos_key_extract,
952 				&flow->qos_rule,
953 				NET_PROT_ETH,
954 				NH_FLD_ETH_SA,
955 				&spec->src.addr_bytes,
956 				&mask->src.addr_bytes,
957 				sizeof(struct rte_ether_addr));
958 		if (ret) {
959 			DPAA2_PMD_ERR("QoS NH_FLD_ETH_SA rule data set failed");
960 			return -1;
961 		}
962 
963 		ret = dpaa2_flow_rule_data_set(
964 				&priv->extract.tc_key_extract[group],
965 				&flow->fs_rule,
966 				NET_PROT_ETH,
967 				NH_FLD_ETH_SA,
968 				&spec->src.addr_bytes,
969 				&mask->src.addr_bytes,
970 				sizeof(struct rte_ether_addr));
971 		if (ret) {
972 			DPAA2_PMD_ERR("FS NH_FLD_ETH_SA rule data set failed");
973 			return -1;
974 		}
975 	}
976 
977 	if (memcmp((const char *)&mask->dst, zero_cmp, RTE_ETHER_ADDR_LEN)) {
978 		index = dpaa2_flow_extract_search(
979 				&priv->extract.qos_key_extract.dpkg,
980 				NET_PROT_ETH, NH_FLD_ETH_DA);
981 		if (index < 0) {
982 			ret = dpaa2_flow_extract_add(
983 					&priv->extract.qos_key_extract,
984 					NET_PROT_ETH, NH_FLD_ETH_DA,
985 					RTE_ETHER_ADDR_LEN);
986 			if (ret) {
987 				DPAA2_PMD_ERR("QoS Extract add ETH_DA failed.");
988 
989 				return -1;
990 			}
991 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
992 		}
993 
994 		index = dpaa2_flow_extract_search(
995 				&priv->extract.tc_key_extract[group].dpkg,
996 				NET_PROT_ETH, NH_FLD_ETH_DA);
997 		if (index < 0) {
998 			ret = dpaa2_flow_extract_add(
999 					&priv->extract.tc_key_extract[group],
1000 					NET_PROT_ETH, NH_FLD_ETH_DA,
1001 					RTE_ETHER_ADDR_LEN);
1002 			if (ret) {
1003 				DPAA2_PMD_ERR("FS Extract add ETH_DA failed.");
1004 
1005 				return -1;
1006 			}
1007 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1008 		}
1009 
1010 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1011 		if (ret) {
1012 			DPAA2_PMD_ERR(
1013 				"Move ipaddr before ETH DA rule set failed");
1014 			return -1;
1015 		}
1016 
1017 		ret = dpaa2_flow_rule_data_set(
1018 				&priv->extract.qos_key_extract,
1019 				&flow->qos_rule,
1020 				NET_PROT_ETH,
1021 				NH_FLD_ETH_DA,
1022 				&spec->dst.addr_bytes,
1023 				&mask->dst.addr_bytes,
1024 				sizeof(struct rte_ether_addr));
1025 		if (ret) {
1026 			DPAA2_PMD_ERR("QoS NH_FLD_ETH_DA rule data set failed");
1027 			return -1;
1028 		}
1029 
1030 		ret = dpaa2_flow_rule_data_set(
1031 				&priv->extract.tc_key_extract[group],
1032 				&flow->fs_rule,
1033 				NET_PROT_ETH,
1034 				NH_FLD_ETH_DA,
1035 				&spec->dst.addr_bytes,
1036 				&mask->dst.addr_bytes,
1037 				sizeof(struct rte_ether_addr));
1038 		if (ret) {
1039 			DPAA2_PMD_ERR("FS NH_FLD_ETH_DA rule data set failed");
1040 			return -1;
1041 		}
1042 	}
1043 
1044 	if (memcmp((const char *)&mask->type, zero_cmp, sizeof(rte_be16_t))) {
1045 		index = dpaa2_flow_extract_search(
1046 				&priv->extract.qos_key_extract.dpkg,
1047 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1048 		if (index < 0) {
1049 			ret = dpaa2_flow_extract_add(
1050 					&priv->extract.qos_key_extract,
1051 					NET_PROT_ETH, NH_FLD_ETH_TYPE,
1052 					RTE_ETHER_TYPE_LEN);
1053 			if (ret) {
1054 				DPAA2_PMD_ERR("QoS Extract add ETH_TYPE failed.");
1055 
1056 				return -1;
1057 			}
1058 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1059 		}
1060 		index = dpaa2_flow_extract_search(
1061 				&priv->extract.tc_key_extract[group].dpkg,
1062 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1063 		if (index < 0) {
1064 			ret = dpaa2_flow_extract_add(
1065 					&priv->extract.tc_key_extract[group],
1066 					NET_PROT_ETH, NH_FLD_ETH_TYPE,
1067 					RTE_ETHER_TYPE_LEN);
1068 			if (ret) {
1069 				DPAA2_PMD_ERR("FS Extract add ETH_TYPE failed.");
1070 
1071 				return -1;
1072 			}
1073 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1074 		}
1075 
1076 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1077 		if (ret) {
1078 			DPAA2_PMD_ERR(
1079 				"Move ipaddr before ETH TYPE rule set failed");
1080 			return -1;
1081 		}
1082 
1083 		ret = dpaa2_flow_rule_data_set(
1084 				&priv->extract.qos_key_extract,
1085 				&flow->qos_rule,
1086 				NET_PROT_ETH,
1087 				NH_FLD_ETH_TYPE,
1088 				&spec->type,
1089 				&mask->type,
1090 				sizeof(rte_be16_t));
1091 		if (ret) {
1092 			DPAA2_PMD_ERR("QoS NH_FLD_ETH_TYPE rule data set failed");
1093 			return -1;
1094 		}
1095 
1096 		ret = dpaa2_flow_rule_data_set(
1097 				&priv->extract.tc_key_extract[group],
1098 				&flow->fs_rule,
1099 				NET_PROT_ETH,
1100 				NH_FLD_ETH_TYPE,
1101 				&spec->type,
1102 				&mask->type,
1103 				sizeof(rte_be16_t));
1104 		if (ret) {
1105 			DPAA2_PMD_ERR("FS NH_FLD_ETH_TYPE rule data set failed");
1106 			return -1;
1107 		}
1108 	}
1109 
1110 	(*device_configured) |= local_cfg;
1111 
1112 	return 0;
1113 }
1114 
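/* Build QoS and FS rule entries for a VLAN pattern item. Without a spec,
 * only the VLAN Eth type is matched; with a spec, the TCI field is matched.
 */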
1115 static int
1116 dpaa2_configure_flow_vlan(struct rte_flow *flow,
1117 			  struct rte_eth_dev *dev,
1118 			  const struct rte_flow_attr *attr,
1119 			  const struct rte_flow_item *pattern,
1120 			  const struct rte_flow_action actions[] __rte_unused,
1121 			  struct rte_flow_error *error __rte_unused,
1122 			  int *device_configured)
1123 {
1124 	int index, ret;
1125 	int local_cfg = 0;
1126 	uint32_t group;
1127 	const struct rte_flow_item_vlan *spec, *mask;
1128 
1129 	const struct rte_flow_item_vlan *last __rte_unused;
1130 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1131 
1132 	group = attr->group;
1133 
1134 	/* Parse pattern list to get the matching parameters */
1135 	spec    = (const struct rte_flow_item_vlan *)pattern->spec;
1136 	last    = (const struct rte_flow_item_vlan *)pattern->last;
1137 	mask    = (const struct rte_flow_item_vlan *)
1138 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_vlan_mask);
1139 
1140 	/* Get traffic class index and flow id to be configured */
1141 	flow->tc_id = group;
1142 	flow->tc_index = attr->priority;
1143 
1144 	if (!spec) {
1145 		/* No specific field of the VLAN header matters here;
1146 		 * only the VLAN protocol itself is of interest.
1147 		 */
1148 		/* The Eth type is actually used for VLAN classification.
1149 		 */
1150 		struct proto_discrimination proto;
1151 
1152 		index = dpaa2_flow_extract_search(
1153 				&priv->extract.qos_key_extract.dpkg,
1154 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1155 		if (index < 0) {
1156 			ret = dpaa2_flow_proto_discrimination_extract(
1157 						&priv->extract.qos_key_extract,
1158 						RTE_FLOW_ITEM_TYPE_ETH);
1159 			if (ret) {
1160 				DPAA2_PMD_ERR(
1161 				"QoS Ext ETH_TYPE to discriminate vLan failed");
1162 				"QoS Extract ETH_TYPE to discriminate VLAN failed");
1163 				return -1;
1164 			}
1165 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1166 		}
1167 
1168 		index = dpaa2_flow_extract_search(
1169 				&priv->extract.tc_key_extract[group].dpkg,
1170 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1171 		if (index < 0) {
1172 			ret = dpaa2_flow_proto_discrimination_extract(
1173 					&priv->extract.tc_key_extract[group],
1174 					RTE_FLOW_ITEM_TYPE_ETH);
1175 			if (ret) {
1176 				DPAA2_PMD_ERR(
1177 				"FS Extract ETH_TYPE to discriminate VLAN failed.");
1178 
1179 				return -1;
1180 			}
1181 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1182 		}
1183 
1184 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1185 		if (ret) {
1186 			DPAA2_PMD_ERR(
1187 			"Move ipaddr before VLAN discrimination set failed");
1188 			return -1;
1189 		}
1190 
1191 		proto.type = RTE_FLOW_ITEM_TYPE_ETH;
1192 		proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
1193 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1194 							proto, group);
1195 		if (ret) {
1196 			DPAA2_PMD_ERR("VLAN discrimination rule set failed");
1197 			return -1;
1198 		}
1199 
1200 		(*device_configured) |= local_cfg;
1201 
1202 		return 0;
1203 	}
1204 
1205 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
1206 		RTE_FLOW_ITEM_TYPE_VLAN)) {
1207 		DPAA2_PMD_WARN("Extract field(s) of VLAN not supported.");
1208 
1209 		return -1;
1210 	}
1211 
1212 	if (!mask->tci)
1213 		return 0;
1214 
1215 	index = dpaa2_flow_extract_search(
1216 				&priv->extract.qos_key_extract.dpkg,
1217 				NET_PROT_VLAN, NH_FLD_VLAN_TCI);
1218 	if (index < 0) {
1219 		ret = dpaa2_flow_extract_add(
1220 						&priv->extract.qos_key_extract,
1221 						NET_PROT_VLAN,
1222 						NH_FLD_VLAN_TCI,
1223 						sizeof(rte_be16_t));
1224 		if (ret) {
1225 			DPAA2_PMD_ERR("QoS Extract add VLAN_TCI failed.");
1226 
1227 			return -1;
1228 		}
1229 		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1230 	}
1231 
1232 	index = dpaa2_flow_extract_search(
1233 			&priv->extract.tc_key_extract[group].dpkg,
1234 			NET_PROT_VLAN, NH_FLD_VLAN_TCI);
1235 	if (index < 0) {
1236 		ret = dpaa2_flow_extract_add(
1237 				&priv->extract.tc_key_extract[group],
1238 				NET_PROT_VLAN,
1239 				NH_FLD_VLAN_TCI,
1240 				sizeof(rte_be16_t));
1241 		if (ret) {
1242 			DPAA2_PMD_ERR("FS Extract add VLAN_TCI failed.");
1243 
1244 			return -1;
1245 		}
1246 		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1247 	}
1248 
1249 	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1250 	if (ret) {
1251 		DPAA2_PMD_ERR(
1252 			"Move ipaddr before VLAN TCI rule set failed");
1253 		return -1;
1254 	}
1255 
1256 	ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
1257 				&flow->qos_rule,
1258 				NET_PROT_VLAN,
1259 				NH_FLD_VLAN_TCI,
1260 				&spec->tci,
1261 				&mask->tci,
1262 				sizeof(rte_be16_t));
1263 	if (ret) {
1264 		DPAA2_PMD_ERR("QoS NH_FLD_VLAN_TCI rule data set failed");
1265 		return -1;
1266 	}
1267 
1268 	ret = dpaa2_flow_rule_data_set(
1269 			&priv->extract.tc_key_extract[group],
1270 			&flow->fs_rule,
1271 			NET_PROT_VLAN,
1272 			NH_FLD_VLAN_TCI,
1273 			&spec->tci,
1274 			&mask->tci,
1275 			sizeof(rte_be16_t));
1276 	if (ret) {
1277 		DPAA2_PMD_ERR("FS NH_FLD_VLAN_TCI rule data set failed");
1278 		return -1;
1279 	}
1280 
1281 	(*device_configured) |= local_cfg;
1282 
1283 	return 0;
1284 }
1285 
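/* Add the Eth-type extract when missing and set the IPv4/IPv6 Eth-type
 * discrimination value in both the QoS and FS rules.
 */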
1286 static int
1287 dpaa2_configure_flow_ip_discrimation(
1288 	struct dpaa2_dev_priv *priv, struct rte_flow *flow,
1289 	const struct rte_flow_item *pattern,
1290 	int *local_cfg,	int *device_configured,
1291 	uint32_t group)
1292 {
1293 	int index, ret;
1294 	struct proto_discrimination proto;
1295 
1296 	index = dpaa2_flow_extract_search(
1297 			&priv->extract.qos_key_extract.dpkg,
1298 			NET_PROT_ETH, NH_FLD_ETH_TYPE);
1299 	if (index < 0) {
1300 		ret = dpaa2_flow_proto_discrimination_extract(
1301 				&priv->extract.qos_key_extract,
1302 				RTE_FLOW_ITEM_TYPE_ETH);
1303 		if (ret) {
1304 			DPAA2_PMD_ERR(
1305 			"QoS Extract ETH_TYPE to discriminate IP failed.");
1306 			return -1;
1307 		}
1308 		(*local_cfg) |= DPAA2_QOS_TABLE_RECONFIGURE;
1309 	}
1310 
1311 	index = dpaa2_flow_extract_search(
1312 			&priv->extract.tc_key_extract[group].dpkg,
1313 			NET_PROT_ETH, NH_FLD_ETH_TYPE);
1314 	if (index < 0) {
1315 		ret = dpaa2_flow_proto_discrimination_extract(
1316 				&priv->extract.tc_key_extract[group],
1317 				RTE_FLOW_ITEM_TYPE_ETH);
1318 		if (ret) {
1319 			DPAA2_PMD_ERR(
1320 			"FS Extract ETH_TYPE to discriminate IP failed.");
1321 			return -1;
1322 		}
1323 		(*local_cfg) |= DPAA2_FS_TABLE_RECONFIGURE;
1324 	}
1325 
1326 	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1327 	if (ret) {
1328 		DPAA2_PMD_ERR(
1329 			"Move ipaddr before IP discrimination set failed");
1330 		return -1;
1331 	}
1332 
1333 	proto.type = RTE_FLOW_ITEM_TYPE_ETH;
1334 	if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4)
1335 		proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
1336 	else
1337 		proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
1338 	ret = dpaa2_flow_proto_discrimination_rule(priv, flow, proto, group);
1339 	if (ret) {
1340 		DPAA2_PMD_ERR("IP discrimination rule set failed");
1341 		return -1;
1342 	}
1343 
1344 	(*device_configured) |= (*local_cfg);
1345 
1346 	return 0;
1347 }
1348 
1349 
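/* Build QoS and FS rule entries for an IPv4/IPv6 pattern item: Eth-type
 * discrimination plus, when a spec is given, source address, destination
 * address and next-protocol fields.
 */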
1350 static int
1351 dpaa2_configure_flow_generic_ip(
1352 	struct rte_flow *flow,
1353 	struct rte_eth_dev *dev,
1354 	const struct rte_flow_attr *attr,
1355 	const struct rte_flow_item *pattern,
1356 	const struct rte_flow_action actions[] __rte_unused,
1357 	struct rte_flow_error *error __rte_unused,
1358 	int *device_configured)
1359 {
1360 	int index, ret;
1361 	int local_cfg = 0;
1362 	uint32_t group;
1363 	const struct rte_flow_item_ipv4 *spec_ipv4 = 0,
1364 		*mask_ipv4 = 0;
1365 	const struct rte_flow_item_ipv6 *spec_ipv6 = 0,
1366 		*mask_ipv6 = 0;
1367 	const void *key, *mask;
1368 	enum net_prot prot;
1369 
1370 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1371 	const char zero_cmp[NH_FLD_IPV6_ADDR_SIZE] = {0};
1372 	int size;
1373 
1374 	group = attr->group;
1375 
1376 	/* Parse pattern list to get the matching parameters */
1377 	if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1378 		spec_ipv4 = (const struct rte_flow_item_ipv4 *)pattern->spec;
1379 		mask_ipv4 = (const struct rte_flow_item_ipv4 *)
1380 			(pattern->mask ? pattern->mask :
1381 					&dpaa2_flow_item_ipv4_mask);
1382 	} else {
1383 		spec_ipv6 = (const struct rte_flow_item_ipv6 *)pattern->spec;
1384 		mask_ipv6 = (const struct rte_flow_item_ipv6 *)
1385 			(pattern->mask ? pattern->mask :
1386 					&dpaa2_flow_item_ipv6_mask);
1387 	}
1388 
1389 	/* Get traffic class index and flow id to be configured */
1390 	flow->tc_id = group;
1391 	flow->tc_index = attr->priority;
1392 
1393 	ret = dpaa2_configure_flow_ip_discrimation(priv,
1394 			flow, pattern, &local_cfg,
1395 			device_configured, group);
1396 	if (ret) {
1397 		DPAA2_PMD_ERR("IP discrimination failed!");
1398 		return -1;
1399 	}
1400 
1401 	if (!spec_ipv4 && !spec_ipv6)
1402 		return 0;
1403 
1404 	if (mask_ipv4) {
1405 		if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv4,
1406 			RTE_FLOW_ITEM_TYPE_IPV4)) {
1407 			DPAA2_PMD_WARN("Extract field(s) of IPv4 not supported.");
1408 
1409 			return -1;
1410 		}
1411 	}
1412 
1413 	if (mask_ipv6) {
1414 		if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv6,
1415 			RTE_FLOW_ITEM_TYPE_IPV6)) {
1416 			DPAA2_PMD_WARN("Extract field(s) of IPv6 not supported.");
1417 
1418 			return -1;
1419 		}
1420 	}
1421 
1422 	if (mask_ipv4 && (mask_ipv4->hdr.src_addr ||
1423 		mask_ipv4->hdr.dst_addr)) {
1424 		flow->ipaddr_rule.ipaddr_type = FLOW_IPV4_ADDR;
1425 	} else if (mask_ipv6 &&
1426 		(memcmp((const char *)mask_ipv6->hdr.src_addr,
1427 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE) ||
1428 		memcmp((const char *)mask_ipv6->hdr.dst_addr,
1429 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1430 		flow->ipaddr_rule.ipaddr_type = FLOW_IPV6_ADDR;
1431 	}
1432 
1433 	if ((mask_ipv4 && mask_ipv4->hdr.src_addr) ||
1434 		(mask_ipv6 &&
1435 			memcmp((const char *)mask_ipv6->hdr.src_addr,
1436 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1437 		index = dpaa2_flow_extract_search(
1438 				&priv->extract.qos_key_extract.dpkg,
1439 				NET_PROT_IP, NH_FLD_IP_SRC);
1440 		if (index < 0) {
1441 			ret = dpaa2_flow_extract_add(
1442 					&priv->extract.qos_key_extract,
1443 					NET_PROT_IP,
1444 					NH_FLD_IP_SRC,
1445 					0);
1446 			if (ret) {
1447 				DPAA2_PMD_ERR("QoS Extract add IP_SRC failed.");
1448 
1449 				return -1;
1450 			}
1451 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1452 		}
1453 
1454 		index = dpaa2_flow_extract_search(
1455 				&priv->extract.tc_key_extract[group].dpkg,
1456 				NET_PROT_IP, NH_FLD_IP_SRC);
1457 		if (index < 0) {
1458 			ret = dpaa2_flow_extract_add(
1459 					&priv->extract.tc_key_extract[group],
1460 					NET_PROT_IP,
1461 					NH_FLD_IP_SRC,
1462 					0);
1463 			if (ret) {
1464 				DPAA2_PMD_ERR("FS Extract add IP_SRC failed.");
1465 
1466 				return -1;
1467 			}
1468 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1469 		}
1470 
1471 		if (spec_ipv4)
1472 			key = &spec_ipv4->hdr.src_addr;
1473 		else
1474 			key = &spec_ipv6->hdr.src_addr[0];
1475 		if (mask_ipv4) {
1476 			mask = &mask_ipv4->hdr.src_addr;
1477 			size = NH_FLD_IPV4_ADDR_SIZE;
1478 			prot = NET_PROT_IPV4;
1479 		} else {
1480 			mask = &mask_ipv6->hdr.src_addr[0];
1481 			size = NH_FLD_IPV6_ADDR_SIZE;
1482 			prot = NET_PROT_IPV6;
1483 		}
1484 
1485 		ret = dpaa2_flow_rule_data_set(
1486 				&priv->extract.qos_key_extract,
1487 				&flow->qos_rule,
1488 				prot, NH_FLD_IP_SRC,
1489 				key,	mask, size);
1490 		if (ret) {
1491 			DPAA2_PMD_ERR("QoS NH_FLD_IP_SRC rule data set failed");
1492 			return -1;
1493 		}
1494 
1495 		ret = dpaa2_flow_rule_data_set(
1496 				&priv->extract.tc_key_extract[group],
1497 				&flow->fs_rule,
1498 				prot, NH_FLD_IP_SRC,
1499 				key,	mask, size);
1500 		if (ret) {
1501 			DPAA2_PMD_ERR("FS NH_FLD_IP_SRC rule data set failed");
1502 			return -1;
1503 		}
1504 
1505 		flow->ipaddr_rule.qos_ipsrc_offset =
1506 			dpaa2_flow_extract_key_offset(
1507 				&priv->extract.qos_key_extract,
1508 				prot, NH_FLD_IP_SRC);
1509 		flow->ipaddr_rule.fs_ipsrc_offset =
1510 			dpaa2_flow_extract_key_offset(
1511 				&priv->extract.tc_key_extract[group],
1512 				prot, NH_FLD_IP_SRC);
1513 	}
1514 
1515 	if ((mask_ipv4 && mask_ipv4->hdr.dst_addr) ||
1516 		(mask_ipv6 &&
1517 			memcmp((const char *)mask_ipv6->hdr.dst_addr,
1518 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1519 		index = dpaa2_flow_extract_search(
1520 				&priv->extract.qos_key_extract.dpkg,
1521 				NET_PROT_IP, NH_FLD_IP_DST);
1522 		if (index < 0) {
1523 			if (mask_ipv4)
1524 				size = NH_FLD_IPV4_ADDR_SIZE;
1525 			else
1526 				size = NH_FLD_IPV6_ADDR_SIZE;
1527 			ret = dpaa2_flow_extract_add(
1528 					&priv->extract.qos_key_extract,
1529 					NET_PROT_IP,
1530 					NH_FLD_IP_DST,
1531 					size);
1532 			if (ret) {
1533 				DPAA2_PMD_ERR("QoS Extract add IP_DST failed.");
1534 
1535 				return -1;
1536 			}
1537 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1538 		}
1539 
1540 		index = dpaa2_flow_extract_search(
1541 				&priv->extract.tc_key_extract[group].dpkg,
1542 				NET_PROT_IP, NH_FLD_IP_DST);
1543 		if (index < 0) {
1544 			if (mask_ipv4)
1545 				size = NH_FLD_IPV4_ADDR_SIZE;
1546 			else
1547 				size = NH_FLD_IPV6_ADDR_SIZE;
1548 			ret = dpaa2_flow_extract_add(
1549 					&priv->extract.tc_key_extract[group],
1550 					NET_PROT_IP,
1551 					NH_FLD_IP_DST,
1552 					size);
1553 			if (ret) {
1554 				DPAA2_PMD_ERR("FS Extract add IP_DST failed.");
1555 
1556 				return -1;
1557 			}
1558 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1559 		}
1560 
1561 		if (spec_ipv4)
1562 			key = &spec_ipv4->hdr.dst_addr;
1563 		else
1564 			key = spec_ipv6->hdr.dst_addr;
1565 		if (mask_ipv4) {
1566 			mask = &mask_ipv4->hdr.dst_addr;
1567 			size = NH_FLD_IPV4_ADDR_SIZE;
1568 			prot = NET_PROT_IPV4;
1569 		} else {
1570 			mask = &mask_ipv6->hdr.dst_addr[0];
1571 			size = NH_FLD_IPV6_ADDR_SIZE;
1572 			prot = NET_PROT_IPV6;
1573 		}
1574 
1575 		ret = dpaa2_flow_rule_data_set(
1576 				&priv->extract.qos_key_extract,
1577 				&flow->qos_rule,
1578 				prot, NH_FLD_IP_DST,
1579 				key,	mask, size);
1580 		if (ret) {
1581 			DPAA2_PMD_ERR("QoS NH_FLD_IP_DST rule data set failed");
1582 			return -1;
1583 		}
1584 
1585 		ret = dpaa2_flow_rule_data_set(
1586 				&priv->extract.tc_key_extract[group],
1587 				&flow->fs_rule,
1588 				prot, NH_FLD_IP_DST,
1589 				key,	mask, size);
1590 		if (ret) {
1591 			DPAA2_PMD_ERR("FS NH_FLD_IP_DST rule data set failed");
1592 			return -1;
1593 		}
1594 		flow->ipaddr_rule.qos_ipdst_offset =
1595 			dpaa2_flow_extract_key_offset(
1596 				&priv->extract.qos_key_extract,
1597 				prot, NH_FLD_IP_DST);
1598 		flow->ipaddr_rule.fs_ipdst_offset =
1599 			dpaa2_flow_extract_key_offset(
1600 				&priv->extract.tc_key_extract[group],
1601 				prot, NH_FLD_IP_DST);
1602 	}
1603 
1604 	if ((mask_ipv4 && mask_ipv4->hdr.next_proto_id) ||
1605 		(mask_ipv6 && mask_ipv6->hdr.proto)) {
1606 		index = dpaa2_flow_extract_search(
1607 				&priv->extract.qos_key_extract.dpkg,
1608 				NET_PROT_IP, NH_FLD_IP_PROTO);
1609 		if (index < 0) {
1610 			ret = dpaa2_flow_extract_add(
1611 				&priv->extract.qos_key_extract,
1612 				NET_PROT_IP,
1613 				NH_FLD_IP_PROTO,
1614 				NH_FLD_IP_PROTO_SIZE);
1615 			if (ret) {
1616 				DPAA2_PMD_ERR("QoS Extract add IP_PROTO failed.");
1617 
1618 				return -1;
1619 			}
1620 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1621 		}
1622 
1623 		index = dpaa2_flow_extract_search(
1624 				&priv->extract.tc_key_extract[group].dpkg,
1625 				NET_PROT_IP, NH_FLD_IP_PROTO);
1626 		if (index < 0) {
1627 			ret = dpaa2_flow_extract_add(
1628 					&priv->extract.tc_key_extract[group],
1629 					NET_PROT_IP,
1630 					NH_FLD_IP_PROTO,
1631 					NH_FLD_IP_PROTO_SIZE);
1632 			if (ret) {
1633 				DPAA2_PMD_ERR("FS Extract add IP_PROTO failed.");
1634 
1635 				return -1;
1636 			}
1637 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1638 		}
1639 
1640 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1641 		if (ret) {
1642 			DPAA2_PMD_ERR(
1643 				"Move ipaddr before NH_FLD_IP_PROTO rule set failed");
1644 			return -1;
1645 		}
1646 
1647 		if (spec_ipv4)
1648 			key = &spec_ipv4->hdr.next_proto_id;
1649 		else
1650 			key = &spec_ipv6->hdr.proto;
1651 		if (mask_ipv4)
1652 			mask = &mask_ipv4->hdr.next_proto_id;
1653 		else
1654 			mask = &mask_ipv6->hdr.proto;
1655 
1656 		ret = dpaa2_flow_rule_data_set(
1657 				&priv->extract.qos_key_extract,
1658 				&flow->qos_rule,
1659 				NET_PROT_IP,
1660 				NH_FLD_IP_PROTO,
1661 				key,	mask, NH_FLD_IP_PROTO_SIZE);
1662 		if (ret) {
1663 			DPAA2_PMD_ERR("QoS NH_FLD_IP_PROTO rule data set failed");
1664 			return -1;
1665 		}
1666 
1667 		ret = dpaa2_flow_rule_data_set(
1668 				&priv->extract.tc_key_extract[group],
1669 				&flow->fs_rule,
1670 				NET_PROT_IP,
1671 				NH_FLD_IP_PROTO,
1672 				key,	mask, NH_FLD_IP_PROTO_SIZE);
1673 		if (ret) {
1674 			DPAA2_PMD_ERR("FS NH_FLD_IP_PROTO rule data set failed");
1675 			return -1;
1676 		}
1677 	}
1678 
1679 	(*device_configured) |= local_cfg;
1680 
1681 	return 0;
1682 }
1683 
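/* Build QoS and FS rule entries for an ICMP pattern item. Without a spec,
 * only discriminate by the IP protocol; otherwise match ICMP type and code.
 */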
1684 static int
1685 dpaa2_configure_flow_icmp(struct rte_flow *flow,
1686 			  struct rte_eth_dev *dev,
1687 			  const struct rte_flow_attr *attr,
1688 			  const struct rte_flow_item *pattern,
1689 			  const struct rte_flow_action actions[] __rte_unused,
1690 			  struct rte_flow_error *error __rte_unused,
1691 			  int *device_configured)
1692 {
1693 	int index, ret;
1694 	int local_cfg = 0;
1695 	uint32_t group;
1696 	const struct rte_flow_item_icmp *spec, *mask;
1697 
1698 	const struct rte_flow_item_icmp *last __rte_unused;
1699 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1700 
1701 	group = attr->group;
1702 
1703 	/* Parse pattern list to get the matching parameters */
1704 	spec    = (const struct rte_flow_item_icmp *)pattern->spec;
1705 	last    = (const struct rte_flow_item_icmp *)pattern->last;
1706 	mask    = (const struct rte_flow_item_icmp *)
1707 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_icmp_mask);
1708 
1709 	/* Get traffic class index and flow id to be configured */
1710 	flow->tc_id = group;
1711 	flow->tc_index = attr->priority;
1712 
1713 	if (!spec) {
1714 		/* No specific field of the ICMP header matters here;
1715 		 * only the ICMP protocol itself is of interest.
1716 		 * Example: flow create 0 ingress pattern icmp /
1717 		 */
1718 		/* The next-protocol field of the generic IP header is
1719 		 * actually used for ICMP identification.
1720 		 */
1721 		struct proto_discrimination proto;
1722 
1723 		index = dpaa2_flow_extract_search(
1724 				&priv->extract.qos_key_extract.dpkg,
1725 				NET_PROT_IP, NH_FLD_IP_PROTO);
1726 		if (index < 0) {
1727 			ret = dpaa2_flow_proto_discrimination_extract(
1728 					&priv->extract.qos_key_extract,
1729 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1730 			if (ret) {
1731 				DPAA2_PMD_ERR(
1732 					"QoS Extract IP protocol to discriminate ICMP failed.");
1733 
1734 				return -1;
1735 			}
1736 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1737 		}
1738 
1739 		index = dpaa2_flow_extract_search(
1740 				&priv->extract.tc_key_extract[group].dpkg,
1741 				NET_PROT_IP, NH_FLD_IP_PROTO);
1742 		if (index < 0) {
1743 			ret = dpaa2_flow_proto_discrimination_extract(
1744 					&priv->extract.tc_key_extract[group],
1745 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1746 			if (ret) {
1747 				DPAA2_PMD_ERR(
1748 					"FS Extract IP protocol to discriminate ICMP failed.");
1749 
1750 				return -1;
1751 			}
1752 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1753 		}
1754 
1755 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1756 		if (ret) {
1757 			DPAA2_PMD_ERR(
1758 				"Move IP addr before ICMP discrimination set failed");
1759 			return -1;
1760 		}
1761 
1762 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
1763 		proto.ip_proto = IPPROTO_ICMP;
1764 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1765 							proto, group);
1766 		if (ret) {
1767 			DPAA2_PMD_ERR("ICMP discrimination rule set failed");
1768 			return -1;
1769 		}
1770 
1771 		(*device_configured) |= local_cfg;
1772 
1773 		return 0;
1774 	}
1775 
1776 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
1777 		RTE_FLOW_ITEM_TYPE_ICMP)) {
1778 		DPAA2_PMD_WARN("Extract field(s) of ICMP not supported.");
1779 
1780 		return -1;
1781 	}
1782 
1783 	if (mask->hdr.icmp_type) {
1784 		index = dpaa2_flow_extract_search(
1785 				&priv->extract.qos_key_extract.dpkg,
1786 				NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
1787 		if (index < 0) {
1788 			ret = dpaa2_flow_extract_add(
1789 					&priv->extract.qos_key_extract,
1790 					NET_PROT_ICMP,
1791 					NH_FLD_ICMP_TYPE,
1792 					NH_FLD_ICMP_TYPE_SIZE);
1793 			if (ret) {
1794 				DPAA2_PMD_ERR("QoS Extract add ICMP_TYPE failed.");
1795 
1796 				return -1;
1797 			}
1798 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1799 		}
1800 
1801 		index = dpaa2_flow_extract_search(
1802 				&priv->extract.tc_key_extract[group].dpkg,
1803 				NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
1804 		if (index < 0) {
1805 			ret = dpaa2_flow_extract_add(
1806 					&priv->extract.tc_key_extract[group],
1807 					NET_PROT_ICMP,
1808 					NH_FLD_ICMP_TYPE,
1809 					NH_FLD_ICMP_TYPE_SIZE);
1810 			if (ret) {
1811 				DPAA2_PMD_ERR("FS Extract add ICMP_TYPE failed.");
1812 
1813 				return -1;
1814 			}
1815 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1816 		}
1817 
1818 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1819 		if (ret) {
1820 			DPAA2_PMD_ERR(
1821 				"Move ipaddr before ICMP TYPE set failed");
1822 			return -1;
1823 		}
1824 
1825 		ret = dpaa2_flow_rule_data_set(
1826 				&priv->extract.qos_key_extract,
1827 				&flow->qos_rule,
1828 				NET_PROT_ICMP,
1829 				NH_FLD_ICMP_TYPE,
1830 				&spec->hdr.icmp_type,
1831 				&mask->hdr.icmp_type,
1832 				NH_FLD_ICMP_TYPE_SIZE);
1833 		if (ret) {
1834 			DPAA2_PMD_ERR("QoS NH_FLD_ICMP_TYPE rule data set failed");
1835 			return -1;
1836 		}
1837 
1838 		ret = dpaa2_flow_rule_data_set(
1839 				&priv->extract.tc_key_extract[group],
1840 				&flow->fs_rule,
1841 				NET_PROT_ICMP,
1842 				NH_FLD_ICMP_TYPE,
1843 				&spec->hdr.icmp_type,
1844 				&mask->hdr.icmp_type,
1845 				NH_FLD_ICMP_TYPE_SIZE);
1846 		if (ret) {
1847 			DPAA2_PMD_ERR("FS NH_FLD_ICMP_TYPE rule data set failed");
1848 			return -1;
1849 		}
1850 	}
1851 
1852 	if (mask->hdr.icmp_code) {
1853 		index = dpaa2_flow_extract_search(
1854 				&priv->extract.qos_key_extract.dpkg,
1855 				NET_PROT_ICMP, NH_FLD_ICMP_CODE);
1856 		if (index < 0) {
1857 			ret = dpaa2_flow_extract_add(
1858 					&priv->extract.qos_key_extract,
1859 					NET_PROT_ICMP,
1860 					NH_FLD_ICMP_CODE,
1861 					NH_FLD_ICMP_CODE_SIZE);
1862 			if (ret) {
1863 				DPAA2_PMD_ERR("QoS Extract add ICMP_CODE failed.");
1864 
1865 				return -1;
1866 			}
1867 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1868 		}
1869 
1870 		index = dpaa2_flow_extract_search(
1871 				&priv->extract.tc_key_extract[group].dpkg,
1872 				NET_PROT_ICMP, NH_FLD_ICMP_CODE);
1873 		if (index < 0) {
1874 			ret = dpaa2_flow_extract_add(
1875 					&priv->extract.tc_key_extract[group],
1876 					NET_PROT_ICMP,
1877 					NH_FLD_ICMP_CODE,
1878 					NH_FLD_ICMP_CODE_SIZE);
1879 			if (ret) {
1880 				DPAA2_PMD_ERR("FS Extract add ICMP_CODE failed.");
1881 
1882 				return -1;
1883 			}
1884 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1885 		}
1886 
1887 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1888 		if (ret) {
1889 			DPAA2_PMD_ERR(
1890 				"Move ipaddr before ICMP CODE set failed");
1891 			return -1;
1892 		}
1893 
1894 		ret = dpaa2_flow_rule_data_set(
1895 				&priv->extract.qos_key_extract,
1896 				&flow->qos_rule,
1897 				NET_PROT_ICMP,
1898 				NH_FLD_ICMP_CODE,
1899 				&spec->hdr.icmp_code,
1900 				&mask->hdr.icmp_code,
1901 				NH_FLD_ICMP_CODE_SIZE);
1902 		if (ret) {
1903 			DPAA2_PMD_ERR("QoS NH_FLD_ICMP_CODE rule data set failed");
1904 			return -1;
1905 		}
1906 
1907 		ret = dpaa2_flow_rule_data_set(
1908 				&priv->extract.tc_key_extract[group],
1909 				&flow->fs_rule,
1910 				NET_PROT_ICMP,
1911 				NH_FLD_ICMP_CODE,
1912 				&spec->hdr.icmp_code,
1913 				&mask->hdr.icmp_code,
1914 				NH_FLD_ICMP_CODE_SIZE);
1915 		if (ret) {
1916 			DPAA2_PMD_ERR("FS NH_FLD_ICMP_CODE rule data set failed");
1917 			return -1;
1918 		}
1919 	}
1920 
1921 	(*device_configured) |= local_cfg;
1922 
1923 	return 0;
1924 }
1925 
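/* Configure QoS/FS extracts and rule data for a UDP pattern item.
 * When no spec is given, or when mc_l4_port_identification is unset,
 * an IP next-protocol discrimination rule for IPPROTO_UDP is set up
 * first. If a spec is present, the masked source/destination ports
 * are then added to both the QoS and the per-TC FS extracts/rules.
 */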
1926 static int
1927 dpaa2_configure_flow_udp(struct rte_flow *flow,
1928 			  struct rte_eth_dev *dev,
1929 			  const struct rte_flow_attr *attr,
1930 			  const struct rte_flow_item *pattern,
1931 			  const struct rte_flow_action actions[] __rte_unused,
1932 			  struct rte_flow_error *error __rte_unused,
1933 			  int *device_configured)
1934 {
1935 	int index, ret;
1936 	int local_cfg = 0;
1937 	uint32_t group;
1938 	const struct rte_flow_item_udp *spec, *mask;
1939 
1940 	const struct rte_flow_item_udp *last __rte_unused;
1941 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1942 
1943 	group = attr->group;
1944 
1945 	/* Parse pattern list to get the matching parameters */
1946 	spec    = (const struct rte_flow_item_udp *)pattern->spec;
1947 	last    = (const struct rte_flow_item_udp *)pattern->last;
1948 	mask    = (const struct rte_flow_item_udp *)
1949 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_udp_mask);
1950 
1951 	/* Get traffic class index and flow id to be configured */
1952 	flow->tc_id = group;
1953 	flow->tc_index = attr->priority;
1954 
1955 	if (!spec || !mc_l4_port_identification) {
1956 		struct proto_discrimination proto;
1957 
1958 		index = dpaa2_flow_extract_search(
1959 				&priv->extract.qos_key_extract.dpkg,
1960 				NET_PROT_IP, NH_FLD_IP_PROTO);
1961 		if (index < 0) {
1962 			ret = dpaa2_flow_proto_discrimination_extract(
1963 					&priv->extract.qos_key_extract,
1964 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1965 			if (ret) {
1966 				DPAA2_PMD_ERR(
1967 					"QoS Extract IP protocol to discriminate UDP failed.");
1968 
1969 				return -1;
1970 			}
1971 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1972 		}
1973 
1974 		index = dpaa2_flow_extract_search(
1975 				&priv->extract.tc_key_extract[group].dpkg,
1976 				NET_PROT_IP, NH_FLD_IP_PROTO);
1977 		if (index < 0) {
1978 			ret = dpaa2_flow_proto_discrimination_extract(
1979 				&priv->extract.tc_key_extract[group],
1980 				DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1981 			if (ret) {
1982 				DPAA2_PMD_ERR(
1983 					"FS Extract IP protocol to discriminate UDP failed.");
1984 
1985 				return -1;
1986 			}
1987 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1988 		}
1989 
1990 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1991 		if (ret) {
1992 			DPAA2_PMD_ERR(
1993 				"Move IP addr before UDP discrimination set failed");
1994 			return -1;
1995 		}
1996 
1997 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
1998 		proto.ip_proto = IPPROTO_UDP;
1999 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2000 							proto, group);
2001 		if (ret) {
2002 			DPAA2_PMD_ERR("UDP discrimination rule set failed");
2003 			return -1;
2004 		}
2005 
2006 		(*device_configured) |= local_cfg;
2007 
2008 		if (!spec)
2009 			return 0;
2010 	}
2011 
2012 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2013 		RTE_FLOW_ITEM_TYPE_UDP)) {
2014 		DPAA2_PMD_WARN("Extract field(s) of UDP not supported.");
2015 
2016 		return -1;
2017 	}
2018 
2019 	if (mask->hdr.src_port) {
2020 		index = dpaa2_flow_extract_search(
2021 				&priv->extract.qos_key_extract.dpkg,
2022 				NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
2023 		if (index < 0) {
2024 			ret = dpaa2_flow_extract_add(
2025 					&priv->extract.qos_key_extract,
2026 				NET_PROT_UDP,
2027 				NH_FLD_UDP_PORT_SRC,
2028 				NH_FLD_UDP_PORT_SIZE);
2029 			if (ret) {
2030 				DPAA2_PMD_ERR("QoS Extract add UDP_SRC failed.");
2031 
2032 				return -1;
2033 			}
2034 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2035 		}
2036 
2037 		index = dpaa2_flow_extract_search(
2038 				&priv->extract.tc_key_extract[group].dpkg,
2039 				NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
2040 		if (index < 0) {
2041 			ret = dpaa2_flow_extract_add(
2042 					&priv->extract.tc_key_extract[group],
2043 					NET_PROT_UDP,
2044 					NH_FLD_UDP_PORT_SRC,
2045 					NH_FLD_UDP_PORT_SIZE);
2046 			if (ret) {
2047 				DPAA2_PMD_ERR("FS Extract add UDP_SRC failed.");
2048 
2049 				return -1;
2050 			}
2051 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2052 		}
2053 
2054 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2055 		if (ret) {
2056 			DPAA2_PMD_ERR(
2057 				"Move ipaddr before UDP_PORT_SRC set failed");
2058 			return -1;
2059 		}
2060 
2061 		ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
2062 				&flow->qos_rule,
2063 				NET_PROT_UDP,
2064 				NH_FLD_UDP_PORT_SRC,
2065 				&spec->hdr.src_port,
2066 				&mask->hdr.src_port,
2067 				NH_FLD_UDP_PORT_SIZE);
2068 		if (ret) {
2069 			DPAA2_PMD_ERR(
2070 				"QoS NH_FLD_UDP_PORT_SRC rule data set failed");
2071 			return -1;
2072 		}
2073 
2074 		ret = dpaa2_flow_rule_data_set(
2075 				&priv->extract.tc_key_extract[group],
2076 				&flow->fs_rule,
2077 				NET_PROT_UDP,
2078 				NH_FLD_UDP_PORT_SRC,
2079 				&spec->hdr.src_port,
2080 				&mask->hdr.src_port,
2081 				NH_FLD_UDP_PORT_SIZE);
2082 		if (ret) {
2083 			DPAA2_PMD_ERR(
2084 				"FS NH_FLD_UDP_PORT_SRC rule data set failed");
2085 			return -1;
2086 		}
2087 	}
2088 
2089 	if (mask->hdr.dst_port) {
2090 		index = dpaa2_flow_extract_search(
2091 				&priv->extract.qos_key_extract.dpkg,
2092 				NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
2093 		if (index < 0) {
2094 			ret = dpaa2_flow_extract_add(
2095 					&priv->extract.qos_key_extract,
2096 					NET_PROT_UDP,
2097 					NH_FLD_UDP_PORT_DST,
2098 					NH_FLD_UDP_PORT_SIZE);
2099 			if (ret) {
2100 				DPAA2_PMD_ERR("QoS Extract add UDP_DST failed.");
2101 
2102 				return -1;
2103 			}
2104 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2105 		}
2106 
2107 		index = dpaa2_flow_extract_search(
2108 				&priv->extract.tc_key_extract[group].dpkg,
2109 				NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
2110 		if (index < 0) {
2111 			ret = dpaa2_flow_extract_add(
2112 					&priv->extract.tc_key_extract[group],
2113 					NET_PROT_UDP,
2114 					NH_FLD_UDP_PORT_DST,
2115 					NH_FLD_UDP_PORT_SIZE);
2116 			if (ret) {
2117 				DPAA2_PMD_ERR("FS Extract add UDP_DST failed.");
2118 
2119 				return -1;
2120 			}
2121 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2122 		}
2123 
2124 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2125 		if (ret) {
2126 			DPAA2_PMD_ERR(
2127 				"Move ipaddr before UDP_PORT_DST set failed");
2128 			return -1;
2129 		}
2130 
2131 		ret = dpaa2_flow_rule_data_set(
2132 				&priv->extract.qos_key_extract,
2133 				&flow->qos_rule,
2134 				NET_PROT_UDP,
2135 				NH_FLD_UDP_PORT_DST,
2136 				&spec->hdr.dst_port,
2137 				&mask->hdr.dst_port,
2138 				NH_FLD_UDP_PORT_SIZE);
2139 		if (ret) {
2140 			DPAA2_PMD_ERR(
2141 				"QoS NH_FLD_UDP_PORT_DST rule data set failed");
2142 			return -1;
2143 		}
2144 
2145 		ret = dpaa2_flow_rule_data_set(
2146 				&priv->extract.tc_key_extract[group],
2147 				&flow->fs_rule,
2148 				NET_PROT_UDP,
2149 				NH_FLD_UDP_PORT_DST,
2150 				&spec->hdr.dst_port,
2151 				&mask->hdr.dst_port,
2152 				NH_FLD_UDP_PORT_SIZE);
2153 		if (ret) {
2154 			DPAA2_PMD_ERR(
2155 				"FS NH_FLD_UDP_PORT_DST rule data set failed");
2156 			return -1;
2157 		}
2158 	}
2159 
2160 	(*device_configured) |= local_cfg;
2161 
2162 	return 0;
2163 }
2164 
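/* Configure QoS/FS extracts and rule data for a TCP pattern item.
 * When no spec is given, or when mc_l4_port_identification is unset,
 * an IP next-protocol discrimination rule for IPPROTO_TCP is set up
 * first. If a spec is present, the masked source/destination ports
 * are then added to both the QoS and the per-TC FS extracts/rules.
 */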
2165 static int
2166 dpaa2_configure_flow_tcp(struct rte_flow *flow,
2167 			 struct rte_eth_dev *dev,
2168 			 const struct rte_flow_attr *attr,
2169 			 const struct rte_flow_item *pattern,
2170 			 const struct rte_flow_action actions[] __rte_unused,
2171 			 struct rte_flow_error *error __rte_unused,
2172 			 int *device_configured)
2173 {
2174 	int index, ret;
2175 	int local_cfg = 0;
2176 	uint32_t group;
2177 	const struct rte_flow_item_tcp *spec, *mask;
2178 
2179 	const struct rte_flow_item_tcp *last __rte_unused;
2180 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2181 
2182 	group = attr->group;
2183 
2184 	/* Parse pattern list to get the matching parameters */
2185 	spec    = (const struct rte_flow_item_tcp *)pattern->spec;
2186 	last    = (const struct rte_flow_item_tcp *)pattern->last;
2187 	mask    = (const struct rte_flow_item_tcp *)
2188 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_tcp_mask);
2189 
2190 	/* Get traffic class index and flow id to be configured */
2191 	flow->tc_id = group;
2192 	flow->tc_index = attr->priority;
2193 
2194 	if (!spec || !mc_l4_port_identification) {
2195 		struct proto_discrimination proto;
2196 
2197 		index = dpaa2_flow_extract_search(
2198 				&priv->extract.qos_key_extract.dpkg,
2199 				NET_PROT_IP, NH_FLD_IP_PROTO);
2200 		if (index < 0) {
2201 			ret = dpaa2_flow_proto_discrimination_extract(
2202 					&priv->extract.qos_key_extract,
2203 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2204 			if (ret) {
2205 				DPAA2_PMD_ERR(
2206 					"QoS Extract IP protocol to discriminate TCP failed.");
2207 
2208 				return -1;
2209 			}
2210 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2211 		}
2212 
2213 		index = dpaa2_flow_extract_search(
2214 				&priv->extract.tc_key_extract[group].dpkg,
2215 				NET_PROT_IP, NH_FLD_IP_PROTO);
2216 		if (index < 0) {
2217 			ret = dpaa2_flow_proto_discrimination_extract(
2218 				&priv->extract.tc_key_extract[group],
2219 				DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2220 			if (ret) {
2221 				DPAA2_PMD_ERR(
2222 					"FS Extract IP protocol to discriminate TCP failed.");
2223 
2224 				return -1;
2225 			}
2226 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2227 		}
2228 
2229 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2230 		if (ret) {
2231 			DPAA2_PMD_ERR(
2232 				"Move IP addr before TCP discrimination set failed");
2233 			return -1;
2234 		}
2235 
2236 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2237 		proto.ip_proto = IPPROTO_TCP;
2238 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2239 							proto, group);
2240 		if (ret) {
2241 			DPAA2_PMD_ERR("TCP discrimination rule set failed");
2242 			return -1;
2243 		}
2244 
2245 		(*device_configured) |= local_cfg;
2246 
2247 		if (!spec)
2248 			return 0;
2249 	}
2250 
2251 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2252 		RTE_FLOW_ITEM_TYPE_TCP)) {
2253 		DPAA2_PMD_WARN("Extract field(s) of TCP not supported.");
2254 
2255 		return -1;
2256 	}
2257 
2258 	if (mask->hdr.src_port) {
2259 		index = dpaa2_flow_extract_search(
2260 				&priv->extract.qos_key_extract.dpkg,
2261 				NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
2262 		if (index < 0) {
2263 			ret = dpaa2_flow_extract_add(
2264 					&priv->extract.qos_key_extract,
2265 					NET_PROT_TCP,
2266 					NH_FLD_TCP_PORT_SRC,
2267 					NH_FLD_TCP_PORT_SIZE);
2268 			if (ret) {
2269 				DPAA2_PMD_ERR("QoS Extract add TCP_SRC failed.");
2270 
2271 				return -1;
2272 			}
2273 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2274 		}
2275 
2276 		index = dpaa2_flow_extract_search(
2277 				&priv->extract.tc_key_extract[group].dpkg,
2278 				NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
2279 		if (index < 0) {
2280 			ret = dpaa2_flow_extract_add(
2281 					&priv->extract.tc_key_extract[group],
2282 					NET_PROT_TCP,
2283 					NH_FLD_TCP_PORT_SRC,
2284 					NH_FLD_TCP_PORT_SIZE);
2285 			if (ret) {
2286 				DPAA2_PMD_ERR("FS Extract add TCP_SRC failed.");
2287 
2288 				return -1;
2289 			}
2290 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2291 		}
2292 
2293 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2294 		if (ret) {
2295 			DPAA2_PMD_ERR(
2296 				"Move ipaddr before TCP_PORT_SRC set failed");
2297 			return -1;
2298 		}
2299 
2300 		ret = dpaa2_flow_rule_data_set(
2301 				&priv->extract.qos_key_extract,
2302 				&flow->qos_rule,
2303 				NET_PROT_TCP,
2304 				NH_FLD_TCP_PORT_SRC,
2305 				&spec->hdr.src_port,
2306 				&mask->hdr.src_port,
2307 				NH_FLD_TCP_PORT_SIZE);
2308 		if (ret) {
2309 			DPAA2_PMD_ERR(
2310 				"QoS NH_FLD_TCP_PORT_SRC rule data set failed");
2311 			return -1;
2312 		}
2313 
2314 		ret = dpaa2_flow_rule_data_set(
2315 				&priv->extract.tc_key_extract[group],
2316 				&flow->fs_rule,
2317 				NET_PROT_TCP,
2318 				NH_FLD_TCP_PORT_SRC,
2319 				&spec->hdr.src_port,
2320 				&mask->hdr.src_port,
2321 				NH_FLD_TCP_PORT_SIZE);
2322 		if (ret) {
2323 			DPAA2_PMD_ERR(
2324 				"FS NH_FLD_TCP_PORT_SRC rule data set failed");
2325 			return -1;
2326 		}
2327 	}
2328 
2329 	if (mask->hdr.dst_port) {
2330 		index = dpaa2_flow_extract_search(
2331 				&priv->extract.qos_key_extract.dpkg,
2332 				NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
2333 		if (index < 0) {
2334 			ret = dpaa2_flow_extract_add(
2335 					&priv->extract.qos_key_extract,
2336 					NET_PROT_TCP,
2337 					NH_FLD_TCP_PORT_DST,
2338 					NH_FLD_TCP_PORT_SIZE);
2339 			if (ret) {
2340 				DPAA2_PMD_ERR("QoS Extract add TCP_DST failed.");
2341 
2342 				return -1;
2343 			}
2344 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2345 		}
2346 
2347 		index = dpaa2_flow_extract_search(
2348 				&priv->extract.tc_key_extract[group].dpkg,
2349 				NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
2350 		if (index < 0) {
2351 			ret = dpaa2_flow_extract_add(
2352 					&priv->extract.tc_key_extract[group],
2353 					NET_PROT_TCP,
2354 					NH_FLD_TCP_PORT_DST,
2355 					NH_FLD_TCP_PORT_SIZE);
2356 			if (ret) {
2357 				DPAA2_PMD_ERR("FS Extract add TCP_DST failed.");
2358 
2359 				return -1;
2360 			}
2361 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2362 		}
2363 
2364 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2365 		if (ret) {
2366 			DPAA2_PMD_ERR(
2367 				"Move ipaddr before TCP_PORT_DST set failed");
2368 			return -1;
2369 		}
2370 
2371 		ret = dpaa2_flow_rule_data_set(
2372 				&priv->extract.qos_key_extract,
2373 				&flow->qos_rule,
2374 				NET_PROT_TCP,
2375 				NH_FLD_TCP_PORT_DST,
2376 				&spec->hdr.dst_port,
2377 				&mask->hdr.dst_port,
2378 				NH_FLD_TCP_PORT_SIZE);
2379 		if (ret) {
2380 			DPAA2_PMD_ERR(
2381 				"QoS NH_FLD_TCP_PORT_DST rule data set failed");
2382 			return -1;
2383 		}
2384 
2385 		ret = dpaa2_flow_rule_data_set(
2386 				&priv->extract.tc_key_extract[group],
2387 				&flow->fs_rule,
2388 				NET_PROT_TCP,
2389 				NH_FLD_TCP_PORT_DST,
2390 				&spec->hdr.dst_port,
2391 				&mask->hdr.dst_port,
2392 				NH_FLD_TCP_PORT_SIZE);
2393 		if (ret) {
2394 			DPAA2_PMD_ERR(
2395 				"FS NH_FLD_TCP_PORT_DST rule data set failed");
2396 			return -1;
2397 		}
2398 	}
2399 
2400 	(*device_configured) |= local_cfg;
2401 
2402 	return 0;
2403 }
2404 
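/* Configure QoS/FS extracts and rule data for an SCTP pattern item.
 * When no spec is given, or when mc_l4_port_identification is unset,
 * an IP next-protocol discrimination rule for IPPROTO_SCTP is set up
 * first. If a spec is present, the masked source/destination ports
 * are then added to both the QoS and the per-TC FS extracts/rules.
 */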
2405 static int
2406 dpaa2_configure_flow_sctp(struct rte_flow *flow,
2407 			  struct rte_eth_dev *dev,
2408 			  const struct rte_flow_attr *attr,
2409 			  const struct rte_flow_item *pattern,
2410 			  const struct rte_flow_action actions[] __rte_unused,
2411 			  struct rte_flow_error *error __rte_unused,
2412 			  int *device_configured)
2413 {
2414 	int index, ret;
2415 	int local_cfg = 0;
2416 	uint32_t group;
2417 	const struct rte_flow_item_sctp *spec, *mask;
2418 
2419 	const struct rte_flow_item_sctp *last __rte_unused;
2420 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2421 
2422 	group = attr->group;
2423 
2424 	/* Parse pattern list to get the matching parameters */
2425 	spec    = (const struct rte_flow_item_sctp *)pattern->spec;
2426 	last    = (const struct rte_flow_item_sctp *)pattern->last;
2427 	mask    = (const struct rte_flow_item_sctp *)
2428 			(pattern->mask ? pattern->mask :
2429 				&dpaa2_flow_item_sctp_mask);
2430 
2431 	/* Get traffic class index and flow id to be configured */
2432 	flow->tc_id = group;
2433 	flow->tc_index = attr->priority;
2434 
2435 	if (!spec || !mc_l4_port_identification) {
2436 		struct proto_discrimination proto;
2437 
2438 		index = dpaa2_flow_extract_search(
2439 				&priv->extract.qos_key_extract.dpkg,
2440 				NET_PROT_IP, NH_FLD_IP_PROTO);
2441 		if (index < 0) {
2442 			ret = dpaa2_flow_proto_discrimination_extract(
2443 					&priv->extract.qos_key_extract,
2444 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2445 			if (ret) {
2446 				DPAA2_PMD_ERR(
2447 					"QoS Extract IP protocol to discriminate SCTP failed.");
2448 
2449 				return -1;
2450 			}
2451 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2452 		}
2453 
2454 		index = dpaa2_flow_extract_search(
2455 				&priv->extract.tc_key_extract[group].dpkg,
2456 				NET_PROT_IP, NH_FLD_IP_PROTO);
2457 		if (index < 0) {
2458 			ret = dpaa2_flow_proto_discrimination_extract(
2459 					&priv->extract.tc_key_extract[group],
2460 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2461 			if (ret) {
2462 				DPAA2_PMD_ERR(
2463 					"FS Extract IP protocol to discriminate SCTP failed.");
2464 
2465 				return -1;
2466 			}
2467 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2468 		}
2469 
2470 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2471 		if (ret) {
2472 			DPAA2_PMD_ERR(
2473 				"Move ipaddr before SCTP discrimination set failed");
2474 			return -1;
2475 		}
2476 
2477 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2478 		proto.ip_proto = IPPROTO_SCTP;
2479 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2480 							proto, group);
2481 		if (ret) {
2482 			DPAA2_PMD_ERR("SCTP discrimination rule set failed");
2483 			return -1;
2484 		}
2485 
2486 		(*device_configured) |= local_cfg;
2487 
2488 		if (!spec)
2489 			return 0;
2490 	}
2491 
2492 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2493 		RTE_FLOW_ITEM_TYPE_SCTP)) {
2494 		DPAA2_PMD_WARN("Extract field(s) of SCTP not supported.");
2495 
2496 		return -1;
2497 	}
2498 
2499 	if (mask->hdr.src_port) {
2500 		index = dpaa2_flow_extract_search(
2501 				&priv->extract.qos_key_extract.dpkg,
2502 				NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
2503 		if (index < 0) {
2504 			ret = dpaa2_flow_extract_add(
2505 					&priv->extract.qos_key_extract,
2506 					NET_PROT_SCTP,
2507 					NH_FLD_SCTP_PORT_SRC,
2508 					NH_FLD_SCTP_PORT_SIZE);
2509 			if (ret) {
2510 				DPAA2_PMD_ERR("QoS Extract add SCTP_SRC failed.");
2511 
2512 				return -1;
2513 			}
2514 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2515 		}
2516 
2517 		index = dpaa2_flow_extract_search(
2518 				&priv->extract.tc_key_extract[group].dpkg,
2519 				NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
2520 		if (index < 0) {
2521 			ret = dpaa2_flow_extract_add(
2522 					&priv->extract.tc_key_extract[group],
2523 					NET_PROT_SCTP,
2524 					NH_FLD_SCTP_PORT_SRC,
2525 					NH_FLD_SCTP_PORT_SIZE);
2526 			if (ret) {
2527 				DPAA2_PMD_ERR("FS Extract add SCTP_SRC failed.");
2528 
2529 				return -1;
2530 			}
2531 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2532 		}
2533 
2534 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2535 		if (ret) {
2536 			DPAA2_PMD_ERR(
2537 				"Move ipaddr before SCTP_PORT_SRC set failed");
2538 			return -1;
2539 		}
2540 
2541 		ret = dpaa2_flow_rule_data_set(
2542 				&priv->extract.qos_key_extract,
2543 				&flow->qos_rule,
2544 				NET_PROT_SCTP,
2545 				NH_FLD_SCTP_PORT_SRC,
2546 				&spec->hdr.src_port,
2547 				&mask->hdr.src_port,
2548 				NH_FLD_SCTP_PORT_SIZE);
2549 		if (ret) {
2550 			DPAA2_PMD_ERR(
2551 				"QoS NH_FLD_SCTP_PORT_SRC rule data set failed");
2552 			return -1;
2553 		}
2554 
2555 		ret = dpaa2_flow_rule_data_set(
2556 				&priv->extract.tc_key_extract[group],
2557 				&flow->fs_rule,
2558 				NET_PROT_SCTP,
2559 				NH_FLD_SCTP_PORT_SRC,
2560 				&spec->hdr.src_port,
2561 				&mask->hdr.src_port,
2562 				NH_FLD_SCTP_PORT_SIZE);
2563 		if (ret) {
2564 			DPAA2_PMD_ERR(
2565 				"FS NH_FLD_SCTP_PORT_SRC rule data set failed");
2566 			return -1;
2567 		}
2568 	}
2569 
2570 	if (mask->hdr.dst_port) {
2571 		index = dpaa2_flow_extract_search(
2572 				&priv->extract.qos_key_extract.dpkg,
2573 				NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
2574 		if (index < 0) {
2575 			ret = dpaa2_flow_extract_add(
2576 					&priv->extract.qos_key_extract,
2577 					NET_PROT_SCTP,
2578 					NH_FLD_SCTP_PORT_DST,
2579 					NH_FLD_SCTP_PORT_SIZE);
2580 			if (ret) {
2581 				DPAA2_PMD_ERR("QoS Extract add SCTP_DST failed.");
2582 
2583 				return -1;
2584 			}
2585 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2586 		}
2587 
2588 		index = dpaa2_flow_extract_search(
2589 				&priv->extract.tc_key_extract[group].dpkg,
2590 				NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
2591 		if (index < 0) {
2592 			ret = dpaa2_flow_extract_add(
2593 					&priv->extract.tc_key_extract[group],
2594 					NET_PROT_SCTP,
2595 					NH_FLD_SCTP_PORT_DST,
2596 					NH_FLD_SCTP_PORT_SIZE);
2597 			if (ret) {
2598 				DPAA2_PMD_ERR("FS Extract add SCTP_DST failed.");
2599 
2600 				return -1;
2601 			}
2602 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2603 		}
2604 
2605 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2606 		if (ret) {
2607 			DPAA2_PMD_ERR(
2608 				"Move ipaddr before SCTP_PORT_DST set failed");
2609 			return -1;
2610 		}
2611 
2612 		ret = dpaa2_flow_rule_data_set(
2613 				&priv->extract.qos_key_extract,
2614 				&flow->qos_rule,
2615 				NET_PROT_SCTP,
2616 				NH_FLD_SCTP_PORT_DST,
2617 				&spec->hdr.dst_port,
2618 				&mask->hdr.dst_port,
2619 				NH_FLD_SCTP_PORT_SIZE);
2620 		if (ret) {
2621 			DPAA2_PMD_ERR(
2622 				"QoS NH_FLD_SCTP_PORT_DST rule data set failed");
2623 			return -1;
2624 		}
2625 
2626 		ret = dpaa2_flow_rule_data_set(
2627 				&priv->extract.tc_key_extract[group],
2628 				&flow->fs_rule,
2629 				NET_PROT_SCTP,
2630 				NH_FLD_SCTP_PORT_DST,
2631 				&spec->hdr.dst_port,
2632 				&mask->hdr.dst_port,
2633 				NH_FLD_SCTP_PORT_SIZE);
2634 		if (ret) {
2635 			DPAA2_PMD_ERR(
2636 				"FS NH_FLD_SCTP_PORT_DST rule data set failed");
2637 			return -1;
2638 		}
2639 	}
2640 
2641 	(*device_configured) |= local_cfg;
2642 
2643 	return 0;
2644 }
2645 
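/* Configure QoS/FS extracts and rule data for a GRE pattern item.
 * Without a spec, only an IP next-protocol discrimination rule for
 * IPPROTO_GRE is installed. With a spec, the masked GRE protocol
 * (ether type) field is added to the QoS and per-TC FS extracts/rules.
 */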
2646 static int
2647 dpaa2_configure_flow_gre(struct rte_flow *flow,
2648 			 struct rte_eth_dev *dev,
2649 			 const struct rte_flow_attr *attr,
2650 			 const struct rte_flow_item *pattern,
2651 			 const struct rte_flow_action actions[] __rte_unused,
2652 			 struct rte_flow_error *error __rte_unused,
2653 			 int *device_configured)
2654 {
2655 	int index, ret;
2656 	int local_cfg = 0;
2657 	uint32_t group;
2658 	const struct rte_flow_item_gre *spec, *mask;
2659 
2660 	const struct rte_flow_item_gre *last __rte_unused;
2661 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2662 
2663 	group = attr->group;
2664 
2665 	/* Parse pattern list to get the matching parameters */
2666 	spec    = (const struct rte_flow_item_gre *)pattern->spec;
2667 	last    = (const struct rte_flow_item_gre *)pattern->last;
2668 	mask    = (const struct rte_flow_item_gre *)
2669 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_gre_mask);
2670 
2671 	/* Get traffic class index and flow id to be configured */
2672 	flow->tc_id = group;
2673 	flow->tc_index = attr->priority;
2674 
2675 	if (!spec) {
2676 		struct proto_discrimination proto;
2677 
2678 		index = dpaa2_flow_extract_search(
2679 				&priv->extract.qos_key_extract.dpkg,
2680 				NET_PROT_IP, NH_FLD_IP_PROTO);
2681 		if (index < 0) {
2682 			ret = dpaa2_flow_proto_discrimination_extract(
2683 					&priv->extract.qos_key_extract,
2684 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2685 			if (ret) {
2686 				DPAA2_PMD_ERR(
2687 					"QoS Extract IP protocol to discriminate GRE failed.");
2688 
2689 				return -1;
2690 			}
2691 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2692 		}
2693 
2694 		index = dpaa2_flow_extract_search(
2695 				&priv->extract.tc_key_extract[group].dpkg,
2696 				NET_PROT_IP, NH_FLD_IP_PROTO);
2697 		if (index < 0) {
2698 			ret = dpaa2_flow_proto_discrimination_extract(
2699 					&priv->extract.tc_key_extract[group],
2700 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2701 			if (ret) {
2702 				DPAA2_PMD_ERR(
2703 					"FS Extract IP protocol to discriminate GRE failed.");
2704 
2705 				return -1;
2706 			}
2707 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2708 		}
2709 
2710 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2711 		if (ret) {
2712 			DPAA2_PMD_ERR(
2713 				"Move IP addr before GRE discrimination set failed");
2714 			return -1;
2715 		}
2716 
2717 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2718 		proto.ip_proto = IPPROTO_GRE;
2719 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2720 							proto, group);
2721 		if (ret) {
2722 			DPAA2_PMD_ERR("GRE discrimination rule set failed");
2723 			return -1;
2724 		}
2725 
2726 		(*device_configured) |= local_cfg;
2727 
2728 		return 0;
2729 	}
2730 
2731 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2732 		RTE_FLOW_ITEM_TYPE_GRE)) {
2733 		DPAA2_PMD_WARN("Extract field(s) of GRE not supported.");
2734 
2735 		return -1;
2736 	}
2737 
2738 	if (!mask->protocol)
2739 		return 0;
2740 
2741 	index = dpaa2_flow_extract_search(
2742 			&priv->extract.qos_key_extract.dpkg,
2743 			NET_PROT_GRE, NH_FLD_GRE_TYPE);
2744 	if (index < 0) {
2745 		ret = dpaa2_flow_extract_add(
2746 				&priv->extract.qos_key_extract,
2747 				NET_PROT_GRE,
2748 				NH_FLD_GRE_TYPE,
2749 				sizeof(rte_be16_t));
2750 		if (ret) {
2751 			DPAA2_PMD_ERR("QoS Extract add GRE_TYPE failed.");
2752 
2753 			return -1;
2754 		}
2755 		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2756 	}
2757 
2758 	index = dpaa2_flow_extract_search(
2759 			&priv->extract.tc_key_extract[group].dpkg,
2760 			NET_PROT_GRE, NH_FLD_GRE_TYPE);
2761 	if (index < 0) {
2762 		ret = dpaa2_flow_extract_add(
2763 				&priv->extract.tc_key_extract[group],
2764 				NET_PROT_GRE,
2765 				NH_FLD_GRE_TYPE,
2766 				sizeof(rte_be16_t));
2767 		if (ret) {
2768 			DPAA2_PMD_ERR("FS Extract add GRE_TYPE failed.");
2769 
2770 			return -1;
2771 		}
2772 		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2773 	}
2774 
2775 	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2776 	if (ret) {
2777 		DPAA2_PMD_ERR(
2778 			"Move ipaddr before GRE_TYPE set failed");
2779 		return -1;
2780 	}
2781 
2782 	ret = dpaa2_flow_rule_data_set(
2783 				&priv->extract.qos_key_extract,
2784 				&flow->qos_rule,
2785 				NET_PROT_GRE,
2786 				NH_FLD_GRE_TYPE,
2787 				&spec->protocol,
2788 				&mask->protocol,
2789 				sizeof(rte_be16_t));
2790 	if (ret) {
2791 		DPAA2_PMD_ERR(
2792 			"QoS NH_FLD_GRE_TYPE rule data set failed");
2793 		return -1;
2794 	}
2795 
2796 	ret = dpaa2_flow_rule_data_set(
2797 			&priv->extract.tc_key_extract[group],
2798 			&flow->fs_rule,
2799 			NET_PROT_GRE,
2800 			NH_FLD_GRE_TYPE,
2801 			&spec->protocol,
2802 			&mask->protocol,
2803 			sizeof(rte_be16_t));
2804 	if (ret) {
2805 		DPAA2_PMD_ERR(
2806 			"FS NH_FLD_GRE_TYPE rule data set failed");
2807 		return -1;
2808 	}
2809 
2810 	(*device_configured) |= local_cfg;
2811 
2812 	return 0;
2813 }
2814 
2815 /* Existing QoS/FS entries that match on IP address(es)
2816  * need to be updated whenever new extract(s) are inserted
2817  * ahead of the IP address extract(s), since the insertion
2818  * shifts the IP address offsets within the key and mask.
2819  */
2820 static int
2821 dpaa2_flow_entry_update(
2822 	struct dpaa2_dev_priv *priv, uint8_t tc_id)
2823 {
2824 	struct rte_flow *curr = LIST_FIRST(&priv->flows);
2825 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
2826 	int ret;
2827 	int qos_ipsrc_offset = -1, qos_ipdst_offset = -1;
2828 	int fs_ipsrc_offset = -1, fs_ipdst_offset = -1;
2829 	struct dpaa2_key_extract *qos_key_extract =
2830 		&priv->extract.qos_key_extract;
2831 	struct dpaa2_key_extract *tc_key_extract =
2832 		&priv->extract.tc_key_extract[tc_id];
2833 	char ipsrc_key[NH_FLD_IPV6_ADDR_SIZE];
2834 	char ipdst_key[NH_FLD_IPV6_ADDR_SIZE];
2835 	char ipsrc_mask[NH_FLD_IPV6_ADDR_SIZE];
2836 	char ipdst_mask[NH_FLD_IPV6_ADDR_SIZE];
2837 	int extend = -1, extend1, size = -1;
2838 	uint16_t qos_index;
2839 
2840 	while (curr) {
2841 		if (curr->ipaddr_rule.ipaddr_type ==
2842 			FLOW_NONE_IPADDR) {
2843 			curr = LIST_NEXT(curr, next);
2844 			continue;
2845 		}
2846 
2847 		if (curr->ipaddr_rule.ipaddr_type ==
2848 			FLOW_IPV4_ADDR) {
2849 			qos_ipsrc_offset =
2850 				qos_key_extract->key_info.ipv4_src_offset;
2851 			qos_ipdst_offset =
2852 				qos_key_extract->key_info.ipv4_dst_offset;
2853 			fs_ipsrc_offset =
2854 				tc_key_extract->key_info.ipv4_src_offset;
2855 			fs_ipdst_offset =
2856 				tc_key_extract->key_info.ipv4_dst_offset;
2857 			size = NH_FLD_IPV4_ADDR_SIZE;
2858 		} else {
2859 			qos_ipsrc_offset =
2860 				qos_key_extract->key_info.ipv6_src_offset;
2861 			qos_ipdst_offset =
2862 				qos_key_extract->key_info.ipv6_dst_offset;
2863 			fs_ipsrc_offset =
2864 				tc_key_extract->key_info.ipv6_src_offset;
2865 			fs_ipdst_offset =
2866 				tc_key_extract->key_info.ipv6_dst_offset;
2867 			size = NH_FLD_IPV6_ADDR_SIZE;
2868 		}
2869 
2870 		qos_index = curr->tc_id * priv->fs_entries +
2871 			curr->tc_index;
2872 
2873 		dpaa2_flow_qos_entry_log("Before update", curr, qos_index);
2874 
2875 		ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW,
2876 				priv->token, &curr->qos_rule);
2877 		if (ret) {
2878 			DPAA2_PMD_ERR("QoS entry remove failed.");
2879 			return -1;
2880 		}
2881 
2882 		extend = -1;
2883 
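		/* Re-locate the IP address field(s) within the QoS key/mask:
		 * save the bytes found at the old offset, clear them there,
		 * record the new offset, and copy them back below once both
		 * source and destination offsets are updated. 'extend' tracks
		 * how far the fields moved so the real key size can grow by
		 * the same amount.
		 */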
2884 		if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
2885 			RTE_ASSERT(qos_ipsrc_offset >=
2886 				curr->ipaddr_rule.qos_ipsrc_offset);
2887 			extend1 = qos_ipsrc_offset -
2888 				curr->ipaddr_rule.qos_ipsrc_offset;
2889 			if (extend >= 0)
2890 				RTE_ASSERT(extend == extend1);
2891 			else
2892 				extend = extend1;
2893 
2894 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
2895 				(size == NH_FLD_IPV6_ADDR_SIZE));
2896 
2897 			memcpy(ipsrc_key,
2898 				(char *)(size_t)curr->qos_rule.key_iova +
2899 				curr->ipaddr_rule.qos_ipsrc_offset,
2900 				size);
2901 			memset((char *)(size_t)curr->qos_rule.key_iova +
2902 				curr->ipaddr_rule.qos_ipsrc_offset,
2903 				0, size);
2904 
2905 			memcpy(ipsrc_mask,
2906 				(char *)(size_t)curr->qos_rule.mask_iova +
2907 				curr->ipaddr_rule.qos_ipsrc_offset,
2908 				size);
2909 			memset((char *)(size_t)curr->qos_rule.mask_iova +
2910 				curr->ipaddr_rule.qos_ipsrc_offset,
2911 				0, size);
2912 
2913 			curr->ipaddr_rule.qos_ipsrc_offset = qos_ipsrc_offset;
2914 		}
2915 
2916 		if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
2917 			RTE_ASSERT(qos_ipdst_offset >=
2918 				curr->ipaddr_rule.qos_ipdst_offset);
2919 			extend1 = qos_ipdst_offset -
2920 				curr->ipaddr_rule.qos_ipdst_offset;
2921 			if (extend >= 0)
2922 				RTE_ASSERT(extend == extend1);
2923 			else
2924 				extend = extend1;
2925 
2926 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
2927 				(size == NH_FLD_IPV6_ADDR_SIZE));
2928 
2929 			memcpy(ipdst_key,
2930 				(char *)(size_t)curr->qos_rule.key_iova +
2931 				curr->ipaddr_rule.qos_ipdst_offset,
2932 				size);
2933 			memset((char *)(size_t)curr->qos_rule.key_iova +
2934 				curr->ipaddr_rule.qos_ipdst_offset,
2935 				0, size);
2936 
2937 			memcpy(ipdst_mask,
2938 				(char *)(size_t)curr->qos_rule.mask_iova +
2939 				curr->ipaddr_rule.qos_ipdst_offset,
2940 				size);
2941 			memset((char *)(size_t)curr->qos_rule.mask_iova +
2942 				curr->ipaddr_rule.qos_ipdst_offset,
2943 				0, size);
2944 
2945 			curr->ipaddr_rule.qos_ipdst_offset = qos_ipdst_offset;
2946 		}
2947 
2948 		if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
2949 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
2950 				(size == NH_FLD_IPV6_ADDR_SIZE));
2951 			memcpy((char *)(size_t)curr->qos_rule.key_iova +
2952 				curr->ipaddr_rule.qos_ipsrc_offset,
2953 				ipsrc_key,
2954 				size);
2955 			memcpy((char *)(size_t)curr->qos_rule.mask_iova +
2956 				curr->ipaddr_rule.qos_ipsrc_offset,
2957 				ipsrc_mask,
2958 				size);
2959 		}
2960 		if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
2961 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
2962 				(size == NH_FLD_IPV6_ADDR_SIZE));
2963 			memcpy((char *)(size_t)curr->qos_rule.key_iova +
2964 				curr->ipaddr_rule.qos_ipdst_offset,
2965 				ipdst_key,
2966 				size);
2967 			memcpy((char *)(size_t)curr->qos_rule.mask_iova +
2968 				curr->ipaddr_rule.qos_ipdst_offset,
2969 				ipdst_mask,
2970 				size);
2971 		}
2972 
2973 		if (extend >= 0)
2974 			curr->qos_real_key_size += extend;
2975 
2976 		curr->qos_rule.key_size = FIXED_ENTRY_SIZE;
2977 
2978 		dpaa2_flow_qos_entry_log("Start update", curr, qos_index);
2979 
2980 		ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
2981 				priv->token, &curr->qos_rule,
2982 				curr->tc_id, qos_index,
2983 				0, 0);
2984 		if (ret) {
2985 			DPAA2_PMD_ERR("QoS entry update failed.");
2986 			return -1;
2987 		}
2988 
2989 		if (curr->action != RTE_FLOW_ACTION_TYPE_QUEUE) {
2990 			curr = LIST_NEXT(curr, next);
2991 			continue;
2992 		}
2993 
2994 		dpaa2_flow_fs_entry_log("Before update", curr);
2995 		extend = -1;
2996 
2997 		ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW,
2998 				priv->token, curr->tc_id, &curr->fs_rule);
2999 		if (ret) {
3000 			DPAA2_PMD_ERR("FS entry remove failed.");
3001 			return -1;
3002 		}
3003 
3004 		if (curr->ipaddr_rule.fs_ipsrc_offset >= 0 &&
3005 			tc_id == curr->tc_id) {
3006 			RTE_ASSERT(fs_ipsrc_offset >=
3007 				curr->ipaddr_rule.fs_ipsrc_offset);
3008 			extend1 = fs_ipsrc_offset -
3009 				curr->ipaddr_rule.fs_ipsrc_offset;
3010 			if (extend >= 0)
3011 				RTE_ASSERT(extend == extend1);
3012 			else
3013 				extend = extend1;
3014 
3015 			memcpy(ipsrc_key,
3016 				(char *)(size_t)curr->fs_rule.key_iova +
3017 				curr->ipaddr_rule.fs_ipsrc_offset,
3018 				size);
3019 			memset((char *)(size_t)curr->fs_rule.key_iova +
3020 				curr->ipaddr_rule.fs_ipsrc_offset,
3021 				0, size);
3022 
3023 			memcpy(ipsrc_mask,
3024 				(char *)(size_t)curr->fs_rule.mask_iova +
3025 				curr->ipaddr_rule.fs_ipsrc_offset,
3026 				size);
3027 			memset((char *)(size_t)curr->fs_rule.mask_iova +
3028 				curr->ipaddr_rule.fs_ipsrc_offset,
3029 				0, size);
3030 
3031 			curr->ipaddr_rule.fs_ipsrc_offset = fs_ipsrc_offset;
3032 		}
3033 
3034 		if (curr->ipaddr_rule.fs_ipdst_offset >= 0 &&
3035 			tc_id == curr->tc_id) {
3036 			RTE_ASSERT(fs_ipdst_offset >=
3037 				curr->ipaddr_rule.fs_ipdst_offset);
3038 			extend1 = fs_ipdst_offset -
3039 				curr->ipaddr_rule.fs_ipdst_offset;
3040 			if (extend >= 0)
3041 				RTE_ASSERT(extend == extend1);
3042 			else
3043 				extend = extend1;
3044 
3045 			memcpy(ipdst_key,
3046 				(char *)(size_t)curr->fs_rule.key_iova +
3047 				curr->ipaddr_rule.fs_ipdst_offset,
3048 				size);
3049 			memset((char *)(size_t)curr->fs_rule.key_iova +
3050 				curr->ipaddr_rule.fs_ipdst_offset,
3051 				0, size);
3052 
3053 			memcpy(ipdst_mask,
3054 				(char *)(size_t)curr->fs_rule.mask_iova +
3055 				curr->ipaddr_rule.fs_ipdst_offset,
3056 				size);
3057 			memset((char *)(size_t)curr->fs_rule.mask_iova +
3058 				curr->ipaddr_rule.fs_ipdst_offset,
3059 				0, size);
3060 
3061 			curr->ipaddr_rule.fs_ipdst_offset = fs_ipdst_offset;
3062 		}
3063 
3064 		if (curr->ipaddr_rule.fs_ipsrc_offset >= 0) {
3065 			memcpy((char *)(size_t)curr->fs_rule.key_iova +
3066 				curr->ipaddr_rule.fs_ipsrc_offset,
3067 				ipsrc_key,
3068 				size);
3069 			memcpy((char *)(size_t)curr->fs_rule.mask_iova +
3070 				curr->ipaddr_rule.fs_ipsrc_offset,
3071 				ipsrc_mask,
3072 				size);
3073 		}
3074 		if (curr->ipaddr_rule.fs_ipdst_offset >= 0) {
3075 			memcpy((char *)(size_t)curr->fs_rule.key_iova +
3076 				curr->ipaddr_rule.fs_ipdst_offset,
3077 				ipdst_key,
3078 				size);
3079 			memcpy((char *)(size_t)curr->fs_rule.mask_iova +
3080 				curr->ipaddr_rule.fs_ipdst_offset,
3081 				ipdst_mask,
3082 				size);
3083 		}
3084 
3085 		if (extend >= 0)
3086 			curr->fs_real_key_size += extend;
3087 		curr->fs_rule.key_size = FIXED_ENTRY_SIZE;
3088 
3089 		dpaa2_flow_fs_entry_log("Start update", curr);
3090 
3091 		ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW,
3092 				priv->token, curr->tc_id, curr->tc_index,
3093 				&curr->fs_rule, &curr->action_cfg);
3094 		if (ret) {
3095 			DPAA2_PMD_ERR("FS entry update failed.");
3096 			return -1;
3097 		}
3098 
3099 		curr = LIST_NEXT(curr, next);
3100 	}
3101 
3102 	return 0;
3103 }
3104 
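/* Reject a new flow whose (group, priority) pair collides with an
 * existing flow, since that pair selects the traffic class and the
 * entry index within it.
 */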
3105 static inline int
3106 dpaa2_flow_verify_attr(
3107 	struct dpaa2_dev_priv *priv,
3108 	const struct rte_flow_attr *attr)
3109 {
3110 	struct rte_flow *curr = LIST_FIRST(&priv->flows);
3111 
3112 	while (curr) {
3113 		if (curr->tc_id == attr->group &&
3114 			curr->tc_index == attr->priority) {
3115 			DPAA2_PMD_ERR(
3116 				"Flow with group %d and priority %d already exists.",
3117 				attr->group, attr->priority);
3118 
3119 			return -1;
3120 		}
3121 		curr = LIST_NEXT(curr, next);
3122 	}
3123 
3124 	return 0;
3125 }
3126 
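/* Validate the action list: a QUEUE action must target an RX queue
 * belonging to the traffic class named by attr->group; an RSS action
 * must not exceed the distribution size and all of its queues must
 * belong to that same traffic class.
 */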
3127 static inline int
3128 dpaa2_flow_verify_action(
3129 	struct dpaa2_dev_priv *priv,
3130 	const struct rte_flow_attr *attr,
3131 	const struct rte_flow_action actions[])
3132 {
3133 	int end_of_list = 0, i, j = 0;
3134 	const struct rte_flow_action_queue *dest_queue;
3135 	const struct rte_flow_action_rss *rss_conf;
3136 	struct dpaa2_queue *rxq;
3137 
3138 	while (!end_of_list) {
3139 		switch (actions[j].type) {
3140 		case RTE_FLOW_ACTION_TYPE_QUEUE:
3141 			dest_queue = (const struct rte_flow_action_queue *)
3142 					(actions[j].conf);
3143 			rxq = priv->rx_vq[dest_queue->index];
3144 			if (attr->group != rxq->tc_index) {
3145 				DPAA2_PMD_ERR(
3146 					"RXQ[%d] does not belong to the group %d",
3147 					dest_queue->index, attr->group);
3148 
3149 				return -1;
3150 			}
3151 			break;
3152 		case RTE_FLOW_ACTION_TYPE_RSS:
3153 			rss_conf = (const struct rte_flow_action_rss *)
3154 					(actions[j].conf);
3155 			if (rss_conf->queue_num > priv->dist_queues) {
3156 				DPAA2_PMD_ERR(
3157 					"RSS number exceeds the distribution size");
3158 				return -ENOTSUP;
3159 			}
3160 			for (i = 0; i < (int)rss_conf->queue_num; i++) {
3161 				if (rss_conf->queue[i] >= priv->nb_rx_queues) {
3162 					DPAA2_PMD_ERR(
3163 						"RSS queue index exceeds the number of RXQs");
3164 					return -ENOTSUP;
3165 				}
3166 				rxq = priv->rx_vq[rss_conf->queue[i]];
3167 				if (rxq->tc_index != attr->group) {
3168 					DPAA2_PMD_ERR(
3169 						"Queue/Group combination is not supported\n");
3170 					return -ENOTSUP;
3171 				}
3172 			}
3173 
3174 			break;
3175 		case RTE_FLOW_ACTION_TYPE_END:
3176 			end_of_list = 1;
3177 			break;
3178 		default:
3179 			DPAA2_PMD_ERR("Invalid action type");
3180 			return -ENOTSUP;
3181 		}
3182 		j++;
3183 	}
3184 
3185 	return 0;
3186 }
3187 
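/* Translate an rte_flow rule into DPNI QoS and FS configuration:
 * walk the pattern to build the key extracts and rule data, then
 * apply the action. A QUEUE action programs both a QoS entry (to pick
 * the traffic class) and an FS entry (to pick the queue); an RSS
 * action reconfigures the TC distribution to hashing and programs a
 * QoS entry only. If the key extracts were reconfigured, previously
 * installed entries are updated before the flow is appended to the
 * per-port flow list.
 */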
3188 static int
3189 dpaa2_generic_flow_set(struct rte_flow *flow,
3190 		       struct rte_eth_dev *dev,
3191 		       const struct rte_flow_attr *attr,
3192 		       const struct rte_flow_item pattern[],
3193 		       const struct rte_flow_action actions[],
3194 		       struct rte_flow_error *error)
3195 {
3196 	const struct rte_flow_action_queue *dest_queue;
3197 	const struct rte_flow_action_rss *rss_conf;
3198 	int is_keycfg_configured = 0, end_of_list = 0;
3199 	int ret = 0, i = 0, j = 0;
3200 	struct dpni_rx_tc_dist_cfg tc_cfg;
3201 	struct dpni_qos_tbl_cfg qos_cfg;
3202 	struct dpni_fs_action_cfg action;
3203 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3204 	struct dpaa2_queue *rxq;
3205 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3206 	size_t param;
3207 	struct rte_flow *curr = LIST_FIRST(&priv->flows);
3208 	uint16_t qos_index;
3209 
3210 	ret = dpaa2_flow_verify_attr(priv, attr);
3211 	if (ret)
3212 		return ret;
3213 
3214 	ret = dpaa2_flow_verify_action(priv, attr, actions);
3215 	if (ret)
3216 		return ret;
3217 
3218 	/* Parse pattern list to get the matching parameters */
3219 	while (!end_of_list) {
3220 		switch (pattern[i].type) {
3221 		case RTE_FLOW_ITEM_TYPE_ETH:
3222 			ret = dpaa2_configure_flow_eth(flow,
3223 					dev, attr, &pattern[i], actions, error,
3224 					&is_keycfg_configured);
3225 			if (ret) {
3226 				DPAA2_PMD_ERR("ETH flow configuration failed!");
3227 				return ret;
3228 			}
3229 			break;
3230 		case RTE_FLOW_ITEM_TYPE_VLAN:
3231 			ret = dpaa2_configure_flow_vlan(flow,
3232 					dev, attr, &pattern[i], actions, error,
3233 					&is_keycfg_configured);
3234 			if (ret) {
3235 				DPAA2_PMD_ERR("VLAN flow configuration failed!");
3236 				return ret;
3237 			}
3238 			break;
3239 		case RTE_FLOW_ITEM_TYPE_IPV4:
3240 		case RTE_FLOW_ITEM_TYPE_IPV6:
3241 			ret = dpaa2_configure_flow_generic_ip(flow,
3242 					dev, attr, &pattern[i], actions, error,
3243 					&is_keycfg_configured);
3244 			if (ret) {
3245 				DPAA2_PMD_ERR("IP flow configuration failed!");
3246 				return ret;
3247 			}
3248 			break;
3249 		case RTE_FLOW_ITEM_TYPE_ICMP:
3250 			ret = dpaa2_configure_flow_icmp(flow,
3251 					dev, attr, &pattern[i], actions, error,
3252 					&is_keycfg_configured);
3253 			if (ret) {
3254 				DPAA2_PMD_ERR("ICMP flow configuration failed!");
3255 				return ret;
3256 			}
3257 			break;
3258 		case RTE_FLOW_ITEM_TYPE_UDP:
3259 			ret = dpaa2_configure_flow_udp(flow,
3260 					dev, attr, &pattern[i], actions, error,
3261 					&is_keycfg_configured);
3262 			if (ret) {
3263 				DPAA2_PMD_ERR("UDP flow configuration failed!");
3264 				return ret;
3265 			}
3266 			break;
3267 		case RTE_FLOW_ITEM_TYPE_TCP:
3268 			ret = dpaa2_configure_flow_tcp(flow,
3269 					dev, attr, &pattern[i], actions, error,
3270 					&is_keycfg_configured);
3271 			if (ret) {
3272 				DPAA2_PMD_ERR("TCP flow configuration failed!");
3273 				return ret;
3274 			}
3275 			break;
3276 		case RTE_FLOW_ITEM_TYPE_SCTP:
3277 			ret = dpaa2_configure_flow_sctp(flow,
3278 					dev, attr, &pattern[i], actions, error,
3279 					&is_keycfg_configured);
3280 			if (ret) {
3281 				DPAA2_PMD_ERR("SCTP flow configuration failed!");
3282 				return ret;
3283 			}
3284 			break;
3285 		case RTE_FLOW_ITEM_TYPE_GRE:
3286 			ret = dpaa2_configure_flow_gre(flow,
3287 					dev, attr, &pattern[i], actions, error,
3288 					&is_keycfg_configured);
3289 			if (ret) {
3290 				DPAA2_PMD_ERR("GRE flow configuration failed!");
3291 				return ret;
3292 			}
3293 			break;
3294 		case RTE_FLOW_ITEM_TYPE_END:
3295 			end_of_list = 1;
3296 			break; /*End of List*/
3297 		default:
3298 			DPAA2_PMD_ERR("Invalid pattern item type");
3299 			ret = -ENOTSUP;
3300 			break;
3301 		}
3302 		i++;
3303 	}
3304 
3305 	/* Let's parse action on matching traffic */
3306 	end_of_list = 0;
3307 	while (!end_of_list) {
3308 		switch (actions[j].type) {
3309 		case RTE_FLOW_ACTION_TYPE_QUEUE:
3310 			dest_queue =
3311 				(const struct rte_flow_action_queue *)(actions[j].conf);
3312 			rxq = priv->rx_vq[dest_queue->index];
3313 			flow->action = RTE_FLOW_ACTION_TYPE_QUEUE;
3314 			memset(&action, 0, sizeof(struct dpni_fs_action_cfg));
3315 			action.flow_id = rxq->flow_id;
3316 			if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
3317 				dpaa2_flow_qos_table_extracts_log(priv);
3318 				if (dpkg_prepare_key_cfg(
3319 					&priv->extract.qos_key_extract.dpkg,
3320 					(uint8_t *)(size_t)priv->extract.qos_extract_param)
3321 					< 0) {
3322 					DPAA2_PMD_ERR(
3323 					"Unable to prepare extract parameters");
3324 					return -1;
3325 				}
3326 
3327 				memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
3328 				qos_cfg.discard_on_miss = true;
3329 				qos_cfg.keep_entries = true;
3330 				qos_cfg.key_cfg_iova =
3331 					(size_t)priv->extract.qos_extract_param;
3332 				ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
3333 						priv->token, &qos_cfg);
3334 				if (ret < 0) {
3335 					DPAA2_PMD_ERR(
3336 					"Distribution cannot be configured.(%d)"
3337 					, ret);
3338 					return -1;
3339 				}
3340 			}
3341 			if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
3342 				dpaa2_flow_fs_table_extracts_log(priv, flow->tc_id);
3343 				if (dpkg_prepare_key_cfg(
3344 				&priv->extract.tc_key_extract[flow->tc_id].dpkg,
3345 				(uint8_t *)(size_t)priv->extract
3346 				.tc_extract_param[flow->tc_id]) < 0) {
3347 					DPAA2_PMD_ERR(
3348 					"Unable to prepare extract parameters");
3349 					return -1;
3350 				}
3351 
3352 				memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
3353 				tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc;
3354 				tc_cfg.dist_mode = DPNI_DIST_MODE_FS;
3355 				tc_cfg.key_cfg_iova =
3356 					(uint64_t)priv->extract.tc_extract_param[flow->tc_id];
3357 				tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;
3358 				tc_cfg.fs_cfg.keep_entries = true;
3359 				ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
3360 							 priv->token,
3361 							 flow->tc_id, &tc_cfg);
3362 				if (ret < 0) {
3363 					DPAA2_PMD_ERR(
3364 					"Distribution cannot be configured.(%d)"
3365 					, ret);
3366 					return -1;
3367 				}
3368 			}
3369 			/* Configure QoS table first */
3370 
3371 			qos_index = flow->tc_id * priv->fs_entries +
3372 				flow->tc_index;
3373 
3374 			if (qos_index >= priv->qos_entries) {
3375 				DPAA2_PMD_ERR("QoS table with %d entries full",
3376 					priv->qos_entries);
3377 				return -1;
3378 			}
3379 			flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
3380 			if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR) {
3381 				if (flow->ipaddr_rule.qos_ipdst_offset >=
3382 					flow->ipaddr_rule.qos_ipsrc_offset) {
3383 					flow->qos_real_key_size =
3384 						flow->ipaddr_rule.qos_ipdst_offset +
3385 						NH_FLD_IPV4_ADDR_SIZE;
3386 				} else {
3387 					flow->qos_real_key_size =
3388 						flow->ipaddr_rule.qos_ipsrc_offset +
3389 						NH_FLD_IPV4_ADDR_SIZE;
3390 				}
3391 			} else if (flow->ipaddr_rule.ipaddr_type ==
3392 				FLOW_IPV6_ADDR) {
3393 				if (flow->ipaddr_rule.qos_ipdst_offset >=
3394 					flow->ipaddr_rule.qos_ipsrc_offset) {
3395 					flow->qos_real_key_size =
3396 						flow->ipaddr_rule.qos_ipdst_offset +
3397 						NH_FLD_IPV6_ADDR_SIZE;
3398 				} else {
3399 					flow->qos_real_key_size =
3400 						flow->ipaddr_rule.qos_ipsrc_offset +
3401 						NH_FLD_IPV6_ADDR_SIZE;
3402 				}
3403 			}
3404 
3405 			flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
3406 
3407 			dpaa2_flow_qos_entry_log("Start add", flow, qos_index);
3408 
3409 			ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
3410 						priv->token, &flow->qos_rule,
3411 						flow->tc_id, qos_index,
3412 						0, 0);
3413 			if (ret < 0) {
3414 				DPAA2_PMD_ERR(
3415 				"Error in adding entry to QoS table(%d)", ret);
3416 				return ret;
3417 			}
3418 
3419 			/* Then Configure FS table */
3420 			if (flow->tc_index >= priv->fs_entries) {
3421 				DPAA2_PMD_ERR("FS table with %d entries full",
3422 					priv->fs_entries);
3423 				return -1;
3424 			}
3425 
3426 			flow->fs_real_key_size =
3427 				priv->extract.tc_key_extract[flow->tc_id]
3428 				.key_info.key_total_size;
3429 
3430 			if (flow->ipaddr_rule.ipaddr_type ==
3431 				FLOW_IPV4_ADDR) {
3432 				if (flow->ipaddr_rule.fs_ipdst_offset >=
3433 					flow->ipaddr_rule.fs_ipsrc_offset) {
3434 					flow->fs_real_key_size =
3435 						flow->ipaddr_rule.fs_ipdst_offset +
3436 						NH_FLD_IPV4_ADDR_SIZE;
3437 				} else {
3438 					flow->fs_real_key_size =
3439 						flow->ipaddr_rule.fs_ipsrc_offset +
3440 						NH_FLD_IPV4_ADDR_SIZE;
3441 				}
3442 			} else if (flow->ipaddr_rule.ipaddr_type ==
3443 				FLOW_IPV6_ADDR) {
3444 				if (flow->ipaddr_rule.fs_ipdst_offset >=
3445 					flow->ipaddr_rule.fs_ipsrc_offset) {
3446 					flow->fs_real_key_size =
3447 						flow->ipaddr_rule.fs_ipdst_offset +
3448 						NH_FLD_IPV6_ADDR_SIZE;
3449 				} else {
3450 					flow->fs_real_key_size =
3451 						flow->ipaddr_rule.fs_ipsrc_offset +
3452 						NH_FLD_IPV6_ADDR_SIZE;
3453 				}
3454 			}
3455 
3456 			flow->fs_rule.key_size = FIXED_ENTRY_SIZE;
3457 
3458 			dpaa2_flow_fs_entry_log("Start add", flow);
3459 
3460 			ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token,
3461 						flow->tc_id, flow->tc_index,
3462 						&flow->fs_rule, &action);
3463 			if (ret < 0) {
3464 				DPAA2_PMD_ERR(
3465 				"Error in adding entry to FS table(%d)", ret);
3466 				return ret;
3467 			}
3468 			memcpy(&flow->action_cfg, &action,
3469 				sizeof(struct dpni_fs_action_cfg));
3470 			break;
3471 		case RTE_FLOW_ACTION_TYPE_RSS:
3472 			rss_conf = (const struct rte_flow_action_rss *)(actions[j].conf);
3473 
3474 			flow->action = RTE_FLOW_ACTION_TYPE_RSS;
3475 			ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types,
3476 					&priv->extract.tc_key_extract[flow->tc_id].dpkg);
3477 			if (ret < 0) {
3478 				DPAA2_PMD_ERR(
3479 				"Unable to set flow distribution. Please check queue config\n");
3480 				return ret;
3481 			}
3482 
3483 			/* Allocate DMA'ble memory to write the rules */
3484 			param = (size_t)rte_malloc(NULL, 256, 64);
3485 			if (!param) {
3486 				DPAA2_PMD_ERR("Memory allocation failure\n");
3487 				return -1;
3488 			}
3489 
3490 			if (dpkg_prepare_key_cfg(
3491 				&priv->extract.tc_key_extract[flow->tc_id].dpkg,
3492 				(uint8_t *)param) < 0) {
3493 				DPAA2_PMD_ERR(
3494 				"Unable to prepare extract parameters");
3495 				rte_free((void *)param);
3496 				return -1;
3497 			}
3498 
3499 			memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
3500 			tc_cfg.dist_size = rss_conf->queue_num;
3501 			tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
3502 			tc_cfg.key_cfg_iova = (size_t)param;
3503 			tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;
3504 
3505 			ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
3506 						 priv->token, flow->tc_id,
3507 						 &tc_cfg);
3508 			if (ret < 0) {
3509 				DPAA2_PMD_ERR(
3510 				"Distribution cannot be configured: %d\n", ret);
3511 				rte_free((void *)param);
3512 				return -1;
3513 			}
3514 
3515 			rte_free((void *)param);
3516 			if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
3517 				if (dpkg_prepare_key_cfg(
3518 					&priv->extract.qos_key_extract.dpkg,
3519 					(uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
3520 					DPAA2_PMD_ERR(
3521 					"Unable to prepare extract parameters");
3522 					return -1;
3523 				}
3524 				memset(&qos_cfg, 0,
3525 					sizeof(struct dpni_qos_tbl_cfg));
3526 				qos_cfg.discard_on_miss = true;
3527 				qos_cfg.keep_entries = true;
3528 				qos_cfg.key_cfg_iova =
3529 					(size_t)priv->extract.qos_extract_param;
3530 				ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
3531 							 priv->token, &qos_cfg);
3532 				if (ret < 0) {
3533 					DPAA2_PMD_ERR(
3534 					"Distribution can't be configured %d\n",
3535 					ret);
3536 					return -1;
3537 				}
3538 			}
3539 
3540 			/* Add Rule into QoS table */
3541 			qos_index = flow->tc_id * priv->fs_entries +
3542 				flow->tc_index;
3543 			if (qos_index >= priv->qos_entries) {
3544 				DPAA2_PMD_ERR("QoS table with %d entries full",
3545 					priv->qos_entries);
3546 				return -1;
3547 			}
3548 
3549 			flow->qos_real_key_size =
3550 			  priv->extract.qos_key_extract.key_info.key_total_size;
3551 			flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
3552 			ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token,
3553 						&flow->qos_rule, flow->tc_id,
3554 						qos_index, 0, 0);
3555 			if (ret < 0) {
3556 				DPAA2_PMD_ERR(
3557 				"Error in adding entry to QoS table(%d)",
3558 				ret);
3559 				return ret;
3560 			}
3561 			break;
3562 		case RTE_FLOW_ACTION_TYPE_END:
3563 			end_of_list = 1;
3564 			break;
3565 		default:
3566 			DPAA2_PMD_ERR("Invalid action type");
3567 			ret = -ENOTSUP;
3568 			break;
3569 		}
3570 		j++;
3571 	}
3572 
3573 	if (!ret) {
3574 		if (is_keycfg_configured &
3575 			(DPAA2_QOS_TABLE_RECONFIGURE |
3576 			DPAA2_FS_TABLE_RECONFIGURE)) {
3577 			ret = dpaa2_flow_entry_update(priv, flow->tc_id);
3578 			if (ret) {
3579 				DPAA2_PMD_ERR("Flow entry update failed.");
3580 
3581 				return -1;
3582 			}
3583 		}
3584 		/* Insert the new rule at the tail of the flow list. */
3585 		if (!curr) {
3586 			LIST_INSERT_HEAD(&priv->flows, flow, next);
3587 		} else {
3588 			while (LIST_NEXT(curr, next))
3589 				curr = LIST_NEXT(curr, next);
3590 			LIST_INSERT_AFTER(curr, flow, next);
3591 		}
3592 	}
3593 	return ret;
3594 }
3595 
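/* Check flow attributes against the DPNI capabilities: the group must
 * be a valid traffic class, the priority must fit in the FS table, and
 * only ingress rules are supported.
 */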
3596 static inline int
3597 dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr,
3598 		      const struct rte_flow_attr *attr)
3599 {
3600 	int ret = 0;
3601 
3602 	if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) {
3603 		DPAA2_PMD_ERR("Priority group is out of range\n");
3604 		ret = -ENOTSUP;
3605 	}
3606 	if (unlikely(attr->priority >= dpni_attr->fs_entries)) {
3607 		DPAA2_PMD_ERR("Priority within the group is out of range\n");
3608 		ret = -ENOTSUP;
3609 	}
3610 	if (unlikely(attr->egress)) {
3611 		DPAA2_PMD_ERR(
3612 			"Flow configuration is not supported on egress side\n");
3613 		ret = -ENOTSUP;
3614 	}
3615 	if (unlikely(!attr->ingress)) {
3616 		DPAA2_PMD_ERR("Ingress flag must be configured\n");
3617 		ret = -EINVAL;
3618 	}
3619 	return ret;
3620 }
3621 
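/* Ensure every pattern item is of a supported type and carries a spec. */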
3622 static inline int
3623 dpaa2_dev_verify_patterns(const struct rte_flow_item pattern[])
3624 {
3625 	unsigned int i, j, is_found = 0;
3626 	int ret = 0;
3627 
3628 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3629 		for (i = 0; i < RTE_DIM(dpaa2_supported_pattern_type); i++) {
3630 			if (dpaa2_supported_pattern_type[i]
3631 					== pattern[j].type) {
3632 				is_found = 1;
3633 				break;
3634 			}
3635 		}
3636 		if (!is_found) {
3637 			ret = -ENOTSUP;
3638 			break;
3639 		}
3640 	}
3641 	/* Let's verify other combinations of the given pattern rules */
3642 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3643 		if (!pattern[j].spec) {
3644 			ret = -EINVAL;
3645 			break;
3646 		}
3647 	}
3648 
3649 	return ret;
3650 }
3651 
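/* Ensure every action is of a supported type and, except for DROP,
 * carries a configuration.
 */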
3652 static inline int
3653 dpaa2_dev_verify_actions(const struct rte_flow_action actions[])
3654 {
3655 	unsigned int i, j, is_found = 0;
3656 	int ret = 0;
3657 
3658 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3659 		for (i = 0; i < RTE_DIM(dpaa2_supported_action_type); i++) {
3660 			if (dpaa2_supported_action_type[i] == actions[j].type) {
3661 				is_found = 1;
3662 				break;
3663 			}
3664 		}
3665 		if (!is_found) {
3666 			ret = -ENOTSUP;
3667 			break;
3668 		}
3669 	}
3670 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3671 		if (actions[j].type != RTE_FLOW_ACTION_TYPE_DROP &&
3672 				!actions[j].conf)
3673 			ret = -EINVAL;
3674 	}
3675 	return ret;
3676 }
3677 
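/* rte_flow validate callback: fetch the DPNI attributes and verify the
 * flow attributes, pattern list and action list against them.
 */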
3678 static
3679 int dpaa2_flow_validate(struct rte_eth_dev *dev,
3680 			const struct rte_flow_attr *flow_attr,
3681 			const struct rte_flow_item pattern[],
3682 			const struct rte_flow_action actions[],
3683 			struct rte_flow_error *error)
3684 {
3685 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3686 	struct dpni_attr dpni_attr;
3687 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3688 	uint16_t token = priv->token;
3689 	int ret = 0;
3690 
3691 	memset(&dpni_attr, 0, sizeof(struct dpni_attr));
3692 	ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr);
3693 	if (ret < 0) {
3694 		DPAA2_PMD_ERR(
3695 			"Failure to get dpni@%p attribute, err code %d\n",
3696 			dpni, ret);
3697 		rte_flow_error_set(error, EPERM,
3698 			   RTE_FLOW_ERROR_TYPE_ATTR,
3699 			   flow_attr, "invalid");
3700 		return ret;
3701 	}
3702 
3703 	/* Verify input attributes */
3704 	ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr);
3705 	if (ret < 0) {
3706 		DPAA2_PMD_ERR(
3707 			"Invalid attributes are given\n");
3708 		rte_flow_error_set(error, EPERM,
3709 			   RTE_FLOW_ERROR_TYPE_ATTR,
3710 			   flow_attr, "invalid");
3711 		goto not_valid_params;
3712 	}
3713 	/* Verify input pattern list */
3714 	ret = dpaa2_dev_verify_patterns(pattern);
3715 	if (ret < 0) {
3716 		DPAA2_PMD_ERR(
3717 			"Invalid pattern list is given\n");
3718 		rte_flow_error_set(error, EPERM,
3719 			   RTE_FLOW_ERROR_TYPE_ITEM,
3720 			   pattern, "invalid");
3721 		goto not_valid_params;
3722 	}
3723 	/* Verify input action list */
3724 	ret = dpaa2_dev_verify_actions(actions);
3725 	if (ret < 0) {
3726 		DPAA2_PMD_ERR(
3727 			"Invalid action list is given\n");
3728 		rte_flow_error_set(error, EPERM,
3729 			   RTE_FLOW_ERROR_TYPE_ACTION,
3730 			   actions, "invalid");
3731 		goto not_valid_params;
3732 	}
3733 not_valid_params:
3734 	return ret;
3735 }
3736 
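/* rte_flow create callback: allocate the flow object plus DMA'ble
 * key/mask buffers for the QoS and FS rules, then program the rule
 * through dpaa2_generic_flow_set(). NULL is returned on failure.
 */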
3737 static
3738 struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev,
3739 				   const struct rte_flow_attr *attr,
3740 				   const struct rte_flow_item pattern[],
3741 				   const struct rte_flow_action actions[],
3742 				   struct rte_flow_error *error)
3743 {
3744 	struct rte_flow *flow = NULL;
3745 	size_t key_iova = 0, mask_iova = 0;
3746 	int ret;
3747 
3748 	dpaa2_flow_control_log =
3749 		getenv("DPAA2_FLOW_CONTROL_LOG");
3750 
3751 	flow = rte_zmalloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE);
3752 	if (!flow) {
3753 		DPAA2_PMD_ERR("Failure to allocate memory for flow");
3754 		goto mem_failure;
3755 	}
3756 	/* Allocate DMA'ble memory to write the rules */
3757 	key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3758 	if (!key_iova) {
3759 		DPAA2_PMD_ERR(
3760 			"Memory allocation failure for rule configuration\n");
3761 		goto mem_failure;
3762 	}
3763 	mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3764 	if (!mask_iova) {
3765 		DPAA2_PMD_ERR(
3766 			"Memory allocation failure for rule configuration\n");
3767 		goto mem_failure;
3768 	}
3769 
3770 	flow->qos_rule.key_iova = key_iova;
3771 	flow->qos_rule.mask_iova = mask_iova;
3772 
3773 	/* Allocate DMA'ble memory to write the FS rule key and mask */
3774 	key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3775 	if (!key_iova) {
3776 		DPAA2_PMD_ERR(
3777 			"Memory allocation failure for rule configuration\n");
3778 		goto mem_failure;
3779 	}
3780 	mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3781 	if (!mask_iova) {
3782 		DPAA2_PMD_ERR(
3783 			"Memory allocation failure for rule configuration\n");
3784 		goto mem_failure;
3785 	}
3786 
3787 	flow->fs_rule.key_iova = key_iova;
3788 	flow->fs_rule.mask_iova = mask_iova;
3789 
3790 	flow->ipaddr_rule.ipaddr_type = FLOW_NONE_IPADDR;
3791 	flow->ipaddr_rule.qos_ipsrc_offset =
3792 		IP_ADDRESS_OFFSET_INVALID;
3793 	flow->ipaddr_rule.qos_ipdst_offset =
3794 		IP_ADDRESS_OFFSET_INVALID;
3795 	flow->ipaddr_rule.fs_ipsrc_offset =
3796 		IP_ADDRESS_OFFSET_INVALID;
3797 	flow->ipaddr_rule.fs_ipdst_offset =
3798 		IP_ADDRESS_OFFSET_INVALID;
3799 
3800 	switch (dpaa2_filter_type) {
3801 	case RTE_ETH_FILTER_GENERIC:
3802 		ret = dpaa2_generic_flow_set(flow, dev, attr, pattern,
3803 					     actions, error);
3804 		if (ret < 0) {
3805 			if (error->type > RTE_FLOW_ERROR_TYPE_ACTION)
3806 				rte_flow_error_set(error, EPERM,
3807 						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3808 						attr, "unknown");
3809 			DPAA2_PMD_ERR(
3810 			"Failure to create flow, return code (%d)", ret);
3811 			goto creation_error;
3812 		}
3813 		break;
3814 	default:
3815 		DPAA2_PMD_ERR("Filter type (%d) not supported",
3816 		dpaa2_filter_type);
3817 		break;
3818 	}
3819 
3820 	return flow;
3821 mem_failure:
3822 	rte_flow_error_set(error, EPERM,
3823 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3824 			   NULL, "memory alloc");
3825 creation_error:
	/* Also free QoS rule memory already attached to the flow but not
	 * tracked by the key_iova/mask_iova locals below.
	 */
	if (flow && flow->qos_rule.key_iova != key_iova)
		rte_free((void *)(size_t)flow->qos_rule.key_iova);
	if (flow && flow->qos_rule.mask_iova != mask_iova)
		rte_free((void *)(size_t)flow->qos_rule.mask_iova);
3826 	rte_free(flow);
3827 	rte_free((void *)key_iova);
3828 	rte_free((void *)mask_iova);
3829 
3830 	return NULL;
3831 }
3832 
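/*
 * Application-side sketch (the port id and error handling are assumptions):
 * creating and destroying a rule through the generic rte_flow API lands in
 * dpaa2_flow_create()/dpaa2_flow_destroy() above and below. The
 * DPAA2_FLOW_CONTROL_LOG environment variable is read at creation time to
 * control extra flow logging in the driver.
 */
static inline int __rte_unused
dpaa2_flow_create_usage_sketch(uint16_t port_id,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[])
{
	struct rte_flow_error error;
	struct rte_flow *flow;

	flow = rte_flow_create(port_id, attr, pattern, actions, &error);
	if (!flow)
		return -EINVAL;

	return rte_flow_destroy(port_id, flow, &error);
}
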
3833 static
3834 int dpaa2_flow_destroy(struct rte_eth_dev *dev,
3835 		       struct rte_flow *flow,
3836 		       struct rte_flow_error *error)
3837 {
3838 	int ret = 0;
3839 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3840 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3841 
3842 	switch (flow->action) {
3843 	case RTE_FLOW_ACTION_TYPE_QUEUE:
3844 		/* Remove entry from QoS table first */
3845 		ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
3846 					   &flow->qos_rule);
3847 		if (ret < 0) {
3848 			DPAA2_PMD_ERR(
3849 				"Error in adding entry to QoS table(%d)", ret);
3850 			goto error;
3851 		}
3852 
3853 		/* Then remove entry from FS table */
3854 		ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token,
3855 					   flow->tc_id, &flow->fs_rule);
3856 		if (ret < 0) {
3857 			DPAA2_PMD_ERR(
3858 				"Error in entry addition in FS table(%d)", ret);
3859 			goto error;
3860 		}
3861 		break;
3862 	case RTE_FLOW_ACTION_TYPE_RSS:
3863 		ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
3864 					   &flow->qos_rule);
3865 		if (ret < 0) {
3866 			DPAA2_PMD_ERR(
3867 			"Error in entry addition in QoS table(%d)", ret);
3868 			goto error;
3869 		}
3870 		break;
3871 	default:
3872 		DPAA2_PMD_ERR(
3873 		"Action type (%d) is not supported", flow->action);
3874 		ret = -ENOTSUP;
3875 		break;
3876 	}
3877 
3878 	LIST_REMOVE(flow, next);
3879 	rte_free((void *)(size_t)flow->qos_rule.key_iova);
3880 	rte_free((void *)(size_t)flow->qos_rule.mask_iova);
3881 	rte_free((void *)(size_t)flow->fs_rule.key_iova);
3882 	rte_free((void *)(size_t)flow->fs_rule.mask_iova);
3883 	/* Now free the flow */
3884 	rte_free(flow);
3885 
3886 error:
3887 	if (ret)
3888 		rte_flow_error_set(error, EPERM,
3889 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3890 				   NULL, "unknown");
3891 	return ret;
3892 }
3893 
3894 /**
3895  * Destroy user-configured flow rules.
3896  *
3897  * This function skips internal flow rules.
3898  *
3899  * @see rte_flow_flush()
3900  * @see rte_flow_ops
3901  */
3902 static int
3903 dpaa2_flow_flush(struct rte_eth_dev *dev,
3904 		struct rte_flow_error *error)
3905 {
3906 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3907 	struct rte_flow *flow = LIST_FIRST(&priv->flows);
3908 
3909 	while (flow) {
3910 		struct rte_flow *next = LIST_NEXT(flow, next);
3911 
3912 		dpaa2_flow_destroy(dev, flow, error);
3913 		flow = next;
3914 	}
3915 	return 0;
3916 }
3917 
/* Flow query is not yet supported; this stub always reports success. */
3918 static int
3919 dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused,
3920 		struct rte_flow *flow __rte_unused,
3921 		const struct rte_flow_action *actions __rte_unused,
3922 		void *data __rte_unused,
3923 		struct rte_flow_error *error __rte_unused)
3924 {
3925 	return 0;
3926 }
3927 
3928 /**
3929  * Clean up all flow rules.
3930  *
3931  * Unlike dpaa2_flow_flush(), this function takes care of all remaining flow
3932  * rules regardless of whether they are internal or user-configured.
3933  *
3934  * @param dev
3935  *   Pointer to the Ethernet device structure.
3936  */
3937 void
3938 dpaa2_flow_clean(struct rte_eth_dev *dev)
3939 {
3940 	struct rte_flow *flow;
3941 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3942 
3943 	while ((flow = LIST_FIRST(&priv->flows)))
3944 		dpaa2_flow_destroy(dev, flow, NULL);
3945 }
3946 
3947 const struct rte_flow_ops dpaa2_flow_ops = {
3948 	.create	= dpaa2_flow_create,
3949 	.validate = dpaa2_flow_validate,
3950 	.destroy = dpaa2_flow_destroy,
3951 	.flush	= dpaa2_flow_flush,
3952 	.query	= dpaa2_flow_query,
3953 };
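
/*
 * Application-side sketch (the port id is an assumption): removing every
 * user-created rule on a port dispatches to dpaa2_flow_flush() through the
 * ops table above.
 */
static inline int __rte_unused
dpaa2_flow_flush_usage_sketch(uint16_t port_id)
{
	struct rte_flow_error error;

	return rte_flow_flush(port_id, &error);
}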
3954