xref: /dpdk/drivers/net/dpaa2/dpaa2_flow.c (revision 40f840253100e4105f16e71e9cf8fd33bb51a6a0)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2020 NXP
3  */
4 
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 
13 #include <rte_ethdev.h>
14 #include <rte_log.h>
15 #include <rte_malloc.h>
16 #include <rte_flow_driver.h>
17 #include <rte_tailq.h>
18 
19 #include <fsl_dpni.h>
20 #include <fsl_dpkg.h>
21 
22 #include <dpaa2_ethdev.h>
23 #include <dpaa2_pmd_logs.h>
24 
25 /* Workaround to discriminate UDP/TCP/SCTP
26  * by the next-protocol field of L3:
27  * MC/WRIOP are not able to identify
28  * the L4 protocol from the L4 ports.
29  */
30 int mc_l4_port_identification;
31 
32 static char *dpaa2_flow_control_log;
33 static int dpaa2_flow_miss_flow_id =
34 	DPNI_FS_MISS_DROP;
35 
36 #define FIXED_ENTRY_SIZE 54
37 
38 enum flow_rule_ipaddr_type {
39 	FLOW_NONE_IPADDR,
40 	FLOW_IPV4_ADDR,
41 	FLOW_IPV6_ADDR
42 };
43 
44 struct flow_rule_ipaddr {
45 	enum flow_rule_ipaddr_type ipaddr_type;
46 	int qos_ipsrc_offset;
47 	int qos_ipdst_offset;
48 	int fs_ipsrc_offset;
49 	int fs_ipdst_offset;
50 };
51 
52 struct rte_flow {
53 	LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
54 	struct dpni_rule_cfg qos_rule;
55 	struct dpni_rule_cfg fs_rule;
56 	uint8_t qos_real_key_size;
57 	uint8_t fs_real_key_size;
58 	uint8_t tc_id; /**< Traffic Class ID. */
59 	uint8_t tc_index; /**< Index within this Traffic Class. */
60 	enum rte_flow_action_type action;
61 	/* Special for IP address to specify the offset
62 	 * in key/mask.
63 	 */
64 	struct flow_rule_ipaddr ipaddr_rule;
65 	struct dpni_fs_action_cfg action_cfg;
66 };
67 
68 static const
69 enum rte_flow_item_type dpaa2_supported_pattern_type[] = {
70 	RTE_FLOW_ITEM_TYPE_END,
71 	RTE_FLOW_ITEM_TYPE_ETH,
72 	RTE_FLOW_ITEM_TYPE_VLAN,
73 	RTE_FLOW_ITEM_TYPE_IPV4,
74 	RTE_FLOW_ITEM_TYPE_IPV6,
75 	RTE_FLOW_ITEM_TYPE_ICMP,
76 	RTE_FLOW_ITEM_TYPE_UDP,
77 	RTE_FLOW_ITEM_TYPE_TCP,
78 	RTE_FLOW_ITEM_TYPE_SCTP,
79 	RTE_FLOW_ITEM_TYPE_GRE,
80 };
81 
82 static const
83 enum rte_flow_action_type dpaa2_supported_action_type[] = {
84 	RTE_FLOW_ACTION_TYPE_END,
85 	RTE_FLOW_ACTION_TYPE_QUEUE,
86 	RTE_FLOW_ACTION_TYPE_RSS
87 };
88 
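/* Illustrative sketch of a flow covered by the pattern/action types above,
 * as it could be issued from testpmd. The port id, queue index and UDP
 * port below are assumptions made only for the example:
 *
 *   flow create 0 group 0 priority 0 ingress
 *        pattern eth / ipv4 / udp dst is 53 / end
 *        actions queue index 2 / end
 *
 * attr->group selects the traffic class (tc_id) and attr->priority the
 * index within that class (tc_index), as used by the handlers below.
 */
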
89 /* Pseudo item type (max of enum rte_flow_item_type + 1), covering both IPv4 and IPv6 */
90 #define DPAA2_FLOW_ITEM_TYPE_GENERIC_IP (RTE_FLOW_ITEM_TYPE_META + 1)
91 
92 enum rte_filter_type dpaa2_filter_type = RTE_ETH_FILTER_NONE;
93 
94 #ifndef __cplusplus
95 static const struct rte_flow_item_eth dpaa2_flow_item_eth_mask = {
96 	.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
97 	.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
98 	.type = RTE_BE16(0xffff),
99 };
100 
101 static const struct rte_flow_item_vlan dpaa2_flow_item_vlan_mask = {
102 	.tci = RTE_BE16(0xffff),
103 };
104 
105 static const struct rte_flow_item_ipv4 dpaa2_flow_item_ipv4_mask = {
106 	.hdr.src_addr = RTE_BE32(0xffffffff),
107 	.hdr.dst_addr = RTE_BE32(0xffffffff),
108 	.hdr.next_proto_id = 0xff,
109 };
110 
111 static const struct rte_flow_item_ipv6 dpaa2_flow_item_ipv6_mask = {
112 	.hdr = {
113 		.src_addr =
114 			"\xff\xff\xff\xff\xff\xff\xff\xff"
115 			"\xff\xff\xff\xff\xff\xff\xff\xff",
116 		.dst_addr =
117 			"\xff\xff\xff\xff\xff\xff\xff\xff"
118 			"\xff\xff\xff\xff\xff\xff\xff\xff",
119 		.proto = 0xff
120 	},
121 };
122 
123 static const struct rte_flow_item_icmp dpaa2_flow_item_icmp_mask = {
124 	.hdr.icmp_type = 0xff,
125 	.hdr.icmp_code = 0xff,
126 };
127 
128 static const struct rte_flow_item_udp dpaa2_flow_item_udp_mask = {
129 	.hdr = {
130 		.src_port = RTE_BE16(0xffff),
131 		.dst_port = RTE_BE16(0xffff),
132 	},
133 };
134 
135 static const struct rte_flow_item_tcp dpaa2_flow_item_tcp_mask = {
136 	.hdr = {
137 		.src_port = RTE_BE16(0xffff),
138 		.dst_port = RTE_BE16(0xffff),
139 	},
140 };
141 
142 static const struct rte_flow_item_sctp dpaa2_flow_item_sctp_mask = {
143 	.hdr = {
144 		.src_port = RTE_BE16(0xffff),
145 		.dst_port = RTE_BE16(0xffff),
146 	},
147 };
148 
149 static const struct rte_flow_item_gre dpaa2_flow_item_gre_mask = {
150 	.protocol = RTE_BE16(0xffff),
151 };
152 
153 #endif
154 
155 static inline void dpaa2_prot_field_string(
156 	enum net_prot prot, uint32_t field,
157 	char *string)
158 {
159 	if (!dpaa2_flow_control_log)
160 		return;
161 
162 	if (prot == NET_PROT_ETH) {
163 		strcpy(string, "eth");
164 		if (field == NH_FLD_ETH_DA)
165 			strcat(string, ".dst");
166 		else if (field == NH_FLD_ETH_SA)
167 			strcat(string, ".src");
168 		else if (field == NH_FLD_ETH_TYPE)
169 			strcat(string, ".type");
170 		else
171 			strcat(string, ".unknown field");
172 	} else if (prot == NET_PROT_VLAN) {
173 		strcpy(string, "vlan");
174 		if (field == NH_FLD_VLAN_TCI)
175 			strcat(string, ".tci");
176 		else
177 			strcat(string, ".unknown field");
178 	} else if (prot == NET_PROT_IP) {
179 		strcpy(string, "ip");
180 		if (field == NH_FLD_IP_SRC)
181 			strcat(string, ".src");
182 		else if (field == NH_FLD_IP_DST)
183 			strcat(string, ".dst");
184 		else if (field == NH_FLD_IP_PROTO)
185 			strcat(string, ".proto");
186 		else
187 			strcat(string, ".unknown field");
188 	} else if (prot == NET_PROT_TCP) {
189 		strcpy(string, "tcp");
190 		if (field == NH_FLD_TCP_PORT_SRC)
191 			strcat(string, ".src");
192 		else if (field == NH_FLD_TCP_PORT_DST)
193 			strcat(string, ".dst");
194 		else
195 			strcat(string, ".unknown field");
196 	} else if (prot == NET_PROT_UDP) {
197 		strcpy(string, "udp");
198 		if (field == NH_FLD_UDP_PORT_SRC)
199 			strcat(string, ".src");
200 		else if (field == NH_FLD_UDP_PORT_DST)
201 			strcat(string, ".dst");
202 		else
203 			strcat(string, ".unknown field");
204 	} else if (prot == NET_PROT_ICMP) {
205 		strcpy(string, "icmp");
206 		if (field == NH_FLD_ICMP_TYPE)
207 			strcat(string, ".type");
208 		else if (field == NH_FLD_ICMP_CODE)
209 			strcat(string, ".code");
210 		else
211 			strcat(string, ".unknown field");
212 	} else if (prot == NET_PROT_SCTP) {
213 		strcpy(string, "sctp");
214 		if (field == NH_FLD_SCTP_PORT_SRC)
215 			strcat(string, ".src");
216 		else if (field == NH_FLD_SCTP_PORT_DST)
217 			strcat(string, ".dst");
218 		else
219 			strcat(string, ".unknown field");
220 	} else if (prot == NET_PROT_GRE) {
221 		strcpy(string, "gre");
222 		if (field == NH_FLD_GRE_TYPE)
223 			strcat(string, ".type");
224 		else
225 			strcat(string, ".unknown field");
226 	} else {
227 		strcpy(string, "unknown protocol");
228 	}
229 }
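
/* Usage sketch: the helper above only fills the string when flow-control
 * logging is enabled. The buffer name and field choice are assumptions
 * for the example:
 *
 *   char name[32];
 *
 *   dpaa2_prot_field_string(NET_PROT_UDP, NH_FLD_UDP_PORT_DST, name);
 *
 * after which name holds "udp.dst".
 */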
230 
231 static inline void dpaa2_flow_qos_table_extracts_log(
232 	const struct dpaa2_dev_priv *priv)
233 {
234 	int idx;
235 	char string[32];
236 
237 	if (!dpaa2_flow_control_log)
238 		return;
239 
240 	printf("Setup QoS table: number of extracts: %d\r\n",
241 			priv->extract.qos_key_extract.dpkg.num_extracts);
242 	for (idx = 0; idx < priv->extract.qos_key_extract.dpkg.num_extracts;
243 		idx++) {
244 		dpaa2_prot_field_string(priv->extract.qos_key_extract.dpkg
245 			.extracts[idx].extract.from_hdr.prot,
246 			priv->extract.qos_key_extract.dpkg.extracts[idx]
247 			.extract.from_hdr.field,
248 			string);
249 		printf("%s", string);
250 		if ((idx + 1) < priv->extract.qos_key_extract.dpkg.num_extracts)
251 			printf(" / ");
252 	}
253 	printf("\r\n");
254 }
255 
256 static inline void dpaa2_flow_fs_table_extracts_log(
257 	const struct dpaa2_dev_priv *priv, int tc_id)
258 {
259 	int idx;
260 	char string[32];
261 
262 	if (!dpaa2_flow_control_log)
263 		return;
264 
265 	printf("Setup FS table: number of extracts of TC[%d]: %d\r\n",
266 			tc_id, priv->extract.tc_key_extract[tc_id]
267 			.dpkg.num_extracts);
268 	for (idx = 0; idx < priv->extract.tc_key_extract[tc_id]
269 		.dpkg.num_extracts; idx++) {
270 		dpaa2_prot_field_string(priv->extract.tc_key_extract[tc_id]
271 			.dpkg.extracts[idx].extract.from_hdr.prot,
272 			priv->extract.tc_key_extract[tc_id].dpkg.extracts[idx]
273 			.extract.from_hdr.field,
274 			string);
275 		printf("%s", string);
276 		if ((idx + 1) < priv->extract.tc_key_extract[tc_id]
277 			.dpkg.num_extracts)
278 			printf(" / ");
279 	}
280 	printf("\r\n");
281 }
282 
283 static inline void dpaa2_flow_qos_entry_log(
284 	const char *log_info, const struct rte_flow *flow, int qos_index)
285 {
286 	int idx;
287 	uint8_t *key, *mask;
288 
289 	if (!dpaa2_flow_control_log)
290 		return;
291 
292 	printf("\r\n%s QoS entry[%d] for TC[%d], extracts size is %d\r\n",
293 		log_info, qos_index, flow->tc_id, flow->qos_real_key_size);
294 
295 	key = (uint8_t *)(size_t)flow->qos_rule.key_iova;
296 	mask = (uint8_t *)(size_t)flow->qos_rule.mask_iova;
297 
298 	printf("key:\r\n");
299 	for (idx = 0; idx < flow->qos_real_key_size; idx++)
300 		printf("%02x ", key[idx]);
301 
302 	printf("\r\nmask:\r\n");
303 	for (idx = 0; idx < flow->qos_real_key_size; idx++)
304 		printf("%02x ", mask[idx]);
305 
306 	printf("\r\n%s QoS ipsrc: %d, ipdst: %d\r\n", log_info,
307 		flow->ipaddr_rule.qos_ipsrc_offset,
308 		flow->ipaddr_rule.qos_ipdst_offset);
309 }
310 
311 static inline void dpaa2_flow_fs_entry_log(
312 	const char *log_info, const struct rte_flow *flow)
313 {
314 	int idx;
315 	uint8_t *key, *mask;
316 
317 	if (!dpaa2_flow_control_log)
318 		return;
319 
320 	printf("\r\n%s FS/TC entry[%d] of TC[%d], extracts size is %d\r\n",
321 		log_info, flow->tc_index, flow->tc_id, flow->fs_real_key_size);
322 
323 	key = (uint8_t *)(size_t)flow->fs_rule.key_iova;
324 	mask = (uint8_t *)(size_t)flow->fs_rule.mask_iova;
325 
326 	printf("key:\r\n");
327 	for (idx = 0; idx < flow->fs_real_key_size; idx++)
328 		printf("%02x ", key[idx]);
329 
330 	printf("\r\nmask:\r\n");
331 	for (idx = 0; idx < flow->fs_real_key_size; idx++)
332 		printf("%02x ", mask[idx]);
333 
334 	printf("\r\n%s FS ipsrc: %d, ipdst: %d\r\n", log_info,
335 		flow->ipaddr_rule.fs_ipsrc_offset,
336 		flow->ipaddr_rule.fs_ipdst_offset);
337 }
338 
339 static inline void dpaa2_flow_extract_key_set(
340 	struct dpaa2_key_info *key_info, int index, uint8_t size)
341 {
342 	key_info->key_size[index] = size;
343 	if (index > 0) {
344 		key_info->key_offset[index] =
345 			key_info->key_offset[index - 1] +
346 			key_info->key_size[index - 1];
347 	} else {
348 		key_info->key_offset[index] = 0;
349 	}
350 	key_info->key_total_size += size;
351 }
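
/* Worked example (hypothetical extract sizes, for illustration only):
 * calling dpaa2_flow_extract_key_set() for indexes 0..2 with sizes
 * 6, 6 and 2 yields
 *
 *   key_offset[] = { 0, 6, 12 }
 *   key_size[]   = { 6, 6, 2 }
 *   key_total_size += 14
 *
 * i.e. extracts are laid out back to back in the key/mask buffers.
 */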
352 
353 static int dpaa2_flow_extract_add(
354 	struct dpaa2_key_extract *key_extract,
355 	enum net_prot prot,
356 	uint32_t field, uint8_t field_size)
357 {
358 	int index, ip_src = -1, ip_dst = -1;
359 	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
360 	struct dpaa2_key_info *key_info = &key_extract->key_info;
361 
362 	if (dpkg->num_extracts >=
363 		DPKG_MAX_NUM_OF_EXTRACTS) {
364 		DPAA2_PMD_WARN("Number of extracts overflows");
365 		return -1;
366 	}
367 	/* Before the reorder, the IP SRC and IP DST extracts, if present,
368 	 * are already the last extract(s).
369 	 */
370 	for (index = 0; index < dpkg->num_extracts; index++) {
371 		if (dpkg->extracts[index].extract.from_hdr.prot ==
372 			NET_PROT_IP) {
373 			if (dpkg->extracts[index].extract.from_hdr.field ==
374 				NH_FLD_IP_SRC) {
375 				ip_src = index;
376 			}
377 			if (dpkg->extracts[index].extract.from_hdr.field ==
378 				NH_FLD_IP_DST) {
379 				ip_dst = index;
380 			}
381 		}
382 	}
383 
384 	if (ip_src >= 0)
385 		RTE_ASSERT((ip_src + 2) >= dpkg->num_extracts);
386 
387 	if (ip_dst >= 0)
388 		RTE_ASSERT((ip_dst + 2) >= dpkg->num_extracts);
389 
390 	if (prot == NET_PROT_IP &&
391 		(field == NH_FLD_IP_SRC ||
392 		field == NH_FLD_IP_DST)) {
393 		index = dpkg->num_extracts;
394 	} else {
395 		if (ip_src >= 0 && ip_dst >= 0)
396 			index = dpkg->num_extracts - 2;
397 		else if (ip_src >= 0 || ip_dst >= 0)
398 			index = dpkg->num_extracts - 1;
399 		else
400 			index = dpkg->num_extracts;
401 	}
402 
403 	dpkg->extracts[index].type = DPKG_EXTRACT_FROM_HDR;
404 	dpkg->extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
405 	dpkg->extracts[index].extract.from_hdr.prot = prot;
406 	dpkg->extracts[index].extract.from_hdr.field = field;
407 	if (prot == NET_PROT_IP &&
408 		(field == NH_FLD_IP_SRC ||
409 		field == NH_FLD_IP_DST)) {
410 		dpaa2_flow_extract_key_set(key_info, index, 0);
411 	} else {
412 		dpaa2_flow_extract_key_set(key_info, index, field_size);
413 	}
414 
415 	if (prot == NET_PROT_IP) {
416 		if (field == NH_FLD_IP_SRC) {
417 			if (key_info->ipv4_dst_offset >= 0) {
418 				key_info->ipv4_src_offset =
419 					key_info->ipv4_dst_offset +
420 					NH_FLD_IPV4_ADDR_SIZE;
421 			} else {
422 				key_info->ipv4_src_offset =
423 					key_info->key_offset[index - 1] +
424 						key_info->key_size[index - 1];
425 			}
426 			if (key_info->ipv6_dst_offset >= 0) {
427 				key_info->ipv6_src_offset =
428 					key_info->ipv6_dst_offset +
429 					NH_FLD_IPV6_ADDR_SIZE;
430 			} else {
431 				key_info->ipv6_src_offset =
432 					key_info->key_offset[index - 1] +
433 						key_info->key_size[index - 1];
434 			}
435 		} else if (field == NH_FLD_IP_DST) {
436 			if (key_info->ipv4_src_offset >= 0) {
437 				key_info->ipv4_dst_offset =
438 					key_info->ipv4_src_offset +
439 					NH_FLD_IPV4_ADDR_SIZE;
440 			} else {
441 				key_info->ipv4_dst_offset =
442 					key_info->key_offset[index - 1] +
443 						key_info->key_size[index - 1];
444 			}
445 			if (key_info->ipv6_src_offset >= 0) {
446 				key_info->ipv6_dst_offset =
447 					key_info->ipv6_src_offset +
448 					NH_FLD_IPV6_ADDR_SIZE;
449 			} else {
450 				key_info->ipv6_dst_offset =
451 					key_info->key_offset[index - 1] +
452 						key_info->key_size[index - 1];
453 			}
454 		}
455 	}
456 
457 	if (index == dpkg->num_extracts) {
458 		dpkg->num_extracts++;
459 		return 0;
460 	}
461 
462 	if (ip_src >= 0) {
463 		ip_src++;
464 		dpkg->extracts[ip_src].type =
465 			DPKG_EXTRACT_FROM_HDR;
466 		dpkg->extracts[ip_src].extract.from_hdr.type =
467 			DPKG_FULL_FIELD;
468 		dpkg->extracts[ip_src].extract.from_hdr.prot =
469 			NET_PROT_IP;
470 		dpkg->extracts[ip_src].extract.from_hdr.field =
471 			NH_FLD_IP_SRC;
472 		dpaa2_flow_extract_key_set(key_info, ip_src, 0);
473 		key_info->ipv4_src_offset += field_size;
474 		key_info->ipv6_src_offset += field_size;
475 	}
476 	if (ip_dst >= 0) {
477 		ip_dst++;
478 		dpkg->extracts[ip_dst].type =
479 			DPKG_EXTRACT_FROM_HDR;
480 		dpkg->extracts[ip_dst].extract.from_hdr.type =
481 			DPKG_FULL_FIELD;
482 		dpkg->extracts[ip_dst].extract.from_hdr.prot =
483 			NET_PROT_IP;
484 		dpkg->extracts[ip_dst].extract.from_hdr.field =
485 			NH_FLD_IP_DST;
486 		dpaa2_flow_extract_key_set(key_info, ip_dst, 0);
487 		key_info->ipv4_dst_offset += field_size;
488 		key_info->ipv6_dst_offset += field_size;
489 	}
490 
491 	dpkg->num_extracts++;
492 
493 	return 0;
494 }
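
/* Reorder sketch, illustrating the invariant kept by the function above:
 * IP SRC/DST extracts always stay at the tail. Assuming the current
 * extract sequence is
 *
 *   [ ETH_SA, IP_SRC, IP_DST ]
 *
 * adding ETH_TYPE places it in front of the IP address extracts:
 *
 *   [ ETH_SA, ETH_TYPE, IP_SRC, IP_DST ]
 *
 * and the cached ipv4/ipv6 src/dst offsets are shifted by the size of
 * the newly added field.
 */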
495 
496 /* Protocol discrimination.
497  * Discriminate IPv4/IPv6/VLAN by Eth type.
498  * Discriminate UDP/TCP/ICMP by next proto of IP.
499  */
500 static inline int
501 dpaa2_flow_proto_discrimination_extract(
502 	struct dpaa2_key_extract *key_extract,
503 	enum rte_flow_item_type type)
504 {
505 	if (type == RTE_FLOW_ITEM_TYPE_ETH) {
506 		return dpaa2_flow_extract_add(
507 				key_extract, NET_PROT_ETH,
508 				NH_FLD_ETH_TYPE,
509 				sizeof(rte_be16_t));
510 	} else if (type == (enum rte_flow_item_type)
511 		DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
512 		return dpaa2_flow_extract_add(
513 				key_extract, NET_PROT_IP,
514 				NH_FLD_IP_PROTO,
515 				NH_FLD_IP_PROTO_SIZE);
516 	}
517 
518 	return -1;
519 }
520 
521 static inline int dpaa2_flow_extract_search(
522 	struct dpkg_profile_cfg *dpkg,
523 	enum net_prot prot, uint32_t field)
524 {
525 	int i;
526 
527 	for (i = 0; i < dpkg->num_extracts; i++) {
528 		if (dpkg->extracts[i].extract.from_hdr.prot == prot &&
529 			dpkg->extracts[i].extract.from_hdr.field == field) {
530 			return i;
531 		}
532 	}
533 
534 	return -1;
535 }
536 
537 static inline int dpaa2_flow_extract_key_offset(
538 	struct dpaa2_key_extract *key_extract,
539 	enum net_prot prot, uint32_t field)
540 {
541 	int i;
542 	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
543 	struct dpaa2_key_info *key_info = &key_extract->key_info;
544 
545 	if (prot == NET_PROT_IPV4 ||
546 		prot == NET_PROT_IPV6)
547 		i = dpaa2_flow_extract_search(dpkg, NET_PROT_IP, field);
548 	else
549 		i = dpaa2_flow_extract_search(dpkg, prot, field);
550 
551 	if (i >= 0) {
552 		if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_SRC)
553 			return key_info->ipv4_src_offset;
554 		else if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_DST)
555 			return key_info->ipv4_dst_offset;
556 		else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_SRC)
557 			return key_info->ipv6_src_offset;
558 		else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_DST)
559 			return key_info->ipv6_dst_offset;
560 		else
561 			return key_info->key_offset[i];
562 	} else {
563 		return -1;
564 	}
565 }
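
/* Note on the special IP cases above: the generic NET_PROT_IP address
 * extracts are added with a key size of 0, because the address length is
 * only known once the rule tells us whether it is IPv4 or IPv6. Their
 * offsets are therefore taken from the cached ipv4_/ipv6_ src/dst
 * offsets instead of key_offset[].
 */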
566 
567 struct proto_discrimination {
568 	enum rte_flow_item_type type;
569 	union {
570 		rte_be16_t eth_type;
571 		uint8_t ip_proto;
572 	};
573 };
574 
575 static int
576 dpaa2_flow_proto_discrimination_rule(
577 	struct dpaa2_dev_priv *priv, struct rte_flow *flow,
578 	struct proto_discrimination proto, int group)
579 {
580 	enum net_prot prot;
581 	uint32_t field;
582 	int offset;
583 	size_t key_iova;
584 	size_t mask_iova;
585 	rte_be16_t eth_type;
586 	uint8_t ip_proto;
587 
588 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
589 		prot = NET_PROT_ETH;
590 		field = NH_FLD_ETH_TYPE;
591 	} else if (proto.type == DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
592 		prot = NET_PROT_IP;
593 		field = NH_FLD_IP_PROTO;
594 	} else {
595 		DPAA2_PMD_ERR(
596 			"Only Eth and IP support to discriminate next proto.");
597 		return -1;
598 	}
599 
600 	offset = dpaa2_flow_extract_key_offset(&priv->extract.qos_key_extract,
601 			prot, field);
602 	if (offset < 0) {
603 		DPAA2_PMD_ERR("QoS prot %d field %d extract failed",
604 				prot, field);
605 		return -1;
606 	}
607 	key_iova = flow->qos_rule.key_iova + offset;
608 	mask_iova = flow->qos_rule.mask_iova + offset;
609 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
610 		eth_type = proto.eth_type;
611 		memcpy((void *)key_iova, (const void *)(&eth_type),
612 			sizeof(rte_be16_t));
613 		eth_type = 0xffff;
614 		memcpy((void *)mask_iova, (const void *)(&eth_type),
615 			sizeof(rte_be16_t));
616 	} else {
617 		ip_proto = proto.ip_proto;
618 		memcpy((void *)key_iova, (const void *)(&ip_proto),
619 			sizeof(uint8_t));
620 		ip_proto = 0xff;
621 		memcpy((void *)mask_iova, (const void *)(&ip_proto),
622 			sizeof(uint8_t));
623 	}
624 
625 	offset = dpaa2_flow_extract_key_offset(
626 			&priv->extract.tc_key_extract[group],
627 			prot, field);
628 	if (offset < 0) {
629 		DPAA2_PMD_ERR("FS prot %d field %d extract failed",
630 				prot, field);
631 		return -1;
632 	}
633 	key_iova = flow->fs_rule.key_iova + offset;
634 	mask_iova = flow->fs_rule.mask_iova + offset;
635 
636 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
637 		eth_type = proto.eth_type;
638 		memcpy((void *)key_iova, (const void *)(&eth_type),
639 			sizeof(rte_be16_t));
640 		eth_type = 0xffff;
641 		memcpy((void *)mask_iova, (const void *)(&eth_type),
642 			sizeof(rte_be16_t));
643 	} else {
644 		ip_proto = proto.ip_proto;
645 		memcpy((void *)key_iova, (const void *)(&ip_proto),
646 			sizeof(uint8_t));
647 		ip_proto = 0xff;
648 		memcpy((void *)mask_iova, (const void *)(&ip_proto),
649 			sizeof(uint8_t));
650 	}
651 
652 	return 0;
653 }
654 
655 static inline int
656 dpaa2_flow_rule_data_set(
657 	struct dpaa2_key_extract *key_extract,
658 	struct dpni_rule_cfg *rule,
659 	enum net_prot prot, uint32_t field,
660 	const void *key, const void *mask, int size)
661 {
662 	int offset = dpaa2_flow_extract_key_offset(key_extract,
663 				prot, field);
664 
665 	if (offset < 0) {
666 		DPAA2_PMD_ERR("prot %d, field %d extract failed",
667 			prot, field);
668 		return -1;
669 	}
670 
671 	memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
672 	memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
673 
674 	return 0;
675 }
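
/* Usage sketch (values are assumptions for illustration): copying a
 * 16-bit VLAN TCI match into the QoS rule at the offset reserved for the
 * VLAN_TCI extract, with priv and flow as used elsewhere in this file:
 *
 *   rte_be16_t tci = rte_cpu_to_be_16(100);
 *   rte_be16_t tci_mask = rte_cpu_to_be_16(0x0fff);
 *
 *   dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
 *           &flow->qos_rule, NET_PROT_VLAN, NH_FLD_VLAN_TCI,
 *           &tci, &tci_mask, sizeof(rte_be16_t));
 */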
676 
677 static inline int
678 _dpaa2_flow_rule_move_ipaddr_tail(
679 	struct dpaa2_key_extract *key_extract,
680 	struct dpni_rule_cfg *rule, int src_offset,
681 	uint32_t field, bool ipv4)
682 {
683 	size_t key_src;
684 	size_t mask_src;
685 	size_t key_dst;
686 	size_t mask_dst;
687 	int dst_offset, len;
688 	enum net_prot prot;
689 	char tmp[NH_FLD_IPV6_ADDR_SIZE];
690 
691 	if (field != NH_FLD_IP_SRC &&
692 		field != NH_FLD_IP_DST) {
693 		DPAA2_PMD_ERR("Field of IP addr reorder must be IP SRC/DST");
694 		return -1;
695 	}
696 	if (ipv4)
697 		prot = NET_PROT_IPV4;
698 	else
699 		prot = NET_PROT_IPV6;
700 	dst_offset = dpaa2_flow_extract_key_offset(key_extract,
701 				prot, field);
702 	if (dst_offset < 0) {
703 		DPAA2_PMD_ERR("Field %d reorder extract failed", field);
704 		return -1;
705 	}
706 	key_src = rule->key_iova + src_offset;
707 	mask_src = rule->mask_iova + src_offset;
708 	key_dst = rule->key_iova + dst_offset;
709 	mask_dst = rule->mask_iova + dst_offset;
710 	if (ipv4)
711 		len = sizeof(rte_be32_t);
712 	else
713 		len = NH_FLD_IPV6_ADDR_SIZE;
714 
715 	memcpy(tmp, (char *)key_src, len);
716 	memset((char *)key_src, 0, len);
717 	memcpy((char *)key_dst, tmp, len);
718 
719 	memcpy(tmp, (char *)mask_src, len);
720 	memset((char *)mask_src, 0, len);
721 	memcpy((char *)mask_dst, tmp, len);
722 
723 	return 0;
724 }
725 
726 static inline int
727 dpaa2_flow_rule_move_ipaddr_tail(
728 	struct rte_flow *flow, struct dpaa2_dev_priv *priv,
729 	int fs_group)
730 {
731 	int ret;
732 	enum net_prot prot;
733 
734 	if (flow->ipaddr_rule.ipaddr_type == FLOW_NONE_IPADDR)
735 		return 0;
736 
737 	if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR)
738 		prot = NET_PROT_IPV4;
739 	else
740 		prot = NET_PROT_IPV6;
741 
742 	if (flow->ipaddr_rule.qos_ipsrc_offset >= 0) {
743 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
744 				&priv->extract.qos_key_extract,
745 				&flow->qos_rule,
746 				flow->ipaddr_rule.qos_ipsrc_offset,
747 				NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
748 		if (ret) {
749 			DPAA2_PMD_ERR("QoS src address reorder failed");
750 			return -1;
751 		}
752 		flow->ipaddr_rule.qos_ipsrc_offset =
753 			dpaa2_flow_extract_key_offset(
754 				&priv->extract.qos_key_extract,
755 				prot, NH_FLD_IP_SRC);
756 	}
757 
758 	if (flow->ipaddr_rule.qos_ipdst_offset >= 0) {
759 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
760 				&priv->extract.qos_key_extract,
761 				&flow->qos_rule,
762 				flow->ipaddr_rule.qos_ipdst_offset,
763 				NH_FLD_IP_DST, prot == NET_PROT_IPV4);
764 		if (ret) {
765 			DPAA2_PMD_ERR("QoS dst address reorder failed");
766 			return -1;
767 		}
768 		flow->ipaddr_rule.qos_ipdst_offset =
769 			dpaa2_flow_extract_key_offset(
770 				&priv->extract.qos_key_extract,
771 				prot, NH_FLD_IP_DST);
772 	}
773 
774 	if (flow->ipaddr_rule.fs_ipsrc_offset >= 0) {
775 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
776 				&priv->extract.tc_key_extract[fs_group],
777 				&flow->fs_rule,
778 				flow->ipaddr_rule.fs_ipsrc_offset,
779 				NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
780 		if (ret) {
781 			DPAA2_PMD_ERR("FS src address reorder failed");
782 			return -1;
783 		}
784 		flow->ipaddr_rule.fs_ipsrc_offset =
785 			dpaa2_flow_extract_key_offset(
786 				&priv->extract.tc_key_extract[fs_group],
787 				prot, NH_FLD_IP_SRC);
788 	}
789 	if (flow->ipaddr_rule.fs_ipdst_offset >= 0) {
790 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
791 				&priv->extract.tc_key_extract[fs_group],
792 				&flow->fs_rule,
793 				flow->ipaddr_rule.fs_ipdst_offset,
794 				NH_FLD_IP_DST, prot == NET_PROT_IPV4);
795 		if (ret) {
796 			DPAA2_PMD_ERR("FS dst address reorder failed");
797 			return -1;
798 		}
799 		flow->ipaddr_rule.fs_ipdst_offset =
800 			dpaa2_flow_extract_key_offset(
801 				&priv->extract.tc_key_extract[fs_group],
802 				prot, NH_FLD_IP_DST);
803 	}
804 
805 	return 0;
806 }
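
/* Example of when the move above is needed (sketch): if an IPv4 SRC rule
 * was written first and an ETH_SA extract is added afterwards, the ETH_SA
 * extract lands in front of the IP address extract. The address bytes
 * already written into the key/mask buffers must then be relocated to the
 * new tail offset before the ETH_SA data is written, and the cached
 * qos_/fs_ ip offsets are refreshed accordingly.
 */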
807 
808 static int
809 dpaa2_flow_extract_support(
810 	const uint8_t *mask_src,
811 	enum rte_flow_item_type type)
812 {
813 	char mask[64];
814 	int i, size = 0;
815 	const char *mask_support = 0;
816 
817 	switch (type) {
818 	case RTE_FLOW_ITEM_TYPE_ETH:
819 		mask_support = (const char *)&dpaa2_flow_item_eth_mask;
820 		size = sizeof(struct rte_flow_item_eth);
821 		break;
822 	case RTE_FLOW_ITEM_TYPE_VLAN:
823 		mask_support = (const char *)&dpaa2_flow_item_vlan_mask;
824 		size = sizeof(struct rte_flow_item_vlan);
825 		break;
826 	case RTE_FLOW_ITEM_TYPE_IPV4:
827 		mask_support = (const char *)&dpaa2_flow_item_ipv4_mask;
828 		size = sizeof(struct rte_flow_item_ipv4);
829 		break;
830 	case RTE_FLOW_ITEM_TYPE_IPV6:
831 		mask_support = (const char *)&dpaa2_flow_item_ipv6_mask;
832 		size = sizeof(struct rte_flow_item_ipv6);
833 		break;
834 	case RTE_FLOW_ITEM_TYPE_ICMP:
835 		mask_support = (const char *)&dpaa2_flow_item_icmp_mask;
836 		size = sizeof(struct rte_flow_item_icmp);
837 		break;
838 	case RTE_FLOW_ITEM_TYPE_UDP:
839 		mask_support = (const char *)&dpaa2_flow_item_udp_mask;
840 		size = sizeof(struct rte_flow_item_udp);
841 		break;
842 	case RTE_FLOW_ITEM_TYPE_TCP:
843 		mask_support = (const char *)&dpaa2_flow_item_tcp_mask;
844 		size = sizeof(struct rte_flow_item_tcp);
845 		break;
846 	case RTE_FLOW_ITEM_TYPE_SCTP:
847 		mask_support = (const char *)&dpaa2_flow_item_sctp_mask;
848 		size = sizeof(struct rte_flow_item_sctp);
849 		break;
850 	case RTE_FLOW_ITEM_TYPE_GRE:
851 		mask_support = (const char *)&dpaa2_flow_item_gre_mask;
852 		size = sizeof(struct rte_flow_item_gre);
853 		break;
854 	default:
855 		return -1;
856 	}
857 
858 	memcpy(mask, mask_support, size);
859 
860 	for (i = 0; i < size; i++)
861 		mask[i] = (mask[i] | mask_src[i]);
862 
863 	if (memcmp(mask, mask_support, size))
864 		return -1;
865 
866 	return 0;
867 }
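
/* Behaviour sketch: the item mask supplied by the application is OR-ed
 * into the widest mask this PMD supports for that item type; any bit
 * falling outside the supported mask changes the result of the final
 * memcmp() and the item is rejected. For example (an assumption for
 * illustration), an rte_flow_item_ipv4 mask that also sets
 * hdr.type_of_service is refused, since only the src/dst addresses and
 * next_proto_id are extractable here.
 */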
868 
869 static int
870 dpaa2_configure_flow_eth(struct rte_flow *flow,
871 			 struct rte_eth_dev *dev,
872 			 const struct rte_flow_attr *attr,
873 			 const struct rte_flow_item *pattern,
874 			 const struct rte_flow_action actions[] __rte_unused,
875 			 struct rte_flow_error *error __rte_unused,
876 			 int *device_configured)
877 {
878 	int index, ret;
879 	int local_cfg = 0;
880 	uint32_t group;
881 	const struct rte_flow_item_eth *spec, *mask;
882 
883 	/* TODO: Currently upper bound of range parameter is not implemented */
884 	const struct rte_flow_item_eth *last __rte_unused;
885 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
886 	const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0};
887 
888 	group = attr->group;
889 
890 	/* Parse pattern list to get the matching parameters */
891 	spec    = (const struct rte_flow_item_eth *)pattern->spec;
892 	last    = (const struct rte_flow_item_eth *)pattern->last;
893 	mask    = (const struct rte_flow_item_eth *)
894 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_eth_mask);
895 	if (!spec) {
896 		/* No specific field of the eth header is of interest here,
897 		 * only the eth protocol itself.
898 		 */
899 		DPAA2_PMD_WARN("No pattern spec for Eth flow, just skip");
900 		return 0;
901 	}
902 
903 	/* Get traffic class index and flow id to be configured */
904 	flow->tc_id = group;
905 	flow->tc_index = attr->priority;
906 
907 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
908 		RTE_FLOW_ITEM_TYPE_ETH)) {
909 		DPAA2_PMD_WARN("Extract field(s) of ethernet not supported.");
910 
911 		return -1;
912 	}
913 
914 	if (memcmp((const char *)&mask->src, zero_cmp, RTE_ETHER_ADDR_LEN)) {
915 		index = dpaa2_flow_extract_search(
916 				&priv->extract.qos_key_extract.dpkg,
917 				NET_PROT_ETH, NH_FLD_ETH_SA);
918 		if (index < 0) {
919 			ret = dpaa2_flow_extract_add(
920 					&priv->extract.qos_key_extract,
921 					NET_PROT_ETH, NH_FLD_ETH_SA,
922 					RTE_ETHER_ADDR_LEN);
923 			if (ret) {
924 				DPAA2_PMD_ERR("QoS Extract add ETH_SA failed.");
925 
926 				return -1;
927 			}
928 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
929 		}
930 		index = dpaa2_flow_extract_search(
931 				&priv->extract.tc_key_extract[group].dpkg,
932 				NET_PROT_ETH, NH_FLD_ETH_SA);
933 		if (index < 0) {
934 			ret = dpaa2_flow_extract_add(
935 					&priv->extract.tc_key_extract[group],
936 					NET_PROT_ETH, NH_FLD_ETH_SA,
937 					RTE_ETHER_ADDR_LEN);
938 			if (ret) {
939 				DPAA2_PMD_ERR("FS Extract add ETH_SA failed.");
940 				return -1;
941 			}
942 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
943 		}
944 
945 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
946 		if (ret) {
947 			DPAA2_PMD_ERR(
948 				"Move ipaddr before ETH_SA rule set failed");
949 			return -1;
950 		}
951 
952 		ret = dpaa2_flow_rule_data_set(
953 				&priv->extract.qos_key_extract,
954 				&flow->qos_rule,
955 				NET_PROT_ETH,
956 				NH_FLD_ETH_SA,
957 				&spec->src.addr_bytes,
958 				&mask->src.addr_bytes,
959 				sizeof(struct rte_ether_addr));
960 		if (ret) {
961 			DPAA2_PMD_ERR("QoS NH_FLD_ETH_SA rule data set failed");
962 			return -1;
963 		}
964 
965 		ret = dpaa2_flow_rule_data_set(
966 				&priv->extract.tc_key_extract[group],
967 				&flow->fs_rule,
968 				NET_PROT_ETH,
969 				NH_FLD_ETH_SA,
970 				&spec->src.addr_bytes,
971 				&mask->src.addr_bytes,
972 				sizeof(struct rte_ether_addr));
973 		if (ret) {
974 			DPAA2_PMD_ERR("FS NH_FLD_ETH_SA rule data set failed");
975 			return -1;
976 		}
977 	}
978 
979 	if (memcmp((const char *)&mask->dst, zero_cmp, RTE_ETHER_ADDR_LEN)) {
980 		index = dpaa2_flow_extract_search(
981 				&priv->extract.qos_key_extract.dpkg,
982 				NET_PROT_ETH, NH_FLD_ETH_DA);
983 		if (index < 0) {
984 			ret = dpaa2_flow_extract_add(
985 					&priv->extract.qos_key_extract,
986 					NET_PROT_ETH, NH_FLD_ETH_DA,
987 					RTE_ETHER_ADDR_LEN);
988 			if (ret) {
989 				DPAA2_PMD_ERR("QoS Extract add ETH_DA failed.");
990 
991 				return -1;
992 			}
993 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
994 		}
995 
996 		index = dpaa2_flow_extract_search(
997 				&priv->extract.tc_key_extract[group].dpkg,
998 				NET_PROT_ETH, NH_FLD_ETH_DA);
999 		if (index < 0) {
1000 			ret = dpaa2_flow_extract_add(
1001 					&priv->extract.tc_key_extract[group],
1002 					NET_PROT_ETH, NH_FLD_ETH_DA,
1003 					RTE_ETHER_ADDR_LEN);
1004 			if (ret) {
1005 				DPAA2_PMD_ERR("FS Extract add ETH_DA failed.");
1006 
1007 				return -1;
1008 			}
1009 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1010 		}
1011 
1012 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1013 		if (ret) {
1014 			DPAA2_PMD_ERR(
1015 				"Move ipaddr before ETH DA rule set failed");
1016 			return -1;
1017 		}
1018 
1019 		ret = dpaa2_flow_rule_data_set(
1020 				&priv->extract.qos_key_extract,
1021 				&flow->qos_rule,
1022 				NET_PROT_ETH,
1023 				NH_FLD_ETH_DA,
1024 				&spec->dst.addr_bytes,
1025 				&mask->dst.addr_bytes,
1026 				sizeof(struct rte_ether_addr));
1027 		if (ret) {
1028 			DPAA2_PMD_ERR("QoS NH_FLD_ETH_DA rule data set failed");
1029 			return -1;
1030 		}
1031 
1032 		ret = dpaa2_flow_rule_data_set(
1033 				&priv->extract.tc_key_extract[group],
1034 				&flow->fs_rule,
1035 				NET_PROT_ETH,
1036 				NH_FLD_ETH_DA,
1037 				&spec->dst.addr_bytes,
1038 				&mask->dst.addr_bytes,
1039 				sizeof(struct rte_ether_addr));
1040 		if (ret) {
1041 			DPAA2_PMD_ERR("FS NH_FLD_ETH_DA rule data set failed");
1042 			return -1;
1043 		}
1044 	}
1045 
1046 	if (memcmp((const char *)&mask->type, zero_cmp, sizeof(rte_be16_t))) {
1047 		index = dpaa2_flow_extract_search(
1048 				&priv->extract.qos_key_extract.dpkg,
1049 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1050 		if (index < 0) {
1051 			ret = dpaa2_flow_extract_add(
1052 					&priv->extract.qos_key_extract,
1053 					NET_PROT_ETH, NH_FLD_ETH_TYPE,
1054 					RTE_ETHER_TYPE_LEN);
1055 			if (ret) {
1056 				DPAA2_PMD_ERR("QoS Extract add ETH_TYPE failed.");
1057 
1058 				return -1;
1059 			}
1060 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1061 		}
1062 		index = dpaa2_flow_extract_search(
1063 				&priv->extract.tc_key_extract[group].dpkg,
1064 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1065 		if (index < 0) {
1066 			ret = dpaa2_flow_extract_add(
1067 					&priv->extract.tc_key_extract[group],
1068 					NET_PROT_ETH, NH_FLD_ETH_TYPE,
1069 					RTE_ETHER_TYPE_LEN);
1070 			if (ret) {
1071 				DPAA2_PMD_ERR("FS Extract add ETH_TYPE failed.");
1072 
1073 				return -1;
1074 			}
1075 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1076 		}
1077 
1078 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1079 		if (ret) {
1080 			DPAA2_PMD_ERR(
1081 				"Move ipaddr before ETH TYPE rule set failed");
1082 				return -1;
1083 		}
1084 
1085 		ret = dpaa2_flow_rule_data_set(
1086 				&priv->extract.qos_key_extract,
1087 				&flow->qos_rule,
1088 				NET_PROT_ETH,
1089 				NH_FLD_ETH_TYPE,
1090 				&spec->type,
1091 				&mask->type,
1092 				sizeof(rte_be16_t));
1093 		if (ret) {
1094 			DPAA2_PMD_ERR("QoS NH_FLD_ETH_TYPE rule data set failed");
1095 			return -1;
1096 		}
1097 
1098 		ret = dpaa2_flow_rule_data_set(
1099 				&priv->extract.tc_key_extract[group],
1100 				&flow->fs_rule,
1101 				NET_PROT_ETH,
1102 				NH_FLD_ETH_TYPE,
1103 				&spec->type,
1104 				&mask->type,
1105 				sizeof(rte_be16_t));
1106 		if (ret) {
1107 			DPAA2_PMD_ERR("FS NH_FLD_ETH_TYPE rule data set failed");
1108 			return -1;
1109 		}
1110 	}
1111 
1112 	(*device_configured) |= local_cfg;
1113 
1114 	return 0;
1115 }
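
/* Illustrative testpmd command handled by the function above (MAC address
 * and queue index are assumptions for the example):
 *
 *   flow create 0 ingress pattern
 *        eth src is 00:11:22:33:44:55 / end
 *        actions queue index 1 / end
 *
 * Only the ETH_SA branch is exercised in that case; the DA and TYPE
 * branches run when their masks are non-zero.
 */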
1116 
1117 static int
1118 dpaa2_configure_flow_vlan(struct rte_flow *flow,
1119 			  struct rte_eth_dev *dev,
1120 			  const struct rte_flow_attr *attr,
1121 			  const struct rte_flow_item *pattern,
1122 			  const struct rte_flow_action actions[] __rte_unused,
1123 			  struct rte_flow_error *error __rte_unused,
1124 			  int *device_configured)
1125 {
1126 	int index, ret;
1127 	int local_cfg = 0;
1128 	uint32_t group;
1129 	const struct rte_flow_item_vlan *spec, *mask;
1130 
1131 	const struct rte_flow_item_vlan *last __rte_unused;
1132 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1133 
1134 	group = attr->group;
1135 
1136 	/* Parse pattern list to get the matching parameters */
1137 	spec    = (const struct rte_flow_item_vlan *)pattern->spec;
1138 	last    = (const struct rte_flow_item_vlan *)pattern->last;
1139 	mask    = (const struct rte_flow_item_vlan *)
1140 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_vlan_mask);
1141 
1142 	/* Get traffic class index and flow id to be configured */
1143 	flow->tc_id = group;
1144 	flow->tc_index = attr->priority;
1145 
1146 	if (!spec) {
1147 		/* No specific field of the vlan header is of interest here,
1148 		 * only the vlan protocol itself.
1149 		 */
1150 		/* The Eth type is actually used for VLAN classification.
1151 		 */
1152 		struct proto_discrimination proto;
1153 
1154 		index = dpaa2_flow_extract_search(
1155 				&priv->extract.qos_key_extract.dpkg,
1156 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1157 		if (index < 0) {
1158 			ret = dpaa2_flow_proto_discrimination_extract(
1159 						&priv->extract.qos_key_extract,
1160 						RTE_FLOW_ITEM_TYPE_ETH);
1161 			if (ret) {
1162 				DPAA2_PMD_ERR(
1163 				"QoS Ext ETH_TYPE to discriminate vLan failed");
1164 
1165 				return -1;
1166 			}
1167 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1168 		}
1169 
1170 		index = dpaa2_flow_extract_search(
1171 				&priv->extract.tc_key_extract[group].dpkg,
1172 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1173 		if (index < 0) {
1174 			ret = dpaa2_flow_proto_discrimination_extract(
1175 					&priv->extract.tc_key_extract[group],
1176 					RTE_FLOW_ITEM_TYPE_ETH);
1177 			if (ret) {
1178 				DPAA2_PMD_ERR(
1179 				"FS Ext ETH_TYPE to discriminate vLan failed.");
1180 
1181 				return -1;
1182 			}
1183 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1184 		}
1185 
1186 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1187 		if (ret) {
1188 			DPAA2_PMD_ERR(
1189 			"Move ipaddr before vLan discrimination set failed");
1190 			return -1;
1191 		}
1192 
1193 		proto.type = RTE_FLOW_ITEM_TYPE_ETH;
1194 		proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
1195 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1196 							proto, group);
1197 		if (ret) {
1198 			DPAA2_PMD_ERR("vLan discrimination rule set failed");
1199 			return -1;
1200 		}
1201 
1202 		(*device_configured) |= local_cfg;
1203 
1204 		return 0;
1205 	}
1206 
1207 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
1208 		RTE_FLOW_ITEM_TYPE_VLAN)) {
1209 		DPAA2_PMD_WARN("Extract field(s) of vlan not supported.");
1210 
1211 		return -1;
1212 	}
1213 
1214 	if (!mask->tci)
1215 		return 0;
1216 
1217 	index = dpaa2_flow_extract_search(
1218 				&priv->extract.qos_key_extract.dpkg,
1219 				NET_PROT_VLAN, NH_FLD_VLAN_TCI);
1220 	if (index < 0) {
1221 		ret = dpaa2_flow_extract_add(
1222 						&priv->extract.qos_key_extract,
1223 						NET_PROT_VLAN,
1224 						NH_FLD_VLAN_TCI,
1225 						sizeof(rte_be16_t));
1226 		if (ret) {
1227 			DPAA2_PMD_ERR("QoS Extract add VLAN_TCI failed.");
1228 
1229 			return -1;
1230 		}
1231 		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1232 	}
1233 
1234 	index = dpaa2_flow_extract_search(
1235 			&priv->extract.tc_key_extract[group].dpkg,
1236 			NET_PROT_VLAN, NH_FLD_VLAN_TCI);
1237 	if (index < 0) {
1238 		ret = dpaa2_flow_extract_add(
1239 				&priv->extract.tc_key_extract[group],
1240 				NET_PROT_VLAN,
1241 				NH_FLD_VLAN_TCI,
1242 				sizeof(rte_be16_t));
1243 		if (ret) {
1244 			DPAA2_PMD_ERR("FS Extract add VLAN_TCI failed.");
1245 
1246 			return -1;
1247 		}
1248 		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1249 	}
1250 
1251 	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1252 	if (ret) {
1253 		DPAA2_PMD_ERR(
1254 			"Move ipaddr before VLAN TCI rule set failed");
1255 		return -1;
1256 	}
1257 
1258 	ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
1259 				&flow->qos_rule,
1260 				NET_PROT_VLAN,
1261 				NH_FLD_VLAN_TCI,
1262 				&spec->tci,
1263 				&mask->tci,
1264 				sizeof(rte_be16_t));
1265 	if (ret) {
1266 		DPAA2_PMD_ERR("QoS NH_FLD_VLAN_TCI rule data set failed");
1267 		return -1;
1268 	}
1269 
1270 	ret = dpaa2_flow_rule_data_set(
1271 			&priv->extract.tc_key_extract[group],
1272 			&flow->fs_rule,
1273 			NET_PROT_VLAN,
1274 			NH_FLD_VLAN_TCI,
1275 			&spec->tci,
1276 			&mask->tci,
1277 			sizeof(rte_be16_t));
1278 	if (ret) {
1279 		DPAA2_PMD_ERR("FS NH_FLD_VLAN_TCI rule data set failed");
1280 		return -1;
1281 	}
1282 
1283 	(*device_configured) |= local_cfg;
1284 
1285 	return 0;
1286 }
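
/* Illustrative testpmd command handled by the function above (TCI value
 * and queue index are assumptions for the example):
 *
 *   flow create 0 ingress pattern
 *        eth / vlan tci is 100 / end
 *        actions queue index 1 / end
 *
 * Without a vlan spec, only the ETH_TYPE based VLAN discrimination rule
 * is installed.
 */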
1287 
1288 static int
1289 dpaa2_configure_flow_ip_discrimation(
1290 	struct dpaa2_dev_priv *priv, struct rte_flow *flow,
1291 	const struct rte_flow_item *pattern,
1292 	int *local_cfg,	int *device_configured,
1293 	uint32_t group)
1294 {
1295 	int index, ret;
1296 	struct proto_discrimination proto;
1297 
1298 	index = dpaa2_flow_extract_search(
1299 			&priv->extract.qos_key_extract.dpkg,
1300 			NET_PROT_ETH, NH_FLD_ETH_TYPE);
1301 	if (index < 0) {
1302 		ret = dpaa2_flow_proto_discrimination_extract(
1303 				&priv->extract.qos_key_extract,
1304 				RTE_FLOW_ITEM_TYPE_ETH);
1305 		if (ret) {
1306 			DPAA2_PMD_ERR(
1307 			"QoS Extract ETH_TYPE to discriminate IP failed.");
1308 			return -1;
1309 		}
1310 		(*local_cfg) |= DPAA2_QOS_TABLE_RECONFIGURE;
1311 	}
1312 
1313 	index = dpaa2_flow_extract_search(
1314 			&priv->extract.tc_key_extract[group].dpkg,
1315 			NET_PROT_ETH, NH_FLD_ETH_TYPE);
1316 	if (index < 0) {
1317 		ret = dpaa2_flow_proto_discrimination_extract(
1318 				&priv->extract.tc_key_extract[group],
1319 				RTE_FLOW_ITEM_TYPE_ETH);
1320 		if (ret) {
1321 			DPAA2_PMD_ERR(
1322 			"FS Extract ETH_TYPE to discriminate IP failed.");
1323 			return -1;
1324 		}
1325 		(*local_cfg) |= DPAA2_FS_TABLE_RECONFIGURE;
1326 	}
1327 
1328 	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1329 	if (ret) {
1330 		DPAA2_PMD_ERR(
1331 			"Move ipaddr before IP discrimination set failed");
1332 		return -1;
1333 	}
1334 
1335 	proto.type = RTE_FLOW_ITEM_TYPE_ETH;
1336 	if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4)
1337 		proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
1338 	else
1339 		proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
1340 	ret = dpaa2_flow_proto_discrimination_rule(priv, flow, proto, group);
1341 	if (ret) {
1342 		DPAA2_PMD_ERR("IP discrimination rule set failed");
1343 		return -1;
1344 	}
1345 
1346 	(*device_configured) |= (*local_cfg);
1347 
1348 	return 0;
1349 }
1350 
1351 
1352 static int
1353 dpaa2_configure_flow_generic_ip(
1354 	struct rte_flow *flow,
1355 	struct rte_eth_dev *dev,
1356 	const struct rte_flow_attr *attr,
1357 	const struct rte_flow_item *pattern,
1358 	const struct rte_flow_action actions[] __rte_unused,
1359 	struct rte_flow_error *error __rte_unused,
1360 	int *device_configured)
1361 {
1362 	int index, ret;
1363 	int local_cfg = 0;
1364 	uint32_t group;
1365 	const struct rte_flow_item_ipv4 *spec_ipv4 = 0,
1366 		*mask_ipv4 = 0;
1367 	const struct rte_flow_item_ipv6 *spec_ipv6 = 0,
1368 		*mask_ipv6 = 0;
1369 	const void *key, *mask;
1370 	enum net_prot prot;
1371 
1372 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1373 	const char zero_cmp[NH_FLD_IPV6_ADDR_SIZE] = {0};
1374 	int size;
1375 
1376 	group = attr->group;
1377 
1378 	/* Parse pattern list to get the matching parameters */
1379 	if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1380 		spec_ipv4 = (const struct rte_flow_item_ipv4 *)pattern->spec;
1381 		mask_ipv4 = (const struct rte_flow_item_ipv4 *)
1382 			(pattern->mask ? pattern->mask :
1383 					&dpaa2_flow_item_ipv4_mask);
1384 	} else {
1385 		spec_ipv6 = (const struct rte_flow_item_ipv6 *)pattern->spec;
1386 		mask_ipv6 = (const struct rte_flow_item_ipv6 *)
1387 			(pattern->mask ? pattern->mask :
1388 					&dpaa2_flow_item_ipv6_mask);
1389 	}
1390 
1391 	/* Get traffic class index and flow id to be configured */
1392 	flow->tc_id = group;
1393 	flow->tc_index = attr->priority;
1394 
1395 	ret = dpaa2_configure_flow_ip_discrimation(priv,
1396 			flow, pattern, &local_cfg,
1397 			device_configured, group);
1398 	if (ret) {
1399 		DPAA2_PMD_ERR("IP discrimination failed!");
1400 		return -1;
1401 	}
1402 
1403 	if (!spec_ipv4 && !spec_ipv6)
1404 		return 0;
1405 
1406 	if (mask_ipv4) {
1407 		if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv4,
1408 			RTE_FLOW_ITEM_TYPE_IPV4)) {
1409 			DPAA2_PMD_WARN("Extract field(s) of IPv4 not supported.");
1410 
1411 			return -1;
1412 		}
1413 	}
1414 
1415 	if (mask_ipv6) {
1416 		if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv6,
1417 			RTE_FLOW_ITEM_TYPE_IPV6)) {
1418 			DPAA2_PMD_WARN("Extract field(s) of IPv6 not supported.");
1419 
1420 			return -1;
1421 		}
1422 	}
1423 
1424 	if (mask_ipv4 && (mask_ipv4->hdr.src_addr ||
1425 		mask_ipv4->hdr.dst_addr)) {
1426 		flow->ipaddr_rule.ipaddr_type = FLOW_IPV4_ADDR;
1427 	} else if (mask_ipv6 &&
1428 		(memcmp((const char *)mask_ipv6->hdr.src_addr,
1429 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE) ||
1430 		memcmp((const char *)mask_ipv6->hdr.dst_addr,
1431 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1432 		flow->ipaddr_rule.ipaddr_type = FLOW_IPV6_ADDR;
1433 	}
1434 
1435 	if ((mask_ipv4 && mask_ipv4->hdr.src_addr) ||
1436 		(mask_ipv6 &&
1437 			memcmp((const char *)mask_ipv6->hdr.src_addr,
1438 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1439 		index = dpaa2_flow_extract_search(
1440 				&priv->extract.qos_key_extract.dpkg,
1441 				NET_PROT_IP, NH_FLD_IP_SRC);
1442 		if (index < 0) {
1443 			ret = dpaa2_flow_extract_add(
1444 					&priv->extract.qos_key_extract,
1445 					NET_PROT_IP,
1446 					NH_FLD_IP_SRC,
1447 					0);
1448 			if (ret) {
1449 				DPAA2_PMD_ERR("QoS Extract add IP_SRC failed.");
1450 
1451 				return -1;
1452 			}
1453 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1454 		}
1455 
1456 		index = dpaa2_flow_extract_search(
1457 				&priv->extract.tc_key_extract[group].dpkg,
1458 				NET_PROT_IP, NH_FLD_IP_SRC);
1459 		if (index < 0) {
1460 			ret = dpaa2_flow_extract_add(
1461 					&priv->extract.tc_key_extract[group],
1462 					NET_PROT_IP,
1463 					NH_FLD_IP_SRC,
1464 					0);
1465 			if (ret) {
1466 				DPAA2_PMD_ERR("FS Extract add IP_SRC failed.");
1467 
1468 				return -1;
1469 			}
1470 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1471 		}
1472 
1473 		if (spec_ipv4)
1474 			key = &spec_ipv4->hdr.src_addr;
1475 		else
1476 			key = &spec_ipv6->hdr.src_addr[0];
1477 		if (mask_ipv4) {
1478 			mask = &mask_ipv4->hdr.src_addr;
1479 			size = NH_FLD_IPV4_ADDR_SIZE;
1480 			prot = NET_PROT_IPV4;
1481 		} else {
1482 			mask = &mask_ipv6->hdr.src_addr[0];
1483 			size = NH_FLD_IPV6_ADDR_SIZE;
1484 			prot = NET_PROT_IPV6;
1485 		}
1486 
1487 		ret = dpaa2_flow_rule_data_set(
1488 				&priv->extract.qos_key_extract,
1489 				&flow->qos_rule,
1490 				prot, NH_FLD_IP_SRC,
1491 				key,	mask, size);
1492 		if (ret) {
1493 			DPAA2_PMD_ERR("QoS NH_FLD_IP_SRC rule data set failed");
1494 			return -1;
1495 		}
1496 
1497 		ret = dpaa2_flow_rule_data_set(
1498 				&priv->extract.tc_key_extract[group],
1499 				&flow->fs_rule,
1500 				prot, NH_FLD_IP_SRC,
1501 				key,	mask, size);
1502 		if (ret) {
1503 			DPAA2_PMD_ERR("FS NH_FLD_IP_SRC rule data set failed");
1504 			return -1;
1505 		}
1506 
1507 		flow->ipaddr_rule.qos_ipsrc_offset =
1508 			dpaa2_flow_extract_key_offset(
1509 				&priv->extract.qos_key_extract,
1510 				prot, NH_FLD_IP_SRC);
1511 		flow->ipaddr_rule.fs_ipsrc_offset =
1512 			dpaa2_flow_extract_key_offset(
1513 				&priv->extract.tc_key_extract[group],
1514 				prot, NH_FLD_IP_SRC);
1515 	}
1516 
1517 	if ((mask_ipv4 && mask_ipv4->hdr.dst_addr) ||
1518 		(mask_ipv6 &&
1519 			memcmp((const char *)mask_ipv6->hdr.dst_addr,
1520 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1521 		index = dpaa2_flow_extract_search(
1522 				&priv->extract.qos_key_extract.dpkg,
1523 				NET_PROT_IP, NH_FLD_IP_DST);
1524 		if (index < 0) {
1525 			if (mask_ipv4)
1526 				size = NH_FLD_IPV4_ADDR_SIZE;
1527 			else
1528 				size = NH_FLD_IPV6_ADDR_SIZE;
1529 			ret = dpaa2_flow_extract_add(
1530 					&priv->extract.qos_key_extract,
1531 					NET_PROT_IP,
1532 					NH_FLD_IP_DST,
1533 					size);
1534 			if (ret) {
1535 				DPAA2_PMD_ERR("QoS Extract add IP_DST failed.");
1536 
1537 				return -1;
1538 			}
1539 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1540 		}
1541 
1542 		index = dpaa2_flow_extract_search(
1543 				&priv->extract.tc_key_extract[group].dpkg,
1544 				NET_PROT_IP, NH_FLD_IP_DST);
1545 		if (index < 0) {
1546 			if (mask_ipv4)
1547 				size = NH_FLD_IPV4_ADDR_SIZE;
1548 			else
1549 				size = NH_FLD_IPV6_ADDR_SIZE;
1550 			ret = dpaa2_flow_extract_add(
1551 					&priv->extract.tc_key_extract[group],
1552 					NET_PROT_IP,
1553 					NH_FLD_IP_DST,
1554 					size);
1555 			if (ret) {
1556 				DPAA2_PMD_ERR("FS Extract add IP_DST failed.");
1557 
1558 				return -1;
1559 			}
1560 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1561 		}
1562 
1563 		if (spec_ipv4)
1564 			key = &spec_ipv4->hdr.dst_addr;
1565 		else
1566 			key = spec_ipv6->hdr.dst_addr;
1567 		if (mask_ipv4) {
1568 			mask = &mask_ipv4->hdr.dst_addr;
1569 			size = NH_FLD_IPV4_ADDR_SIZE;
1570 			prot = NET_PROT_IPV4;
1571 		} else {
1572 			mask = &mask_ipv6->hdr.dst_addr[0];
1573 			size = NH_FLD_IPV6_ADDR_SIZE;
1574 			prot = NET_PROT_IPV6;
1575 		}
1576 
1577 		ret = dpaa2_flow_rule_data_set(
1578 				&priv->extract.qos_key_extract,
1579 				&flow->qos_rule,
1580 				prot, NH_FLD_IP_DST,
1581 				key,	mask, size);
1582 		if (ret) {
1583 			DPAA2_PMD_ERR("QoS NH_FLD_IP_DST rule data set failed");
1584 			return -1;
1585 		}
1586 
1587 		ret = dpaa2_flow_rule_data_set(
1588 				&priv->extract.tc_key_extract[group],
1589 				&flow->fs_rule,
1590 				prot, NH_FLD_IP_DST,
1591 				key,	mask, size);
1592 		if (ret) {
1593 			DPAA2_PMD_ERR("FS NH_FLD_IP_DST rule data set failed");
1594 			return -1;
1595 		}
1596 		flow->ipaddr_rule.qos_ipdst_offset =
1597 			dpaa2_flow_extract_key_offset(
1598 				&priv->extract.qos_key_extract,
1599 				prot, NH_FLD_IP_DST);
1600 		flow->ipaddr_rule.fs_ipdst_offset =
1601 			dpaa2_flow_extract_key_offset(
1602 				&priv->extract.tc_key_extract[group],
1603 				prot, NH_FLD_IP_DST);
1604 	}
1605 
1606 	if ((mask_ipv4 && mask_ipv4->hdr.next_proto_id) ||
1607 		(mask_ipv6 && mask_ipv6->hdr.proto)) {
1608 		index = dpaa2_flow_extract_search(
1609 				&priv->extract.qos_key_extract.dpkg,
1610 				NET_PROT_IP, NH_FLD_IP_PROTO);
1611 		if (index < 0) {
1612 			ret = dpaa2_flow_extract_add(
1613 				&priv->extract.qos_key_extract,
1614 				NET_PROT_IP,
1615 				NH_FLD_IP_PROTO,
1616 				NH_FLD_IP_PROTO_SIZE);
1617 			if (ret) {
1618 				DPAA2_PMD_ERR("QoS Extract add IP_PROTO failed.");
1619 
1620 				return -1;
1621 			}
1622 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1623 		}
1624 
1625 		index = dpaa2_flow_extract_search(
1626 				&priv->extract.tc_key_extract[group].dpkg,
1627 				NET_PROT_IP, NH_FLD_IP_PROTO);
1628 		if (index < 0) {
1629 			ret = dpaa2_flow_extract_add(
1630 					&priv->extract.tc_key_extract[group],
1631 					NET_PROT_IP,
1632 					NH_FLD_IP_PROTO,
1633 					NH_FLD_IP_PROTO_SIZE);
1634 			if (ret) {
1635 				DPAA2_PMD_ERR("FS Extract add IP_PROTO failed.");
1636 
1637 				return -1;
1638 			}
1639 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1640 		}
1641 
1642 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1643 		if (ret) {
1644 			DPAA2_PMD_ERR(
1645 				"Move ipaddr after NH_FLD_IP_PROTO rule set failed");
1646 			return -1;
1647 		}
1648 
1649 		if (spec_ipv4)
1650 			key = &spec_ipv4->hdr.next_proto_id;
1651 		else
1652 			key = &spec_ipv6->hdr.proto;
1653 		if (mask_ipv4)
1654 			mask = &mask_ipv4->hdr.next_proto_id;
1655 		else
1656 			mask = &mask_ipv6->hdr.proto;
1657 
1658 		ret = dpaa2_flow_rule_data_set(
1659 				&priv->extract.qos_key_extract,
1660 				&flow->qos_rule,
1661 				NET_PROT_IP,
1662 				NH_FLD_IP_PROTO,
1663 				key,	mask, NH_FLD_IP_PROTO_SIZE);
1664 		if (ret) {
1665 			DPAA2_PMD_ERR("QoS NH_FLD_IP_PROTO rule data set failed");
1666 			return -1;
1667 		}
1668 
1669 		ret = dpaa2_flow_rule_data_set(
1670 				&priv->extract.tc_key_extract[group],
1671 				&flow->fs_rule,
1672 				NET_PROT_IP,
1673 				NH_FLD_IP_PROTO,
1674 				key,	mask, NH_FLD_IP_PROTO_SIZE);
1675 		if (ret) {
1676 			DPAA2_PMD_ERR("FS NH_FLD_IP_PROTO rule data set failed");
1677 			return -1;
1678 		}
1679 	}
1680 
1681 	(*device_configured) |= local_cfg;
1682 
1683 	return 0;
1684 }
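
/* Illustrative testpmd command handled by the function above (addresses
 * and queue index are assumptions for the example):
 *
 *   flow create 0 ingress pattern
 *        eth / ipv4 src is 192.168.1.1 dst is 192.168.1.2 / end
 *        actions queue index 1 / end
 *
 * The IP SRC/DST extracts reserve no fixed key size up front; the actual
 * IPv4 or IPv6 address length is applied when the rule data is set,
 * which is why the address offsets are cached in flow->ipaddr_rule.
 */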
1685 
1686 static int
1687 dpaa2_configure_flow_icmp(struct rte_flow *flow,
1688 			  struct rte_eth_dev *dev,
1689 			  const struct rte_flow_attr *attr,
1690 			  const struct rte_flow_item *pattern,
1691 			  const struct rte_flow_action actions[] __rte_unused,
1692 			  struct rte_flow_error *error __rte_unused,
1693 			  int *device_configured)
1694 {
1695 	int index, ret;
1696 	int local_cfg = 0;
1697 	uint32_t group;
1698 	const struct rte_flow_item_icmp *spec, *mask;
1699 
1700 	const struct rte_flow_item_icmp *last __rte_unused;
1701 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1702 
1703 	group = attr->group;
1704 
1705 	/* Parse pattern list to get the matching parameters */
1706 	spec    = (const struct rte_flow_item_icmp *)pattern->spec;
1707 	last    = (const struct rte_flow_item_icmp *)pattern->last;
1708 	mask    = (const struct rte_flow_item_icmp *)
1709 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_icmp_mask);
1710 
1711 	/* Get traffic class index and flow id to be configured */
1712 	flow->tc_id = group;
1713 	flow->tc_index = attr->priority;
1714 
1715 	if (!spec) {
1716 		/* No specific field of the ICMP header is of interest here,
1717 		 * only the ICMP protocol itself.
1718 		 * Example: flow create 0 ingress pattern icmp /
1719 		 */
1720 		/* The next proto of the generic IP header is actually used
1721 		 * for ICMP identification.
1722 		 */
1723 		struct proto_discrimination proto;
1724 
1725 		index = dpaa2_flow_extract_search(
1726 				&priv->extract.qos_key_extract.dpkg,
1727 				NET_PROT_IP, NH_FLD_IP_PROTO);
1728 		if (index < 0) {
1729 			ret = dpaa2_flow_proto_discrimination_extract(
1730 					&priv->extract.qos_key_extract,
1731 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1732 			if (ret) {
1733 				DPAA2_PMD_ERR(
1734 					"QoS Extract IP protocol to discriminate ICMP failed.");
1735 
1736 				return -1;
1737 			}
1738 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1739 		}
1740 
1741 		index = dpaa2_flow_extract_search(
1742 				&priv->extract.tc_key_extract[group].dpkg,
1743 				NET_PROT_IP, NH_FLD_IP_PROTO);
1744 		if (index < 0) {
1745 			ret = dpaa2_flow_proto_discrimination_extract(
1746 					&priv->extract.tc_key_extract[group],
1747 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1748 			if (ret) {
1749 				DPAA2_PMD_ERR(
1750 					"FS Extract IP protocol to discriminate ICMP failed.");
1751 
1752 				return -1;
1753 			}
1754 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1755 		}
1756 
1757 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1758 		if (ret) {
1759 			DPAA2_PMD_ERR(
1760 				"Move IP addr before ICMP discrimination set failed");
1761 			return -1;
1762 		}
1763 
1764 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
1765 		proto.ip_proto = IPPROTO_ICMP;
1766 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1767 							proto, group);
1768 		if (ret) {
1769 			DPAA2_PMD_ERR("ICMP discrimination rule set failed");
1770 			return -1;
1771 		}
1772 
1773 		(*device_configured) |= local_cfg;
1774 
1775 		return 0;
1776 	}
1777 
1778 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
1779 		RTE_FLOW_ITEM_TYPE_ICMP)) {
1780 		DPAA2_PMD_WARN("Extract field(s) of ICMP not supported.");
1781 
1782 		return -1;
1783 	}
1784 
1785 	if (mask->hdr.icmp_type) {
1786 		index = dpaa2_flow_extract_search(
1787 				&priv->extract.qos_key_extract.dpkg,
1788 				NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
1789 		if (index < 0) {
1790 			ret = dpaa2_flow_extract_add(
1791 					&priv->extract.qos_key_extract,
1792 					NET_PROT_ICMP,
1793 					NH_FLD_ICMP_TYPE,
1794 					NH_FLD_ICMP_TYPE_SIZE);
1795 			if (ret) {
1796 				DPAA2_PMD_ERR("QoS Extract add ICMP_TYPE failed.");
1797 
1798 				return -1;
1799 			}
1800 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1801 		}
1802 
1803 		index = dpaa2_flow_extract_search(
1804 				&priv->extract.tc_key_extract[group].dpkg,
1805 				NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
1806 		if (index < 0) {
1807 			ret = dpaa2_flow_extract_add(
1808 					&priv->extract.tc_key_extract[group],
1809 					NET_PROT_ICMP,
1810 					NH_FLD_ICMP_TYPE,
1811 					NH_FLD_ICMP_TYPE_SIZE);
1812 			if (ret) {
1813 				DPAA2_PMD_ERR("FS Extract add ICMP_TYPE failed.");
1814 
1815 				return -1;
1816 			}
1817 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1818 		}
1819 
1820 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1821 		if (ret) {
1822 			DPAA2_PMD_ERR(
1823 				"Move ipaddr before ICMP TYPE set failed");
1824 			return -1;
1825 		}
1826 
1827 		ret = dpaa2_flow_rule_data_set(
1828 				&priv->extract.qos_key_extract,
1829 				&flow->qos_rule,
1830 				NET_PROT_ICMP,
1831 				NH_FLD_ICMP_TYPE,
1832 				&spec->hdr.icmp_type,
1833 				&mask->hdr.icmp_type,
1834 				NH_FLD_ICMP_TYPE_SIZE);
1835 		if (ret) {
1836 			DPAA2_PMD_ERR("QoS NH_FLD_ICMP_TYPE rule data set failed");
1837 			return -1;
1838 		}
1839 
1840 		ret = dpaa2_flow_rule_data_set(
1841 				&priv->extract.tc_key_extract[group],
1842 				&flow->fs_rule,
1843 				NET_PROT_ICMP,
1844 				NH_FLD_ICMP_TYPE,
1845 				&spec->hdr.icmp_type,
1846 				&mask->hdr.icmp_type,
1847 				NH_FLD_ICMP_TYPE_SIZE);
1848 		if (ret) {
1849 			DPAA2_PMD_ERR("FS NH_FLD_ICMP_TYPE rule data set failed");
1850 			return -1;
1851 		}
1852 	}
1853 
1854 	if (mask->hdr.icmp_code) {
1855 		index = dpaa2_flow_extract_search(
1856 				&priv->extract.qos_key_extract.dpkg,
1857 				NET_PROT_ICMP, NH_FLD_ICMP_CODE);
1858 		if (index < 0) {
1859 			ret = dpaa2_flow_extract_add(
1860 					&priv->extract.qos_key_extract,
1861 					NET_PROT_ICMP,
1862 					NH_FLD_ICMP_CODE,
1863 					NH_FLD_ICMP_CODE_SIZE);
1864 			if (ret) {
1865 				DPAA2_PMD_ERR("QoS Extract add ICMP_CODE failed.");
1866 
1867 				return -1;
1868 			}
1869 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1870 		}
1871 
1872 		index = dpaa2_flow_extract_search(
1873 				&priv->extract.tc_key_extract[group].dpkg,
1874 				NET_PROT_ICMP, NH_FLD_ICMP_CODE);
1875 		if (index < 0) {
1876 			ret = dpaa2_flow_extract_add(
1877 					&priv->extract.tc_key_extract[group],
1878 					NET_PROT_ICMP,
1879 					NH_FLD_ICMP_CODE,
1880 					NH_FLD_ICMP_CODE_SIZE);
1881 			if (ret) {
1882 				DPAA2_PMD_ERR("FS Extract add ICMP_CODE failed.");
1883 
1884 				return -1;
1885 			}
1886 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1887 		}
1888 
1889 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1890 		if (ret) {
1891 			DPAA2_PMD_ERR(
1892 				"Move ipaddr after ICMP CODE set failed");
1893 			return -1;
1894 		}
1895 
1896 		ret = dpaa2_flow_rule_data_set(
1897 				&priv->extract.qos_key_extract,
1898 				&flow->qos_rule,
1899 				NET_PROT_ICMP,
1900 				NH_FLD_ICMP_CODE,
1901 				&spec->hdr.icmp_code,
1902 				&mask->hdr.icmp_code,
1903 				NH_FLD_ICMP_CODE_SIZE);
1904 		if (ret) {
1905 			DPAA2_PMD_ERR("QoS NH_FLD_ICMP_CODE rule data set failed");
1906 			return -1;
1907 		}
1908 
1909 		ret = dpaa2_flow_rule_data_set(
1910 				&priv->extract.tc_key_extract[group],
1911 				&flow->fs_rule,
1912 				NET_PROT_ICMP,
1913 				NH_FLD_ICMP_CODE,
1914 				&spec->hdr.icmp_code,
1915 				&mask->hdr.icmp_code,
1916 				NH_FLD_ICMP_CODE_SIZE);
1917 		if (ret) {
1918 			DPAA2_PMD_ERR("FS NH_FLD_ICMP_CODE rule data set failed");
1919 			return -1;
1920 		}
1921 	}
1922 
1923 	(*device_configured) |= local_cfg;
1924 
1925 	return 0;
1926 }
1927 
1928 static int
1929 dpaa2_configure_flow_udp(struct rte_flow *flow,
1930 			 struct rte_eth_dev *dev,
1931 			  const struct rte_flow_attr *attr,
1932 			  const struct rte_flow_item *pattern,
1933 			  const struct rte_flow_action actions[] __rte_unused,
1934 			  struct rte_flow_error *error __rte_unused,
1935 			  int *device_configured)
1936 {
1937 	int index, ret;
1938 	int local_cfg = 0;
1939 	uint32_t group;
1940 	const struct rte_flow_item_udp *spec, *mask;
1941 
1942 	const struct rte_flow_item_udp *last __rte_unused;
1943 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1944 
1945 	group = attr->group;
1946 
1947 	/* Parse pattern list to get the matching parameters */
1948 	spec    = (const struct rte_flow_item_udp *)pattern->spec;
1949 	last    = (const struct rte_flow_item_udp *)pattern->last;
1950 	mask    = (const struct rte_flow_item_udp *)
1951 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_udp_mask);
1952 
1953 	/* Get traffic class index and flow id to be configured */
1954 	flow->tc_id = group;
1955 	flow->tc_index = attr->priority;
1956 
1957 	if (!spec || !mc_l4_port_identification) {
1958 		struct proto_discrimination proto;
1959 
1960 		index = dpaa2_flow_extract_search(
1961 				&priv->extract.qos_key_extract.dpkg,
1962 				NET_PROT_IP, NH_FLD_IP_PROTO);
1963 		if (index < 0) {
1964 			ret = dpaa2_flow_proto_discrimination_extract(
1965 					&priv->extract.qos_key_extract,
1966 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1967 			if (ret) {
1968 				DPAA2_PMD_ERR(
1969 					"QoS Extract IP protocol to discriminate UDP failed.");
1970 
1971 				return -1;
1972 			}
1973 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1974 		}
1975 
1976 		index = dpaa2_flow_extract_search(
1977 				&priv->extract.tc_key_extract[group].dpkg,
1978 				NET_PROT_IP, NH_FLD_IP_PROTO);
1979 		if (index < 0) {
1980 			ret = dpaa2_flow_proto_discrimination_extract(
1981 				&priv->extract.tc_key_extract[group],
1982 				DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1983 			if (ret) {
1984 				DPAA2_PMD_ERR(
1985 					"FS Extract IP protocol to discriminate UDP failed.");
1986 
1987 				return -1;
1988 			}
1989 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1990 		}
1991 
1992 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1993 		if (ret) {
1994 			DPAA2_PMD_ERR(
1995 				"Move IP addr before UDP discrimination set failed");
1996 			return -1;
1997 		}
1998 
1999 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2000 		proto.ip_proto = IPPROTO_UDP;
2001 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2002 							proto, group);
2003 		if (ret) {
2004 			DPAA2_PMD_ERR("UDP discrimination rule set failed");
2005 			return -1;
2006 		}
2007 
2008 		(*device_configured) |= local_cfg;
2009 
2010 		if (!spec)
2011 			return 0;
2012 	}
2013 
2014 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2015 		RTE_FLOW_ITEM_TYPE_UDP)) {
2016 		DPAA2_PMD_WARN("Extract field(s) of UDP not supported.");
2017 
2018 		return -1;
2019 	}
2020 
2021 	if (mask->hdr.src_port) {
2022 		index = dpaa2_flow_extract_search(
2023 				&priv->extract.qos_key_extract.dpkg,
2024 				NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
2025 		if (index < 0) {
2026 			ret = dpaa2_flow_extract_add(
2027 					&priv->extract.qos_key_extract,
2028 				NET_PROT_UDP,
2029 				NH_FLD_UDP_PORT_SRC,
2030 				NH_FLD_UDP_PORT_SIZE);
2031 			if (ret) {
2032 				DPAA2_PMD_ERR("QoS Extract add UDP_SRC failed.");
2033 
2034 				return -1;
2035 			}
2036 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2037 		}
2038 
2039 		index = dpaa2_flow_extract_search(
2040 				&priv->extract.tc_key_extract[group].dpkg,
2041 				NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
2042 		if (index < 0) {
2043 			ret = dpaa2_flow_extract_add(
2044 					&priv->extract.tc_key_extract[group],
2045 					NET_PROT_UDP,
2046 					NH_FLD_UDP_PORT_SRC,
2047 					NH_FLD_UDP_PORT_SIZE);
2048 			if (ret) {
2049 				DPAA2_PMD_ERR("FS Extract add UDP_SRC failed.");
2050 
2051 				return -1;
2052 			}
2053 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2054 		}
2055 
2056 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2057 		if (ret) {
2058 			DPAA2_PMD_ERR(
2059 				"Move ipaddr before UDP_PORT_SRC set failed");
2060 			return -1;
2061 		}
2062 
2063 		ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
2064 				&flow->qos_rule,
2065 				NET_PROT_UDP,
2066 				NH_FLD_UDP_PORT_SRC,
2067 				&spec->hdr.src_port,
2068 				&mask->hdr.src_port,
2069 				NH_FLD_UDP_PORT_SIZE);
2070 		if (ret) {
2071 			DPAA2_PMD_ERR(
2072 				"QoS NH_FLD_UDP_PORT_SRC rule data set failed");
2073 			return -1;
2074 		}
2075 
2076 		ret = dpaa2_flow_rule_data_set(
2077 				&priv->extract.tc_key_extract[group],
2078 				&flow->fs_rule,
2079 				NET_PROT_UDP,
2080 				NH_FLD_UDP_PORT_SRC,
2081 				&spec->hdr.src_port,
2082 				&mask->hdr.src_port,
2083 				NH_FLD_UDP_PORT_SIZE);
2084 		if (ret) {
2085 			DPAA2_PMD_ERR(
2086 				"FS NH_FLD_UDP_PORT_SRC rule data set failed");
2087 			return -1;
2088 		}
2089 	}
2090 
2091 	if (mask->hdr.dst_port) {
2092 		index = dpaa2_flow_extract_search(
2093 				&priv->extract.qos_key_extract.dpkg,
2094 				NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
2095 		if (index < 0) {
2096 			ret = dpaa2_flow_extract_add(
2097 					&priv->extract.qos_key_extract,
2098 					NET_PROT_UDP,
2099 					NH_FLD_UDP_PORT_DST,
2100 					NH_FLD_UDP_PORT_SIZE);
2101 			if (ret) {
2102 				DPAA2_PMD_ERR("QoS Extract add UDP_DST failed.");
2103 
2104 				return -1;
2105 			}
2106 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2107 		}
2108 
2109 		index = dpaa2_flow_extract_search(
2110 				&priv->extract.tc_key_extract[group].dpkg,
2111 				NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
2112 		if (index < 0) {
2113 			ret = dpaa2_flow_extract_add(
2114 					&priv->extract.tc_key_extract[group],
2115 					NET_PROT_UDP,
2116 					NH_FLD_UDP_PORT_DST,
2117 					NH_FLD_UDP_PORT_SIZE);
2118 			if (ret) {
2119 				DPAA2_PMD_ERR("FS Extract add UDP_DST failed.");
2120 
2121 				return -1;
2122 			}
2123 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2124 		}
2125 
2126 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2127 		if (ret) {
2128 			DPAA2_PMD_ERR(
2129 				"Move ipaddr before UDP_PORT_DST set failed");
2130 			return -1;
2131 		}
2132 
2133 		ret = dpaa2_flow_rule_data_set(
2134 				&priv->extract.qos_key_extract,
2135 				&flow->qos_rule,
2136 				NET_PROT_UDP,
2137 				NH_FLD_UDP_PORT_DST,
2138 				&spec->hdr.dst_port,
2139 				&mask->hdr.dst_port,
2140 				NH_FLD_UDP_PORT_SIZE);
2141 		if (ret) {
2142 			DPAA2_PMD_ERR(
2143 				"QoS NH_FLD_UDP_PORT_DST rule data set failed");
2144 			return -1;
2145 		}
2146 
2147 		ret = dpaa2_flow_rule_data_set(
2148 				&priv->extract.tc_key_extract[group],
2149 				&flow->fs_rule,
2150 				NET_PROT_UDP,
2151 				NH_FLD_UDP_PORT_DST,
2152 				&spec->hdr.dst_port,
2153 				&mask->hdr.dst_port,
2154 				NH_FLD_UDP_PORT_SIZE);
2155 		if (ret) {
2156 			DPAA2_PMD_ERR(
2157 				"FS NH_FLD_UDP_PORT_DST rule data set failed");
2158 			return -1;
2159 		}
2160 	}
2161 
2162 	(*device_configured) |= local_cfg;
2163 
2164 	return 0;
2165 }
2166 
2167 static int
2168 dpaa2_configure_flow_tcp(struct rte_flow *flow,
2169 			 struct rte_eth_dev *dev,
2170 			 const struct rte_flow_attr *attr,
2171 			 const struct rte_flow_item *pattern,
2172 			 const struct rte_flow_action actions[] __rte_unused,
2173 			 struct rte_flow_error *error __rte_unused,
2174 			 int *device_configured)
2175 {
2176 	int index, ret;
2177 	int local_cfg = 0;
2178 	uint32_t group;
2179 	const struct rte_flow_item_tcp *spec, *mask;
2180 
2181 	const struct rte_flow_item_tcp *last __rte_unused;
2182 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2183 
2184 	group = attr->group;
2185 
2186 	/* Parse pattern list to get the matching parameters */
2187 	spec    = (const struct rte_flow_item_tcp *)pattern->spec;
2188 	last    = (const struct rte_flow_item_tcp *)pattern->last;
2189 	mask    = (const struct rte_flow_item_tcp *)
2190 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_tcp_mask);
2191 
2192 	/* Get traffic class index and flow id to be configured */
2193 	flow->tc_id = group;
2194 	flow->tc_index = attr->priority;
2195 
2196 	if (!spec || !mc_l4_port_identification) {
2197 		struct proto_discrimination proto;
2198 
2199 		index = dpaa2_flow_extract_search(
2200 				&priv->extract.qos_key_extract.dpkg,
2201 				NET_PROT_IP, NH_FLD_IP_PROTO);
2202 		if (index < 0) {
2203 			ret = dpaa2_flow_proto_discrimination_extract(
2204 					&priv->extract.qos_key_extract,
2205 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2206 			if (ret) {
2207 				DPAA2_PMD_ERR(
2208 					"QoS Extract IP protocol to discriminate TCP failed.");
2209 
2210 				return -1;
2211 			}
2212 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2213 		}
2214 
2215 		index = dpaa2_flow_extract_search(
2216 				&priv->extract.tc_key_extract[group].dpkg,
2217 				NET_PROT_IP, NH_FLD_IP_PROTO);
2218 		if (index < 0) {
2219 			ret = dpaa2_flow_proto_discrimination_extract(
2220 				&priv->extract.tc_key_extract[group],
2221 				DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2222 			if (ret) {
2223 				DPAA2_PMD_ERR(
2224 					"FS Extract IP protocol to discriminate TCP failed.");
2225 
2226 				return -1;
2227 			}
2228 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2229 		}
2230 
2231 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2232 		if (ret) {
2233 			DPAA2_PMD_ERR(
2234 				"Move IP addr before TCP discrimination set failed");
2235 			return -1;
2236 		}
2237 
2238 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2239 		proto.ip_proto = IPPROTO_TCP;
2240 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2241 							proto, group);
2242 		if (ret) {
2243 			DPAA2_PMD_ERR("TCP discrimination rule set failed");
2244 			return -1;
2245 		}
2246 
2247 		(*device_configured) |= local_cfg;
2248 
2249 		if (!spec)
2250 			return 0;
2251 	}
2252 
2253 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2254 		RTE_FLOW_ITEM_TYPE_TCP)) {
2255 		DPAA2_PMD_WARN("Extract field(s) of TCP not supported.");
2256 
2257 		return -1;
2258 	}
2259 
2260 	if (mask->hdr.src_port) {
2261 		index = dpaa2_flow_extract_search(
2262 				&priv->extract.qos_key_extract.dpkg,
2263 				NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
2264 		if (index < 0) {
2265 			ret = dpaa2_flow_extract_add(
2266 					&priv->extract.qos_key_extract,
2267 					NET_PROT_TCP,
2268 					NH_FLD_TCP_PORT_SRC,
2269 					NH_FLD_TCP_PORT_SIZE);
2270 			if (ret) {
2271 				DPAA2_PMD_ERR("QoS Extract add TCP_SRC failed.");
2272 
2273 				return -1;
2274 			}
2275 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2276 		}
2277 
2278 		index = dpaa2_flow_extract_search(
2279 				&priv->extract.tc_key_extract[group].dpkg,
2280 				NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
2281 		if (index < 0) {
2282 			ret = dpaa2_flow_extract_add(
2283 					&priv->extract.tc_key_extract[group],
2284 					NET_PROT_TCP,
2285 					NH_FLD_TCP_PORT_SRC,
2286 					NH_FLD_TCP_PORT_SIZE);
2287 			if (ret) {
2288 				DPAA2_PMD_ERR("FS Extract add TCP_SRC failed.");
2289 
2290 				return -1;
2291 			}
2292 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2293 		}
2294 
2295 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2296 		if (ret) {
2297 			DPAA2_PMD_ERR(
2298 				"Move ipaddr before TCP_PORT_SRC set failed");
2299 			return -1;
2300 		}
2301 
2302 		ret = dpaa2_flow_rule_data_set(
2303 				&priv->extract.qos_key_extract,
2304 				&flow->qos_rule,
2305 				NET_PROT_TCP,
2306 				NH_FLD_TCP_PORT_SRC,
2307 				&spec->hdr.src_port,
2308 				&mask->hdr.src_port,
2309 				NH_FLD_TCP_PORT_SIZE);
2310 		if (ret) {
2311 			DPAA2_PMD_ERR(
2312 				"QoS NH_FLD_TCP_PORT_SRC rule data set failed");
2313 			return -1;
2314 		}
2315 
2316 		ret = dpaa2_flow_rule_data_set(
2317 				&priv->extract.tc_key_extract[group],
2318 				&flow->fs_rule,
2319 				NET_PROT_TCP,
2320 				NH_FLD_TCP_PORT_SRC,
2321 				&spec->hdr.src_port,
2322 				&mask->hdr.src_port,
2323 				NH_FLD_TCP_PORT_SIZE);
2324 		if (ret) {
2325 			DPAA2_PMD_ERR(
2326 				"FS NH_FLD_TCP_PORT_SRC rule data set failed");
2327 			return -1;
2328 		}
2329 	}
2330 
2331 	if (mask->hdr.dst_port) {
2332 		index = dpaa2_flow_extract_search(
2333 				&priv->extract.qos_key_extract.dpkg,
2334 				NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
2335 		if (index < 0) {
2336 			ret = dpaa2_flow_extract_add(
2337 					&priv->extract.qos_key_extract,
2338 					NET_PROT_TCP,
2339 					NH_FLD_TCP_PORT_DST,
2340 					NH_FLD_TCP_PORT_SIZE);
2341 			if (ret) {
2342 				DPAA2_PMD_ERR("QoS Extract add TCP_DST failed.");
2343 
2344 				return -1;
2345 			}
2346 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2347 		}
2348 
2349 		index = dpaa2_flow_extract_search(
2350 				&priv->extract.tc_key_extract[group].dpkg,
2351 				NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
2352 		if (index < 0) {
2353 			ret = dpaa2_flow_extract_add(
2354 					&priv->extract.tc_key_extract[group],
2355 					NET_PROT_TCP,
2356 					NH_FLD_TCP_PORT_DST,
2357 					NH_FLD_TCP_PORT_SIZE);
2358 			if (ret) {
2359 				DPAA2_PMD_ERR("FS Extract add TCP_DST failed.");
2360 
2361 				return -1;
2362 			}
2363 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2364 		}
2365 
2366 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2367 		if (ret) {
2368 			DPAA2_PMD_ERR(
2369 				"Move ipaddr before TCP_PORT_DST set failed");
2370 			return -1;
2371 		}
2372 
2373 		ret = dpaa2_flow_rule_data_set(
2374 				&priv->extract.qos_key_extract,
2375 				&flow->qos_rule,
2376 				NET_PROT_TCP,
2377 				NH_FLD_TCP_PORT_DST,
2378 				&spec->hdr.dst_port,
2379 				&mask->hdr.dst_port,
2380 				NH_FLD_TCP_PORT_SIZE);
2381 		if (ret) {
2382 			DPAA2_PMD_ERR(
2383 				"QoS NH_FLD_TCP_PORT_DST rule data set failed");
2384 			return -1;
2385 		}
2386 
2387 		ret = dpaa2_flow_rule_data_set(
2388 				&priv->extract.tc_key_extract[group],
2389 				&flow->fs_rule,
2390 				NET_PROT_TCP,
2391 				NH_FLD_TCP_PORT_DST,
2392 				&spec->hdr.dst_port,
2393 				&mask->hdr.dst_port,
2394 				NH_FLD_TCP_PORT_SIZE);
2395 		if (ret) {
2396 			DPAA2_PMD_ERR(
2397 				"FS NH_FLD_TCP_PORT_DST rule data set failed");
2398 			return -1;
2399 		}
2400 	}
2401 
2402 	(*device_configured) |= local_cfg;
2403 
2404 	return 0;
2405 }
2406 
2407 static int
2408 dpaa2_configure_flow_sctp(struct rte_flow *flow,
2409 			  struct rte_eth_dev *dev,
2410 			  const struct rte_flow_attr *attr,
2411 			  const struct rte_flow_item *pattern,
2412 			  const struct rte_flow_action actions[] __rte_unused,
2413 			  struct rte_flow_error *error __rte_unused,
2414 			  int *device_configured)
2415 {
2416 	int index, ret;
2417 	int local_cfg = 0;
2418 	uint32_t group;
2419 	const struct rte_flow_item_sctp *spec, *mask;
2420 
2421 	const struct rte_flow_item_sctp *last __rte_unused;
2422 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2423 
2424 	group = attr->group;
2425 
2426 	/* Parse pattern list to get the matching parameters */
2427 	spec    = (const struct rte_flow_item_sctp *)pattern->spec;
2428 	last    = (const struct rte_flow_item_sctp *)pattern->last;
2429 	mask    = (const struct rte_flow_item_sctp *)
2430 			(pattern->mask ? pattern->mask :
2431 				&dpaa2_flow_item_sctp_mask);
2432 
2433 	/* Get traffic class index and flow id to be configured */
2434 	flow->tc_id = group;
2435 	flow->tc_index = attr->priority;
2436 
2437 	if (!spec || !mc_l4_port_identification) {
2438 		struct proto_discrimination proto;
2439 
2440 		index = dpaa2_flow_extract_search(
2441 				&priv->extract.qos_key_extract.dpkg,
2442 				NET_PROT_IP, NH_FLD_IP_PROTO);
2443 		if (index < 0) {
2444 			ret = dpaa2_flow_proto_discrimination_extract(
2445 					&priv->extract.qos_key_extract,
2446 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2447 			if (ret) {
2448 				DPAA2_PMD_ERR(
2449 					"QoS Extract IP protocol to discriminate SCTP failed.");
2450 
2451 				return -1;
2452 			}
2453 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2454 		}
2455 
2456 		index = dpaa2_flow_extract_search(
2457 				&priv->extract.tc_key_extract[group].dpkg,
2458 				NET_PROT_IP, NH_FLD_IP_PROTO);
2459 		if (index < 0) {
2460 			ret = dpaa2_flow_proto_discrimination_extract(
2461 					&priv->extract.tc_key_extract[group],
2462 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2463 			if (ret) {
2464 				DPAA2_PMD_ERR(
2465 					"FS Extract IP protocol to discriminate SCTP failed.");
2466 
2467 				return -1;
2468 			}
2469 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2470 		}
2471 
2472 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2473 		if (ret) {
2474 			DPAA2_PMD_ERR(
2475 				"Move ipaddr before SCTP discrimination set failed");
2476 			return -1;
2477 		}
2478 
2479 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2480 		proto.ip_proto = IPPROTO_SCTP;
2481 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2482 							proto, group);
2483 		if (ret) {
2484 			DPAA2_PMD_ERR("SCTP discrimination rule set failed");
2485 			return -1;
2486 		}
2487 
2488 		(*device_configured) |= local_cfg;
2489 
2490 		if (!spec)
2491 			return 0;
2492 	}
2493 
2494 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2495 		RTE_FLOW_ITEM_TYPE_SCTP)) {
2496 		DPAA2_PMD_WARN("Extract field(s) of SCTP not supported.");
2497 
2498 		return -1;
2499 	}
2500 
2501 	if (mask->hdr.src_port) {
2502 		index = dpaa2_flow_extract_search(
2503 				&priv->extract.qos_key_extract.dpkg,
2504 				NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
2505 		if (index < 0) {
2506 			ret = dpaa2_flow_extract_add(
2507 					&priv->extract.qos_key_extract,
2508 					NET_PROT_SCTP,
2509 					NH_FLD_SCTP_PORT_SRC,
2510 					NH_FLD_SCTP_PORT_SIZE);
2511 			if (ret) {
2512 				DPAA2_PMD_ERR("QoS Extract add SCTP_SRC failed.");
2513 
2514 				return -1;
2515 			}
2516 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2517 		}
2518 
2519 		index = dpaa2_flow_extract_search(
2520 				&priv->extract.tc_key_extract[group].dpkg,
2521 				NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
2522 		if (index < 0) {
2523 			ret = dpaa2_flow_extract_add(
2524 					&priv->extract.tc_key_extract[group],
2525 					NET_PROT_SCTP,
2526 					NH_FLD_SCTP_PORT_SRC,
2527 					NH_FLD_SCTP_PORT_SIZE);
2528 			if (ret) {
2529 				DPAA2_PMD_ERR("FS Extract add SCTP_SRC failed.");
2530 
2531 				return -1;
2532 			}
2533 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2534 		}
2535 
2536 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2537 		if (ret) {
2538 			DPAA2_PMD_ERR(
2539 				"Move ipaddr before SCTP_PORT_SRC set failed");
2540 			return -1;
2541 		}
2542 
2543 		ret = dpaa2_flow_rule_data_set(
2544 				&priv->extract.qos_key_extract,
2545 				&flow->qos_rule,
2546 				NET_PROT_SCTP,
2547 				NH_FLD_SCTP_PORT_SRC,
2548 				&spec->hdr.src_port,
2549 				&mask->hdr.src_port,
2550 				NH_FLD_SCTP_PORT_SIZE);
2551 		if (ret) {
2552 			DPAA2_PMD_ERR(
2553 				"QoS NH_FLD_SCTP_PORT_SRC rule data set failed");
2554 			return -1;
2555 		}
2556 
2557 		ret = dpaa2_flow_rule_data_set(
2558 				&priv->extract.tc_key_extract[group],
2559 				&flow->fs_rule,
2560 				NET_PROT_SCTP,
2561 				NH_FLD_SCTP_PORT_SRC,
2562 				&spec->hdr.src_port,
2563 				&mask->hdr.src_port,
2564 				NH_FLD_SCTP_PORT_SIZE);
2565 		if (ret) {
2566 			DPAA2_PMD_ERR(
2567 				"FS NH_FLD_SCTP_PORT_SRC rule data set failed");
2568 			return -1;
2569 		}
2570 	}
2571 
2572 	if (mask->hdr.dst_port) {
2573 		index = dpaa2_flow_extract_search(
2574 				&priv->extract.qos_key_extract.dpkg,
2575 				NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
2576 		if (index < 0) {
2577 			ret = dpaa2_flow_extract_add(
2578 					&priv->extract.qos_key_extract,
2579 					NET_PROT_SCTP,
2580 					NH_FLD_SCTP_PORT_DST,
2581 					NH_FLD_SCTP_PORT_SIZE);
2582 			if (ret) {
2583 				DPAA2_PMD_ERR("QoS Extract add SCTP_DST failed.");
2584 
2585 				return -1;
2586 			}
2587 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2588 		}
2589 
2590 		index = dpaa2_flow_extract_search(
2591 				&priv->extract.tc_key_extract[group].dpkg,
2592 				NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
2593 		if (index < 0) {
2594 			ret = dpaa2_flow_extract_add(
2595 					&priv->extract.tc_key_extract[group],
2596 					NET_PROT_SCTP,
2597 					NH_FLD_SCTP_PORT_DST,
2598 					NH_FLD_SCTP_PORT_SIZE);
2599 			if (ret) {
2600 				DPAA2_PMD_ERR("FS Extract add SCTP_DST failed.");
2601 
2602 				return -1;
2603 			}
2604 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2605 		}
2606 
2607 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2608 		if (ret) {
2609 			DPAA2_PMD_ERR(
2610 				"Move ipaddr before SCTP_PORT_DST set failed");
2611 			return -1;
2612 		}
2613 
2614 		ret = dpaa2_flow_rule_data_set(
2615 				&priv->extract.qos_key_extract,
2616 				&flow->qos_rule,
2617 				NET_PROT_SCTP,
2618 				NH_FLD_SCTP_PORT_DST,
2619 				&spec->hdr.dst_port,
2620 				&mask->hdr.dst_port,
2621 				NH_FLD_SCTP_PORT_SIZE);
2622 		if (ret) {
2623 			DPAA2_PMD_ERR(
2624 				"QoS NH_FLD_SCTP_PORT_DST rule data set failed");
2625 			return -1;
2626 		}
2627 
2628 		ret = dpaa2_flow_rule_data_set(
2629 				&priv->extract.tc_key_extract[group],
2630 				&flow->fs_rule,
2631 				NET_PROT_SCTP,
2632 				NH_FLD_SCTP_PORT_DST,
2633 				&spec->hdr.dst_port,
2634 				&mask->hdr.dst_port,
2635 				NH_FLD_SCTP_PORT_SIZE);
2636 		if (ret) {
2637 			DPAA2_PMD_ERR(
2638 				"FS NH_FLD_SCTP_PORT_DST rule data set failed");
2639 			return -1;
2640 		}
2641 	}
2642 
2643 	(*device_configured) |= local_cfg;
2644 
2645 	return 0;
2646 }
2647 
2648 static int
2649 dpaa2_configure_flow_gre(struct rte_flow *flow,
2650 			 struct rte_eth_dev *dev,
2651 			 const struct rte_flow_attr *attr,
2652 			 const struct rte_flow_item *pattern,
2653 			 const struct rte_flow_action actions[] __rte_unused,
2654 			 struct rte_flow_error *error __rte_unused,
2655 			 int *device_configured)
2656 {
2657 	int index, ret;
2658 	int local_cfg = 0;
2659 	uint32_t group;
2660 	const struct rte_flow_item_gre *spec, *mask;
2661 
2662 	const struct rte_flow_item_gre *last __rte_unused;
2663 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2664 
2665 	group = attr->group;
2666 
2667 	/* Parse pattern list to get the matching parameters */
2668 	spec    = (const struct rte_flow_item_gre *)pattern->spec;
2669 	last    = (const struct rte_flow_item_gre *)pattern->last;
2670 	mask    = (const struct rte_flow_item_gre *)
2671 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_gre_mask);
2672 
2673 	/* Get traffic class index and flow id to be configured */
2674 	flow->tc_id = group;
2675 	flow->tc_index = attr->priority;
2676 
2677 	if (!spec) {
2678 		struct proto_discrimination proto;
2679 
2680 		index = dpaa2_flow_extract_search(
2681 				&priv->extract.qos_key_extract.dpkg,
2682 				NET_PROT_IP, NH_FLD_IP_PROTO);
2683 		if (index < 0) {
2684 			ret = dpaa2_flow_proto_discrimination_extract(
2685 					&priv->extract.qos_key_extract,
2686 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2687 			if (ret) {
2688 				DPAA2_PMD_ERR(
2689 					"QoS Extract IP protocol to discriminate GRE failed.");
2690 
2691 				return -1;
2692 			}
2693 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2694 		}
2695 
2696 		index = dpaa2_flow_extract_search(
2697 				&priv->extract.tc_key_extract[group].dpkg,
2698 				NET_PROT_IP, NH_FLD_IP_PROTO);
2699 		if (index < 0) {
2700 			ret = dpaa2_flow_proto_discrimination_extract(
2701 					&priv->extract.tc_key_extract[group],
2702 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2703 			if (ret) {
2704 				DPAA2_PMD_ERR(
2705 					"FS Extract IP protocol to discriminate GRE failed.");
2706 
2707 				return -1;
2708 			}
2709 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2710 		}
2711 
2712 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2713 		if (ret) {
2714 			DPAA2_PMD_ERR(
2715 				"Move IP addr before GRE discrimination set failed");
2716 			return -1;
2717 		}
2718 
2719 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2720 		proto.ip_proto = IPPROTO_GRE;
2721 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2722 							proto, group);
2723 		if (ret) {
2724 			DPAA2_PMD_ERR("GRE discrimination rule set failed");
2725 			return -1;
2726 		}
2727 
2728 		(*device_configured) |= local_cfg;
2729 
2730 		return 0;
2731 	}
2732 
2733 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2734 		RTE_FLOW_ITEM_TYPE_GRE)) {
2735 		DPAA2_PMD_WARN("Extract field(s) of GRE not supported.");
2736 
2737 		return -1;
2738 	}
2739 
2740 	if (!mask->protocol)
2741 		return 0;
2742 
2743 	index = dpaa2_flow_extract_search(
2744 			&priv->extract.qos_key_extract.dpkg,
2745 			NET_PROT_GRE, NH_FLD_GRE_TYPE);
2746 	if (index < 0) {
2747 		ret = dpaa2_flow_extract_add(
2748 				&priv->extract.qos_key_extract,
2749 				NET_PROT_GRE,
2750 				NH_FLD_GRE_TYPE,
2751 				sizeof(rte_be16_t));
2752 		if (ret) {
2753 			DPAA2_PMD_ERR("QoS Extract add GRE_TYPE failed.");
2754 
2755 			return -1;
2756 		}
2757 		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2758 	}
2759 
2760 	index = dpaa2_flow_extract_search(
2761 			&priv->extract.tc_key_extract[group].dpkg,
2762 			NET_PROT_GRE, NH_FLD_GRE_TYPE);
2763 	if (index < 0) {
2764 		ret = dpaa2_flow_extract_add(
2765 				&priv->extract.tc_key_extract[group],
2766 				NET_PROT_GRE,
2767 				NH_FLD_GRE_TYPE,
2768 				sizeof(rte_be16_t));
2769 		if (ret) {
2770 			DPAA2_PMD_ERR("FS Extract add GRE_TYPE failed.");
2771 
2772 			return -1;
2773 		}
2774 		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2775 	}
2776 
2777 	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2778 	if (ret) {
2779 		DPAA2_PMD_ERR(
2780 			"Move ipaddr before GRE_TYPE set failed");
2781 		return -1;
2782 	}
2783 
2784 	ret = dpaa2_flow_rule_data_set(
2785 				&priv->extract.qos_key_extract,
2786 				&flow->qos_rule,
2787 				NET_PROT_GRE,
2788 				NH_FLD_GRE_TYPE,
2789 				&spec->protocol,
2790 				&mask->protocol,
2791 				sizeof(rte_be16_t));
2792 	if (ret) {
2793 		DPAA2_PMD_ERR(
2794 			"QoS NH_FLD_GRE_TYPE rule data set failed");
2795 		return -1;
2796 	}
2797 
2798 	ret = dpaa2_flow_rule_data_set(
2799 			&priv->extract.tc_key_extract[group],
2800 			&flow->fs_rule,
2801 			NET_PROT_GRE,
2802 			NH_FLD_GRE_TYPE,
2803 			&spec->protocol,
2804 			&mask->protocol,
2805 			sizeof(rte_be16_t));
2806 	if (ret) {
2807 		DPAA2_PMD_ERR(
2808 			"FS NH_FLD_GRE_TYPE rule data set failed");
2809 		return -1;
2810 	}
2811 
2812 	(*device_configured) |= local_cfg;
2813 
2814 	return 0;
2815 }
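
/* Illustrative sketch only (never called by the driver): a minimal
 * application-side example of how the item handlers above are reached
 * through the generic rte_flow API.  It validates a rule matching the UDP
 * destination port and steering hits to RX queue 1.  Port id, queue index
 * and the UDP port are arbitrary example values; note that
 * dpaa2_dev_verify_patterns() below requires every pattern item to carry
 * a spec, which is why only the fully specified UDP item is used.
 */
static __rte_unused int
dpaa2_flow_example_udp_to_queue(uint16_t port_id)
{
	/* Attribute: ingress only, TC (group) 0, index (priority) 0. */
	struct rte_flow_attr attr = {
		.group = 0,
		.priority = 0,
		.ingress = 1,
	};
	/* Match UDP destination port 4789; the mask covers the whole field. */
	struct rte_flow_item_udp udp_spec = {
		.hdr.dst_port = RTE_BE16(4789),
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr.dst_port = RTE_BE16(0xffff),
	};
	struct rte_flow_item pattern[] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_UDP,
			.spec = &udp_spec,
			.mask = &udp_mask,
		},
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	/* Steer matching traffic to RX queue 1. */
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	/* rte_flow_create() takes the same arguments to install the rule. */
	return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}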
2816 
2817 /* Existing QoS/FS entries that match on IP address(es)
2818  * need to be updated after new extract(s) are inserted
2819  * ahead of the IP address extract(s), because the
2820  * insertion shifts the IP address offsets within the key.
2821  */
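
/* Worked example with hypothetical offsets: assume an IPv4 rule whose
 * source address extract used to start at key offset 2.  If a later flow
 * inserts a new 1-byte extract ahead of it, the recomputed source offset
 * becomes 3, so "extend" below evaluates to 3 - 2 = 1: the saved address
 * bytes and mask are copied from offset 2 to offset 3 and the real key
 * size grows by 1 before the entry is removed and re-added.
 */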
2822 static int
2823 dpaa2_flow_entry_update(
2824 	struct dpaa2_dev_priv *priv, uint8_t tc_id)
2825 {
2826 	struct rte_flow *curr = LIST_FIRST(&priv->flows);
2827 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
2828 	int ret;
2829 	int qos_ipsrc_offset = -1, qos_ipdst_offset = -1;
2830 	int fs_ipsrc_offset = -1, fs_ipdst_offset = -1;
2831 	struct dpaa2_key_extract *qos_key_extract =
2832 		&priv->extract.qos_key_extract;
2833 	struct dpaa2_key_extract *tc_key_extract =
2834 		&priv->extract.tc_key_extract[tc_id];
2835 	char ipsrc_key[NH_FLD_IPV6_ADDR_SIZE];
2836 	char ipdst_key[NH_FLD_IPV6_ADDR_SIZE];
2837 	char ipsrc_mask[NH_FLD_IPV6_ADDR_SIZE];
2838 	char ipdst_mask[NH_FLD_IPV6_ADDR_SIZE];
2839 	int extend = -1, extend1, size = -1;
2840 	uint16_t qos_index;
2841 
2842 	while (curr) {
2843 		if (curr->ipaddr_rule.ipaddr_type ==
2844 			FLOW_NONE_IPADDR) {
2845 			curr = LIST_NEXT(curr, next);
2846 			continue;
2847 		}
2848 
2849 		if (curr->ipaddr_rule.ipaddr_type ==
2850 			FLOW_IPV4_ADDR) {
2851 			qos_ipsrc_offset =
2852 				qos_key_extract->key_info.ipv4_src_offset;
2853 			qos_ipdst_offset =
2854 				qos_key_extract->key_info.ipv4_dst_offset;
2855 			fs_ipsrc_offset =
2856 				tc_key_extract->key_info.ipv4_src_offset;
2857 			fs_ipdst_offset =
2858 				tc_key_extract->key_info.ipv4_dst_offset;
2859 			size = NH_FLD_IPV4_ADDR_SIZE;
2860 		} else {
2861 			qos_ipsrc_offset =
2862 				qos_key_extract->key_info.ipv6_src_offset;
2863 			qos_ipdst_offset =
2864 				qos_key_extract->key_info.ipv6_dst_offset;
2865 			fs_ipsrc_offset =
2866 				tc_key_extract->key_info.ipv6_src_offset;
2867 			fs_ipdst_offset =
2868 				tc_key_extract->key_info.ipv6_dst_offset;
2869 			size = NH_FLD_IPV6_ADDR_SIZE;
2870 		}
2871 
2872 		qos_index = curr->tc_id * priv->fs_entries +
2873 			curr->tc_index;
2874 
2875 		dpaa2_flow_qos_entry_log("Before update", curr, qos_index);
2876 
2877 		if (priv->num_rx_tc > 1) {
2878 			ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW,
2879 					priv->token, &curr->qos_rule);
2880 			if (ret) {
2881 				DPAA2_PMD_ERR("QoS entry remove failed.");
2882 				return -1;
2883 			}
2884 		}
2885 
2886 		extend = -1;
2887 
2888 		if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
2889 			RTE_ASSERT(qos_ipsrc_offset >=
2890 				curr->ipaddr_rule.qos_ipsrc_offset);
2891 			extend1 = qos_ipsrc_offset -
2892 				curr->ipaddr_rule.qos_ipsrc_offset;
2893 			if (extend >= 0)
2894 				RTE_ASSERT(extend == extend1);
2895 			else
2896 				extend = extend1;
2897 
2898 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
2899 				(size == NH_FLD_IPV6_ADDR_SIZE));
2900 
2901 			memcpy(ipsrc_key,
2902 				(char *)(size_t)curr->qos_rule.key_iova +
2903 				curr->ipaddr_rule.qos_ipsrc_offset,
2904 				size);
2905 			memset((char *)(size_t)curr->qos_rule.key_iova +
2906 				curr->ipaddr_rule.qos_ipsrc_offset,
2907 				0, size);
2908 
2909 			memcpy(ipsrc_mask,
2910 				(char *)(size_t)curr->qos_rule.mask_iova +
2911 				curr->ipaddr_rule.qos_ipsrc_offset,
2912 				size);
2913 			memset((char *)(size_t)curr->qos_rule.mask_iova +
2914 				curr->ipaddr_rule.qos_ipsrc_offset,
2915 				0, size);
2916 
2917 			curr->ipaddr_rule.qos_ipsrc_offset = qos_ipsrc_offset;
2918 		}
2919 
2920 		if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
2921 			RTE_ASSERT(qos_ipdst_offset >=
2922 				curr->ipaddr_rule.qos_ipdst_offset);
2923 			extend1 = qos_ipdst_offset -
2924 				curr->ipaddr_rule.qos_ipdst_offset;
2925 			if (extend >= 0)
2926 				RTE_ASSERT(extend == extend1);
2927 			else
2928 				extend = extend1;
2929 
2930 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
2931 				(size == NH_FLD_IPV6_ADDR_SIZE));
2932 
2933 			memcpy(ipdst_key,
2934 				(char *)(size_t)curr->qos_rule.key_iova +
2935 				curr->ipaddr_rule.qos_ipdst_offset,
2936 				size);
2937 			memset((char *)(size_t)curr->qos_rule.key_iova +
2938 				curr->ipaddr_rule.qos_ipdst_offset,
2939 				0, size);
2940 
2941 			memcpy(ipdst_mask,
2942 				(char *)(size_t)curr->qos_rule.mask_iova +
2943 				curr->ipaddr_rule.qos_ipdst_offset,
2944 				size);
2945 			memset((char *)(size_t)curr->qos_rule.mask_iova +
2946 				curr->ipaddr_rule.qos_ipdst_offset,
2947 				0, size);
2948 
2949 			curr->ipaddr_rule.qos_ipdst_offset = qos_ipdst_offset;
2950 		}
2951 
2952 		if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
2953 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
2954 				(size == NH_FLD_IPV6_ADDR_SIZE));
2955 			memcpy((char *)(size_t)curr->qos_rule.key_iova +
2956 				curr->ipaddr_rule.qos_ipsrc_offset,
2957 				ipsrc_key,
2958 				size);
2959 			memcpy((char *)(size_t)curr->qos_rule.mask_iova +
2960 				curr->ipaddr_rule.qos_ipsrc_offset,
2961 				ipsrc_mask,
2962 				size);
2963 		}
2964 		if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
2965 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
2966 				(size == NH_FLD_IPV6_ADDR_SIZE));
2967 			memcpy((char *)(size_t)curr->qos_rule.key_iova +
2968 				curr->ipaddr_rule.qos_ipdst_offset,
2969 				ipdst_key,
2970 				size);
2971 			memcpy((char *)(size_t)curr->qos_rule.mask_iova +
2972 				curr->ipaddr_rule.qos_ipdst_offset,
2973 				ipdst_mask,
2974 				size);
2975 		}
2976 
2977 		if (extend >= 0)
2978 			curr->qos_real_key_size += extend;
2979 
2980 		curr->qos_rule.key_size = FIXED_ENTRY_SIZE;
2981 
2982 		dpaa2_flow_qos_entry_log("Start update", curr, qos_index);
2983 
2984 		if (priv->num_rx_tc > 1) {
2985 			ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
2986 					priv->token, &curr->qos_rule,
2987 					curr->tc_id, qos_index,
2988 					0, 0);
2989 			if (ret) {
2990 				DPAA2_PMD_ERR("QoS entry update failed.");
2991 				return -1;
2992 			}
2993 		}
2994 
2995 		if (curr->action != RTE_FLOW_ACTION_TYPE_QUEUE) {
2996 			curr = LIST_NEXT(curr, next);
2997 			continue;
2998 		}
2999 
3000 		dpaa2_flow_fs_entry_log("Before update", curr);
3001 		extend = -1;
3002 
3003 		ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW,
3004 				priv->token, curr->tc_id, &curr->fs_rule);
3005 		if (ret) {
3006 			DPAA2_PMD_ERR("FS entry remove failed.");
3007 			return -1;
3008 		}
3009 
3010 		if (curr->ipaddr_rule.fs_ipsrc_offset >= 0 &&
3011 			tc_id == curr->tc_id) {
3012 			RTE_ASSERT(fs_ipsrc_offset >=
3013 				curr->ipaddr_rule.fs_ipsrc_offset);
3014 			extend1 = fs_ipsrc_offset -
3015 				curr->ipaddr_rule.fs_ipsrc_offset;
3016 			if (extend >= 0)
3017 				RTE_ASSERT(extend == extend1);
3018 			else
3019 				extend = extend1;
3020 
3021 			memcpy(ipsrc_key,
3022 				(char *)(size_t)curr->fs_rule.key_iova +
3023 				curr->ipaddr_rule.fs_ipsrc_offset,
3024 				size);
3025 			memset((char *)(size_t)curr->fs_rule.key_iova +
3026 				curr->ipaddr_rule.fs_ipsrc_offset,
3027 				0, size);
3028 
3029 			memcpy(ipsrc_mask,
3030 				(char *)(size_t)curr->fs_rule.mask_iova +
3031 				curr->ipaddr_rule.fs_ipsrc_offset,
3032 				size);
3033 			memset((char *)(size_t)curr->fs_rule.mask_iova +
3034 				curr->ipaddr_rule.fs_ipsrc_offset,
3035 				0, size);
3036 
3037 			curr->ipaddr_rule.fs_ipsrc_offset = fs_ipsrc_offset;
3038 		}
3039 
3040 		if (curr->ipaddr_rule.fs_ipdst_offset >= 0 &&
3041 			tc_id == curr->tc_id) {
3042 			RTE_ASSERT(fs_ipdst_offset >=
3043 				curr->ipaddr_rule.fs_ipdst_offset);
3044 			extend1 = fs_ipdst_offset -
3045 				curr->ipaddr_rule.fs_ipdst_offset;
3046 			if (extend >= 0)
3047 				RTE_ASSERT(extend == extend1);
3048 			else
3049 				extend = extend1;
3050 
3051 			memcpy(ipdst_key,
3052 				(char *)(size_t)curr->fs_rule.key_iova +
3053 				curr->ipaddr_rule.fs_ipdst_offset,
3054 				size);
3055 			memset((char *)(size_t)curr->fs_rule.key_iova +
3056 				curr->ipaddr_rule.fs_ipdst_offset,
3057 				0, size);
3058 
3059 			memcpy(ipdst_mask,
3060 				(char *)(size_t)curr->fs_rule.mask_iova +
3061 				curr->ipaddr_rule.fs_ipdst_offset,
3062 				size);
3063 			memset((char *)(size_t)curr->fs_rule.mask_iova +
3064 				curr->ipaddr_rule.fs_ipdst_offset,
3065 				0, size);
3066 
3067 			curr->ipaddr_rule.fs_ipdst_offset = fs_ipdst_offset;
3068 		}
3069 
3070 		if (curr->ipaddr_rule.fs_ipsrc_offset >= 0) {
3071 			memcpy((char *)(size_t)curr->fs_rule.key_iova +
3072 				curr->ipaddr_rule.fs_ipsrc_offset,
3073 				ipsrc_key,
3074 				size);
3075 			memcpy((char *)(size_t)curr->fs_rule.mask_iova +
3076 				curr->ipaddr_rule.fs_ipsrc_offset,
3077 				ipsrc_mask,
3078 				size);
3079 		}
3080 		if (curr->ipaddr_rule.fs_ipdst_offset >= 0) {
3081 			memcpy((char *)(size_t)curr->fs_rule.key_iova +
3082 				curr->ipaddr_rule.fs_ipdst_offset,
3083 				ipdst_key,
3084 				size);
3085 			memcpy((char *)(size_t)curr->fs_rule.mask_iova +
3086 				curr->ipaddr_rule.fs_ipdst_offset,
3087 				ipdst_mask,
3088 				size);
3089 		}
3090 
3091 		if (extend >= 0)
3092 			curr->fs_real_key_size += extend;
3093 		curr->fs_rule.key_size = FIXED_ENTRY_SIZE;
3094 
3095 		dpaa2_flow_fs_entry_log("Start update", curr);
3096 
3097 		ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW,
3098 				priv->token, curr->tc_id, curr->tc_index,
3099 				&curr->fs_rule, &curr->action_cfg);
3100 		if (ret) {
3101 			DPAA2_PMD_ERR("FS entry update failed.");
3102 			return -1;
3103 		}
3104 
3105 		curr = LIST_NEXT(curr, next);
3106 	}
3107 
3108 	return 0;
3109 }
3110 
3111 static inline int
3112 dpaa2_flow_verify_attr(
3113 	struct dpaa2_dev_priv *priv,
3114 	const struct rte_flow_attr *attr)
3115 {
3116 	struct rte_flow *curr = LIST_FIRST(&priv->flows);
3117 
3118 	while (curr) {
3119 		if (curr->tc_id == attr->group &&
3120 			curr->tc_index == attr->priority) {
3121 			DPAA2_PMD_ERR(
3122 				"Flow with group %d and priority %d already exists.",
3123 				attr->group, attr->priority);
3124 
3125 			return -1;
3126 		}
3127 		curr = LIST_NEXT(curr, next);
3128 	}
3129 
3130 	return 0;
3131 }
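
/* For example, two rules that both use group 1 / priority 0 map to the
 * same (tc_id, tc_index) slot, so the second one is rejected above; every
 * rule within a group needs a distinct priority.
 */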
3132 
3133 static inline int
3134 dpaa2_flow_verify_action(
3135 	struct dpaa2_dev_priv *priv,
3136 	const struct rte_flow_attr *attr,
3137 	const struct rte_flow_action actions[])
3138 {
3139 	int end_of_list = 0, i, j = 0;
3140 	const struct rte_flow_action_queue *dest_queue;
3141 	const struct rte_flow_action_rss *rss_conf;
3142 	struct dpaa2_queue *rxq;
3143 
3144 	while (!end_of_list) {
3145 		switch (actions[j].type) {
3146 		case RTE_FLOW_ACTION_TYPE_QUEUE:
3147 			dest_queue = (const struct rte_flow_action_queue *)
3148 					(actions[j].conf);
3149 			rxq = priv->rx_vq[dest_queue->index];
3150 			if (attr->group != rxq->tc_index) {
3151 				DPAA2_PMD_ERR(
3152 					"RXQ[%d] does not belong to the group %d",
3153 					dest_queue->index, attr->group);
3154 
3155 				return -1;
3156 			}
3157 			break;
3158 		case RTE_FLOW_ACTION_TYPE_RSS:
3159 			rss_conf = (const struct rte_flow_action_rss *)
3160 					(actions[j].conf);
3161 			if (rss_conf->queue_num > priv->dist_queues) {
3162 				DPAA2_PMD_ERR(
3163 					"RSS number exceeds the distribution size");
3164 				return -ENOTSUP;
3165 			}
3166 			for (i = 0; i < (int)rss_conf->queue_num; i++) {
3167 				if (rss_conf->queue[i] >= priv->nb_rx_queues) {
3168 					DPAA2_PMD_ERR(
3169 						"RSS queue index exceeds the number of RXQs");
3170 					return -ENOTSUP;
3171 				}
3172 				rxq = priv->rx_vq[rss_conf->queue[i]];
3173 				if (rxq->tc_index != attr->group) {
3174 					DPAA2_PMD_ERR(
3175 						"Queue/Group combination is not supported\n");
3176 					return -ENOTSUP;
3177 				}
3178 			}
3179 
3180 			break;
3181 		case RTE_FLOW_ACTION_TYPE_END:
3182 			end_of_list = 1;
3183 			break;
3184 		default:
3185 			DPAA2_PMD_ERR("Invalid action type");
3186 			return -ENOTSUP;
3187 		}
3188 		j++;
3189 	}
3190 
3191 	return 0;
3192 }
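
/* Illustrative RSS action that passes the checks above (sketch only;
 * queue ids and hash types are arbitrary): at most priv->dist_queues
 * queues may be referenced and every queue must belong to the TC given
 * by attr->group, e.g.
 *
 *	uint16_t queues[2] = { 0, 1 };	// both mapped to TC attr->group
 *	struct rte_flow_action_rss rss = {
 *		.types = ETH_RSS_IP | ETH_RSS_UDP,
 *		.queue_num = 2,
 *		.queue = queues,
 *	};
 */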
3193 
3194 static int
3195 dpaa2_generic_flow_set(struct rte_flow *flow,
3196 		       struct rte_eth_dev *dev,
3197 		       const struct rte_flow_attr *attr,
3198 		       const struct rte_flow_item pattern[],
3199 		       const struct rte_flow_action actions[],
3200 		       struct rte_flow_error *error)
3201 {
3202 	const struct rte_flow_action_queue *dest_queue;
3203 	const struct rte_flow_action_rss *rss_conf;
3204 	int is_keycfg_configured = 0, end_of_list = 0;
3205 	int ret = 0, i = 0, j = 0;
3206 	struct dpni_rx_dist_cfg tc_cfg;
3207 	struct dpni_qos_tbl_cfg qos_cfg;
3208 	struct dpni_fs_action_cfg action;
3209 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3210 	struct dpaa2_queue *rxq;
3211 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3212 	size_t param;
3213 	struct rte_flow *curr = LIST_FIRST(&priv->flows);
3214 	uint16_t qos_index;
3215 
3216 	ret = dpaa2_flow_verify_attr(priv, attr);
3217 	if (ret)
3218 		return ret;
3219 
3220 	ret = dpaa2_flow_verify_action(priv, attr, actions);
3221 	if (ret)
3222 		return ret;
3223 
3224 	/* Parse pattern list to get the matching parameters */
3225 	while (!end_of_list) {
3226 		switch (pattern[i].type) {
3227 		case RTE_FLOW_ITEM_TYPE_ETH:
3228 			ret = dpaa2_configure_flow_eth(flow,
3229 					dev, attr, &pattern[i], actions, error,
3230 					&is_keycfg_configured);
3231 			if (ret) {
3232 				DPAA2_PMD_ERR("ETH flow configuration failed!");
3233 				return ret;
3234 			}
3235 			break;
3236 		case RTE_FLOW_ITEM_TYPE_VLAN:
3237 			ret = dpaa2_configure_flow_vlan(flow,
3238 					dev, attr, &pattern[i], actions, error,
3239 					&is_keycfg_configured);
3240 			if (ret) {
3241 				DPAA2_PMD_ERR("VLAN flow configuration failed!");
3242 				return ret;
3243 			}
3244 			break;
3245 		case RTE_FLOW_ITEM_TYPE_IPV4:
3246 		case RTE_FLOW_ITEM_TYPE_IPV6:
3247 			ret = dpaa2_configure_flow_generic_ip(flow,
3248 					dev, attr, &pattern[i], actions, error,
3249 					&is_keycfg_configured);
3250 			if (ret) {
3251 				DPAA2_PMD_ERR("IP flow configuration failed!");
3252 				return ret;
3253 			}
3254 			break;
3255 		case RTE_FLOW_ITEM_TYPE_ICMP:
3256 			ret = dpaa2_configure_flow_icmp(flow,
3257 					dev, attr, &pattern[i], actions, error,
3258 					&is_keycfg_configured);
3259 			if (ret) {
3260 				DPAA2_PMD_ERR("ICMP flow configuration failed!");
3261 				return ret;
3262 			}
3263 			break;
3264 		case RTE_FLOW_ITEM_TYPE_UDP:
3265 			ret = dpaa2_configure_flow_udp(flow,
3266 					dev, attr, &pattern[i], actions, error,
3267 					&is_keycfg_configured);
3268 			if (ret) {
3269 				DPAA2_PMD_ERR("UDP flow configuration failed!");
3270 				return ret;
3271 			}
3272 			break;
3273 		case RTE_FLOW_ITEM_TYPE_TCP:
3274 			ret = dpaa2_configure_flow_tcp(flow,
3275 					dev, attr, &pattern[i], actions, error,
3276 					&is_keycfg_configured);
3277 			if (ret) {
3278 				DPAA2_PMD_ERR("TCP flow configuration failed!");
3279 				return ret;
3280 			}
3281 			break;
3282 		case RTE_FLOW_ITEM_TYPE_SCTP:
3283 			ret = dpaa2_configure_flow_sctp(flow,
3284 					dev, attr, &pattern[i], actions, error,
3285 					&is_keycfg_configured);
3286 			if (ret) {
3287 				DPAA2_PMD_ERR("SCTP flow configuration failed!");
3288 				return ret;
3289 			}
3290 			break;
3291 		case RTE_FLOW_ITEM_TYPE_GRE:
3292 			ret = dpaa2_configure_flow_gre(flow,
3293 					dev, attr, &pattern[i], actions, error,
3294 					&is_keycfg_configured);
3295 			if (ret) {
3296 				DPAA2_PMD_ERR("GRE flow configuration failed!");
3297 				return ret;
3298 			}
3299 			break;
3300 		case RTE_FLOW_ITEM_TYPE_END:
3301 			end_of_list = 1;
3302 			break; /*End of List*/
3303 		default:
3304 			DPAA2_PMD_ERR("Invalid pattern item type");
3305 			ret = -ENOTSUP;
3306 			break;
3307 		}
3308 		i++;
3309 	}
3310 
3311 	/* Now parse the actions to apply to the matching traffic */
3312 	end_of_list = 0;
3313 	while (!end_of_list) {
3314 		switch (actions[j].type) {
3315 		case RTE_FLOW_ACTION_TYPE_QUEUE:
3316 			dest_queue =
3317 				(const struct rte_flow_action_queue *)(actions[j].conf);
3318 			rxq = priv->rx_vq[dest_queue->index];
3319 			flow->action = RTE_FLOW_ACTION_TYPE_QUEUE;
3320 			memset(&action, 0, sizeof(struct dpni_fs_action_cfg));
3321 			action.flow_id = rxq->flow_id;
3322 
3323 			/* Configure the FS table first. */
3324 			if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
3325 				dpaa2_flow_fs_table_extracts_log(priv, flow->tc_id);
3326 				if (dpkg_prepare_key_cfg(
3327 				&priv->extract.tc_key_extract[flow->tc_id].dpkg,
3328 				(uint8_t *)(size_t)priv->extract
3329 				.tc_extract_param[flow->tc_id]) < 0) {
3330 					DPAA2_PMD_ERR(
3331 					"Unable to prepare extract parameters");
3332 					return -1;
3333 				}
3334 
3335 				memset(&tc_cfg, 0,
3336 					sizeof(struct dpni_rx_dist_cfg));
3337 				tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc;
3338 				tc_cfg.key_cfg_iova =
3339 					(uint64_t)priv->extract.tc_extract_param[flow->tc_id];
3340 				tc_cfg.tc = flow->tc_id;
3341 				tc_cfg.enable = false;
3342 				ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW,
3343 						priv->token, &tc_cfg);
3344 				if (ret < 0) {
3345 					DPAA2_PMD_ERR(
3346 						"TC hash cannot be disabled (%d)",
3347 						ret);
3348 					return -1;
3349 				}
3350 				tc_cfg.enable = true;
3351 				tc_cfg.fs_miss_flow_id =
3352 					dpaa2_flow_miss_flow_id;
3353 				ret = dpni_set_rx_fs_dist(dpni, CMD_PRI_LOW,
3354 							 priv->token, &tc_cfg);
3355 				if (ret < 0) {
3356 					DPAA2_PMD_ERR(
3357 						"TC distribution cannot be configured (%d)",
3358 						ret);
3359 					return -1;
3360 				}
3361 			}
3362 
3363 			/* Then configure the QoS table. */
3364 			if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
3365 				dpaa2_flow_qos_table_extracts_log(priv);
3366 				if (dpkg_prepare_key_cfg(
3367 					&priv->extract.qos_key_extract.dpkg,
3368 					(uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
3369 					DPAA2_PMD_ERR(
3370 						"Unable to prepare extract parameters");
3371 					return -1;
3372 				}
3373 
3374 				memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
3375 				qos_cfg.discard_on_miss = false;
3376 				qos_cfg.default_tc = 0;
3377 				qos_cfg.keep_entries = true;
3378 				qos_cfg.key_cfg_iova =
3379 					(size_t)priv->extract.qos_extract_param;
3380 				/* The QoS table is only effective when multiple TCs are in use. */
3381 				if (priv->num_rx_tc > 1) {
3382 					ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
3383 						priv->token, &qos_cfg);
3384 					if (ret < 0) {
3385 						DPAA2_PMD_ERR(
3386 						"RSS QoS table cannot be configured (%d)\n",
3387 							ret);
3388 						return -1;
3389 					}
3390 				}
3391 			}
3392 
3393 			flow->qos_real_key_size = priv->extract
3394 				.qos_key_extract.key_info.key_total_size;
3395 			if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR) {
3396 				if (flow->ipaddr_rule.qos_ipdst_offset >=
3397 					flow->ipaddr_rule.qos_ipsrc_offset) {
3398 					flow->qos_real_key_size =
3399 						flow->ipaddr_rule.qos_ipdst_offset +
3400 						NH_FLD_IPV4_ADDR_SIZE;
3401 				} else {
3402 					flow->qos_real_key_size =
3403 						flow->ipaddr_rule.qos_ipsrc_offset +
3404 						NH_FLD_IPV4_ADDR_SIZE;
3405 				}
3406 			} else if (flow->ipaddr_rule.ipaddr_type ==
3407 				FLOW_IPV6_ADDR) {
3408 				if (flow->ipaddr_rule.qos_ipdst_offset >=
3409 					flow->ipaddr_rule.qos_ipsrc_offset) {
3410 					flow->qos_real_key_size =
3411 						flow->ipaddr_rule.qos_ipdst_offset +
3412 						NH_FLD_IPV6_ADDR_SIZE;
3413 				} else {
3414 					flow->qos_real_key_size =
3415 						flow->ipaddr_rule.qos_ipsrc_offset +
3416 						NH_FLD_IPV6_ADDR_SIZE;
3417 				}
3418 			}
3419 
3420 			/* Adding a QoS entry is only effective when multiple TCs are in use. */
3421 			if (priv->num_rx_tc > 1) {
3422 				qos_index = flow->tc_id * priv->fs_entries +
3423 					flow->tc_index;
3424 				if (qos_index >= priv->qos_entries) {
3425 					DPAA2_PMD_ERR("QoS table with %d entries full",
3426 						priv->qos_entries);
3427 					return -1;
3428 				}
3429 				flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
3430 
3431 				dpaa2_flow_qos_entry_log("Start add", flow, qos_index);
3432 
3433 				ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
3434 						priv->token, &flow->qos_rule,
3435 						flow->tc_id, qos_index,
3436 						0, 0);
3437 				if (ret < 0) {
3438 					DPAA2_PMD_ERR(
3439 						"Error in adding entry to QoS table (%d)", ret);
3440 					return ret;
3441 				}
3442 			}
3443 
3444 			if (flow->tc_index >= priv->fs_entries) {
3445 				DPAA2_PMD_ERR("FS table with %d entries full",
3446 					priv->fs_entries);
3447 				return -1;
3448 			}
3449 
3450 			flow->fs_real_key_size =
3451 				priv->extract.tc_key_extract[flow->tc_id]
3452 				.key_info.key_total_size;
3453 
3454 			if (flow->ipaddr_rule.ipaddr_type ==
3455 				FLOW_IPV4_ADDR) {
3456 				if (flow->ipaddr_rule.fs_ipdst_offset >=
3457 					flow->ipaddr_rule.fs_ipsrc_offset) {
3458 					flow->fs_real_key_size =
3459 						flow->ipaddr_rule.fs_ipdst_offset +
3460 						NH_FLD_IPV4_ADDR_SIZE;
3461 				} else {
3462 					flow->fs_real_key_size =
3463 						flow->ipaddr_rule.fs_ipsrc_offset +
3464 						NH_FLD_IPV4_ADDR_SIZE;
3465 				}
3466 			} else if (flow->ipaddr_rule.ipaddr_type ==
3467 				FLOW_IPV6_ADDR) {
3468 				if (flow->ipaddr_rule.fs_ipdst_offset >=
3469 					flow->ipaddr_rule.fs_ipsrc_offset) {
3470 					flow->fs_real_key_size =
3471 						flow->ipaddr_rule.fs_ipdst_offset +
3472 						NH_FLD_IPV6_ADDR_SIZE;
3473 				} else {
3474 					flow->fs_real_key_size =
3475 						flow->ipaddr_rule.fs_ipsrc_offset +
3476 						NH_FLD_IPV6_ADDR_SIZE;
3477 				}
3478 			}
3479 
3480 			flow->fs_rule.key_size = FIXED_ENTRY_SIZE;
3481 
3482 			dpaa2_flow_fs_entry_log("Start add", flow);
3483 
3484 			ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token,
3485 						flow->tc_id, flow->tc_index,
3486 						&flow->fs_rule, &action);
3487 			if (ret < 0) {
3488 				DPAA2_PMD_ERR(
3489 				"Error in adding entry to FS table (%d)", ret);
3490 				return ret;
3491 			}
3492 			memcpy(&flow->action_cfg, &action,
3493 				sizeof(struct dpni_fs_action_cfg));
3494 			break;
3495 		case RTE_FLOW_ACTION_TYPE_RSS:
3496 			rss_conf = (const struct rte_flow_action_rss *)(actions[j].conf);
3497 
3498 			flow->action = RTE_FLOW_ACTION_TYPE_RSS;
3499 			ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types,
3500 					&priv->extract.tc_key_extract[flow->tc_id].dpkg);
3501 			if (ret < 0) {
3502 				DPAA2_PMD_ERR(
3503 				"Unable to set flow distribution. Please check queue config\n");
3504 				return ret;
3505 			}
3506 
3507 			/* Allocate DMA'ble memory to write the rules */
3508 			param = (size_t)rte_malloc(NULL, 256, 64);
3509 			if (!param) {
3510 				DPAA2_PMD_ERR("Memory allocation failure\n");
3511 				return -1;
3512 			}
3513 
3514 			if (dpkg_prepare_key_cfg(
3515 				&priv->extract.tc_key_extract[flow->tc_id].dpkg,
3516 				(uint8_t *)param) < 0) {
3517 				DPAA2_PMD_ERR(
3518 				"Unable to prepare extract parameters");
3519 				rte_free((void *)param);
3520 				return -1;
3521 			}
3522 
3523 			memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));
3524 			tc_cfg.dist_size = rss_conf->queue_num;
3525 			tc_cfg.key_cfg_iova = (size_t)param;
3526 			tc_cfg.enable = true;
3527 			tc_cfg.tc = flow->tc_id;
3528 			ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW,
3529 						 priv->token, &tc_cfg);
3530 			if (ret < 0) {
3531 				DPAA2_PMD_ERR(
3532 					"RSS TC table cannot be configured: %d\n",
3533 					ret);
3534 				rte_free((void *)param);
3535 				return -1;
3536 			}
3537 
3538 			rte_free((void *)param);
3539 			if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
3540 				if (dpkg_prepare_key_cfg(
3541 					&priv->extract.qos_key_extract.dpkg,
3542 					(uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
3543 					DPAA2_PMD_ERR(
3544 					"Unable to prepare extract parameters");
3545 					return -1;
3546 				}
3547 				memset(&qos_cfg, 0,
3548 					sizeof(struct dpni_qos_tbl_cfg));
3549 				qos_cfg.discard_on_miss = true;
3550 				qos_cfg.keep_entries = true;
3551 				qos_cfg.key_cfg_iova =
3552 					(size_t)priv->extract.qos_extract_param;
3553 				ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
3554 							 priv->token, &qos_cfg);
3555 				if (ret < 0) {
3556 					DPAA2_PMD_ERR(
3557 					"RSS QoS dist cannot be configured: %d\n",
3558 					ret);
3559 					return -1;
3560 				}
3561 			}
3562 
3563 			/* Add Rule into QoS table */
3564 			qos_index = flow->tc_id * priv->fs_entries +
3565 				flow->tc_index;
3566 			if (qos_index >= priv->qos_entries) {
3567 				DPAA2_PMD_ERR("QoS table with %d entries full",
3568 					priv->qos_entries);
3569 				return -1;
3570 			}
3571 
3572 			flow->qos_real_key_size =
3573 			  priv->extract.qos_key_extract.key_info.key_total_size;
3574 			flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
3575 			ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token,
3576 						&flow->qos_rule, flow->tc_id,
3577 						qos_index, 0, 0);
3578 			if (ret < 0) {
3579 				DPAA2_PMD_ERR(
3580 				"Error in entry addition in QoS table (%d)",
3581 				ret);
3582 				return ret;
3583 			}
3584 			break;
3585 		case RTE_FLOW_ACTION_TYPE_END:
3586 			end_of_list = 1;
3587 			break;
3588 		default:
3589 			DPAA2_PMD_ERR("Invalid action type");
3590 			ret = -ENOTSUP;
3591 			break;
3592 		}
3593 		j++;
3594 	}
3595 
3596 	if (!ret) {
3597 		if (is_keycfg_configured &
3598 			(DPAA2_QOS_TABLE_RECONFIGURE |
3599 			DPAA2_FS_TABLE_RECONFIGURE)) {
3600 			ret = dpaa2_flow_entry_update(priv, flow->tc_id);
3601 			if (ret) {
3602 				DPAA2_PMD_ERR("Flow entry update failed.");
3603 
3604 				return -1;
3605 			}
3606 		}
3607 		/* Insert the new rule at the tail of the flow list. */
3608 		if (!curr) {
3609 			LIST_INSERT_HEAD(&priv->flows, flow, next);
3610 		} else {
3611 			while (LIST_NEXT(curr, next))
3612 				curr = LIST_NEXT(curr, next);
3613 			LIST_INSERT_AFTER(curr, flow, next);
3614 		}
3615 	}
3616 	return ret;
3617 }
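
/* Worked example of the QoS indexing used in dpaa2_generic_flow_set()
 * above (values are illustrative): with priv->fs_entries = 16, a flow in
 * group (TC) 2 at priority (tc_index) 3 occupies
 * qos_index = 2 * 16 + 3 = 35, which must stay below priv->qos_entries
 * for dpni_add_qos_entry() to be attempted.
 */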
3618 
3619 static inline int
3620 dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr,
3621 		      const struct rte_flow_attr *attr)
3622 {
3623 	int ret = 0;
3624 
3625 	if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) {
3626 		DPAA2_PMD_ERR("Priority group is out of range\n");
3627 		ret = -ENOTSUP;
3628 	}
3629 	if (unlikely(attr->priority >= dpni_attr->fs_entries)) {
3630 		DPAA2_PMD_ERR("Priority within the group is out of range\n");
3631 		ret = -ENOTSUP;
3632 	}
3633 	if (unlikely(attr->egress)) {
3634 		DPAA2_PMD_ERR(
3635 			"Flow configuration is not supported on egress side\n");
3636 		ret = -ENOTSUP;
3637 	}
3638 	if (unlikely(!attr->ingress)) {
3639 		DPAA2_PMD_ERR("Ingress flag must be configured\n");
3640 		ret = -EINVAL;
3641 	}
3642 	return ret;
3643 }
3644 
3645 static inline int
3646 dpaa2_dev_verify_patterns(const struct rte_flow_item pattern[])
3647 {
3648 	unsigned int i, j, is_found = 0;
3649 	int ret = 0;
3650 
3651 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3652 		for (i = 0; i < RTE_DIM(dpaa2_supported_pattern_type); i++) {
3653 			if (dpaa2_supported_pattern_type[i]
3654 					== pattern[j].type) {
3655 				is_found = 1;
3656 				break;
3657 			}
3658 		}
3659 		if (!is_found) {
3660 			ret = -ENOTSUP;
3661 			break;
3662 		}
3663 	}
3664 	/* Also verify that every pattern item carries a spec */
3665 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3666 		if (!pattern[j].spec) {
3667 			ret = -EINVAL;
3668 			break;
3669 		}
3670 	}
3671 
3672 	return ret;
3673 }
3674 
3675 static inline int
3676 dpaa2_dev_verify_actions(const struct rte_flow_action actions[])
3677 {
3678 	unsigned int i, j, is_found = 0;
3679 	int ret = 0;
3680 
3681 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3682 		for (i = 0; i < RTE_DIM(dpaa2_supported_action_type); i++) {
3683 			if (dpaa2_supported_action_type[i] == actions[j].type) {
3684 				is_found = 1;
3685 				break;
3686 			}
3687 		}
3688 		if (!is_found) {
3689 			ret = -ENOTSUP;
3690 			break;
3691 		}
3692 	}
3693 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3694 		if (actions[j].type != RTE_FLOW_ACTION_TYPE_DROP &&
3695 				!actions[j].conf)
3696 			ret = -EINVAL;
3697 	}
3698 	return ret;
3699 }
3700 
3701 static
3702 int dpaa2_flow_validate(struct rte_eth_dev *dev,
3703 			const struct rte_flow_attr *flow_attr,
3704 			const struct rte_flow_item pattern[],
3705 			const struct rte_flow_action actions[],
3706 			struct rte_flow_error *error)
3707 {
3708 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3709 	struct dpni_attr dpni_attr;
3710 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3711 	uint16_t token = priv->token;
3712 	int ret = 0;
3713 
3714 	memset(&dpni_attr, 0, sizeof(struct dpni_attr));
3715 	ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr);
3716 	if (ret < 0) {
3717 		DPAA2_PMD_ERR(
3718 			"Failure to get dpni@%p attribute, err code %d\n",
3719 			dpni, ret);
3720 		rte_flow_error_set(error, EPERM,
3721 			   RTE_FLOW_ERROR_TYPE_ATTR,
3722 			   flow_attr, "invalid");
3723 		return ret;
3724 	}
3725 
3726 	/* Verify input attributes */
3727 	ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr);
3728 	if (ret < 0) {
3729 		DPAA2_PMD_ERR(
3730 			"Invalid attributes are given\n");
3731 		rte_flow_error_set(error, EPERM,
3732 			   RTE_FLOW_ERROR_TYPE_ATTR,
3733 			   flow_attr, "invalid");
3734 		goto not_valid_params;
3735 	}
3736 	/* Verify input pattern list */
3737 	ret = dpaa2_dev_verify_patterns(pattern);
3738 	if (ret < 0) {
3739 		DPAA2_PMD_ERR(
3740 			"Invalid pattern list is given\n");
3741 		rte_flow_error_set(error, EPERM,
3742 			   RTE_FLOW_ERROR_TYPE_ITEM,
3743 			   pattern, "invalid");
3744 		goto not_valid_params;
3745 	}
3746 	/* Verify input action list */
3747 	ret = dpaa2_dev_verify_actions(actions);
3748 	if (ret < 0) {
3749 		DPAA2_PMD_ERR(
3750 			"Invalid action list is given\n");
3751 		rte_flow_error_set(error, EPERM,
3752 			   RTE_FLOW_ERROR_TYPE_ACTION,
3753 			   actions, "invalid");
3754 		goto not_valid_params;
3755 	}
3756 not_valid_params:
3757 	return ret;
3758 }
3759 
3760 static
3761 struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev,
3762 				   const struct rte_flow_attr *attr,
3763 				   const struct rte_flow_item pattern[],
3764 				   const struct rte_flow_action actions[],
3765 				   struct rte_flow_error *error)
3766 {
3767 	struct rte_flow *flow = NULL;
3768 	size_t key_iova = 0, mask_iova = 0;
3769 	int ret;
3770 
3771 	dpaa2_flow_control_log =
3772 		getenv("DPAA2_FLOW_CONTROL_LOG");
3773 
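	/* When set, DPAA2_FLOW_CONTROL_MISS_FLOW overrides the miss flow ID;
	 * the value must be smaller than the number of distribution queues.
	 */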
3774 	if (getenv("DPAA2_FLOW_CONTROL_MISS_FLOW")) {
3775 		struct dpaa2_dev_priv *priv = dev->data->dev_private;
3776 
3777 		dpaa2_flow_miss_flow_id =
3778 			atoi(getenv("DPAA2_FLOW_CONTROL_MISS_FLOW"));
3779 		if (dpaa2_flow_miss_flow_id >= priv->dist_queues) {
3780 			DPAA2_PMD_ERR(
				"Miss flow ID %d exceeds the maximum flow ID %d",
3782 				dpaa2_flow_miss_flow_id,
3783 				priv->dist_queues - 1);
3784 			return NULL;
3785 		}
3786 	}
3787 
3788 	flow = rte_zmalloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE);
3789 	if (!flow) {
3790 		DPAA2_PMD_ERR("Failure to allocate memory for flow");
3791 		goto mem_failure;
3792 	}
	/* Allocate DMA'ble memory to write the QoS table rule.
	 * Attach each buffer to the flow as soon as it is allocated so that
	 * the error path can release everything from the flow alone.
	 */
	key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
	if (!key_iova) {
		DPAA2_PMD_ERR(
			"Memory allocation failure for rule configuration\n");
		goto mem_failure;
	}
	flow->qos_rule.key_iova = key_iova;

	mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
	if (!mask_iova) {
		DPAA2_PMD_ERR(
			"Memory allocation failure for rule configuration\n");
		goto mem_failure;
	}
	flow->qos_rule.mask_iova = mask_iova;

	/* Allocate DMA'ble memory to write the FS table rule */
	key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
	if (!key_iova) {
		DPAA2_PMD_ERR(
			"Memory allocation failure for rule configuration\n");
		goto mem_failure;
	}
	flow->fs_rule.key_iova = key_iova;

	mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
	if (!mask_iova) {
		DPAA2_PMD_ERR(
			"Memory allocation failure for rule configuration\n");
		goto mem_failure;
	}
	flow->fs_rule.mask_iova = mask_iova;
3826 
3827 	flow->ipaddr_rule.ipaddr_type = FLOW_NONE_IPADDR;
3828 	flow->ipaddr_rule.qos_ipsrc_offset =
3829 		IP_ADDRESS_OFFSET_INVALID;
3830 	flow->ipaddr_rule.qos_ipdst_offset =
3831 		IP_ADDRESS_OFFSET_INVALID;
3832 	flow->ipaddr_rule.fs_ipsrc_offset =
3833 		IP_ADDRESS_OFFSET_INVALID;
3834 	flow->ipaddr_rule.fs_ipdst_offset =
3835 		IP_ADDRESS_OFFSET_INVALID;
3836 
3837 	switch (dpaa2_filter_type) {
3838 	case RTE_ETH_FILTER_GENERIC:
3839 		ret = dpaa2_generic_flow_set(flow, dev, attr, pattern,
3840 					     actions, error);
3841 		if (ret < 0) {
3842 			if (error->type > RTE_FLOW_ERROR_TYPE_ACTION)
3843 				rte_flow_error_set(error, EPERM,
3844 						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3845 						attr, "unknown");
3846 			DPAA2_PMD_ERR(
3847 			"Failure to create flow, return code (%d)", ret);
3848 			goto creation_error;
3849 		}
3850 		break;
3851 	default:
3852 		DPAA2_PMD_ERR("Filter type (%d) not supported",
3853 		dpaa2_filter_type);
3854 		break;
3855 	}
3856 
3857 	return flow;
3858 mem_failure:
3859 	rte_flow_error_set(error, EPERM,
3860 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3861 			   NULL, "memory alloc");
creation_error:
	if (flow) {
		/* Release any rule buffers already attached to the flow */
		rte_free((void *)(size_t)flow->qos_rule.key_iova);
		rte_free((void *)(size_t)flow->qos_rule.mask_iova);
		rte_free((void *)(size_t)flow->fs_rule.key_iova);
		rte_free((void *)(size_t)flow->fs_rule.mask_iova);
		rte_free(flow);
	}

	return NULL;
3868 }
3869 
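/**
 * Destroy a flow rule.
 *
 * Removes the rule from the QoS table (only when more than one Rx traffic
 * class is in use) and, for QUEUE actions, from the FS table as well, then
 * releases the rule buffers and the flow structure itself.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */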
3870 static
3871 int dpaa2_flow_destroy(struct rte_eth_dev *dev,
3872 		       struct rte_flow *flow,
3873 		       struct rte_flow_error *error)
3874 {
3875 	int ret = 0;
3876 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3877 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3878 
3879 	switch (flow->action) {
3880 	case RTE_FLOW_ACTION_TYPE_QUEUE:
3881 		if (priv->num_rx_tc > 1) {
3882 			/* Remove entry from QoS table first */
3883 			ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
3884 					&flow->qos_rule);
3885 			if (ret < 0) {
3886 				DPAA2_PMD_ERR(
3887 					"Error in removing entry from QoS table(%d)", ret);
3888 				goto error;
3889 			}
3890 		}
3891 
3892 		/* Then remove entry from FS table */
3893 		ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token,
3894 					   flow->tc_id, &flow->fs_rule);
3895 		if (ret < 0) {
3896 			DPAA2_PMD_ERR(
3897 				"Error in removing entry from FS table(%d)", ret);
3898 			goto error;
3899 		}
3900 		break;
3901 	case RTE_FLOW_ACTION_TYPE_RSS:
3902 		if (priv->num_rx_tc > 1) {
3903 			ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
3904 					&flow->qos_rule);
3905 			if (ret < 0) {
3906 				DPAA2_PMD_ERR(
					"Error in removing entry from QoS table(%d)", ret);
3908 				goto error;
3909 			}
3910 		}
3911 		break;
3912 	default:
3913 		DPAA2_PMD_ERR(
3914 		"Action type (%d) is not supported", flow->action);
3915 		ret = -ENOTSUP;
3916 		break;
3917 	}
3918 
3919 	LIST_REMOVE(flow, next);
3920 	rte_free((void *)(size_t)flow->qos_rule.key_iova);
3921 	rte_free((void *)(size_t)flow->qos_rule.mask_iova);
3922 	rte_free((void *)(size_t)flow->fs_rule.key_iova);
3923 	rte_free((void *)(size_t)flow->fs_rule.mask_iova);
3924 	/* Now free the flow */
3925 	rte_free(flow);
3926 
3927 error:
3928 	if (ret)
3929 		rte_flow_error_set(error, EPERM,
3930 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3931 				   NULL, "unknown");
3932 	return ret;
3933 }
3934 
3935 /**
3936  * Destroy user-configured flow rules.
3937  *
 * This function skips internal flow rules.
3939  *
3940  * @see rte_flow_flush()
3941  * @see rte_flow_ops
3942  */
3943 static int
3944 dpaa2_flow_flush(struct rte_eth_dev *dev,
3945 		struct rte_flow_error *error)
3946 {
3947 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3948 	struct rte_flow *flow = LIST_FIRST(&priv->flows);
3949 
3950 	while (flow) {
3951 		struct rte_flow *next = LIST_NEXT(flow, next);
3952 
3953 		dpaa2_flow_destroy(dev, flow, error);
3954 		flow = next;
3955 	}
3956 	return 0;
3957 }
3958 
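/**
 * Query a flow rule.
 *
 * Currently a stub: the arguments are ignored and success is always
 * reported.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */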
3959 static int
3960 dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused,
3961 		struct rte_flow *flow __rte_unused,
3962 		const struct rte_flow_action *actions __rte_unused,
3963 		void *data __rte_unused,
3964 		struct rte_flow_error *error __rte_unused)
3965 {
3966 	return 0;
3967 }
3968 
3969 /**
3970  * Clean up all flow rules.
3971  *
3972  * Unlike dpaa2_flow_flush(), this function takes care of all remaining flow
3973  * rules regardless of whether they are internal or user-configured.
3974  *
 * @param dev
 *   Pointer to the rte_eth_dev structure.
3977  */
3978 void
3979 dpaa2_flow_clean(struct rte_eth_dev *dev)
3980 {
3981 	struct rte_flow *flow;
3982 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3983 
3984 	while ((flow = LIST_FIRST(&priv->flows)))
3985 		dpaa2_flow_destroy(dev, flow, NULL);
3986 }
3987 
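/**
 * Flow operations exported to the generic rte_flow layer.
 *
 * @see struct rte_flow_ops
 */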
3988 const struct rte_flow_ops dpaa2_flow_ops = {
3989 	.create	= dpaa2_flow_create,
3990 	.validate = dpaa2_flow_validate,
3991 	.destroy = dpaa2_flow_destroy,
3992 	.flush	= dpaa2_flow_flush,
3993 	.query	= dpaa2_flow_query,
3994 };
3995
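/*
 * Illustrative usage sketch (not compiled into the driver): how an
 * application reaches the callbacks above through the generic rte_flow
 * API. The port id, queue index and header values are placeholders; note
 * that this PMD expects every pattern item to carry a spec.
 *
 *	struct rte_flow_attr attr = { .group = 0, .ingress = 1 };
 *	struct rte_flow_item_eth eth_spec = {
 *		.type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
 *	};
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.next_proto_id = IPPROTO_UDP,
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.dst_port = RTE_BE16(4789),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue_conf = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error flow_err;
 *	struct rte_flow *handle;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &flow_err) == 0)
 *		handle = rte_flow_create(port_id, &attr, pattern,
 *					 actions, &flow_err);
 */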