xref: /dpdk/drivers/net/dpaa2/dpaa2_flow.c (revision 68a03efeed657e6e05f281479b33b51102797e15)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2020 NXP
3  */
4 
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 
13 #include <rte_ethdev.h>
14 #include <rte_log.h>
15 #include <rte_malloc.h>
16 #include <rte_flow_driver.h>
17 #include <rte_tailq.h>
18 
19 #include <fsl_dpni.h>
20 #include <fsl_dpkg.h>
21 
22 #include <dpaa2_ethdev.h>
23 #include <dpaa2_pmd_logs.h>
24 
25 /* Workaround to discriminate UDP/TCP/SCTP flows
26  * by the next-protocol field of L3, since
27  * MC/WRIOP cannot identify the L4 protocol
28  * from the L4 ports alone.
29  */
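/* When left at zero (the default), flow rules for UDP/TCP/SCTP also match
 * the IP next-protocol field so that the L4 protocol can be discriminated.
 */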
30 int mc_l4_port_identification;
31 
32 static char *dpaa2_flow_control_log;
33 static int dpaa2_flow_miss_flow_id =
34 	DPNI_FS_MISS_DROP;
35 
36 #define FIXED_ENTRY_SIZE 54
37 
38 enum flow_rule_ipaddr_type {
39 	FLOW_NONE_IPADDR,
40 	FLOW_IPV4_ADDR,
41 	FLOW_IPV6_ADDR
42 };
43 
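/* Records where the IP source/destination address bytes currently sit in the
 * QoS and FS rule keys, so that they can later be moved to the tail of the
 * key (see dpaa2_flow_rule_move_ipaddr_tail). A negative offset means the
 * corresponding address is not part of the rule.
 */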
44 struct flow_rule_ipaddr {
45 	enum flow_rule_ipaddr_type ipaddr_type;
46 	int qos_ipsrc_offset;
47 	int qos_ipdst_offset;
48 	int fs_ipsrc_offset;
49 	int fs_ipdst_offset;
50 };
51 
52 struct rte_flow {
53 	LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
54 	struct dpni_rule_cfg qos_rule;
55 	struct dpni_rule_cfg fs_rule;
56 	uint8_t qos_real_key_size;
57 	uint8_t fs_real_key_size;
58 	uint8_t tc_id; /**< Traffic Class ID. */
59 	uint8_t tc_index; /**< Index within this Traffic Class. */
60 	enum rte_flow_action_type action;
61 	/* Offsets of the IP address field(s) in the
62 	 * rule key/mask (see struct flow_rule_ipaddr).
63 	 */
64 	struct flow_rule_ipaddr ipaddr_rule;
65 	struct dpni_fs_action_cfg action_cfg;
66 };
67 
68 static const
69 enum rte_flow_item_type dpaa2_supported_pattern_type[] = {
70 	RTE_FLOW_ITEM_TYPE_END,
71 	RTE_FLOW_ITEM_TYPE_ETH,
72 	RTE_FLOW_ITEM_TYPE_VLAN,
73 	RTE_FLOW_ITEM_TYPE_IPV4,
74 	RTE_FLOW_ITEM_TYPE_IPV6,
75 	RTE_FLOW_ITEM_TYPE_ICMP,
76 	RTE_FLOW_ITEM_TYPE_UDP,
77 	RTE_FLOW_ITEM_TYPE_TCP,
78 	RTE_FLOW_ITEM_TYPE_SCTP,
79 	RTE_FLOW_ITEM_TYPE_GRE,
80 };
81 
82 static const
83 enum rte_flow_action_type dpaa2_supported_action_type[] = {
84 	RTE_FLOW_ACTION_TYPE_END,
85 	RTE_FLOW_ACTION_TYPE_QUEUE,
86 	RTE_FLOW_ACTION_TYPE_RSS
87 };
88 
89 /* Max of enum rte_flow_item_type + 1, used as a generic IP item for both IPv4 and IPv6. */
90 #define DPAA2_FLOW_ITEM_TYPE_GENERIC_IP (RTE_FLOW_ITEM_TYPE_META + 1)
91 
92 #ifndef __cplusplus
93 static const struct rte_flow_item_eth dpaa2_flow_item_eth_mask = {
94 	.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
95 	.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
96 	.type = RTE_BE16(0xffff),
97 };
98 
99 static const struct rte_flow_item_vlan dpaa2_flow_item_vlan_mask = {
100 	.tci = RTE_BE16(0xffff),
101 };
102 
103 static const struct rte_flow_item_ipv4 dpaa2_flow_item_ipv4_mask = {
104 	.hdr.src_addr = RTE_BE32(0xffffffff),
105 	.hdr.dst_addr = RTE_BE32(0xffffffff),
106 	.hdr.next_proto_id = 0xff,
107 };
108 
109 static const struct rte_flow_item_ipv6 dpaa2_flow_item_ipv6_mask = {
110 	.hdr = {
111 		.src_addr =
112 			"\xff\xff\xff\xff\xff\xff\xff\xff"
113 			"\xff\xff\xff\xff\xff\xff\xff\xff",
114 		.dst_addr =
115 			"\xff\xff\xff\xff\xff\xff\xff\xff"
116 			"\xff\xff\xff\xff\xff\xff\xff\xff",
117 		.proto = 0xff
118 	},
119 };
120 
121 static const struct rte_flow_item_icmp dpaa2_flow_item_icmp_mask = {
122 	.hdr.icmp_type = 0xff,
123 	.hdr.icmp_code = 0xff,
124 };
125 
126 static const struct rte_flow_item_udp dpaa2_flow_item_udp_mask = {
127 	.hdr = {
128 		.src_port = RTE_BE16(0xffff),
129 		.dst_port = RTE_BE16(0xffff),
130 	},
131 };
132 
133 static const struct rte_flow_item_tcp dpaa2_flow_item_tcp_mask = {
134 	.hdr = {
135 		.src_port = RTE_BE16(0xffff),
136 		.dst_port = RTE_BE16(0xffff),
137 	},
138 };
139 
140 static const struct rte_flow_item_sctp dpaa2_flow_item_sctp_mask = {
141 	.hdr = {
142 		.src_port = RTE_BE16(0xffff),
143 		.dst_port = RTE_BE16(0xffff),
144 	},
145 };
146 
147 static const struct rte_flow_item_gre dpaa2_flow_item_gre_mask = {
148 	.protocol = RTE_BE16(0xffff),
149 };
150 
151 #endif
152 
153 static inline void dpaa2_prot_field_string(
154 	enum net_prot prot, uint32_t field,
155 	char *string)
156 {
157 	if (!dpaa2_flow_control_log)
158 		return;
159 
160 	if (prot == NET_PROT_ETH) {
161 		strcpy(string, "eth");
162 		if (field == NH_FLD_ETH_DA)
163 			strcat(string, ".dst");
164 		else if (field == NH_FLD_ETH_SA)
165 			strcat(string, ".src");
166 		else if (field == NH_FLD_ETH_TYPE)
167 			strcat(string, ".type");
168 		else
169 			strcat(string, ".unknown field");
170 	} else if (prot == NET_PROT_VLAN) {
171 		strcpy(string, "vlan");
172 		if (field == NH_FLD_VLAN_TCI)
173 			strcat(string, ".tci");
174 		else
175 			strcat(string, ".unknown field");
176 	} else if (prot == NET_PROT_IP) {
177 		strcpy(string, "ip");
178 		if (field == NH_FLD_IP_SRC)
179 			strcat(string, ".src");
180 		else if (field == NH_FLD_IP_DST)
181 			strcat(string, ".dst");
182 		else if (field == NH_FLD_IP_PROTO)
183 			strcat(string, ".proto");
184 		else
185 			strcat(string, ".unknown field");
186 	} else if (prot == NET_PROT_TCP) {
187 		strcpy(string, "tcp");
188 		if (field == NH_FLD_TCP_PORT_SRC)
189 			strcat(string, ".src");
190 		else if (field == NH_FLD_TCP_PORT_DST)
191 			strcat(string, ".dst");
192 		else
193 			strcat(string, ".unknown field");
194 	} else if (prot == NET_PROT_UDP) {
195 		strcpy(string, "udp");
196 		if (field == NH_FLD_UDP_PORT_SRC)
197 			strcat(string, ".src");
198 		else if (field == NH_FLD_UDP_PORT_DST)
199 			strcat(string, ".dst");
200 		else
201 			strcat(string, ".unknown field");
202 	} else if (prot == NET_PROT_ICMP) {
203 		strcpy(string, "icmp");
204 		if (field == NH_FLD_ICMP_TYPE)
205 			strcat(string, ".type");
206 		else if (field == NH_FLD_ICMP_CODE)
207 			strcat(string, ".code");
208 		else
209 			strcat(string, ".unknown field");
210 	} else if (prot == NET_PROT_SCTP) {
211 		strcpy(string, "sctp");
212 		if (field == NH_FLD_SCTP_PORT_SRC)
213 			strcat(string, ".src");
214 		else if (field == NH_FLD_SCTP_PORT_DST)
215 			strcat(string, ".dst");
216 		else
217 			strcat(string, ".unknown field");
218 	} else if (prot == NET_PROT_GRE) {
219 		strcpy(string, "gre");
220 		if (field == NH_FLD_GRE_TYPE)
221 			strcat(string, ".type");
222 		else
223 			strcat(string, ".unknown field");
224 	} else {
225 		strcpy(string, "unknown protocol");
226 	}
227 }
228 
229 static inline void dpaa2_flow_qos_table_extracts_log(
230 	const struct dpaa2_dev_priv *priv)
231 {
232 	int idx;
233 	char string[32];
234 
235 	if (!dpaa2_flow_control_log)
236 		return;
237 
238 	printf("Setup QoS table: number of extracts: %d\r\n",
239 			priv->extract.qos_key_extract.dpkg.num_extracts);
240 	for (idx = 0; idx < priv->extract.qos_key_extract.dpkg.num_extracts;
241 		idx++) {
242 		dpaa2_prot_field_string(priv->extract.qos_key_extract.dpkg
243 			.extracts[idx].extract.from_hdr.prot,
244 			priv->extract.qos_key_extract.dpkg.extracts[idx]
245 			.extract.from_hdr.field,
246 			string);
247 		printf("%s", string);
248 		if ((idx + 1) < priv->extract.qos_key_extract.dpkg.num_extracts)
249 			printf(" / ");
250 	}
251 	printf("\r\n");
252 }
253 
254 static inline void dpaa2_flow_fs_table_extracts_log(
255 	const struct dpaa2_dev_priv *priv, int tc_id)
256 {
257 	int idx;
258 	char string[32];
259 
260 	if (!dpaa2_flow_control_log)
261 		return;
262 
263 	printf("Setup FS table: number of extracts of TC[%d]: %d\r\n",
264 			tc_id, priv->extract.tc_key_extract[tc_id]
265 			.dpkg.num_extracts);
266 	for (idx = 0; idx < priv->extract.tc_key_extract[tc_id]
267 		.dpkg.num_extracts; idx++) {
268 		dpaa2_prot_field_string(priv->extract.tc_key_extract[tc_id]
269 			.dpkg.extracts[idx].extract.from_hdr.prot,
270 			priv->extract.tc_key_extract[tc_id].dpkg.extracts[idx]
271 			.extract.from_hdr.field,
272 			string);
273 		printf("%s", string);
274 		if ((idx + 1) < priv->extract.tc_key_extract[tc_id]
275 			.dpkg.num_extracts)
276 			printf(" / ");
277 	}
278 	printf("\r\n");
279 }
280 
281 static inline void dpaa2_flow_qos_entry_log(
282 	const char *log_info, const struct rte_flow *flow, int qos_index)
283 {
284 	int idx;
285 	uint8_t *key, *mask;
286 
287 	if (!dpaa2_flow_control_log)
288 		return;
289 
290 	printf("\r\n%s QoS entry[%d] for TC[%d], extracts size is %d\r\n",
291 		log_info, qos_index, flow->tc_id, flow->qos_real_key_size);
292 
293 	key = (uint8_t *)(size_t)flow->qos_rule.key_iova;
294 	mask = (uint8_t *)(size_t)flow->qos_rule.mask_iova;
295 
296 	printf("key:\r\n");
297 	for (idx = 0; idx < flow->qos_real_key_size; idx++)
298 		printf("%02x ", key[idx]);
299 
300 	printf("\r\nmask:\r\n");
301 	for (idx = 0; idx < flow->qos_real_key_size; idx++)
302 		printf("%02x ", mask[idx]);
303 
304 	printf("\r\n%s QoS ipsrc: %d, ipdst: %d\r\n", log_info,
305 		flow->ipaddr_rule.qos_ipsrc_offset,
306 		flow->ipaddr_rule.qos_ipdst_offset);
307 }
308 
309 static inline void dpaa2_flow_fs_entry_log(
310 	const char *log_info, const struct rte_flow *flow)
311 {
312 	int idx;
313 	uint8_t *key, *mask;
314 
315 	if (!dpaa2_flow_control_log)
316 		return;
317 
318 	printf("\r\n%s FS/TC entry[%d] of TC[%d], extracts size is %d\r\n",
319 		log_info, flow->tc_index, flow->tc_id, flow->fs_real_key_size);
320 
321 	key = (uint8_t *)(size_t)flow->fs_rule.key_iova;
322 	mask = (uint8_t *)(size_t)flow->fs_rule.mask_iova;
323 
324 	printf("key:\r\n");
325 	for (idx = 0; idx < flow->fs_real_key_size; idx++)
326 		printf("%02x ", key[idx]);
327 
328 	printf("\r\nmask:\r\n");
329 	for (idx = 0; idx < flow->fs_real_key_size; idx++)
330 		printf("%02x ", mask[idx]);
331 
332 	printf("\r\n%s FS ipsrc: %d, ipdst: %d\r\n", log_info,
333 		flow->ipaddr_rule.fs_ipsrc_offset,
334 		flow->ipaddr_rule.fs_ipdst_offset);
335 }
336 
337 static inline void dpaa2_flow_extract_key_set(
338 	struct dpaa2_key_info *key_info, int index, uint8_t size)
339 {
340 	key_info->key_size[index] = size;
341 	if (index > 0) {
342 		key_info->key_offset[index] =
343 			key_info->key_offset[index - 1] +
344 			key_info->key_size[index - 1];
345 	} else {
346 		key_info->key_offset[index] = 0;
347 	}
348 	key_info->key_total_size += size;
349 }
350 
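/* Add a header-field extract to the profile.
 * IP SRC/DST extracts are always kept as the last extract(s): a non-address
 * extract is inserted just before them and the address extracts are shifted
 * down by one slot, with their ipv4/ipv6 key offsets bumped by the new field
 * size. Address extracts are recorded with key size 0; their actual size is
 * tracked through the ipv4/ipv6 offsets of dpaa2_key_info.
 */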
351 static int dpaa2_flow_extract_add(
352 	struct dpaa2_key_extract *key_extract,
353 	enum net_prot prot,
354 	uint32_t field, uint8_t field_size)
355 {
356 	int index, ip_src = -1, ip_dst = -1;
357 	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
358 	struct dpaa2_key_info *key_info = &key_extract->key_info;
359 
360 	if (dpkg->num_extracts >=
361 		DPKG_MAX_NUM_OF_EXTRACTS) {
362 		DPAA2_PMD_WARN("Number of extracts overflows");
363 		return -1;
364 	}
365 	/* IP SRC and IP DST, if present, are expected to already
366 	 * be the last extract(s) before reordering.
367 	 */
368 	for (index = 0; index < dpkg->num_extracts; index++) {
369 		if (dpkg->extracts[index].extract.from_hdr.prot ==
370 			NET_PROT_IP) {
371 			if (dpkg->extracts[index].extract.from_hdr.field ==
372 				NH_FLD_IP_SRC) {
373 				ip_src = index;
374 			}
375 			if (dpkg->extracts[index].extract.from_hdr.field ==
376 				NH_FLD_IP_DST) {
377 				ip_dst = index;
378 			}
379 		}
380 	}
381 
382 	if (ip_src >= 0)
383 		RTE_ASSERT((ip_src + 2) >= dpkg->num_extracts);
384 
385 	if (ip_dst >= 0)
386 		RTE_ASSERT((ip_dst + 2) >= dpkg->num_extracts);
387 
388 	if (prot == NET_PROT_IP &&
389 		(field == NH_FLD_IP_SRC ||
390 		field == NH_FLD_IP_DST)) {
391 		index = dpkg->num_extracts;
392 	} else {
393 		if (ip_src >= 0 && ip_dst >= 0)
394 			index = dpkg->num_extracts - 2;
395 		else if (ip_src >= 0 || ip_dst >= 0)
396 			index = dpkg->num_extracts - 1;
397 		else
398 			index = dpkg->num_extracts;
399 	}
400 
401 	dpkg->extracts[index].type =	DPKG_EXTRACT_FROM_HDR;
402 	dpkg->extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
403 	dpkg->extracts[index].extract.from_hdr.prot = prot;
404 	dpkg->extracts[index].extract.from_hdr.field = field;
405 	if (prot == NET_PROT_IP &&
406 		(field == NH_FLD_IP_SRC ||
407 		field == NH_FLD_IP_DST)) {
408 		dpaa2_flow_extract_key_set(key_info, index, 0);
409 	} else {
410 		dpaa2_flow_extract_key_set(key_info, index, field_size);
411 	}
412 
413 	if (prot == NET_PROT_IP) {
414 		if (field == NH_FLD_IP_SRC) {
415 			if (key_info->ipv4_dst_offset >= 0) {
416 				key_info->ipv4_src_offset =
417 					key_info->ipv4_dst_offset +
418 					NH_FLD_IPV4_ADDR_SIZE;
419 			} else {
420 				key_info->ipv4_src_offset =
421 					key_info->key_offset[index - 1] +
422 						key_info->key_size[index - 1];
423 			}
424 			if (key_info->ipv6_dst_offset >= 0) {
425 				key_info->ipv6_src_offset =
426 					key_info->ipv6_dst_offset +
427 					NH_FLD_IPV6_ADDR_SIZE;
428 			} else {
429 				key_info->ipv6_src_offset =
430 					key_info->key_offset[index - 1] +
431 						key_info->key_size[index - 1];
432 			}
433 		} else if (field == NH_FLD_IP_DST) {
434 			if (key_info->ipv4_src_offset >= 0) {
435 				key_info->ipv4_dst_offset =
436 					key_info->ipv4_src_offset +
437 					NH_FLD_IPV4_ADDR_SIZE;
438 			} else {
439 				key_info->ipv4_dst_offset =
440 					key_info->key_offset[index - 1] +
441 						key_info->key_size[index - 1];
442 			}
443 			if (key_info->ipv6_src_offset >= 0) {
444 				key_info->ipv6_dst_offset =
445 					key_info->ipv6_src_offset +
446 					NH_FLD_IPV6_ADDR_SIZE;
447 			} else {
448 				key_info->ipv6_dst_offset =
449 					key_info->key_offset[index - 1] +
450 						key_info->key_size[index - 1];
451 			}
452 		}
453 	}
454 
455 	if (index == dpkg->num_extracts) {
456 		dpkg->num_extracts++;
457 		return 0;
458 	}
459 
460 	if (ip_src >= 0) {
461 		ip_src++;
462 		dpkg->extracts[ip_src].type =
463 			DPKG_EXTRACT_FROM_HDR;
464 		dpkg->extracts[ip_src].extract.from_hdr.type =
465 			DPKG_FULL_FIELD;
466 		dpkg->extracts[ip_src].extract.from_hdr.prot =
467 			NET_PROT_IP;
468 		dpkg->extracts[ip_src].extract.from_hdr.field =
469 			NH_FLD_IP_SRC;
470 		dpaa2_flow_extract_key_set(key_info, ip_src, 0);
471 		key_info->ipv4_src_offset += field_size;
472 		key_info->ipv6_src_offset += field_size;
473 	}
474 	if (ip_dst >= 0) {
475 		ip_dst++;
476 		dpkg->extracts[ip_dst].type =
477 			DPKG_EXTRACT_FROM_HDR;
478 		dpkg->extracts[ip_dst].extract.from_hdr.type =
479 			DPKG_FULL_FIELD;
480 		dpkg->extracts[ip_dst].extract.from_hdr.prot =
481 			NET_PROT_IP;
482 		dpkg->extracts[ip_dst].extract.from_hdr.field =
483 			NH_FLD_IP_DST;
484 		dpaa2_flow_extract_key_set(key_info, ip_dst, 0);
485 		key_info->ipv4_dst_offset += field_size;
486 		key_info->ipv6_dst_offset += field_size;
487 	}
488 
489 	dpkg->num_extracts++;
490 
491 	return 0;
492 }
493 
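/* Build a raw (offset based) key of 'size' bytes, split into
 * DPKG_EXTRACT_FROM_DATA chunks of at most DPAA2_FLOW_MAX_KEY_SIZE bytes.
 * Raw extracts cannot be combined with header-based extracts.
 */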
494 static int dpaa2_flow_extract_add_raw(struct dpaa2_key_extract *key_extract,
495 				      int size)
496 {
497 	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
498 	struct dpaa2_key_info *key_info = &key_extract->key_info;
499 	int last_extract_size, index;
500 
501 	if (dpkg->num_extracts != 0 && dpkg->extracts[0].type !=
502 	    DPKG_EXTRACT_FROM_DATA) {
503 		DPAA2_PMD_WARN("RAW extract cannot be combined with others");
504 		return -1;
505 	}
506 
507 	last_extract_size = (size % DPAA2_FLOW_MAX_KEY_SIZE);
508 	dpkg->num_extracts = (size / DPAA2_FLOW_MAX_KEY_SIZE);
509 	if (last_extract_size)
510 		dpkg->num_extracts++;
511 	else
512 		last_extract_size = DPAA2_FLOW_MAX_KEY_SIZE;
513 
514 	for (index = 0; index < dpkg->num_extracts; index++) {
515 		dpkg->extracts[index].type = DPKG_EXTRACT_FROM_DATA;
516 		if (index == dpkg->num_extracts - 1)
517 			dpkg->extracts[index].extract.from_data.size =
518 				last_extract_size;
519 		else
520 			dpkg->extracts[index].extract.from_data.size =
521 				DPAA2_FLOW_MAX_KEY_SIZE;
522 		dpkg->extracts[index].extract.from_data.offset =
523 			DPAA2_FLOW_MAX_KEY_SIZE * index;
524 	}
525 
526 	key_info->key_total_size = size;
527 	return 0;
528 }
529 
530 /* Protocol discrimination.
531  * Discriminate IPv4/IPv6/vLan by Eth type.
532  * Discriminate UDP/TCP/ICMP by next proto of IP.
533  */
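/* For example, a rule created with "flow create 0 ingress pattern ipv4 /"
 * and no IPv4 spec only needs the Eth type extract, matched against 0x0800.
 */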
534 static inline int
535 dpaa2_flow_proto_discrimination_extract(
536 	struct dpaa2_key_extract *key_extract,
537 	enum rte_flow_item_type type)
538 {
539 	if (type == RTE_FLOW_ITEM_TYPE_ETH) {
540 		return dpaa2_flow_extract_add(
541 				key_extract, NET_PROT_ETH,
542 				NH_FLD_ETH_TYPE,
543 				sizeof(rte_be16_t));
544 	} else if (type == (enum rte_flow_item_type)
545 		DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
546 		return dpaa2_flow_extract_add(
547 				key_extract, NET_PROT_IP,
548 				NH_FLD_IP_PROTO,
549 				NH_FLD_IP_PROTO_SIZE);
550 	}
551 
552 	return -1;
553 }
554 
555 static inline int dpaa2_flow_extract_search(
556 	struct dpkg_profile_cfg *dpkg,
557 	enum net_prot prot, uint32_t field)
558 {
559 	int i;
560 
561 	for (i = 0; i < dpkg->num_extracts; i++) {
562 		if (dpkg->extracts[i].extract.from_hdr.prot == prot &&
563 			dpkg->extracts[i].extract.from_hdr.field == field) {
564 			return i;
565 		}
566 	}
567 
568 	return -1;
569 }
570 
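/* Return the key offset of an extracted field. IPv4 and IPv6 share the same
 * NET_PROT_IP extract, so address fields are resolved through the per-family
 * offsets recorded in dpaa2_key_info instead of key_offset[].
 */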
571 static inline int dpaa2_flow_extract_key_offset(
572 	struct dpaa2_key_extract *key_extract,
573 	enum net_prot prot, uint32_t field)
574 {
575 	int i;
576 	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
577 	struct dpaa2_key_info *key_info = &key_extract->key_info;
578 
579 	if (prot == NET_PROT_IPV4 ||
580 		prot == NET_PROT_IPV6)
581 		i = dpaa2_flow_extract_search(dpkg, NET_PROT_IP, field);
582 	else
583 		i = dpaa2_flow_extract_search(dpkg, prot, field);
584 
585 	if (i >= 0) {
586 		if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_SRC)
587 			return key_info->ipv4_src_offset;
588 		else if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_DST)
589 			return key_info->ipv4_dst_offset;
590 		else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_SRC)
591 			return key_info->ipv6_src_offset;
592 		else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_DST)
593 			return key_info->ipv6_dst_offset;
594 		else
595 			return key_info->key_offset[i];
596 	} else {
597 		return -1;
598 	}
599 }
600 
601 struct proto_discrimination {
602 	enum rte_flow_item_type type;
603 	union {
604 		rte_be16_t eth_type;
605 		uint8_t ip_proto;
606 	};
607 };
608 
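/* Write the discriminating value (Eth type or IP protocol) and an all-ones
 * mask into both the QoS rule and the FS rule of the given group, at the
 * offsets of the corresponding extracts.
 */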
609 static int
610 dpaa2_flow_proto_discrimination_rule(
611 	struct dpaa2_dev_priv *priv, struct rte_flow *flow,
612 	struct proto_discrimination proto, int group)
613 {
614 	enum net_prot prot;
615 	uint32_t field;
616 	int offset;
617 	size_t key_iova;
618 	size_t mask_iova;
619 	rte_be16_t eth_type;
620 	uint8_t ip_proto;
621 
622 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
623 		prot = NET_PROT_ETH;
624 		field = NH_FLD_ETH_TYPE;
625 	} else if (proto.type == DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
626 		prot = NET_PROT_IP;
627 		field = NH_FLD_IP_PROTO;
628 	} else {
629 		DPAA2_PMD_ERR(
630 			"Only Eth and IP are supported to discriminate the next proto.");
631 		return -1;
632 	}
633 
634 	offset = dpaa2_flow_extract_key_offset(&priv->extract.qos_key_extract,
635 			prot, field);
636 	if (offset < 0) {
637 		DPAA2_PMD_ERR("QoS prot %d field %d extract failed",
638 				prot, field);
639 		return -1;
640 	}
641 	key_iova = flow->qos_rule.key_iova + offset;
642 	mask_iova = flow->qos_rule.mask_iova + offset;
643 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
644 		eth_type = proto.eth_type;
645 		memcpy((void *)key_iova, (const void *)(&eth_type),
646 			sizeof(rte_be16_t));
647 		eth_type = 0xffff;
648 		memcpy((void *)mask_iova, (const void *)(&eth_type),
649 			sizeof(rte_be16_t));
650 	} else {
651 		ip_proto = proto.ip_proto;
652 		memcpy((void *)key_iova, (const void *)(&ip_proto),
653 			sizeof(uint8_t));
654 		ip_proto = 0xff;
655 		memcpy((void *)mask_iova, (const void *)(&ip_proto),
656 			sizeof(uint8_t));
657 	}
658 
659 	offset = dpaa2_flow_extract_key_offset(
660 			&priv->extract.tc_key_extract[group],
661 			prot, field);
662 	if (offset < 0) {
663 		DPAA2_PMD_ERR("FS prot %d field %d extract failed",
664 				prot, field);
665 		return -1;
666 	}
667 	key_iova = flow->fs_rule.key_iova + offset;
668 	mask_iova = flow->fs_rule.mask_iova + offset;
669 
670 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
671 		eth_type = proto.eth_type;
672 		memcpy((void *)key_iova, (const void *)(&eth_type),
673 			sizeof(rte_be16_t));
674 		eth_type = 0xffff;
675 		memcpy((void *)mask_iova, (const void *)(&eth_type),
676 			sizeof(rte_be16_t));
677 	} else {
678 		ip_proto = proto.ip_proto;
679 		memcpy((void *)key_iova, (const void *)(&ip_proto),
680 			sizeof(uint8_t));
681 		ip_proto = 0xff;
682 		memcpy((void *)mask_iova, (const void *)(&ip_proto),
683 			sizeof(uint8_t));
684 	}
685 
686 	return 0;
687 }
688 
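/* Copy the key and mask bytes of one header field into a rule at the offset
 * of the field's extract.
 */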
689 static inline int
690 dpaa2_flow_rule_data_set(
691 	struct dpaa2_key_extract *key_extract,
692 	struct dpni_rule_cfg *rule,
693 	enum net_prot prot, uint32_t field,
694 	const void *key, const void *mask, int size)
695 {
696 	int offset = dpaa2_flow_extract_key_offset(key_extract,
697 				prot, field);
698 
699 	if (offset < 0) {
700 		DPAA2_PMD_ERR("prot %d, field %d extract failed",
701 			prot, field);
702 		return -1;
703 	}
704 
705 	memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
706 	memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
707 
708 	return 0;
709 }
710 
711 static inline int
712 dpaa2_flow_rule_data_set_raw(struct dpni_rule_cfg *rule,
713 			     const void *key, const void *mask, int size)
714 {
715 	int offset = 0;
716 
717 	memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
718 	memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
719 
720 	return 0;
721 }
722 
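/* Move the IP address bytes (4 for IPv4, 16 for IPv6) of both key and mask
 * from src_offset to the final offset of the address extract, zeroing the
 * old location.
 */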
723 static inline int
724 _dpaa2_flow_rule_move_ipaddr_tail(
725 	struct dpaa2_key_extract *key_extract,
726 	struct dpni_rule_cfg *rule, int src_offset,
727 	uint32_t field, bool ipv4)
728 {
729 	size_t key_src;
730 	size_t mask_src;
731 	size_t key_dst;
732 	size_t mask_dst;
733 	int dst_offset, len;
734 	enum net_prot prot;
735 	char tmp[NH_FLD_IPV6_ADDR_SIZE];
736 
737 	if (field != NH_FLD_IP_SRC &&
738 		field != NH_FLD_IP_DST) {
739 		DPAA2_PMD_ERR("Field of IP addr reorder must be IP SRC/DST");
740 		return -1;
741 	}
742 	if (ipv4)
743 		prot = NET_PROT_IPV4;
744 	else
745 		prot = NET_PROT_IPV6;
746 	dst_offset = dpaa2_flow_extract_key_offset(key_extract,
747 				prot, field);
748 	if (dst_offset < 0) {
749 		DPAA2_PMD_ERR("Field %d reorder extract failed", field);
750 		return -1;
751 	}
752 	key_src = rule->key_iova + src_offset;
753 	mask_src = rule->mask_iova + src_offset;
754 	key_dst = rule->key_iova + dst_offset;
755 	mask_dst = rule->mask_iova + dst_offset;
756 	if (ipv4)
757 		len = sizeof(rte_be32_t);
758 	else
759 		len = NH_FLD_IPV6_ADDR_SIZE;
760 
761 	memcpy(tmp, (char *)key_src, len);
762 	memset((char *)key_src, 0, len);
763 	memcpy((char *)key_dst, tmp, len);
764 
765 	memcpy(tmp, (char *)mask_src, len);
766 	memset((char *)mask_src, 0, len);
767 	memcpy((char *)mask_dst, tmp, len);
768 
769 	return 0;
770 }
771 
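/* Relocate all IP address key/mask bytes of a flow to their tail offsets in
 * both the QoS rule and the FS rule of the given group, then refresh the
 * offsets recorded in ipaddr_rule.
 */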
772 static inline int
773 dpaa2_flow_rule_move_ipaddr_tail(
774 	struct rte_flow *flow, struct dpaa2_dev_priv *priv,
775 	int fs_group)
776 {
777 	int ret;
778 	enum net_prot prot;
779 
780 	if (flow->ipaddr_rule.ipaddr_type == FLOW_NONE_IPADDR)
781 		return 0;
782 
783 	if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR)
784 		prot = NET_PROT_IPV4;
785 	else
786 		prot = NET_PROT_IPV6;
787 
788 	if (flow->ipaddr_rule.qos_ipsrc_offset >= 0) {
789 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
790 				&priv->extract.qos_key_extract,
791 				&flow->qos_rule,
792 				flow->ipaddr_rule.qos_ipsrc_offset,
793 				NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
794 		if (ret) {
795 			DPAA2_PMD_ERR("QoS src address reorder failed");
796 			return -1;
797 		}
798 		flow->ipaddr_rule.qos_ipsrc_offset =
799 			dpaa2_flow_extract_key_offset(
800 				&priv->extract.qos_key_extract,
801 				prot, NH_FLD_IP_SRC);
802 	}
803 
804 	if (flow->ipaddr_rule.qos_ipdst_offset >= 0) {
805 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
806 				&priv->extract.qos_key_extract,
807 				&flow->qos_rule,
808 				flow->ipaddr_rule.qos_ipdst_offset,
809 				NH_FLD_IP_DST, prot == NET_PROT_IPV4);
810 		if (ret) {
811 			DPAA2_PMD_ERR("QoS dst address reorder failed");
812 			return -1;
813 		}
814 		flow->ipaddr_rule.qos_ipdst_offset =
815 			dpaa2_flow_extract_key_offset(
816 				&priv->extract.qos_key_extract,
817 				prot, NH_FLD_IP_DST);
818 	}
819 
820 	if (flow->ipaddr_rule.fs_ipsrc_offset >= 0) {
821 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
822 				&priv->extract.tc_key_extract[fs_group],
823 				&flow->fs_rule,
824 				flow->ipaddr_rule.fs_ipsrc_offset,
825 				NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
826 		if (ret) {
827 			DPAA2_PMD_ERR("FS src address reorder failed");
828 			return -1;
829 		}
830 		flow->ipaddr_rule.fs_ipsrc_offset =
831 			dpaa2_flow_extract_key_offset(
832 				&priv->extract.tc_key_extract[fs_group],
833 				prot, NH_FLD_IP_SRC);
834 	}
835 	if (flow->ipaddr_rule.fs_ipdst_offset >= 0) {
836 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
837 				&priv->extract.tc_key_extract[fs_group],
838 				&flow->fs_rule,
839 				flow->ipaddr_rule.fs_ipdst_offset,
840 				NH_FLD_IP_DST, prot == NET_PROT_IPV4);
841 		if (ret) {
842 			DPAA2_PMD_ERR("FS dst address reorder failed");
843 			return -1;
844 		}
845 		flow->ipaddr_rule.fs_ipdst_offset =
846 			dpaa2_flow_extract_key_offset(
847 				&priv->extract.tc_key_extract[fs_group],
848 				prot, NH_FLD_IP_DST);
849 	}
850 
851 	return 0;
852 }
853 
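/* Check that every bit set in the user supplied mask is covered by the
 * supported mask of the item type: OR the two masks together and verify
 * the result still equals the supported mask.
 */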
854 static int
855 dpaa2_flow_extract_support(
856 	const uint8_t *mask_src,
857 	enum rte_flow_item_type type)
858 {
859 	char mask[64];
860 	int i, size = 0;
861 	const char *mask_support = 0;
862 
863 	switch (type) {
864 	case RTE_FLOW_ITEM_TYPE_ETH:
865 		mask_support = (const char *)&dpaa2_flow_item_eth_mask;
866 		size = sizeof(struct rte_flow_item_eth);
867 		break;
868 	case RTE_FLOW_ITEM_TYPE_VLAN:
869 		mask_support = (const char *)&dpaa2_flow_item_vlan_mask;
870 		size = sizeof(struct rte_flow_item_vlan);
871 		break;
872 	case RTE_FLOW_ITEM_TYPE_IPV4:
873 		mask_support = (const char *)&dpaa2_flow_item_ipv4_mask;
874 		size = sizeof(struct rte_flow_item_ipv4);
875 		break;
876 	case RTE_FLOW_ITEM_TYPE_IPV6:
877 		mask_support = (const char *)&dpaa2_flow_item_ipv6_mask;
878 		size = sizeof(struct rte_flow_item_ipv6);
879 		break;
880 	case RTE_FLOW_ITEM_TYPE_ICMP:
881 		mask_support = (const char *)&dpaa2_flow_item_icmp_mask;
882 		size = sizeof(struct rte_flow_item_icmp);
883 		break;
884 	case RTE_FLOW_ITEM_TYPE_UDP:
885 		mask_support = (const char *)&dpaa2_flow_item_udp_mask;
886 		size = sizeof(struct rte_flow_item_udp);
887 		break;
888 	case RTE_FLOW_ITEM_TYPE_TCP:
889 		mask_support = (const char *)&dpaa2_flow_item_tcp_mask;
890 		size = sizeof(struct rte_flow_item_tcp);
891 		break;
892 	case RTE_FLOW_ITEM_TYPE_SCTP:
893 		mask_support = (const char *)&dpaa2_flow_item_sctp_mask;
894 		size = sizeof(struct rte_flow_item_sctp);
895 		break;
896 	case RTE_FLOW_ITEM_TYPE_GRE:
897 		mask_support = (const char *)&dpaa2_flow_item_gre_mask;
898 		size = sizeof(struct rte_flow_item_gre);
899 		break;
900 	default:
901 		return -1;
902 	}
903 
904 	memcpy(mask, mask_support, size);
905 
906 	for (i = 0; i < size; i++)
907 		mask[i] = (mask[i] | mask_src[i]);
908 
909 	if (memcmp(mask, mask_support, size))
910 		return -1;
911 
912 	return 0;
913 }
914 
915 static int
916 dpaa2_configure_flow_eth(struct rte_flow *flow,
917 			 struct rte_eth_dev *dev,
918 			 const struct rte_flow_attr *attr,
919 			 const struct rte_flow_item *pattern,
920 			 const struct rte_flow_action actions[] __rte_unused,
921 			 struct rte_flow_error *error __rte_unused,
922 			 int *device_configured)
923 {
924 	int index, ret;
925 	int local_cfg = 0;
926 	uint32_t group;
927 	const struct rte_flow_item_eth *spec, *mask;
928 
929 	/* TODO: The upper bound of the range parameter is not yet implemented. */
930 	const struct rte_flow_item_eth *last __rte_unused;
931 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
932 	const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0};
933 
934 	group = attr->group;
935 
936 	/* Parse pattern list to get the matching parameters */
937 	spec    = (const struct rte_flow_item_eth *)pattern->spec;
938 	last    = (const struct rte_flow_item_eth *)pattern->last;
939 	mask    = (const struct rte_flow_item_eth *)
940 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_eth_mask);
941 	if (!spec) {
942 		/* No specific field of the eth header is matched;
943 		 * only the eth protocol itself matters.
944 		 */
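		/* Example: flow create 0 ingress pattern eth /
		 * Such a rule contributes nothing to the key and is skipped.
		 */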
945 		DPAA2_PMD_WARN("No pattern spec for Eth flow, just skip");
946 		return 0;
947 	}
948 
949 	/* Get traffic class index and flow id to be configured */
950 	flow->tc_id = group;
951 	flow->tc_index = attr->priority;
952 
953 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
954 		RTE_FLOW_ITEM_TYPE_ETH)) {
955 		DPAA2_PMD_WARN("Extract field(s) of ethernet not supported.");
956 
957 		return -1;
958 	}
959 
960 	if (memcmp((const char *)&mask->src, zero_cmp, RTE_ETHER_ADDR_LEN)) {
961 		index = dpaa2_flow_extract_search(
962 				&priv->extract.qos_key_extract.dpkg,
963 				NET_PROT_ETH, NH_FLD_ETH_SA);
964 		if (index < 0) {
965 			ret = dpaa2_flow_extract_add(
966 					&priv->extract.qos_key_extract,
967 					NET_PROT_ETH, NH_FLD_ETH_SA,
968 					RTE_ETHER_ADDR_LEN);
969 			if (ret) {
970 				DPAA2_PMD_ERR("QoS Extract add ETH_SA failed.");
971 
972 				return -1;
973 			}
974 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
975 		}
976 		index = dpaa2_flow_extract_search(
977 				&priv->extract.tc_key_extract[group].dpkg,
978 				NET_PROT_ETH, NH_FLD_ETH_SA);
979 		if (index < 0) {
980 			ret = dpaa2_flow_extract_add(
981 					&priv->extract.tc_key_extract[group],
982 					NET_PROT_ETH, NH_FLD_ETH_SA,
983 					RTE_ETHER_ADDR_LEN);
984 			if (ret) {
985 				DPAA2_PMD_ERR("FS Extract add ETH_SA failed.");
986 				return -1;
987 			}
988 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
989 		}
990 
991 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
992 		if (ret) {
993 			DPAA2_PMD_ERR(
994 				"Move ipaddr before ETH_SA rule set failed");
995 			return -1;
996 		}
997 
998 		ret = dpaa2_flow_rule_data_set(
999 				&priv->extract.qos_key_extract,
1000 				&flow->qos_rule,
1001 				NET_PROT_ETH,
1002 				NH_FLD_ETH_SA,
1003 				&spec->src.addr_bytes,
1004 				&mask->src.addr_bytes,
1005 				sizeof(struct rte_ether_addr));
1006 		if (ret) {
1007 			DPAA2_PMD_ERR("QoS NH_FLD_ETH_SA rule data set failed");
1008 			return -1;
1009 		}
1010 
1011 		ret = dpaa2_flow_rule_data_set(
1012 				&priv->extract.tc_key_extract[group],
1013 				&flow->fs_rule,
1014 				NET_PROT_ETH,
1015 				NH_FLD_ETH_SA,
1016 				&spec->src.addr_bytes,
1017 				&mask->src.addr_bytes,
1018 				sizeof(struct rte_ether_addr));
1019 		if (ret) {
1020 			DPAA2_PMD_ERR("FS NH_FLD_ETH_SA rule data set failed");
1021 			return -1;
1022 		}
1023 	}
1024 
1025 	if (memcmp((const char *)&mask->dst, zero_cmp, RTE_ETHER_ADDR_LEN)) {
1026 		index = dpaa2_flow_extract_search(
1027 				&priv->extract.qos_key_extract.dpkg,
1028 				NET_PROT_ETH, NH_FLD_ETH_DA);
1029 		if (index < 0) {
1030 			ret = dpaa2_flow_extract_add(
1031 					&priv->extract.qos_key_extract,
1032 					NET_PROT_ETH, NH_FLD_ETH_DA,
1033 					RTE_ETHER_ADDR_LEN);
1034 			if (ret) {
1035 				DPAA2_PMD_ERR("QoS Extract add ETH_DA failed.");
1036 
1037 				return -1;
1038 			}
1039 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1040 		}
1041 
1042 		index = dpaa2_flow_extract_search(
1043 				&priv->extract.tc_key_extract[group].dpkg,
1044 				NET_PROT_ETH, NH_FLD_ETH_DA);
1045 		if (index < 0) {
1046 			ret = dpaa2_flow_extract_add(
1047 					&priv->extract.tc_key_extract[group],
1048 					NET_PROT_ETH, NH_FLD_ETH_DA,
1049 					RTE_ETHER_ADDR_LEN);
1050 			if (ret) {
1051 				DPAA2_PMD_ERR("FS Extract add ETH_DA failed.");
1052 
1053 				return -1;
1054 			}
1055 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1056 		}
1057 
1058 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1059 		if (ret) {
1060 			DPAA2_PMD_ERR(
1061 				"Move ipaddr before ETH DA rule set failed");
1062 			return -1;
1063 		}
1064 
1065 		ret = dpaa2_flow_rule_data_set(
1066 				&priv->extract.qos_key_extract,
1067 				&flow->qos_rule,
1068 				NET_PROT_ETH,
1069 				NH_FLD_ETH_DA,
1070 				&spec->dst.addr_bytes,
1071 				&mask->dst.addr_bytes,
1072 				sizeof(struct rte_ether_addr));
1073 		if (ret) {
1074 			DPAA2_PMD_ERR("QoS NH_FLD_ETH_DA rule data set failed");
1075 			return -1;
1076 		}
1077 
1078 		ret = dpaa2_flow_rule_data_set(
1079 				&priv->extract.tc_key_extract[group],
1080 				&flow->fs_rule,
1081 				NET_PROT_ETH,
1082 				NH_FLD_ETH_DA,
1083 				&spec->dst.addr_bytes,
1084 				&mask->dst.addr_bytes,
1085 				sizeof(struct rte_ether_addr));
1086 		if (ret) {
1087 			DPAA2_PMD_ERR("FS NH_FLD_ETH_DA rule data set failed");
1088 			return -1;
1089 		}
1090 	}
1091 
1092 	if (memcmp((const char *)&mask->type, zero_cmp, sizeof(rte_be16_t))) {
1093 		index = dpaa2_flow_extract_search(
1094 				&priv->extract.qos_key_extract.dpkg,
1095 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1096 		if (index < 0) {
1097 			ret = dpaa2_flow_extract_add(
1098 					&priv->extract.qos_key_extract,
1099 					NET_PROT_ETH, NH_FLD_ETH_TYPE,
1100 					RTE_ETHER_TYPE_LEN);
1101 			if (ret) {
1102 				DPAA2_PMD_ERR("QoS Extract add ETH_TYPE failed.");
1103 
1104 				return -1;
1105 			}
1106 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1107 		}
1108 		index = dpaa2_flow_extract_search(
1109 				&priv->extract.tc_key_extract[group].dpkg,
1110 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1111 		if (index < 0) {
1112 			ret = dpaa2_flow_extract_add(
1113 					&priv->extract.tc_key_extract[group],
1114 					NET_PROT_ETH, NH_FLD_ETH_TYPE,
1115 					RTE_ETHER_TYPE_LEN);
1116 			if (ret) {
1117 				DPAA2_PMD_ERR("FS Extract add ETH_TYPE failed.");
1118 
1119 				return -1;
1120 			}
1121 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1122 		}
1123 
1124 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1125 		if (ret) {
1126 			DPAA2_PMD_ERR(
1127 				"Move ipaddr before ETH TYPE rule set failed");
1128 			return -1;
1129 		}
1130 
1131 		ret = dpaa2_flow_rule_data_set(
1132 				&priv->extract.qos_key_extract,
1133 				&flow->qos_rule,
1134 				NET_PROT_ETH,
1135 				NH_FLD_ETH_TYPE,
1136 				&spec->type,
1137 				&mask->type,
1138 				sizeof(rte_be16_t));
1139 		if (ret) {
1140 			DPAA2_PMD_ERR("QoS NH_FLD_ETH_TYPE rule data set failed");
1141 			return -1;
1142 		}
1143 
1144 		ret = dpaa2_flow_rule_data_set(
1145 				&priv->extract.tc_key_extract[group],
1146 				&flow->fs_rule,
1147 				NET_PROT_ETH,
1148 				NH_FLD_ETH_TYPE,
1149 				&spec->type,
1150 				&mask->type,
1151 				sizeof(rte_be16_t));
1152 		if (ret) {
1153 			DPAA2_PMD_ERR("FS NH_FLD_ETH_TYPE rule data set failed");
1154 			return -1;
1155 		}
1156 	}
1157 
1158 	(*device_configured) |= local_cfg;
1159 
1160 	return 0;
1161 }
1162 
1163 static int
1164 dpaa2_configure_flow_vlan(struct rte_flow *flow,
1165 			  struct rte_eth_dev *dev,
1166 			  const struct rte_flow_attr *attr,
1167 			  const struct rte_flow_item *pattern,
1168 			  const struct rte_flow_action actions[] __rte_unused,
1169 			  struct rte_flow_error *error __rte_unused,
1170 			  int *device_configured)
1171 {
1172 	int index, ret;
1173 	int local_cfg = 0;
1174 	uint32_t group;
1175 	const struct rte_flow_item_vlan *spec, *mask;
1176 
1177 	const struct rte_flow_item_vlan *last __rte_unused;
1178 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1179 
1180 	group = attr->group;
1181 
1182 	/* Parse pattern list to get the matching parameters */
1183 	spec    = (const struct rte_flow_item_vlan *)pattern->spec;
1184 	last    = (const struct rte_flow_item_vlan *)pattern->last;
1185 	mask    = (const struct rte_flow_item_vlan *)
1186 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_vlan_mask);
1187 
1188 	/* Get traffic class index and flow id to be configured */
1189 	flow->tc_id = group;
1190 	flow->tc_index = attr->priority;
1191 
1192 	if (!spec) {
1193 		/* No specific field of the vlan header is matched;
1194 		 * only the vlan protocol itself matters.
1195 		 */
1196 		/* The Eth type is actually used for vLan classification.
1197 		 */
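		/* Example: flow create 0 ingress pattern vlan /
		 * Such a rule matches the TPID through the Eth type extract.
		 */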
1198 		struct proto_discrimination proto;
1199 
1200 		index = dpaa2_flow_extract_search(
1201 				&priv->extract.qos_key_extract.dpkg,
1202 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1203 		if (index < 0) {
1204 			ret = dpaa2_flow_proto_discrimination_extract(
1205 						&priv->extract.qos_key_extract,
1206 						RTE_FLOW_ITEM_TYPE_ETH);
1207 			if (ret) {
1208 				DPAA2_PMD_ERR(
1209 				"QoS Ext ETH_TYPE to discriminate vLan failed");
1210 
1211 				return -1;
1212 			}
1213 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1214 		}
1215 
1216 		index = dpaa2_flow_extract_search(
1217 				&priv->extract.tc_key_extract[group].dpkg,
1218 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1219 		if (index < 0) {
1220 			ret = dpaa2_flow_proto_discrimination_extract(
1221 					&priv->extract.tc_key_extract[group],
1222 					RTE_FLOW_ITEM_TYPE_ETH);
1223 			if (ret) {
1224 				DPAA2_PMD_ERR(
1225 				"FS Ext ETH_TYPE to discriminate vLan failed.");
1226 
1227 				return -1;
1228 			}
1229 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1230 		}
1231 
1232 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1233 		if (ret) {
1234 			DPAA2_PMD_ERR(
1235 			"Move ipaddr before vLan discrimination set failed");
1236 			return -1;
1237 		}
1238 
1239 		proto.type = RTE_FLOW_ITEM_TYPE_ETH;
1240 		proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
1241 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1242 							proto, group);
1243 		if (ret) {
1244 			DPAA2_PMD_ERR("vLan discrimination rule set failed");
1245 			return -1;
1246 		}
1247 
1248 		(*device_configured) |= local_cfg;
1249 
1250 		return 0;
1251 	}
1252 
1253 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
1254 		RTE_FLOW_ITEM_TYPE_VLAN)) {
1255 		DPAA2_PMD_WARN("Extract field(s) of vlan not supported.");
1256 
1257 		return -1;
1258 	}
1259 
1260 	if (!mask->tci)
1261 		return 0;
1262 
1263 	index = dpaa2_flow_extract_search(
1264 				&priv->extract.qos_key_extract.dpkg,
1265 				NET_PROT_VLAN, NH_FLD_VLAN_TCI);
1266 	if (index < 0) {
1267 		ret = dpaa2_flow_extract_add(
1268 						&priv->extract.qos_key_extract,
1269 						NET_PROT_VLAN,
1270 						NH_FLD_VLAN_TCI,
1271 						sizeof(rte_be16_t));
1272 		if (ret) {
1273 			DPAA2_PMD_ERR("QoS Extract add VLAN_TCI failed.");
1274 
1275 			return -1;
1276 		}
1277 		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1278 	}
1279 
1280 	index = dpaa2_flow_extract_search(
1281 			&priv->extract.tc_key_extract[group].dpkg,
1282 			NET_PROT_VLAN, NH_FLD_VLAN_TCI);
1283 	if (index < 0) {
1284 		ret = dpaa2_flow_extract_add(
1285 				&priv->extract.tc_key_extract[group],
1286 				NET_PROT_VLAN,
1287 				NH_FLD_VLAN_TCI,
1288 				sizeof(rte_be16_t));
1289 		if (ret) {
1290 			DPAA2_PMD_ERR("FS Extract add VLAN_TCI failed.");
1291 
1292 			return -1;
1293 		}
1294 		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1295 	}
1296 
1297 	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1298 	if (ret) {
1299 		DPAA2_PMD_ERR(
1300 			"Move ipaddr before VLAN TCI rule set failed");
1301 		return -1;
1302 	}
1303 
1304 	ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
1305 				&flow->qos_rule,
1306 				NET_PROT_VLAN,
1307 				NH_FLD_VLAN_TCI,
1308 				&spec->tci,
1309 				&mask->tci,
1310 				sizeof(rte_be16_t));
1311 	if (ret) {
1312 		DPAA2_PMD_ERR("QoS NH_FLD_VLAN_TCI rule data set failed");
1313 		return -1;
1314 	}
1315 
1316 	ret = dpaa2_flow_rule_data_set(
1317 			&priv->extract.tc_key_extract[group],
1318 			&flow->fs_rule,
1319 			NET_PROT_VLAN,
1320 			NH_FLD_VLAN_TCI,
1321 			&spec->tci,
1322 			&mask->tci,
1323 			sizeof(rte_be16_t));
1324 	if (ret) {
1325 		DPAA2_PMD_ERR("FS NH_FLD_VLAN_TCI rule data set failed");
1326 		return -1;
1327 	}
1328 
1329 	(*device_configured) |= local_cfg;
1330 
1331 	return 0;
1332 }
1333 
1334 static int
1335 dpaa2_configure_flow_ip_discrimation(
1336 	struct dpaa2_dev_priv *priv, struct rte_flow *flow,
1337 	const struct rte_flow_item *pattern,
1338 	int *local_cfg,	int *device_configured,
1339 	uint32_t group)
1340 {
1341 	int index, ret;
1342 	struct proto_discrimination proto;
1343 
1344 	index = dpaa2_flow_extract_search(
1345 			&priv->extract.qos_key_extract.dpkg,
1346 			NET_PROT_ETH, NH_FLD_ETH_TYPE);
1347 	if (index < 0) {
1348 		ret = dpaa2_flow_proto_discrimination_extract(
1349 				&priv->extract.qos_key_extract,
1350 				RTE_FLOW_ITEM_TYPE_ETH);
1351 		if (ret) {
1352 			DPAA2_PMD_ERR(
1353 			"QoS Extract ETH_TYPE to discriminate IP failed.");
1354 			return -1;
1355 		}
1356 		(*local_cfg) |= DPAA2_QOS_TABLE_RECONFIGURE;
1357 	}
1358 
1359 	index = dpaa2_flow_extract_search(
1360 			&priv->extract.tc_key_extract[group].dpkg,
1361 			NET_PROT_ETH, NH_FLD_ETH_TYPE);
1362 	if (index < 0) {
1363 		ret = dpaa2_flow_proto_discrimination_extract(
1364 				&priv->extract.tc_key_extract[group],
1365 				RTE_FLOW_ITEM_TYPE_ETH);
1366 		if (ret) {
1367 			DPAA2_PMD_ERR(
1368 			"FS Extract ETH_TYPE to discriminate IP failed.");
1369 			return -1;
1370 		}
1371 		(*local_cfg) |= DPAA2_FS_TABLE_RECONFIGURE;
1372 	}
1373 
1374 	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1375 	if (ret) {
1376 		DPAA2_PMD_ERR(
1377 			"Move ipaddr before IP discrimination set failed");
1378 		return -1;
1379 	}
1380 
1381 	proto.type = RTE_FLOW_ITEM_TYPE_ETH;
1382 	if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4)
1383 		proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
1384 	else
1385 		proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
1386 	ret = dpaa2_flow_proto_discrimination_rule(priv, flow, proto, group);
1387 	if (ret) {
1388 		DPAA2_PMD_ERR("IP discrimination rule set failed");
1389 		return -1;
1390 	}
1391 
1392 	(*device_configured) |= (*local_cfg);
1393 
1394 	return 0;
1395 }
1396 
1397 
1398 static int
1399 dpaa2_configure_flow_generic_ip(
1400 	struct rte_flow *flow,
1401 	struct rte_eth_dev *dev,
1402 	const struct rte_flow_attr *attr,
1403 	const struct rte_flow_item *pattern,
1404 	const struct rte_flow_action actions[] __rte_unused,
1405 	struct rte_flow_error *error __rte_unused,
1406 	int *device_configured)
1407 {
1408 	int index, ret;
1409 	int local_cfg = 0;
1410 	uint32_t group;
1411 	const struct rte_flow_item_ipv4 *spec_ipv4 = 0,
1412 		*mask_ipv4 = 0;
1413 	const struct rte_flow_item_ipv6 *spec_ipv6 = 0,
1414 		*mask_ipv6 = 0;
1415 	const void *key, *mask;
1416 	enum net_prot prot;
1417 
1418 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1419 	const char zero_cmp[NH_FLD_IPV6_ADDR_SIZE] = {0};
1420 	int size;
1421 
1422 	group = attr->group;
1423 
1424 	/* Parse pattern list to get the matching parameters */
1425 	if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1426 		spec_ipv4 = (const struct rte_flow_item_ipv4 *)pattern->spec;
1427 		mask_ipv4 = (const struct rte_flow_item_ipv4 *)
1428 			(pattern->mask ? pattern->mask :
1429 					&dpaa2_flow_item_ipv4_mask);
1430 	} else {
1431 		spec_ipv6 = (const struct rte_flow_item_ipv6 *)pattern->spec;
1432 		mask_ipv6 = (const struct rte_flow_item_ipv6 *)
1433 			(pattern->mask ? pattern->mask :
1434 					&dpaa2_flow_item_ipv6_mask);
1435 	}
1436 
1437 	/* Get traffic class index and flow id to be configured */
1438 	flow->tc_id = group;
1439 	flow->tc_index = attr->priority;
1440 
1441 	ret = dpaa2_configure_flow_ip_discrimation(priv,
1442 			flow, pattern, &local_cfg,
1443 			device_configured, group);
1444 	if (ret) {
1445 		DPAA2_PMD_ERR("IP discrimination failed!");
1446 		return -1;
1447 	}
1448 
1449 	if (!spec_ipv4 && !spec_ipv6)
1450 		return 0;
1451 
1452 	if (mask_ipv4) {
1453 		if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv4,
1454 			RTE_FLOW_ITEM_TYPE_IPV4)) {
1455 			DPAA2_PMD_WARN("Extract field(s) of IPv4 not supported.");
1456 
1457 			return -1;
1458 		}
1459 	}
1460 
1461 	if (mask_ipv6) {
1462 		if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv6,
1463 			RTE_FLOW_ITEM_TYPE_IPV6)) {
1464 			DPAA2_PMD_WARN("Extract field(s) of IPv6 not supported.");
1465 
1466 			return -1;
1467 		}
1468 	}
1469 
1470 	if (mask_ipv4 && (mask_ipv4->hdr.src_addr ||
1471 		mask_ipv4->hdr.dst_addr)) {
1472 		flow->ipaddr_rule.ipaddr_type = FLOW_IPV4_ADDR;
1473 	} else if (mask_ipv6 &&
1474 		(memcmp((const char *)mask_ipv6->hdr.src_addr,
1475 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE) ||
1476 		memcmp((const char *)mask_ipv6->hdr.dst_addr,
1477 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1478 		flow->ipaddr_rule.ipaddr_type = FLOW_IPV6_ADDR;
1479 	}
1480 
1481 	if ((mask_ipv4 && mask_ipv4->hdr.src_addr) ||
1482 		(mask_ipv6 &&
1483 			memcmp((const char *)mask_ipv6->hdr.src_addr,
1484 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1485 		index = dpaa2_flow_extract_search(
1486 				&priv->extract.qos_key_extract.dpkg,
1487 				NET_PROT_IP, NH_FLD_IP_SRC);
1488 		if (index < 0) {
1489 			ret = dpaa2_flow_extract_add(
1490 					&priv->extract.qos_key_extract,
1491 					NET_PROT_IP,
1492 					NH_FLD_IP_SRC,
1493 					0);
1494 			if (ret) {
1495 				DPAA2_PMD_ERR("QoS Extract add IP_SRC failed.");
1496 
1497 				return -1;
1498 			}
1499 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1500 		}
1501 
1502 		index = dpaa2_flow_extract_search(
1503 				&priv->extract.tc_key_extract[group].dpkg,
1504 				NET_PROT_IP, NH_FLD_IP_SRC);
1505 		if (index < 0) {
1506 			ret = dpaa2_flow_extract_add(
1507 					&priv->extract.tc_key_extract[group],
1508 					NET_PROT_IP,
1509 					NH_FLD_IP_SRC,
1510 					0);
1511 			if (ret) {
1512 				DPAA2_PMD_ERR("FS Extract add IP_SRC failed.");
1513 
1514 				return -1;
1515 			}
1516 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1517 		}
1518 
1519 		if (spec_ipv4)
1520 			key = &spec_ipv4->hdr.src_addr;
1521 		else
1522 			key = &spec_ipv6->hdr.src_addr[0];
1523 		if (mask_ipv4) {
1524 			mask = &mask_ipv4->hdr.src_addr;
1525 			size = NH_FLD_IPV4_ADDR_SIZE;
1526 			prot = NET_PROT_IPV4;
1527 		} else {
1528 			mask = &mask_ipv6->hdr.src_addr[0];
1529 			size = NH_FLD_IPV6_ADDR_SIZE;
1530 			prot = NET_PROT_IPV6;
1531 		}
1532 
1533 		ret = dpaa2_flow_rule_data_set(
1534 				&priv->extract.qos_key_extract,
1535 				&flow->qos_rule,
1536 				prot, NH_FLD_IP_SRC,
1537 				key,	mask, size);
1538 		if (ret) {
1539 			DPAA2_PMD_ERR("QoS NH_FLD_IP_SRC rule data set failed");
1540 			return -1;
1541 		}
1542 
1543 		ret = dpaa2_flow_rule_data_set(
1544 				&priv->extract.tc_key_extract[group],
1545 				&flow->fs_rule,
1546 				prot, NH_FLD_IP_SRC,
1547 				key,	mask, size);
1548 		if (ret) {
1549 			DPAA2_PMD_ERR("FS NH_FLD_IP_SRC rule data set failed");
1550 			return -1;
1551 		}
1552 
1553 		flow->ipaddr_rule.qos_ipsrc_offset =
1554 			dpaa2_flow_extract_key_offset(
1555 				&priv->extract.qos_key_extract,
1556 				prot, NH_FLD_IP_SRC);
1557 		flow->ipaddr_rule.fs_ipsrc_offset =
1558 			dpaa2_flow_extract_key_offset(
1559 				&priv->extract.tc_key_extract[group],
1560 				prot, NH_FLD_IP_SRC);
1561 	}
1562 
1563 	if ((mask_ipv4 && mask_ipv4->hdr.dst_addr) ||
1564 		(mask_ipv6 &&
1565 			memcmp((const char *)mask_ipv6->hdr.dst_addr,
1566 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1567 		index = dpaa2_flow_extract_search(
1568 				&priv->extract.qos_key_extract.dpkg,
1569 				NET_PROT_IP, NH_FLD_IP_DST);
1570 		if (index < 0) {
1571 			if (mask_ipv4)
1572 				size = NH_FLD_IPV4_ADDR_SIZE;
1573 			else
1574 				size = NH_FLD_IPV6_ADDR_SIZE;
1575 			ret = dpaa2_flow_extract_add(
1576 					&priv->extract.qos_key_extract,
1577 					NET_PROT_IP,
1578 					NH_FLD_IP_DST,
1579 					size);
1580 			if (ret) {
1581 				DPAA2_PMD_ERR("QoS Extract add IP_DST failed.");
1582 
1583 				return -1;
1584 			}
1585 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1586 		}
1587 
1588 		index = dpaa2_flow_extract_search(
1589 				&priv->extract.tc_key_extract[group].dpkg,
1590 				NET_PROT_IP, NH_FLD_IP_DST);
1591 		if (index < 0) {
1592 			if (mask_ipv4)
1593 				size = NH_FLD_IPV4_ADDR_SIZE;
1594 			else
1595 				size = NH_FLD_IPV6_ADDR_SIZE;
1596 			ret = dpaa2_flow_extract_add(
1597 					&priv->extract.tc_key_extract[group],
1598 					NET_PROT_IP,
1599 					NH_FLD_IP_DST,
1600 					size);
1601 			if (ret) {
1602 				DPAA2_PMD_ERR("FS Extract add IP_DST failed.");
1603 
1604 				return -1;
1605 			}
1606 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1607 		}
1608 
1609 		if (spec_ipv4)
1610 			key = &spec_ipv4->hdr.dst_addr;
1611 		else
1612 			key = spec_ipv6->hdr.dst_addr;
1613 		if (mask_ipv4) {
1614 			mask = &mask_ipv4->hdr.dst_addr;
1615 			size = NH_FLD_IPV4_ADDR_SIZE;
1616 			prot = NET_PROT_IPV4;
1617 		} else {
1618 			mask = &mask_ipv6->hdr.dst_addr[0];
1619 			size = NH_FLD_IPV6_ADDR_SIZE;
1620 			prot = NET_PROT_IPV6;
1621 		}
1622 
1623 		ret = dpaa2_flow_rule_data_set(
1624 				&priv->extract.qos_key_extract,
1625 				&flow->qos_rule,
1626 				prot, NH_FLD_IP_DST,
1627 				key,	mask, size);
1628 		if (ret) {
1629 			DPAA2_PMD_ERR("QoS NH_FLD_IP_DST rule data set failed");
1630 			return -1;
1631 		}
1632 
1633 		ret = dpaa2_flow_rule_data_set(
1634 				&priv->extract.tc_key_extract[group],
1635 				&flow->fs_rule,
1636 				prot, NH_FLD_IP_DST,
1637 				key,	mask, size);
1638 		if (ret) {
1639 			DPAA2_PMD_ERR("FS NH_FLD_IP_DST rule data set failed");
1640 			return -1;
1641 		}
1642 		flow->ipaddr_rule.qos_ipdst_offset =
1643 			dpaa2_flow_extract_key_offset(
1644 				&priv->extract.qos_key_extract,
1645 				prot, NH_FLD_IP_DST);
1646 		flow->ipaddr_rule.fs_ipdst_offset =
1647 			dpaa2_flow_extract_key_offset(
1648 				&priv->extract.tc_key_extract[group],
1649 				prot, NH_FLD_IP_DST);
1650 	}
1651 
1652 	if ((mask_ipv4 && mask_ipv4->hdr.next_proto_id) ||
1653 		(mask_ipv6 && mask_ipv6->hdr.proto)) {
1654 		index = dpaa2_flow_extract_search(
1655 				&priv->extract.qos_key_extract.dpkg,
1656 				NET_PROT_IP, NH_FLD_IP_PROTO);
1657 		if (index < 0) {
1658 			ret = dpaa2_flow_extract_add(
1659 				&priv->extract.qos_key_extract,
1660 				NET_PROT_IP,
1661 				NH_FLD_IP_PROTO,
1662 				NH_FLD_IP_PROTO_SIZE);
1663 			if (ret) {
1664 				DPAA2_PMD_ERR("QoS Extract add IP_PROTO failed.");
1665 
1666 				return -1;
1667 			}
1668 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1669 		}
1670 
1671 		index = dpaa2_flow_extract_search(
1672 				&priv->extract.tc_key_extract[group].dpkg,
1673 				NET_PROT_IP, NH_FLD_IP_PROTO);
1674 		if (index < 0) {
1675 			ret = dpaa2_flow_extract_add(
1676 					&priv->extract.tc_key_extract[group],
1677 					NET_PROT_IP,
1678 					NH_FLD_IP_PROTO,
1679 					NH_FLD_IP_PROTO_SIZE);
1680 			if (ret) {
1681 				DPAA2_PMD_ERR("FS Extract add IP_PROTO failed.");
1682 
1683 				return -1;
1684 			}
1685 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1686 		}
1687 
1688 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1689 		if (ret) {
1690 			DPAA2_PMD_ERR(
1691 				"Move ipaddr before NH_FLD_IP_PROTO rule set failed");
1692 			return -1;
1693 		}
1694 
1695 		if (spec_ipv4)
1696 			key = &spec_ipv4->hdr.next_proto_id;
1697 		else
1698 			key = &spec_ipv6->hdr.proto;
1699 		if (mask_ipv4)
1700 			mask = &mask_ipv4->hdr.next_proto_id;
1701 		else
1702 			mask = &mask_ipv6->hdr.proto;
1703 
1704 		ret = dpaa2_flow_rule_data_set(
1705 				&priv->extract.qos_key_extract,
1706 				&flow->qos_rule,
1707 				NET_PROT_IP,
1708 				NH_FLD_IP_PROTO,
1709 				key,	mask, NH_FLD_IP_PROTO_SIZE);
1710 		if (ret) {
1711 			DPAA2_PMD_ERR("QoS NH_FLD_IP_PROTO rule data set failed");
1712 			return -1;
1713 		}
1714 
1715 		ret = dpaa2_flow_rule_data_set(
1716 				&priv->extract.tc_key_extract[group],
1717 				&flow->fs_rule,
1718 				NET_PROT_IP,
1719 				NH_FLD_IP_PROTO,
1720 				key,	mask, NH_FLD_IP_PROTO_SIZE);
1721 		if (ret) {
1722 			DPAA2_PMD_ERR("FS NH_FLD_IP_PROTO rule data set failed");
1723 			return -1;
1724 		}
1725 	}
1726 
1727 	(*device_configured) |= local_cfg;
1728 
1729 	return 0;
1730 }
1731 
1732 static int
1733 dpaa2_configure_flow_icmp(struct rte_flow *flow,
1734 			  struct rte_eth_dev *dev,
1735 			  const struct rte_flow_attr *attr,
1736 			  const struct rte_flow_item *pattern,
1737 			  const struct rte_flow_action actions[] __rte_unused,
1738 			  struct rte_flow_error *error __rte_unused,
1739 			  int *device_configured)
1740 {
1741 	int index, ret;
1742 	int local_cfg = 0;
1743 	uint32_t group;
1744 	const struct rte_flow_item_icmp *spec, *mask;
1745 
1746 	const struct rte_flow_item_icmp *last __rte_unused;
1747 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1748 
1749 	group = attr->group;
1750 
1751 	/* Parse pattern list to get the matching parameters */
1752 	spec    = (const struct rte_flow_item_icmp *)pattern->spec;
1753 	last    = (const struct rte_flow_item_icmp *)pattern->last;
1754 	mask    = (const struct rte_flow_item_icmp *)
1755 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_icmp_mask);
1756 
1757 	/* Get traffic class index and flow id to be configured */
1758 	flow->tc_id = group;
1759 	flow->tc_index = attr->priority;
1760 
1761 	if (!spec) {
1762 		/* No specific field of the ICMP header is matched;
1763 		 * only the ICMP protocol itself matters.
1764 		 * Example: flow create 0 ingress pattern icmp /
1765 		 */
1766 		/* The next proto of the generic IP header is actually
1767 		 * used for ICMP identification.
1768 		 */
1769 		struct proto_discrimination proto;
1770 
1771 		index = dpaa2_flow_extract_search(
1772 				&priv->extract.qos_key_extract.dpkg,
1773 				NET_PROT_IP, NH_FLD_IP_PROTO);
1774 		if (index < 0) {
1775 			ret = dpaa2_flow_proto_discrimination_extract(
1776 					&priv->extract.qos_key_extract,
1777 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1778 			if (ret) {
1779 				DPAA2_PMD_ERR(
1780 					"QoS Extract IP protocol to discriminate ICMP failed.");
1781 
1782 				return -1;
1783 			}
1784 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1785 		}
1786 
1787 		index = dpaa2_flow_extract_search(
1788 				&priv->extract.tc_key_extract[group].dpkg,
1789 				NET_PROT_IP, NH_FLD_IP_PROTO);
1790 		if (index < 0) {
1791 			ret = dpaa2_flow_proto_discrimination_extract(
1792 					&priv->extract.tc_key_extract[group],
1793 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1794 			if (ret) {
1795 				DPAA2_PMD_ERR(
1796 					"FS Extract IP protocol to discriminate ICMP failed.");
1797 
1798 				return -1;
1799 			}
1800 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1801 		}
1802 
1803 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1804 		if (ret) {
1805 			DPAA2_PMD_ERR(
1806 				"Move IP addr before ICMP discrimination set failed");
1807 			return -1;
1808 		}
1809 
1810 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
1811 		proto.ip_proto = IPPROTO_ICMP;
1812 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1813 							proto, group);
1814 		if (ret) {
1815 			DPAA2_PMD_ERR("ICMP discrimination rule set failed");
1816 			return -1;
1817 		}
1818 
1819 		(*device_configured) |= local_cfg;
1820 
1821 		return 0;
1822 	}
1823 
1824 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
1825 		RTE_FLOW_ITEM_TYPE_ICMP)) {
1826 		DPAA2_PMD_WARN("Extract field(s) of ICMP not supported.");
1827 
1828 		return -1;
1829 	}
1830 
1831 	if (mask->hdr.icmp_type) {
1832 		index = dpaa2_flow_extract_search(
1833 				&priv->extract.qos_key_extract.dpkg,
1834 				NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
1835 		if (index < 0) {
1836 			ret = dpaa2_flow_extract_add(
1837 					&priv->extract.qos_key_extract,
1838 					NET_PROT_ICMP,
1839 					NH_FLD_ICMP_TYPE,
1840 					NH_FLD_ICMP_TYPE_SIZE);
1841 			if (ret) {
1842 				DPAA2_PMD_ERR("QoS Extract add ICMP_TYPE failed.");
1843 
1844 				return -1;
1845 			}
1846 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1847 		}
1848 
1849 		index = dpaa2_flow_extract_search(
1850 				&priv->extract.tc_key_extract[group].dpkg,
1851 				NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
1852 		if (index < 0) {
1853 			ret = dpaa2_flow_extract_add(
1854 					&priv->extract.tc_key_extract[group],
1855 					NET_PROT_ICMP,
1856 					NH_FLD_ICMP_TYPE,
1857 					NH_FLD_ICMP_TYPE_SIZE);
1858 			if (ret) {
1859 				DPAA2_PMD_ERR("FS Extract add ICMP_TYPE failed.");
1860 
1861 				return -1;
1862 			}
1863 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1864 		}
1865 
1866 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1867 		if (ret) {
1868 			DPAA2_PMD_ERR(
1869 				"Move ipaddr before ICMP TYPE set failed");
1870 			return -1;
1871 		}
1872 
1873 		ret = dpaa2_flow_rule_data_set(
1874 				&priv->extract.qos_key_extract,
1875 				&flow->qos_rule,
1876 				NET_PROT_ICMP,
1877 				NH_FLD_ICMP_TYPE,
1878 				&spec->hdr.icmp_type,
1879 				&mask->hdr.icmp_type,
1880 				NH_FLD_ICMP_TYPE_SIZE);
1881 		if (ret) {
1882 			DPAA2_PMD_ERR("QoS NH_FLD_ICMP_TYPE rule data set failed");
1883 			return -1;
1884 		}
1885 
1886 		ret = dpaa2_flow_rule_data_set(
1887 				&priv->extract.tc_key_extract[group],
1888 				&flow->fs_rule,
1889 				NET_PROT_ICMP,
1890 				NH_FLD_ICMP_TYPE,
1891 				&spec->hdr.icmp_type,
1892 				&mask->hdr.icmp_type,
1893 				NH_FLD_ICMP_TYPE_SIZE);
1894 		if (ret) {
1895 			DPAA2_PMD_ERR("FS NH_FLD_ICMP_TYPE rule data set failed");
1896 			return -1;
1897 		}
1898 	}
1899 
1900 	if (mask->hdr.icmp_code) {
1901 		index = dpaa2_flow_extract_search(
1902 				&priv->extract.qos_key_extract.dpkg,
1903 				NET_PROT_ICMP, NH_FLD_ICMP_CODE);
1904 		if (index < 0) {
1905 			ret = dpaa2_flow_extract_add(
1906 					&priv->extract.qos_key_extract,
1907 					NET_PROT_ICMP,
1908 					NH_FLD_ICMP_CODE,
1909 					NH_FLD_ICMP_CODE_SIZE);
1910 			if (ret) {
1911 				DPAA2_PMD_ERR("QoS Extract add ICMP_CODE failed.");
1912 
1913 				return -1;
1914 			}
1915 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1916 		}
1917 
1918 		index = dpaa2_flow_extract_search(
1919 				&priv->extract.tc_key_extract[group].dpkg,
1920 				NET_PROT_ICMP, NH_FLD_ICMP_CODE);
1921 		if (index < 0) {
1922 			ret = dpaa2_flow_extract_add(
1923 					&priv->extract.tc_key_extract[group],
1924 					NET_PROT_ICMP,
1925 					NH_FLD_ICMP_CODE,
1926 					NH_FLD_ICMP_CODE_SIZE);
1927 			if (ret) {
1928 				DPAA2_PMD_ERR("FS Extract add ICMP_CODE failed.");
1929 
1930 				return -1;
1931 			}
1932 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1933 		}
1934 
1935 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1936 		if (ret) {
1937 			DPAA2_PMD_ERR(
1938 				"Move ipaddr before ICMP CODE set failed");
1939 			return -1;
1940 		}
1941 
1942 		ret = dpaa2_flow_rule_data_set(
1943 				&priv->extract.qos_key_extract,
1944 				&flow->qos_rule,
1945 				NET_PROT_ICMP,
1946 				NH_FLD_ICMP_CODE,
1947 				&spec->hdr.icmp_code,
1948 				&mask->hdr.icmp_code,
1949 				NH_FLD_ICMP_CODE_SIZE);
1950 		if (ret) {
1951 			DPAA2_PMD_ERR("QoS NH_FLD_ICMP_CODE rule data set failed");
1952 			return -1;
1953 		}
1954 
1955 		ret = dpaa2_flow_rule_data_set(
1956 				&priv->extract.tc_key_extract[group],
1957 				&flow->fs_rule,
1958 				NET_PROT_ICMP,
1959 				NH_FLD_ICMP_CODE,
1960 				&spec->hdr.icmp_code,
1961 				&mask->hdr.icmp_code,
1962 				NH_FLD_ICMP_CODE_SIZE);
1963 		if (ret) {
1964 			DPAA2_PMD_ERR("FS NH_FLD_ICMP_CODE rule data set failed");
1965 			return -1;
1966 		}
1967 	}
1968 
1969 	(*device_configured) |= local_cfg;
1970 
1971 	return 0;
1972 }
1973 
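/* Configure QoS/FS extracts and rule data for a UDP pattern item.
 * If there is no spec, or MC/WRIOP cannot identify L4 by ports
 * (mc_l4_port_identification is 0), an IP-protocol discrimination
 * rule for IPPROTO_UDP is installed first; masked source/destination
 * ports, when a spec is present, are then added to the QoS and FS keys.
 */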
1974 static int
1975 dpaa2_configure_flow_udp(struct rte_flow *flow,
1976 			 struct rte_eth_dev *dev,
1977 			  const struct rte_flow_attr *attr,
1978 			  const struct rte_flow_item *pattern,
1979 			  const struct rte_flow_action actions[] __rte_unused,
1980 			  struct rte_flow_error *error __rte_unused,
1981 			  int *device_configured)
1982 {
1983 	int index, ret;
1984 	int local_cfg = 0;
1985 	uint32_t group;
1986 	const struct rte_flow_item_udp *spec, *mask;
1987 
1988 	const struct rte_flow_item_udp *last __rte_unused;
1989 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1990 
1991 	group = attr->group;
1992 
1993 	/* Parse pattern list to get the matching parameters */
1994 	spec    = (const struct rte_flow_item_udp *)pattern->spec;
1995 	last    = (const struct rte_flow_item_udp *)pattern->last;
1996 	mask    = (const struct rte_flow_item_udp *)
1997 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_udp_mask);
1998 
1999 	/* Get traffic class index and flow id to be configured */
2000 	flow->tc_id = group;
2001 	flow->tc_index = attr->priority;
2002 
2003 	if (!spec || !mc_l4_port_identification) {
2004 		struct proto_discrimination proto;
2005 
2006 		index = dpaa2_flow_extract_search(
2007 				&priv->extract.qos_key_extract.dpkg,
2008 				NET_PROT_IP, NH_FLD_IP_PROTO);
2009 		if (index < 0) {
2010 			ret = dpaa2_flow_proto_discrimination_extract(
2011 					&priv->extract.qos_key_extract,
2012 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2013 			if (ret) {
2014 				DPAA2_PMD_ERR(
2015 					"QoS Extract IP protocol to discriminate UDP failed.");
2016 
2017 				return -1;
2018 			}
2019 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2020 		}
2021 
2022 		index = dpaa2_flow_extract_search(
2023 				&priv->extract.tc_key_extract[group].dpkg,
2024 				NET_PROT_IP, NH_FLD_IP_PROTO);
2025 		if (index < 0) {
2026 			ret = dpaa2_flow_proto_discrimination_extract(
2027 				&priv->extract.tc_key_extract[group],
2028 				DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2029 			if (ret) {
2030 				DPAA2_PMD_ERR(
2031 					"FS Extract IP protocol to discriminate UDP failed.");
2032 
2033 				return -1;
2034 			}
2035 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2036 		}
2037 
2038 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2039 		if (ret) {
2040 			DPAA2_PMD_ERR(
2041 				"Move IP addr before UDP discrimination set failed");
2042 			return -1;
2043 		}
2044 
2045 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2046 		proto.ip_proto = IPPROTO_UDP;
2047 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2048 							proto, group);
2049 		if (ret) {
2050 			DPAA2_PMD_ERR("UDP discrimination rule set failed");
2051 			return -1;
2052 		}
2053 
2054 		(*device_configured) |= local_cfg;
2055 
2056 		if (!spec)
2057 			return 0;
2058 	}
2059 
2060 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2061 		RTE_FLOW_ITEM_TYPE_UDP)) {
2062 		DPAA2_PMD_WARN("Extract field(s) of UDP not supported.");
2063 
2064 		return -1;
2065 	}
2066 
2067 	if (mask->hdr.src_port) {
2068 		index = dpaa2_flow_extract_search(
2069 				&priv->extract.qos_key_extract.dpkg,
2070 				NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
2071 		if (index < 0) {
2072 			ret = dpaa2_flow_extract_add(
2073 					&priv->extract.qos_key_extract,
2074 				NET_PROT_UDP,
2075 				NH_FLD_UDP_PORT_SRC,
2076 				NH_FLD_UDP_PORT_SIZE);
2077 			if (ret) {
2078 				DPAA2_PMD_ERR("QoS Extract add UDP_SRC failed.");
2079 
2080 				return -1;
2081 			}
2082 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2083 		}
2084 
2085 		index = dpaa2_flow_extract_search(
2086 				&priv->extract.tc_key_extract[group].dpkg,
2087 				NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
2088 		if (index < 0) {
2089 			ret = dpaa2_flow_extract_add(
2090 					&priv->extract.tc_key_extract[group],
2091 					NET_PROT_UDP,
2092 					NH_FLD_UDP_PORT_SRC,
2093 					NH_FLD_UDP_PORT_SIZE);
2094 			if (ret) {
2095 				DPAA2_PMD_ERR("FS Extract add UDP_SRC failed.");
2096 
2097 				return -1;
2098 			}
2099 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2100 		}
2101 
2102 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2103 		if (ret) {
2104 			DPAA2_PMD_ERR(
2105 				"Move ipaddr before UDP_PORT_SRC set failed");
2106 			return -1;
2107 		}
2108 
2109 		ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
2110 				&flow->qos_rule,
2111 				NET_PROT_UDP,
2112 				NH_FLD_UDP_PORT_SRC,
2113 				&spec->hdr.src_port,
2114 				&mask->hdr.src_port,
2115 				NH_FLD_UDP_PORT_SIZE);
2116 		if (ret) {
2117 			DPAA2_PMD_ERR(
2118 				"QoS NH_FLD_UDP_PORT_SRC rule data set failed");
2119 			return -1;
2120 		}
2121 
2122 		ret = dpaa2_flow_rule_data_set(
2123 				&priv->extract.tc_key_extract[group],
2124 				&flow->fs_rule,
2125 				NET_PROT_UDP,
2126 				NH_FLD_UDP_PORT_SRC,
2127 				&spec->hdr.src_port,
2128 				&mask->hdr.src_port,
2129 				NH_FLD_UDP_PORT_SIZE);
2130 		if (ret) {
2131 			DPAA2_PMD_ERR(
2132 				"FS NH_FLD_UDP_PORT_SRC rule data set failed");
2133 			return -1;
2134 		}
2135 	}
2136 
2137 	if (mask->hdr.dst_port) {
2138 		index = dpaa2_flow_extract_search(
2139 				&priv->extract.qos_key_extract.dpkg,
2140 				NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
2141 		if (index < 0) {
2142 			ret = dpaa2_flow_extract_add(
2143 					&priv->extract.qos_key_extract,
2144 					NET_PROT_UDP,
2145 					NH_FLD_UDP_PORT_DST,
2146 					NH_FLD_UDP_PORT_SIZE);
2147 			if (ret) {
2148 				DPAA2_PMD_ERR("QoS Extract add UDP_DST failed.");
2149 
2150 				return -1;
2151 			}
2152 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2153 		}
2154 
2155 		index = dpaa2_flow_extract_search(
2156 				&priv->extract.tc_key_extract[group].dpkg,
2157 				NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
2158 		if (index < 0) {
2159 			ret = dpaa2_flow_extract_add(
2160 					&priv->extract.tc_key_extract[group],
2161 					NET_PROT_UDP,
2162 					NH_FLD_UDP_PORT_DST,
2163 					NH_FLD_UDP_PORT_SIZE);
2164 			if (ret) {
2165 				DPAA2_PMD_ERR("FS Extract add UDP_DST failed.");
2166 
2167 				return -1;
2168 			}
2169 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2170 		}
2171 
2172 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2173 		if (ret) {
2174 			DPAA2_PMD_ERR(
2175 				"Move ipaddr before UDP_PORT_DST set failed");
2176 			return -1;
2177 		}
2178 
2179 		ret = dpaa2_flow_rule_data_set(
2180 				&priv->extract.qos_key_extract,
2181 				&flow->qos_rule,
2182 				NET_PROT_UDP,
2183 				NH_FLD_UDP_PORT_DST,
2184 				&spec->hdr.dst_port,
2185 				&mask->hdr.dst_port,
2186 				NH_FLD_UDP_PORT_SIZE);
2187 		if (ret) {
2188 			DPAA2_PMD_ERR(
2189 				"QoS NH_FLD_UDP_PORT_DST rule data set failed");
2190 			return -1;
2191 		}
2192 
2193 		ret = dpaa2_flow_rule_data_set(
2194 				&priv->extract.tc_key_extract[group],
2195 				&flow->fs_rule,
2196 				NET_PROT_UDP,
2197 				NH_FLD_UDP_PORT_DST,
2198 				&spec->hdr.dst_port,
2199 				&mask->hdr.dst_port,
2200 				NH_FLD_UDP_PORT_SIZE);
2201 		if (ret) {
2202 			DPAA2_PMD_ERR(
2203 				"FS NH_FLD_UDP_PORT_DST rule data set failed");
2204 			return -1;
2205 		}
2206 	}
2207 
2208 	(*device_configured) |= local_cfg;
2209 
2210 	return 0;
2211 }
2212 
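/* Configure QoS/FS extracts and rule data for a TCP pattern item.
 * Mirrors the UDP handler above, discriminating on IPPROTO_TCP and
 * matching masked TCP source/destination ports.
 */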
2213 static int
2214 dpaa2_configure_flow_tcp(struct rte_flow *flow,
2215 			 struct rte_eth_dev *dev,
2216 			 const struct rte_flow_attr *attr,
2217 			 const struct rte_flow_item *pattern,
2218 			 const struct rte_flow_action actions[] __rte_unused,
2219 			 struct rte_flow_error *error __rte_unused,
2220 			 int *device_configured)
2221 {
2222 	int index, ret;
2223 	int local_cfg = 0;
2224 	uint32_t group;
2225 	const struct rte_flow_item_tcp *spec, *mask;
2226 
2227 	const struct rte_flow_item_tcp *last __rte_unused;
2228 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2229 
2230 	group = attr->group;
2231 
2232 	/* Parse pattern list to get the matching parameters */
2233 	spec    = (const struct rte_flow_item_tcp *)pattern->spec;
2234 	last    = (const struct rte_flow_item_tcp *)pattern->last;
2235 	mask    = (const struct rte_flow_item_tcp *)
2236 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_tcp_mask);
2237 
2238 	/* Get traffic class index and flow id to be configured */
2239 	flow->tc_id = group;
2240 	flow->tc_index = attr->priority;
2241 
2242 	if (!spec || !mc_l4_port_identification) {
2243 		struct proto_discrimination proto;
2244 
2245 		index = dpaa2_flow_extract_search(
2246 				&priv->extract.qos_key_extract.dpkg,
2247 				NET_PROT_IP, NH_FLD_IP_PROTO);
2248 		if (index < 0) {
2249 			ret = dpaa2_flow_proto_discrimination_extract(
2250 					&priv->extract.qos_key_extract,
2251 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2252 			if (ret) {
2253 				DPAA2_PMD_ERR(
2254 					"QoS Extract IP protocol to discriminate TCP failed.");
2255 
2256 				return -1;
2257 			}
2258 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2259 		}
2260 
2261 		index = dpaa2_flow_extract_search(
2262 				&priv->extract.tc_key_extract[group].dpkg,
2263 				NET_PROT_IP, NH_FLD_IP_PROTO);
2264 		if (index < 0) {
2265 			ret = dpaa2_flow_proto_discrimination_extract(
2266 				&priv->extract.tc_key_extract[group],
2267 				DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2268 			if (ret) {
2269 				DPAA2_PMD_ERR(
2270 					"FS Extract IP protocol to discriminate TCP failed.");
2271 
2272 				return -1;
2273 			}
2274 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2275 		}
2276 
2277 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2278 		if (ret) {
2279 			DPAA2_PMD_ERR(
2280 				"Move IP addr before TCP discrimination set failed");
2281 			return -1;
2282 		}
2283 
2284 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2285 		proto.ip_proto = IPPROTO_TCP;
2286 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2287 							proto, group);
2288 		if (ret) {
2289 			DPAA2_PMD_ERR("TCP discrimination rule set failed");
2290 			return -1;
2291 		}
2292 
2293 		(*device_configured) |= local_cfg;
2294 
2295 		if (!spec)
2296 			return 0;
2297 	}
2298 
2299 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2300 		RTE_FLOW_ITEM_TYPE_TCP)) {
2301 		DPAA2_PMD_WARN("Extract field(s) of TCP not supported.");
2302 
2303 		return -1;
2304 	}
2305 
2306 	if (mask->hdr.src_port) {
2307 		index = dpaa2_flow_extract_search(
2308 				&priv->extract.qos_key_extract.dpkg,
2309 				NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
2310 		if (index < 0) {
2311 			ret = dpaa2_flow_extract_add(
2312 					&priv->extract.qos_key_extract,
2313 					NET_PROT_TCP,
2314 					NH_FLD_TCP_PORT_SRC,
2315 					NH_FLD_TCP_PORT_SIZE);
2316 			if (ret) {
2317 				DPAA2_PMD_ERR("QoS Extract add TCP_SRC failed.");
2318 
2319 				return -1;
2320 			}
2321 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2322 		}
2323 
2324 		index = dpaa2_flow_extract_search(
2325 				&priv->extract.tc_key_extract[group].dpkg,
2326 				NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
2327 		if (index < 0) {
2328 			ret = dpaa2_flow_extract_add(
2329 					&priv->extract.tc_key_extract[group],
2330 					NET_PROT_TCP,
2331 					NH_FLD_TCP_PORT_SRC,
2332 					NH_FLD_TCP_PORT_SIZE);
2333 			if (ret) {
2334 				DPAA2_PMD_ERR("FS Extract add TCP_SRC failed.");
2335 
2336 				return -1;
2337 			}
2338 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2339 		}
2340 
2341 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2342 		if (ret) {
2343 			DPAA2_PMD_ERR(
2344 				"Move ipaddr before TCP_PORT_SRC set failed");
2345 			return -1;
2346 		}
2347 
2348 		ret = dpaa2_flow_rule_data_set(
2349 				&priv->extract.qos_key_extract,
2350 				&flow->qos_rule,
2351 				NET_PROT_TCP,
2352 				NH_FLD_TCP_PORT_SRC,
2353 				&spec->hdr.src_port,
2354 				&mask->hdr.src_port,
2355 				NH_FLD_TCP_PORT_SIZE);
2356 		if (ret) {
2357 			DPAA2_PMD_ERR(
2358 				"QoS NH_FLD_TCP_PORT_SRC rule data set failed");
2359 			return -1;
2360 		}
2361 
2362 		ret = dpaa2_flow_rule_data_set(
2363 				&priv->extract.tc_key_extract[group],
2364 				&flow->fs_rule,
2365 				NET_PROT_TCP,
2366 				NH_FLD_TCP_PORT_SRC,
2367 				&spec->hdr.src_port,
2368 				&mask->hdr.src_port,
2369 				NH_FLD_TCP_PORT_SIZE);
2370 		if (ret) {
2371 			DPAA2_PMD_ERR(
2372 				"FS NH_FLD_TCP_PORT_SRC rule data set failed");
2373 			return -1;
2374 		}
2375 	}
2376 
2377 	if (mask->hdr.dst_port) {
2378 		index = dpaa2_flow_extract_search(
2379 				&priv->extract.qos_key_extract.dpkg,
2380 				NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
2381 		if (index < 0) {
2382 			ret = dpaa2_flow_extract_add(
2383 					&priv->extract.qos_key_extract,
2384 					NET_PROT_TCP,
2385 					NH_FLD_TCP_PORT_DST,
2386 					NH_FLD_TCP_PORT_SIZE);
2387 			if (ret) {
2388 				DPAA2_PMD_ERR("QoS Extract add TCP_DST failed.");
2389 
2390 				return -1;
2391 			}
2392 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2393 		}
2394 
2395 		index = dpaa2_flow_extract_search(
2396 				&priv->extract.tc_key_extract[group].dpkg,
2397 				NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
2398 		if (index < 0) {
2399 			ret = dpaa2_flow_extract_add(
2400 					&priv->extract.tc_key_extract[group],
2401 					NET_PROT_TCP,
2402 					NH_FLD_TCP_PORT_DST,
2403 					NH_FLD_TCP_PORT_SIZE);
2404 			if (ret) {
2405 				DPAA2_PMD_ERR("FS Extract add TCP_DST failed.");
2406 
2407 				return -1;
2408 			}
2409 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2410 		}
2411 
2412 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2413 		if (ret) {
2414 			DPAA2_PMD_ERR(
2415 				"Move ipaddr before TCP_PORT_DST set failed");
2416 			return -1;
2417 		}
2418 
2419 		ret = dpaa2_flow_rule_data_set(
2420 				&priv->extract.qos_key_extract,
2421 				&flow->qos_rule,
2422 				NET_PROT_TCP,
2423 				NH_FLD_TCP_PORT_DST,
2424 				&spec->hdr.dst_port,
2425 				&mask->hdr.dst_port,
2426 				NH_FLD_TCP_PORT_SIZE);
2427 		if (ret) {
2428 			DPAA2_PMD_ERR(
2429 				"QoS NH_FLD_TCP_PORT_DST rule data set failed");
2430 			return -1;
2431 		}
2432 
2433 		ret = dpaa2_flow_rule_data_set(
2434 				&priv->extract.tc_key_extract[group],
2435 				&flow->fs_rule,
2436 				NET_PROT_TCP,
2437 				NH_FLD_TCP_PORT_DST,
2438 				&spec->hdr.dst_port,
2439 				&mask->hdr.dst_port,
2440 				NH_FLD_TCP_PORT_SIZE);
2441 		if (ret) {
2442 			DPAA2_PMD_ERR(
2443 				"FS NH_FLD_TCP_PORT_DST rule data set failed");
2444 			return -1;
2445 		}
2446 	}
2447 
2448 	(*device_configured) |= local_cfg;
2449 
2450 	return 0;
2451 }
2452 
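/* Configure QoS/FS extracts and rule data for an SCTP pattern item,
 * following the same scheme as UDP/TCP above with IPPROTO_SCTP
 * discrimination and SCTP source/destination port extracts.
 */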
2453 static int
2454 dpaa2_configure_flow_sctp(struct rte_flow *flow,
2455 			  struct rte_eth_dev *dev,
2456 			  const struct rte_flow_attr *attr,
2457 			  const struct rte_flow_item *pattern,
2458 			  const struct rte_flow_action actions[] __rte_unused,
2459 			  struct rte_flow_error *error __rte_unused,
2460 			  int *device_configured)
2461 {
2462 	int index, ret;
2463 	int local_cfg = 0;
2464 	uint32_t group;
2465 	const struct rte_flow_item_sctp *spec, *mask;
2466 
2467 	const struct rte_flow_item_sctp *last __rte_unused;
2468 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2469 
2470 	group = attr->group;
2471 
2472 	/* Parse pattern list to get the matching parameters */
2473 	spec    = (const struct rte_flow_item_sctp *)pattern->spec;
2474 	last    = (const struct rte_flow_item_sctp *)pattern->last;
2475 	mask    = (const struct rte_flow_item_sctp *)
2476 			(pattern->mask ? pattern->mask :
2477 				&dpaa2_flow_item_sctp_mask);
2478 
2479 	/* Get traffic class index and flow id to be configured */
2480 	flow->tc_id = group;
2481 	flow->tc_index = attr->priority;
2482 
2483 	if (!spec || !mc_l4_port_identification) {
2484 		struct proto_discrimination proto;
2485 
2486 		index = dpaa2_flow_extract_search(
2487 				&priv->extract.qos_key_extract.dpkg,
2488 				NET_PROT_IP, NH_FLD_IP_PROTO);
2489 		if (index < 0) {
2490 			ret = dpaa2_flow_proto_discrimination_extract(
2491 					&priv->extract.qos_key_extract,
2492 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2493 			if (ret) {
2494 				DPAA2_PMD_ERR(
2495 					"QoS Extract IP protocol to discriminate SCTP failed.");
2496 
2497 				return -1;
2498 			}
2499 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2500 		}
2501 
2502 		index = dpaa2_flow_extract_search(
2503 				&priv->extract.tc_key_extract[group].dpkg,
2504 				NET_PROT_IP, NH_FLD_IP_PROTO);
2505 		if (index < 0) {
2506 			ret = dpaa2_flow_proto_discrimination_extract(
2507 					&priv->extract.tc_key_extract[group],
2508 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2509 			if (ret) {
2510 				DPAA2_PMD_ERR(
2511 					"FS Extract IP protocol to discriminate SCTP failed.");
2512 
2513 				return -1;
2514 			}
2515 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2516 		}
2517 
2518 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2519 		if (ret) {
2520 			DPAA2_PMD_ERR(
2521 				"Move ipaddr before SCTP discrimination set failed");
2522 			return -1;
2523 		}
2524 
2525 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2526 		proto.ip_proto = IPPROTO_SCTP;
2527 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2528 							proto, group);
2529 		if (ret) {
2530 			DPAA2_PMD_ERR("SCTP discrimination rule set failed");
2531 			return -1;
2532 		}
2533 
2534 		(*device_configured) |= local_cfg;
2535 
2536 		if (!spec)
2537 			return 0;
2538 	}
2539 
2540 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2541 		RTE_FLOW_ITEM_TYPE_SCTP)) {
2542 		DPAA2_PMD_WARN("Extract field(s) of SCTP not supported.");
2543 
2544 		return -1;
2545 	}
2546 
2547 	if (mask->hdr.src_port) {
2548 		index = dpaa2_flow_extract_search(
2549 				&priv->extract.qos_key_extract.dpkg,
2550 				NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
2551 		if (index < 0) {
2552 			ret = dpaa2_flow_extract_add(
2553 					&priv->extract.qos_key_extract,
2554 					NET_PROT_SCTP,
2555 					NH_FLD_SCTP_PORT_SRC,
2556 					NH_FLD_SCTP_PORT_SIZE);
2557 			if (ret) {
2558 				DPAA2_PMD_ERR("QoS Extract add SCTP_SRC failed.");
2559 
2560 				return -1;
2561 			}
2562 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2563 		}
2564 
2565 		index = dpaa2_flow_extract_search(
2566 				&priv->extract.tc_key_extract[group].dpkg,
2567 				NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
2568 		if (index < 0) {
2569 			ret = dpaa2_flow_extract_add(
2570 					&priv->extract.tc_key_extract[group],
2571 					NET_PROT_SCTP,
2572 					NH_FLD_SCTP_PORT_SRC,
2573 					NH_FLD_SCTP_PORT_SIZE);
2574 			if (ret) {
2575 				DPAA2_PMD_ERR("FS Extract add SCTP_SRC failed.");
2576 
2577 				return -1;
2578 			}
2579 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2580 		}
2581 
2582 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2583 		if (ret) {
2584 			DPAA2_PMD_ERR(
2585 				"Move ipaddr before SCTP_PORT_SRC set failed");
2586 			return -1;
2587 		}
2588 
2589 		ret = dpaa2_flow_rule_data_set(
2590 				&priv->extract.qos_key_extract,
2591 				&flow->qos_rule,
2592 				NET_PROT_SCTP,
2593 				NH_FLD_SCTP_PORT_SRC,
2594 				&spec->hdr.src_port,
2595 				&mask->hdr.src_port,
2596 				NH_FLD_SCTP_PORT_SIZE);
2597 		if (ret) {
2598 			DPAA2_PMD_ERR(
2599 				"QoS NH_FLD_SCTP_PORT_SRC rule data set failed");
2600 			return -1;
2601 		}
2602 
2603 		ret = dpaa2_flow_rule_data_set(
2604 				&priv->extract.tc_key_extract[group],
2605 				&flow->fs_rule,
2606 				NET_PROT_SCTP,
2607 				NH_FLD_SCTP_PORT_SRC,
2608 				&spec->hdr.src_port,
2609 				&mask->hdr.src_port,
2610 				NH_FLD_SCTP_PORT_SIZE);
2611 		if (ret) {
2612 			DPAA2_PMD_ERR(
2613 				"FS NH_FLD_SCTP_PORT_SRC rule data set failed");
2614 			return -1;
2615 		}
2616 	}
2617 
2618 	if (mask->hdr.dst_port) {
2619 		index = dpaa2_flow_extract_search(
2620 				&priv->extract.qos_key_extract.dpkg,
2621 				NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
2622 		if (index < 0) {
2623 			ret = dpaa2_flow_extract_add(
2624 					&priv->extract.qos_key_extract,
2625 					NET_PROT_SCTP,
2626 					NH_FLD_SCTP_PORT_DST,
2627 					NH_FLD_SCTP_PORT_SIZE);
2628 			if (ret) {
2629 				DPAA2_PMD_ERR("QoS Extract add SCTP_DST failed.");
2630 
2631 				return -1;
2632 			}
2633 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2634 		}
2635 
2636 		index = dpaa2_flow_extract_search(
2637 				&priv->extract.tc_key_extract[group].dpkg,
2638 				NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
2639 		if (index < 0) {
2640 			ret = dpaa2_flow_extract_add(
2641 					&priv->extract.tc_key_extract[group],
2642 					NET_PROT_SCTP,
2643 					NH_FLD_SCTP_PORT_DST,
2644 					NH_FLD_SCTP_PORT_SIZE);
2645 			if (ret) {
2646 				DPAA2_PMD_ERR("FS Extract add SCTP_DST failed.");
2647 
2648 				return -1;
2649 			}
2650 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2651 		}
2652 
2653 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2654 		if (ret) {
2655 			DPAA2_PMD_ERR(
2656 				"Move ipaddr before SCTP_PORT_DST set failed");
2657 			return -1;
2658 		}
2659 
2660 		ret = dpaa2_flow_rule_data_set(
2661 				&priv->extract.qos_key_extract,
2662 				&flow->qos_rule,
2663 				NET_PROT_SCTP,
2664 				NH_FLD_SCTP_PORT_DST,
2665 				&spec->hdr.dst_port,
2666 				&mask->hdr.dst_port,
2667 				NH_FLD_SCTP_PORT_SIZE);
2668 		if (ret) {
2669 			DPAA2_PMD_ERR(
2670 				"QoS NH_FLD_SCTP_PORT_DST rule data set failed");
2671 			return -1;
2672 		}
2673 
2674 		ret = dpaa2_flow_rule_data_set(
2675 				&priv->extract.tc_key_extract[group],
2676 				&flow->fs_rule,
2677 				NET_PROT_SCTP,
2678 				NH_FLD_SCTP_PORT_DST,
2679 				&spec->hdr.dst_port,
2680 				&mask->hdr.dst_port,
2681 				NH_FLD_SCTP_PORT_SIZE);
2682 		if (ret) {
2683 			DPAA2_PMD_ERR(
2684 				"FS NH_FLD_SCTP_PORT_DST rule data set failed");
2685 			return -1;
2686 		}
2687 	}
2688 
2689 	(*device_configured) |= local_cfg;
2690 
2691 	return 0;
2692 }
2693 
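/* Configure QoS/FS extracts and rule data for a GRE pattern item.
 * Without a spec only IPPROTO_GRE discrimination is programmed; with
 * a spec the GRE protocol (ether type) field is matched as well.
 */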
2694 static int
2695 dpaa2_configure_flow_gre(struct rte_flow *flow,
2696 			 struct rte_eth_dev *dev,
2697 			 const struct rte_flow_attr *attr,
2698 			 const struct rte_flow_item *pattern,
2699 			 const struct rte_flow_action actions[] __rte_unused,
2700 			 struct rte_flow_error *error __rte_unused,
2701 			 int *device_configured)
2702 {
2703 	int index, ret;
2704 	int local_cfg = 0;
2705 	uint32_t group;
2706 	const struct rte_flow_item_gre *spec, *mask;
2707 
2708 	const struct rte_flow_item_gre *last __rte_unused;
2709 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2710 
2711 	group = attr->group;
2712 
2713 	/* Parse pattern list to get the matching parameters */
2714 	spec    = (const struct rte_flow_item_gre *)pattern->spec;
2715 	last    = (const struct rte_flow_item_gre *)pattern->last;
2716 	mask    = (const struct rte_flow_item_gre *)
2717 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_gre_mask);
2718 
2719 	/* Get traffic class index and flow id to be configured */
2720 	flow->tc_id = group;
2721 	flow->tc_index = attr->priority;
2722 
2723 	if (!spec) {
2724 		struct proto_discrimination proto;
2725 
2726 		index = dpaa2_flow_extract_search(
2727 				&priv->extract.qos_key_extract.dpkg,
2728 				NET_PROT_IP, NH_FLD_IP_PROTO);
2729 		if (index < 0) {
2730 			ret = dpaa2_flow_proto_discrimination_extract(
2731 					&priv->extract.qos_key_extract,
2732 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2733 			if (ret) {
2734 				DPAA2_PMD_ERR(
2735 					"QoS Extract IP protocol to discriminate GRE failed.");
2736 
2737 				return -1;
2738 			}
2739 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2740 		}
2741 
2742 		index = dpaa2_flow_extract_search(
2743 				&priv->extract.tc_key_extract[group].dpkg,
2744 				NET_PROT_IP, NH_FLD_IP_PROTO);
2745 		if (index < 0) {
2746 			ret = dpaa2_flow_proto_discrimination_extract(
2747 					&priv->extract.tc_key_extract[group],
2748 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2749 			if (ret) {
2750 				DPAA2_PMD_ERR(
2751 					"FS Extract IP protocol to discriminate GRE failed.");
2752 
2753 				return -1;
2754 			}
2755 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2756 		}
2757 
2758 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2759 		if (ret) {
2760 			DPAA2_PMD_ERR(
2761 				"Move IP addr before GRE discrimination set failed");
2762 			return -1;
2763 		}
2764 
2765 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2766 		proto.ip_proto = IPPROTO_GRE;
2767 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2768 							proto, group);
2769 		if (ret) {
2770 			DPAA2_PMD_ERR("GRE discrimination rule set failed");
2771 			return -1;
2772 		}
2773 
2774 		(*device_configured) |= local_cfg;
2775 
2776 		return 0;
2777 	}
2778 
2779 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2780 		RTE_FLOW_ITEM_TYPE_GRE)) {
2781 		DPAA2_PMD_WARN("Extract field(s) of GRE not supported.");
2782 
2783 		return -1;
2784 	}
2785 
2786 	if (!mask->protocol)
2787 		return 0;
2788 
2789 	index = dpaa2_flow_extract_search(
2790 			&priv->extract.qos_key_extract.dpkg,
2791 			NET_PROT_GRE, NH_FLD_GRE_TYPE);
2792 	if (index < 0) {
2793 		ret = dpaa2_flow_extract_add(
2794 				&priv->extract.qos_key_extract,
2795 				NET_PROT_GRE,
2796 				NH_FLD_GRE_TYPE,
2797 				sizeof(rte_be16_t));
2798 		if (ret) {
2799 			DPAA2_PMD_ERR("QoS Extract add GRE_TYPE failed.");
2800 
2801 			return -1;
2802 		}
2803 		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2804 	}
2805 
2806 	index = dpaa2_flow_extract_search(
2807 			&priv->extract.tc_key_extract[group].dpkg,
2808 			NET_PROT_GRE, NH_FLD_GRE_TYPE);
2809 	if (index < 0) {
2810 		ret = dpaa2_flow_extract_add(
2811 				&priv->extract.tc_key_extract[group],
2812 				NET_PROT_GRE,
2813 				NH_FLD_GRE_TYPE,
2814 				sizeof(rte_be16_t));
2815 		if (ret) {
2816 			DPAA2_PMD_ERR("FS Extract add GRE_TYPE failed.");
2817 
2818 			return -1;
2819 		}
2820 		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2821 	}
2822 
2823 	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2824 	if (ret) {
2825 		DPAA2_PMD_ERR(
2826 			"Move ipaddr before GRE_TYPE set failed");
2827 		return -1;
2828 	}
2829 
2830 	ret = dpaa2_flow_rule_data_set(
2831 				&priv->extract.qos_key_extract,
2832 				&flow->qos_rule,
2833 				NET_PROT_GRE,
2834 				NH_FLD_GRE_TYPE,
2835 				&spec->protocol,
2836 				&mask->protocol,
2837 				sizeof(rte_be16_t));
2838 	if (ret) {
2839 		DPAA2_PMD_ERR(
2840 			"QoS NH_FLD_GRE_TYPE rule data set failed");
2841 		return -1;
2842 	}
2843 
2844 	ret = dpaa2_flow_rule_data_set(
2845 			&priv->extract.tc_key_extract[group],
2846 			&flow->fs_rule,
2847 			NET_PROT_GRE,
2848 			NH_FLD_GRE_TYPE,
2849 			&spec->protocol,
2850 			&mask->protocol,
2851 			sizeof(rte_be16_t));
2852 	if (ret) {
2853 		DPAA2_PMD_ERR(
2854 			"FS NH_FLD_GRE_TYPE rule data set failed");
2855 		return -1;
2856 	}
2857 
2858 	(*device_configured) |= local_cfg;
2859 
2860 	return 0;
2861 }
2862 
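/* Configure a RAW pattern item. Only non-relative matches starting at
 * offset 0 with equal spec/mask lengths are accepted; the raw bytes are
 * programmed directly into both the QoS and FS rules, growing the raw
 * extracts when needed.
 */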
2863 static int
2864 dpaa2_configure_flow_raw(struct rte_flow *flow,
2865 			 struct rte_eth_dev *dev,
2866 			 const struct rte_flow_attr *attr,
2867 			 const struct rte_flow_item *pattern,
2868 			 const struct rte_flow_action actions[] __rte_unused,
2869 			 struct rte_flow_error *error __rte_unused,
2870 			 int *device_configured)
2871 {
2872 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2873 	const struct rte_flow_item_raw *spec = pattern->spec;
2874 	const struct rte_flow_item_raw *mask = pattern->mask;
2875 	int prev_key_size =
2876 		priv->extract.qos_key_extract.key_info.key_total_size;
2877 	int local_cfg = 0, ret;
2878 	uint32_t group;
2879 
2880 	/* Need both spec and mask */
2881 	if (!spec || !mask) {
2882 		DPAA2_PMD_ERR("spec or mask not present.");
2883 		return -EINVAL;
2884 	}
2885 	/* Only supports non-relative with offset 0 */
2886 	if (spec->relative || spec->offset != 0 ||
2887 	    spec->search || spec->limit) {
2888 		DPAA2_PMD_ERR("Relative, non-zero offset, search or limit not supported.");
2889 		return -EINVAL;
2890 	}
2891 	/* Spec len and mask len should be same */
2892 	if (spec->length != mask->length) {
2893 		DPAA2_PMD_ERR("Spec len and mask len mismatch.");
2894 		return -EINVAL;
2895 	}
2896 
2897 	/* Get traffic class index and flow id to be configured */
2898 	group = attr->group;
2899 	flow->tc_id = group;
2900 	flow->tc_index = attr->priority;
2901 
2902 	if (prev_key_size <= spec->length) {
2903 		ret = dpaa2_flow_extract_add_raw(&priv->extract.qos_key_extract,
2904 						 spec->length);
2905 		if (ret) {
2906 			DPAA2_PMD_ERR("QoS Extract RAW add failed.");
2907 			return -1;
2908 		}
2909 		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2910 
2911 		ret = dpaa2_flow_extract_add_raw(
2912 					&priv->extract.tc_key_extract[group],
2913 					spec->length);
2914 		if (ret) {
2915 			DPAA2_PMD_ERR("FS Extract RAW add failed.");
2916 			return -1;
2917 		}
2918 		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2919 	}
2920 
2921 	ret = dpaa2_flow_rule_data_set_raw(&flow->qos_rule, spec->pattern,
2922 					   mask->pattern, spec->length);
2923 	if (ret) {
2924 		DPAA2_PMD_ERR("QoS RAW rule data set failed");
2925 		return -1;
2926 	}
2927 
2928 	ret = dpaa2_flow_rule_data_set_raw(&flow->fs_rule, spec->pattern,
2929 					   mask->pattern, spec->length);
2930 	if (ret) {
2931 		DPAA2_PMD_ERR("FS RAW rule data set failed");
2932 		return -1;
2933 	}
2934 
2935 	(*device_configured) |= local_cfg;
2936 
2937 	return 0;
2938 }
2939 
2940 /* The existing QoS/FS entries carrying IP address(es) need to be
2941  * updated after new extract(s) are inserted ahead of the IP address
2942  * extract(s): the address offsets within the key shift, so the
2943  * address bytes are moved and the entries are re-added to hardware.
2944  */
2945 static int
2946 dpaa2_flow_entry_update(
2947 	struct dpaa2_dev_priv *priv, uint8_t tc_id)
2948 {
2949 	struct rte_flow *curr = LIST_FIRST(&priv->flows);
2950 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
2951 	int ret;
2952 	int qos_ipsrc_offset = -1, qos_ipdst_offset = -1;
2953 	int fs_ipsrc_offset = -1, fs_ipdst_offset = -1;
2954 	struct dpaa2_key_extract *qos_key_extract =
2955 		&priv->extract.qos_key_extract;
2956 	struct dpaa2_key_extract *tc_key_extract =
2957 		&priv->extract.tc_key_extract[tc_id];
2958 	char ipsrc_key[NH_FLD_IPV6_ADDR_SIZE];
2959 	char ipdst_key[NH_FLD_IPV6_ADDR_SIZE];
2960 	char ipsrc_mask[NH_FLD_IPV6_ADDR_SIZE];
2961 	char ipdst_mask[NH_FLD_IPV6_ADDR_SIZE];
2962 	int extend = -1, extend1, size = -1;
2963 	uint16_t qos_index;
2964 
2965 	while (curr) {
2966 		if (curr->ipaddr_rule.ipaddr_type ==
2967 			FLOW_NONE_IPADDR) {
2968 			curr = LIST_NEXT(curr, next);
2969 			continue;
2970 		}
2971 
2972 		if (curr->ipaddr_rule.ipaddr_type ==
2973 			FLOW_IPV4_ADDR) {
2974 			qos_ipsrc_offset =
2975 				qos_key_extract->key_info.ipv4_src_offset;
2976 			qos_ipdst_offset =
2977 				qos_key_extract->key_info.ipv4_dst_offset;
2978 			fs_ipsrc_offset =
2979 				tc_key_extract->key_info.ipv4_src_offset;
2980 			fs_ipdst_offset =
2981 				tc_key_extract->key_info.ipv4_dst_offset;
2982 			size = NH_FLD_IPV4_ADDR_SIZE;
2983 		} else {
2984 			qos_ipsrc_offset =
2985 				qos_key_extract->key_info.ipv6_src_offset;
2986 			qos_ipdst_offset =
2987 				qos_key_extract->key_info.ipv6_dst_offset;
2988 			fs_ipsrc_offset =
2989 				tc_key_extract->key_info.ipv6_src_offset;
2990 			fs_ipdst_offset =
2991 				tc_key_extract->key_info.ipv6_dst_offset;
2992 			size = NH_FLD_IPV6_ADDR_SIZE;
2993 		}
2994 
2995 		qos_index = curr->tc_id * priv->fs_entries +
2996 			curr->tc_index;
2997 
2998 		dpaa2_flow_qos_entry_log("Before update", curr, qos_index);
2999 
3000 		if (priv->num_rx_tc > 1) {
3001 			ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW,
3002 					priv->token, &curr->qos_rule);
3003 			if (ret) {
3004 				DPAA2_PMD_ERR("QoS entry remove failed.");
3005 				return -1;
3006 			}
3007 		}
3008 
3009 		extend = -1;
3010 
3011 		if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
3012 			RTE_ASSERT(qos_ipsrc_offset >=
3013 				curr->ipaddr_rule.qos_ipsrc_offset);
3014 			extend1 = qos_ipsrc_offset -
3015 				curr->ipaddr_rule.qos_ipsrc_offset;
3016 			if (extend >= 0)
3017 				RTE_ASSERT(extend == extend1);
3018 			else
3019 				extend = extend1;
3020 
3021 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3022 				(size == NH_FLD_IPV6_ADDR_SIZE));
3023 
3024 			memcpy(ipsrc_key,
3025 				(char *)(size_t)curr->qos_rule.key_iova +
3026 				curr->ipaddr_rule.qos_ipsrc_offset,
3027 				size);
3028 			memset((char *)(size_t)curr->qos_rule.key_iova +
3029 				curr->ipaddr_rule.qos_ipsrc_offset,
3030 				0, size);
3031 
3032 			memcpy(ipsrc_mask,
3033 				(char *)(size_t)curr->qos_rule.mask_iova +
3034 				curr->ipaddr_rule.qos_ipsrc_offset,
3035 				size);
3036 			memset((char *)(size_t)curr->qos_rule.mask_iova +
3037 				curr->ipaddr_rule.qos_ipsrc_offset,
3038 				0, size);
3039 
3040 			curr->ipaddr_rule.qos_ipsrc_offset = qos_ipsrc_offset;
3041 		}
3042 
3043 		if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
3044 			RTE_ASSERT(qos_ipdst_offset >=
3045 				curr->ipaddr_rule.qos_ipdst_offset);
3046 			extend1 = qos_ipdst_offset -
3047 				curr->ipaddr_rule.qos_ipdst_offset;
3048 			if (extend >= 0)
3049 				RTE_ASSERT(extend == extend1);
3050 			else
3051 				extend = extend1;
3052 
3053 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3054 				(size == NH_FLD_IPV6_ADDR_SIZE));
3055 
3056 			memcpy(ipdst_key,
3057 				(char *)(size_t)curr->qos_rule.key_iova +
3058 				curr->ipaddr_rule.qos_ipdst_offset,
3059 				size);
3060 			memset((char *)(size_t)curr->qos_rule.key_iova +
3061 				curr->ipaddr_rule.qos_ipdst_offset,
3062 				0, size);
3063 
3064 			memcpy(ipdst_mask,
3065 				(char *)(size_t)curr->qos_rule.mask_iova +
3066 				curr->ipaddr_rule.qos_ipdst_offset,
3067 				size);
3068 			memset((char *)(size_t)curr->qos_rule.mask_iova +
3069 				curr->ipaddr_rule.qos_ipdst_offset,
3070 				0, size);
3071 
3072 			curr->ipaddr_rule.qos_ipdst_offset = qos_ipdst_offset;
3073 		}
3074 
3075 		if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
3076 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3077 				(size == NH_FLD_IPV6_ADDR_SIZE));
3078 			memcpy((char *)(size_t)curr->qos_rule.key_iova +
3079 				curr->ipaddr_rule.qos_ipsrc_offset,
3080 				ipsrc_key,
3081 				size);
3082 			memcpy((char *)(size_t)curr->qos_rule.mask_iova +
3083 				curr->ipaddr_rule.qos_ipsrc_offset,
3084 				ipsrc_mask,
3085 				size);
3086 		}
3087 		if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
3088 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3089 				(size == NH_FLD_IPV6_ADDR_SIZE));
3090 			memcpy((char *)(size_t)curr->qos_rule.key_iova +
3091 				curr->ipaddr_rule.qos_ipdst_offset,
3092 				ipdst_key,
3093 				size);
3094 			memcpy((char *)(size_t)curr->qos_rule.mask_iova +
3095 				curr->ipaddr_rule.qos_ipdst_offset,
3096 				ipdst_mask,
3097 				size);
3098 		}
3099 
3100 		if (extend >= 0)
3101 			curr->qos_real_key_size += extend;
3102 
3103 		curr->qos_rule.key_size = FIXED_ENTRY_SIZE;
3104 
3105 		dpaa2_flow_qos_entry_log("Start update", curr, qos_index);
3106 
3107 		if (priv->num_rx_tc > 1) {
3108 			ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
3109 					priv->token, &curr->qos_rule,
3110 					curr->tc_id, qos_index,
3111 					0, 0);
3112 			if (ret) {
3113 				DPAA2_PMD_ERR("QoS entry update failed.");
3114 				return -1;
3115 			}
3116 		}
3117 
3118 		if (curr->action != RTE_FLOW_ACTION_TYPE_QUEUE) {
3119 			curr = LIST_NEXT(curr, next);
3120 			continue;
3121 		}
3122 
3123 		dpaa2_flow_fs_entry_log("Before update", curr);
3124 		extend = -1;
3125 
3126 		ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW,
3127 				priv->token, curr->tc_id, &curr->fs_rule);
3128 		if (ret) {
3129 			DPAA2_PMD_ERR("FS entry remove failed.");
3130 			return -1;
3131 		}
3132 
3133 		if (curr->ipaddr_rule.fs_ipsrc_offset >= 0 &&
3134 			tc_id == curr->tc_id) {
3135 			RTE_ASSERT(fs_ipsrc_offset >=
3136 				curr->ipaddr_rule.fs_ipsrc_offset);
3137 			extend1 = fs_ipsrc_offset -
3138 				curr->ipaddr_rule.fs_ipsrc_offset;
3139 			if (extend >= 0)
3140 				RTE_ASSERT(extend == extend1);
3141 			else
3142 				extend = extend1;
3143 
3144 			memcpy(ipsrc_key,
3145 				(char *)(size_t)curr->fs_rule.key_iova +
3146 				curr->ipaddr_rule.fs_ipsrc_offset,
3147 				size);
3148 			memset((char *)(size_t)curr->fs_rule.key_iova +
3149 				curr->ipaddr_rule.fs_ipsrc_offset,
3150 				0, size);
3151 
3152 			memcpy(ipsrc_mask,
3153 				(char *)(size_t)curr->fs_rule.mask_iova +
3154 				curr->ipaddr_rule.fs_ipsrc_offset,
3155 				size);
3156 			memset((char *)(size_t)curr->fs_rule.mask_iova +
3157 				curr->ipaddr_rule.fs_ipsrc_offset,
3158 				0, size);
3159 
3160 			curr->ipaddr_rule.fs_ipsrc_offset = fs_ipsrc_offset;
3161 		}
3162 
3163 		if (curr->ipaddr_rule.fs_ipdst_offset >= 0 &&
3164 			tc_id == curr->tc_id) {
3165 			RTE_ASSERT(fs_ipdst_offset >=
3166 				curr->ipaddr_rule.fs_ipdst_offset);
3167 			extend1 = fs_ipdst_offset -
3168 				curr->ipaddr_rule.fs_ipdst_offset;
3169 			if (extend >= 0)
3170 				RTE_ASSERT(extend == extend1);
3171 			else
3172 				extend = extend1;
3173 
3174 			memcpy(ipdst_key,
3175 				(char *)(size_t)curr->fs_rule.key_iova +
3176 				curr->ipaddr_rule.fs_ipdst_offset,
3177 				size);
3178 			memset((char *)(size_t)curr->fs_rule.key_iova +
3179 				curr->ipaddr_rule.fs_ipdst_offset,
3180 				0, size);
3181 
3182 			memcpy(ipdst_mask,
3183 				(char *)(size_t)curr->fs_rule.mask_iova +
3184 				curr->ipaddr_rule.fs_ipdst_offset,
3185 				size);
3186 			memset((char *)(size_t)curr->fs_rule.mask_iova +
3187 				curr->ipaddr_rule.fs_ipdst_offset,
3188 				0, size);
3189 
3190 			curr->ipaddr_rule.fs_ipdst_offset = fs_ipdst_offset;
3191 		}
3192 
3193 		if (curr->ipaddr_rule.fs_ipsrc_offset >= 0) {
3194 			memcpy((char *)(size_t)curr->fs_rule.key_iova +
3195 				curr->ipaddr_rule.fs_ipsrc_offset,
3196 				ipsrc_key,
3197 				size);
3198 			memcpy((char *)(size_t)curr->fs_rule.mask_iova +
3199 				curr->ipaddr_rule.fs_ipsrc_offset,
3200 				ipsrc_mask,
3201 				size);
3202 		}
3203 		if (curr->ipaddr_rule.fs_ipdst_offset >= 0) {
3204 			memcpy((char *)(size_t)curr->fs_rule.key_iova +
3205 				curr->ipaddr_rule.fs_ipdst_offset,
3206 				ipdst_key,
3207 				size);
3208 			memcpy((char *)(size_t)curr->fs_rule.mask_iova +
3209 				curr->ipaddr_rule.fs_ipdst_offset,
3210 				ipdst_mask,
3211 				size);
3212 		}
3213 
3214 		if (extend >= 0)
3215 			curr->fs_real_key_size += extend;
3216 		curr->fs_rule.key_size = FIXED_ENTRY_SIZE;
3217 
3218 		dpaa2_flow_fs_entry_log("Start update", curr);
3219 
3220 		ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW,
3221 				priv->token, curr->tc_id, curr->tc_index,
3222 				&curr->fs_rule, &curr->action_cfg);
3223 		if (ret) {
3224 			DPAA2_PMD_ERR("FS entry update failed.");
3225 			return -1;
3226 		}
3227 
3228 		curr = LIST_NEXT(curr, next);
3229 	}
3230 
3231 	return 0;
3232 }
3233 
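/* Reject a flow whose (group, priority) pair is already in use, since
 * the pair maps one-to-one to a (TC, index-within-TC) table slot.
 */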
3234 static inline int
3235 dpaa2_flow_verify_attr(
3236 	struct dpaa2_dev_priv *priv,
3237 	const struct rte_flow_attr *attr)
3238 {
3239 	struct rte_flow *curr = LIST_FIRST(&priv->flows);
3240 
3241 	while (curr) {
3242 		if (curr->tc_id == attr->group &&
3243 			curr->tc_index == attr->priority) {
3244 			DPAA2_PMD_ERR(
3245 				"Flow with group %d and priority %d already exists.",
3246 				attr->group, attr->priority);
3247 
3248 			return -1;
3249 		}
3250 		curr = LIST_NEXT(curr, next);
3251 	}
3252 
3253 	return 0;
3254 }
3255 
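/* Validate the action list: a QUEUE destination must belong to the TC
 * selected by attr->group, and an RSS action may not reference more
 * queues than the distribution size nor queues outside this group.
 */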
3256 static inline int
3257 dpaa2_flow_verify_action(
3258 	struct dpaa2_dev_priv *priv,
3259 	const struct rte_flow_attr *attr,
3260 	const struct rte_flow_action actions[])
3261 {
3262 	int end_of_list = 0, i, j = 0;
3263 	const struct rte_flow_action_queue *dest_queue;
3264 	const struct rte_flow_action_rss *rss_conf;
3265 	struct dpaa2_queue *rxq;
3266 
3267 	while (!end_of_list) {
3268 		switch (actions[j].type) {
3269 		case RTE_FLOW_ACTION_TYPE_QUEUE:
3270 			dest_queue = (const struct rte_flow_action_queue *)
3271 					(actions[j].conf);
3272 			rxq = priv->rx_vq[dest_queue->index];
3273 			if (attr->group != rxq->tc_index) {
3274 				DPAA2_PMD_ERR(
3275 					"RXQ[%d] does not belong to the group %d",
3276 					dest_queue->index, attr->group);
3277 
3278 				return -1;
3279 			}
3280 			break;
3281 		case RTE_FLOW_ACTION_TYPE_RSS:
3282 			rss_conf = (const struct rte_flow_action_rss *)
3283 					(actions[j].conf);
3284 			if (rss_conf->queue_num > priv->dist_queues) {
3285 				DPAA2_PMD_ERR(
3286 					"RSS number exceeds the distribution size");
3287 				return -ENOTSUP;
3288 			}
3289 			for (i = 0; i < (int)rss_conf->queue_num; i++) {
3290 				if (rss_conf->queue[i] >= priv->nb_rx_queues) {
3291 					DPAA2_PMD_ERR(
3292 						"RSS queue index exceeds the number of RXQs");
3293 					return -ENOTSUP;
3294 				}
3295 				rxq = priv->rx_vq[rss_conf->queue[i]];
3296 				if (rxq->tc_index != attr->group) {
3297 					DPAA2_PMD_ERR(
3298 						"Queue/Group combination is not supported\n");
3299 					return -ENOTSUP;
3300 				}
3301 			}
3302 
3303 			break;
3304 		case RTE_FLOW_ACTION_TYPE_END:
3305 			end_of_list = 1;
3306 			break;
3307 		default:
3308 			DPAA2_PMD_ERR("Invalid action type");
3309 			return -ENOTSUP;
3310 		}
3311 		j++;
3312 	}
3313 
3314 	return 0;
3315 }
3316 
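/* Translate a generic rte_flow into DPNI QoS and FS table entries:
 * each supported pattern item extends the key extracts, the QUEUE or
 * RSS action then programs the QoS/FS (or hash distribution) tables,
 * and existing entries are realigned if the key layout changed.
 *
 * Illustrative application-side usage that ends up here (sketch only,
 * not part of the driver; item specs/masks omitted for brevity):
 *
 *	struct rte_flow_attr attr = { .group = 0, .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */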
3317 static int
3318 dpaa2_generic_flow_set(struct rte_flow *flow,
3319 		       struct rte_eth_dev *dev,
3320 		       const struct rte_flow_attr *attr,
3321 		       const struct rte_flow_item pattern[],
3322 		       const struct rte_flow_action actions[],
3323 		       struct rte_flow_error *error)
3324 {
3325 	const struct rte_flow_action_queue *dest_queue;
3326 	const struct rte_flow_action_rss *rss_conf;
3327 	int is_keycfg_configured = 0, end_of_list = 0;
3328 	int ret = 0, i = 0, j = 0;
3329 	struct dpni_rx_dist_cfg tc_cfg;
3330 	struct dpni_qos_tbl_cfg qos_cfg;
3331 	struct dpni_fs_action_cfg action;
3332 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3333 	struct dpaa2_queue *rxq;
3334 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3335 	size_t param;
3336 	struct rte_flow *curr = LIST_FIRST(&priv->flows);
3337 	uint16_t qos_index;
3338 
3339 	ret = dpaa2_flow_verify_attr(priv, attr);
3340 	if (ret)
3341 		return ret;
3342 
3343 	ret = dpaa2_flow_verify_action(priv, attr, actions);
3344 	if (ret)
3345 		return ret;
3346 
3347 	/* Parse pattern list to get the matching parameters */
3348 	while (!end_of_list) {
3349 		switch (pattern[i].type) {
3350 		case RTE_FLOW_ITEM_TYPE_ETH:
3351 			ret = dpaa2_configure_flow_eth(flow,
3352 					dev, attr, &pattern[i], actions, error,
3353 					&is_keycfg_configured);
3354 			if (ret) {
3355 				DPAA2_PMD_ERR("ETH flow configuration failed!");
3356 				return ret;
3357 			}
3358 			break;
3359 		case RTE_FLOW_ITEM_TYPE_VLAN:
3360 			ret = dpaa2_configure_flow_vlan(flow,
3361 					dev, attr, &pattern[i], actions, error,
3362 					&is_keycfg_configured);
3363 			if (ret) {
3364 				DPAA2_PMD_ERR("VLAN flow configuration failed!");
3365 				return ret;
3366 			}
3367 			break;
3368 		case RTE_FLOW_ITEM_TYPE_IPV4:
3369 		case RTE_FLOW_ITEM_TYPE_IPV6:
3370 			ret = dpaa2_configure_flow_generic_ip(flow,
3371 					dev, attr, &pattern[i], actions, error,
3372 					&is_keycfg_configured);
3373 			if (ret) {
3374 				DPAA2_PMD_ERR("IP flow configuration failed!");
3375 				return ret;
3376 			}
3377 			break;
3378 		case RTE_FLOW_ITEM_TYPE_ICMP:
3379 			ret = dpaa2_configure_flow_icmp(flow,
3380 					dev, attr, &pattern[i], actions, error,
3381 					&is_keycfg_configured);
3382 			if (ret) {
3383 				DPAA2_PMD_ERR("ICMP flow configuration failed!");
3384 				return ret;
3385 			}
3386 			break;
3387 		case RTE_FLOW_ITEM_TYPE_UDP:
3388 			ret = dpaa2_configure_flow_udp(flow,
3389 					dev, attr, &pattern[i], actions, error,
3390 					&is_keycfg_configured);
3391 			if (ret) {
3392 				DPAA2_PMD_ERR("UDP flow configuration failed!");
3393 				return ret;
3394 			}
3395 			break;
3396 		case RTE_FLOW_ITEM_TYPE_TCP:
3397 			ret = dpaa2_configure_flow_tcp(flow,
3398 					dev, attr, &pattern[i], actions, error,
3399 					&is_keycfg_configured);
3400 			if (ret) {
3401 				DPAA2_PMD_ERR("TCP flow configuration failed!");
3402 				return ret;
3403 			}
3404 			break;
3405 		case RTE_FLOW_ITEM_TYPE_SCTP:
3406 			ret = dpaa2_configure_flow_sctp(flow,
3407 					dev, attr, &pattern[i], actions, error,
3408 					&is_keycfg_configured);
3409 			if (ret) {
3410 				DPAA2_PMD_ERR("SCTP flow configuration failed!");
3411 				return ret;
3412 			}
3413 			break;
3414 		case RTE_FLOW_ITEM_TYPE_GRE:
3415 			ret = dpaa2_configure_flow_gre(flow,
3416 					dev, attr, &pattern[i], actions, error,
3417 					&is_keycfg_configured);
3418 			if (ret) {
3419 				DPAA2_PMD_ERR("GRE flow configuration failed!");
3420 				return ret;
3421 			}
3422 			break;
3423 		case RTE_FLOW_ITEM_TYPE_RAW:
3424 			ret = dpaa2_configure_flow_raw(flow,
3425 						       dev, attr, &pattern[i],
3426 						       actions, error,
3427 						       &is_keycfg_configured);
3428 			if (ret) {
3429 				DPAA2_PMD_ERR("RAW flow configuration failed!");
3430 				return ret;
3431 			}
3432 			break;
3433 		case RTE_FLOW_ITEM_TYPE_END:
3434 			end_of_list = 1;
3435 			break; /*End of List*/
3436 		default:
3437 			DPAA2_PMD_ERR("Invalid pattern item type");
3438 			ret = -ENOTSUP;
3439 			break;
3440 		}
3441 		i++;
3442 	}
3443 
3444 	/* Let's parse action on matching traffic */
3445 	end_of_list = 0;
3446 	while (!end_of_list) {
3447 		switch (actions[j].type) {
3448 		case RTE_FLOW_ACTION_TYPE_QUEUE:
3449 			dest_queue =
3450 				(const struct rte_flow_action_queue *)(actions[j].conf);
3451 			rxq = priv->rx_vq[dest_queue->index];
3452 			flow->action = RTE_FLOW_ACTION_TYPE_QUEUE;
3453 			memset(&action, 0, sizeof(struct dpni_fs_action_cfg));
3454 			action.flow_id = rxq->flow_id;
3455 
3456 			/* Configure the FS table first. */
3457 			if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
3458 				dpaa2_flow_fs_table_extracts_log(priv, flow->tc_id);
3459 				if (dpkg_prepare_key_cfg(
3460 				&priv->extract.tc_key_extract[flow->tc_id].dpkg,
3461 				(uint8_t *)(size_t)priv->extract
3462 				.tc_extract_param[flow->tc_id]) < 0) {
3463 					DPAA2_PMD_ERR(
3464 					"Unable to prepare extract parameters");
3465 					return -1;
3466 				}
3467 
3468 				memset(&tc_cfg, 0,
3469 					sizeof(struct dpni_rx_dist_cfg));
3470 				tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc;
3471 				tc_cfg.key_cfg_iova =
3472 					(uint64_t)priv->extract.tc_extract_param[flow->tc_id];
3473 				tc_cfg.tc = flow->tc_id;
3474 				tc_cfg.enable = false;
3475 				ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW,
3476 						priv->token, &tc_cfg);
3477 				if (ret < 0) {
3478 					DPAA2_PMD_ERR(
3479 						"TC hash cannot be disabled.(%d)",
3480 						ret);
3481 					return -1;
3482 				}
3483 				tc_cfg.enable = true;
3484 				tc_cfg.fs_miss_flow_id =
3485 					dpaa2_flow_miss_flow_id;
3486 				ret = dpni_set_rx_fs_dist(dpni, CMD_PRI_LOW,
3487 							 priv->token, &tc_cfg);
3488 				if (ret < 0) {
3489 					DPAA2_PMD_ERR(
3490 						"TC distribution cannot be configured.(%d)",
3491 						ret);
3492 					return -1;
3493 				}
3494 			}
3495 
3496 			/* Then configure the QoS table. */
3497 			if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
3498 				dpaa2_flow_qos_table_extracts_log(priv);
3499 				if (dpkg_prepare_key_cfg(
3500 					&priv->extract.qos_key_extract.dpkg,
3501 					(uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
3502 					DPAA2_PMD_ERR(
3503 						"Unable to prepare extract parameters");
3504 					return -1;
3505 				}
3506 
3507 				memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
3508 				qos_cfg.discard_on_miss = false;
3509 				qos_cfg.default_tc = 0;
3510 				qos_cfg.keep_entries = true;
3511 				qos_cfg.key_cfg_iova =
3512 					(size_t)priv->extract.qos_extract_param;
3513 				/* QoS table is effective only when there are multiple TCs. */
3514 				if (priv->num_rx_tc > 1) {
3515 					ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
3516 						priv->token, &qos_cfg);
3517 					if (ret < 0) {
3518 						DPAA2_PMD_ERR(
3519 						"RSS QoS table can not be configured(%d)\n",
3520 						"RSS QoS table cannot be configured (%d)\n",
3521 						return -1;
3522 					}
3523 				}
3524 			}
3525 
3526 			flow->qos_real_key_size = priv->extract
3527 				.qos_key_extract.key_info.key_total_size;
3528 			if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR) {
3529 				if (flow->ipaddr_rule.qos_ipdst_offset >=
3530 					flow->ipaddr_rule.qos_ipsrc_offset) {
3531 					flow->qos_real_key_size =
3532 						flow->ipaddr_rule.qos_ipdst_offset +
3533 						NH_FLD_IPV4_ADDR_SIZE;
3534 				} else {
3535 					flow->qos_real_key_size =
3536 						flow->ipaddr_rule.qos_ipsrc_offset +
3537 						NH_FLD_IPV4_ADDR_SIZE;
3538 				}
3539 			} else if (flow->ipaddr_rule.ipaddr_type ==
3540 				FLOW_IPV6_ADDR) {
3541 				if (flow->ipaddr_rule.qos_ipdst_offset >=
3542 					flow->ipaddr_rule.qos_ipsrc_offset) {
3543 					flow->qos_real_key_size =
3544 						flow->ipaddr_rule.qos_ipdst_offset +
3545 						NH_FLD_IPV6_ADDR_SIZE;
3546 				} else {
3547 					flow->qos_real_key_size =
3548 						flow->ipaddr_rule.qos_ipsrc_offset +
3549 						NH_FLD_IPV6_ADDR_SIZE;
3550 				}
3551 			}
3552 
3553 			/* Adding a QoS entry is only effective for multiple TCs. */
3554 			if (priv->num_rx_tc > 1) {
3555 				qos_index = flow->tc_id * priv->fs_entries +
3556 					flow->tc_index;
3557 				if (qos_index >= priv->qos_entries) {
3558 					DPAA2_PMD_ERR("QoS table with %d entries full",
3559 						priv->qos_entries);
3560 					return -1;
3561 				}
3562 				flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
3563 
3564 				dpaa2_flow_qos_entry_log("Start add", flow, qos_index);
3565 
3566 				ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
3567 						priv->token, &flow->qos_rule,
3568 						flow->tc_id, qos_index,
3569 						0, 0);
3570 				if (ret < 0) {
3571 					DPAA2_PMD_ERR(
3572 						"Error in adding entry to QoS table(%d)", ret);
3573 					return ret;
3574 				}
3575 			}
3576 
3577 			if (flow->tc_index >= priv->fs_entries) {
3578 				DPAA2_PMD_ERR("FS table with %d entries full",
3579 					priv->fs_entries);
3580 				return -1;
3581 			}
3582 
3583 			flow->fs_real_key_size =
3584 				priv->extract.tc_key_extract[flow->tc_id]
3585 				.key_info.key_total_size;
3586 
3587 			if (flow->ipaddr_rule.ipaddr_type ==
3588 				FLOW_IPV4_ADDR) {
3589 				if (flow->ipaddr_rule.fs_ipdst_offset >=
3590 					flow->ipaddr_rule.fs_ipsrc_offset) {
3591 					flow->fs_real_key_size =
3592 						flow->ipaddr_rule.fs_ipdst_offset +
3593 						NH_FLD_IPV4_ADDR_SIZE;
3594 				} else {
3595 					flow->fs_real_key_size =
3596 						flow->ipaddr_rule.fs_ipsrc_offset +
3597 						NH_FLD_IPV4_ADDR_SIZE;
3598 				}
3599 			} else if (flow->ipaddr_rule.ipaddr_type ==
3600 				FLOW_IPV6_ADDR) {
3601 				if (flow->ipaddr_rule.fs_ipdst_offset >=
3602 					flow->ipaddr_rule.fs_ipsrc_offset) {
3603 					flow->fs_real_key_size =
3604 						flow->ipaddr_rule.fs_ipdst_offset +
3605 						NH_FLD_IPV6_ADDR_SIZE;
3606 				} else {
3607 					flow->fs_real_key_size =
3608 						flow->ipaddr_rule.fs_ipsrc_offset +
3609 						NH_FLD_IPV6_ADDR_SIZE;
3610 				}
3611 			}
3612 
3613 			flow->fs_rule.key_size = FIXED_ENTRY_SIZE;
3614 
3615 			dpaa2_flow_fs_entry_log("Start add", flow);
3616 
3617 			ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token,
3618 						flow->tc_id, flow->tc_index,
3619 						&flow->fs_rule, &action);
3620 			if (ret < 0) {
3621 				DPAA2_PMD_ERR(
3622 				"Error in adding entry to FS table(%d)", ret);
3623 				return ret;
3624 			}
3625 			memcpy(&flow->action_cfg, &action,
3626 				sizeof(struct dpni_fs_action_cfg));
3627 			break;
3628 		case RTE_FLOW_ACTION_TYPE_RSS:
3629 			rss_conf = (const struct rte_flow_action_rss *)(actions[j].conf);
3630 
3631 			flow->action = RTE_FLOW_ACTION_TYPE_RSS;
3632 			ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types,
3633 					&priv->extract.tc_key_extract[flow->tc_id].dpkg);
3634 			if (ret < 0) {
3635 				DPAA2_PMD_ERR(
3636 				"Unable to set flow distribution. Please check queue config\n");
3637 				return ret;
3638 			}
3639 
3640 			/* Allocate DMA'ble memory to write the rules */
3641 			param = (size_t)rte_malloc(NULL, 256, 64);
3642 			if (!param) {
3643 				DPAA2_PMD_ERR("Memory allocation failure\n");
3644 				return -1;
3645 			}
3646 
3647 			if (dpkg_prepare_key_cfg(
3648 				&priv->extract.tc_key_extract[flow->tc_id].dpkg,
3649 				(uint8_t *)param) < 0) {
3650 				DPAA2_PMD_ERR(
3651 				"Unable to prepare extract parameters");
3652 				rte_free((void *)param);
3653 				return -1;
3654 			}
3655 
3656 			memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));
3657 			tc_cfg.dist_size = rss_conf->queue_num;
3658 			tc_cfg.key_cfg_iova = (size_t)param;
3659 			tc_cfg.enable = true;
3660 			tc_cfg.tc = flow->tc_id;
3661 			ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW,
3662 						 priv->token, &tc_cfg);
3663 			if (ret < 0) {
3664 				DPAA2_PMD_ERR(
3665 					"RSS TC table cannot be configured: %d\n",
3666 					ret);
3667 				rte_free((void *)param);
3668 				return -1;
3669 			}
3670 
3671 			rte_free((void *)param);
3672 			if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
3673 				if (dpkg_prepare_key_cfg(
3674 					&priv->extract.qos_key_extract.dpkg,
3675 					(uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
3676 					DPAA2_PMD_ERR(
3677 					"Unable to prepare extract parameters");
3678 					return -1;
3679 				}
3680 				memset(&qos_cfg, 0,
3681 					sizeof(struct dpni_qos_tbl_cfg));
3682 				qos_cfg.discard_on_miss = true;
3683 				qos_cfg.keep_entries = true;
3684 				qos_cfg.key_cfg_iova =
3685 					(size_t)priv->extract.qos_extract_param;
3686 				ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
3687 							 priv->token, &qos_cfg);
3688 				if (ret < 0) {
3689 					DPAA2_PMD_ERR(
3690 					"RSS QoS dist can't be configured-%d\n",
3691 					"RSS QoS dist can't be configured: %d\n",
3692 					return -1;
3693 				}
3694 			}
3695 
3696 			/* Add Rule into QoS table */
3697 			qos_index = flow->tc_id * priv->fs_entries +
3698 				flow->tc_index;
3699 			if (qos_index >= priv->qos_entries) {
3700 				DPAA2_PMD_ERR("QoS table with %d entries full",
3701 					priv->qos_entries);
3702 				return -1;
3703 			}
3704 
3705 			flow->qos_real_key_size =
3706 			  priv->extract.qos_key_extract.key_info.key_total_size;
3707 			flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
3708 			ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token,
3709 						&flow->qos_rule, flow->tc_id,
3710 						qos_index, 0, 0);
3711 			if (ret < 0) {
3712 				DPAA2_PMD_ERR(
3713 				"Error in adding entry to QoS table(%d)",
3714 				ret);
3715 				return ret;
3716 			}
3717 			break;
3718 		case RTE_FLOW_ACTION_TYPE_END:
3719 			end_of_list = 1;
3720 			break;
3721 		default:
3722 			DPAA2_PMD_ERR("Invalid action type");
3723 			ret = -ENOTSUP;
3724 			break;
3725 		}
3726 		j++;
3727 	}
3728 
3729 	if (!ret) {
3730 		if (is_keycfg_configured &
3731 			(DPAA2_QOS_TABLE_RECONFIGURE |
3732 			DPAA2_FS_TABLE_RECONFIGURE)) {
3733 			ret = dpaa2_flow_entry_update(priv, flow->tc_id);
3734 			if (ret) {
3735 				DPAA2_PMD_ERR("Flow entry update failed.");
3736 
3737 				return -1;
3738 			}
3739 		}
3740 		/* New rules are inserted. */
3741 		if (!curr) {
3742 			LIST_INSERT_HEAD(&priv->flows, flow, next);
3743 		} else {
3744 			while (LIST_NEXT(curr, next))
3745 				curr = LIST_NEXT(curr, next);
3746 			LIST_INSERT_AFTER(curr, flow, next);
3747 		}
3748 	}
3749 	return ret;
3750 }
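
/*
 * Illustrative sketch (not driver code): an rte_flow rule created through
 * the generic API that exercises the RSS action handling above.  port_id,
 * the IPv4 address and the queue indices are assumptions for the example.
 *
 *	struct rte_flow_item_ipv4 ip4_spec = {
 *		.hdr.dst_addr = RTE_BE32(0x0a0a0a01),
 *	};
 *	struct rte_flow_item_ipv4 ip4_mask = {
 *		.hdr.dst_addr = RTE_BE32(0xffffffff),
 *	};
 *	uint16_t queues[2] = {0, 1};
 *	struct rte_flow_action_rss rss = {
 *		.types = ETH_RSS_IP,
 *		.queue_num = RTE_DIM(queues),
 *		.queue = queues,
 *	};
 *	struct rte_flow_attr attr = { .group = 0, .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip4_spec, .mask = &ip4_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &err);
 *
 * rss.types feeds dpaa2_distset_to_dpkg_profile_cfg() and rss.queue_num
 * becomes tc_cfg.dist_size in the code above.
 */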
3751 
3752 static inline int
3753 dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr,
3754 		      const struct rte_flow_attr *attr)
3755 {
3756 	int ret = 0;
3757 
3758 	if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) {
3759 		DPAA2_PMD_ERR("Priority group is out of range\n");
3760 		ret = -ENOTSUP;
3761 	}
3762 	if (unlikely(attr->priority >= dpni_attr->fs_entries)) {
3763 		DPAA2_PMD_ERR("Priority within the group is out of range\n");
3764 		ret = -ENOTSUP;
3765 	}
3766 	if (unlikely(attr->egress)) {
3767 		DPAA2_PMD_ERR(
3768 			"Flow configuration is not supported on egress side\n");
3769 		ret = -ENOTSUP;
3770 	}
3771 	if (unlikely(!attr->ingress)) {
3772 		DPAA2_PMD_ERR("Ingress flag must be configured\n");
3773 		ret = -EINVAL;
3774 	}
3775 	return ret;
3776 }
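
/*
 * Illustrative sketch of attributes that pass the checks above: ingress
 * only, group below dpni_attr.num_rx_tcs and priority below
 * dpni_attr.fs_entries.
 *
 *	struct rte_flow_attr attr = {
 *		.group = 0,
 *		.priority = 0,
 *		.ingress = 1,
 *		.egress = 0,
 *	};
 */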
3777 
3778 static inline int
3779 dpaa2_dev_verify_patterns(const struct rte_flow_item pattern[])
3780 {
3781 	unsigned int i, j, is_found = 0;
3782 	int ret = 0;
3783 
3784 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
		/* Reset the flag so that every item is checked individually */
		is_found = 0;
3785 		for (i = 0; i < RTE_DIM(dpaa2_supported_pattern_type); i++) {
3786 			if (dpaa2_supported_pattern_type[i]
3787 					== pattern[j].type) {
3788 				is_found = 1;
3789 				break;
3790 			}
3791 		}
3792 		if (!is_found) {
3793 			ret = -ENOTSUP;
3794 			break;
3795 		}
3796 	}
3797 	/* Every pattern item before the END marker must carry a spec */
3798 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3799 		if (!pattern[j].spec) {
3800 			ret = -EINVAL;
3801 			break;
3802 		}
3803 	}
3804 
3805 	return ret;
3806 }
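
/*
 * Usage note (sketch): every pattern item before the END marker must carry
 * a spec, otherwise the rule is rejected with -EINVAL.  For example a UDP
 * item (the port value is chosen arbitrarily):
 *
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.dst_port = RTE_BE16(4789),
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr.dst_port = RTE_BE16(0xffff),
 *	};
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_UDP,
 *		.spec = &udp_spec,
 *		.mask = &udp_mask,
 *	};
 */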
3807 
3808 static inline int
3809 dpaa2_dev_verify_actions(const struct rte_flow_action actions[])
3810 {
3811 	unsigned int i, j, is_found = 0;
3812 	int ret = 0;
3813 
3814 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
		/* Reset the flag so that every action is checked individually */
		is_found = 0;
3815 		for (i = 0; i < RTE_DIM(dpaa2_supported_action_type); i++) {
3816 			if (dpaa2_supported_action_type[i] == actions[j].type) {
3817 				is_found = 1;
3818 				break;
3819 			}
3820 		}
3821 		if (!is_found) {
3822 			ret = -ENOTSUP;
3823 			break;
3824 		}
3825 	}
3826 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3827 		if (actions[j].type != RTE_FLOW_ACTION_TYPE_DROP &&
3828 				!actions[j].conf)
3829 			ret = -EINVAL;
3830 	}
3831 	return ret;
3832 }
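
/*
 * Usage note (sketch): the supported QUEUE and RSS actions must carry a
 * configuration object, otherwise the rule is rejected with -EINVAL.  The
 * queue index below is an assumption.
 *
 *	struct rte_flow_action_queue q = { .index = 3 };
 *	struct rte_flow_action act = {
 *		.type = RTE_FLOW_ACTION_TYPE_QUEUE,
 *		.conf = &q,
 *	};
 */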
3833 
3834 static
3835 int dpaa2_flow_validate(struct rte_eth_dev *dev,
3836 			const struct rte_flow_attr *flow_attr,
3837 			const struct rte_flow_item pattern[],
3838 			const struct rte_flow_action actions[],
3839 			struct rte_flow_error *error)
3840 {
3841 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3842 	struct dpni_attr dpni_attr;
3843 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3844 	uint16_t token = priv->token;
3845 	int ret = 0;
3846 
3847 	memset(&dpni_attr, 0, sizeof(struct dpni_attr));
3848 	ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr);
3849 	if (ret < 0) {
3850 		DPAA2_PMD_ERR(
3851 			"Failure to get dpni@%p attributes, err code %d\n",
3852 			dpni, ret);
3853 		rte_flow_error_set(error, EPERM,
3854 			   RTE_FLOW_ERROR_TYPE_ATTR,
3855 			   flow_attr, "invalid");
3856 		return ret;
3857 	}
3858 
3859 	/* Verify input attributes */
3860 	ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr);
3861 	if (ret < 0) {
3862 		DPAA2_PMD_ERR(
3863 			"Invalid attributes are given\n");
3864 		rte_flow_error_set(error, EPERM,
3865 			   RTE_FLOW_ERROR_TYPE_ATTR,
3866 			   flow_attr, "invalid");
3867 		goto not_valid_params;
3868 	}
3869 	/* Verify input pattern list */
3870 	ret = dpaa2_dev_verify_patterns(pattern);
3871 	if (ret < 0) {
3872 		DPAA2_PMD_ERR(
3873 			"Invalid pattern list is given\n");
3874 		rte_flow_error_set(error, EPERM,
3875 			   RTE_FLOW_ERROR_TYPE_ITEM,
3876 			   pattern, "invalid");
3877 		goto not_valid_params;
3878 	}
3879 	/* Verify input action list */
3880 	ret = dpaa2_dev_verify_actions(actions);
3881 	if (ret < 0) {
3882 		DPAA2_PMD_ERR(
3883 			"Invalid action list is given\n");
3884 		rte_flow_error_set(error, EPERM,
3885 			   RTE_FLOW_ERROR_TYPE_ACTION,
3886 			   actions, "invalid");
3887 		goto not_valid_params;
3888 	}
3889 not_valid_params:
3890 	return ret;
3891 }
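
/*
 * Illustrative sketch: validation is reached through the generic rte_flow
 * API.  port_id, attr, pattern and actions are assumptions taken from the
 * earlier examples.
 *
 *	struct rte_flow_error err;
 *	int rc = rte_flow_validate(port_id, &attr, pattern, actions, &err);
 *	if (rc)
 *		printf("rule rejected: %s\n",
 *		       err.message ? err.message : "unknown");
 */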
3892 
3893 static
3894 struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev,
3895 				   const struct rte_flow_attr *attr,
3896 				   const struct rte_flow_item pattern[],
3897 				   const struct rte_flow_action actions[],
3898 				   struct rte_flow_error *error)
3899 {
3900 	struct rte_flow *flow = NULL;
3901 	size_t key_iova = 0, mask_iova = 0;
3902 	int ret;
3903 
3904 	dpaa2_flow_control_log =
3905 		getenv("DPAA2_FLOW_CONTROL_LOG");
3906 
3907 	if (getenv("DPAA2_FLOW_CONTROL_MISS_FLOW")) {
3908 		struct dpaa2_dev_priv *priv = dev->data->dev_private;
3909 
3910 		dpaa2_flow_miss_flow_id =
3911 			atoi(getenv("DPAA2_FLOW_CONTROL_MISS_FLOW"));
3912 		if (dpaa2_flow_miss_flow_id >= priv->dist_queues) {
3913 			DPAA2_PMD_ERR(
3914 				"Miss flow ID %d exceeds the max flow ID %d",
3915 				dpaa2_flow_miss_flow_id,
3916 				priv->dist_queues - 1);
3917 			return NULL;
3918 		}
3919 	}
3920 
3921 	flow = rte_zmalloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE);
3922 	if (!flow) {
3923 		DPAA2_PMD_ERR("Failure to allocate memory for flow");
3924 		goto mem_failure;
3925 	}
3926 	/* Allocate DMA'ble memory to write the rules */
3927 	key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3928 	if (!key_iova) {
3929 		DPAA2_PMD_ERR(
3930 			"Memory allocation failure for rule configuration\n");
3931 		goto mem_failure;
3932 	}
3933 	mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3934 	if (!mask_iova) {
3935 		DPAA2_PMD_ERR(
3936 			"Memory allocation failure for rule configuration\n");
3937 		goto mem_failure;
3938 	}
3939 
3940 	flow->qos_rule.key_iova = key_iova;
3941 	flow->qos_rule.mask_iova = mask_iova;
3942 
3943 	/* Allocate DMA'ble memory to write the rules */
3944 	key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3945 	if (!key_iova) {
3946 		DPAA2_PMD_ERR(
3947 			"Memory allocation failure for rule configuration\n");
3948 		goto mem_failure;
3949 	}
3950 	mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3951 	if (!mask_iova) {
3952 		DPAA2_PMD_ERR(
3953 			"Memory allocation failure for rule configuration\n");
3954 		goto mem_failure;
3955 	}
3956 
3957 	flow->fs_rule.key_iova = key_iova;
3958 	flow->fs_rule.mask_iova = mask_iova;
3959 
3960 	flow->ipaddr_rule.ipaddr_type = FLOW_NONE_IPADDR;
3961 	flow->ipaddr_rule.qos_ipsrc_offset =
3962 		IP_ADDRESS_OFFSET_INVALID;
3963 	flow->ipaddr_rule.qos_ipdst_offset =
3964 		IP_ADDRESS_OFFSET_INVALID;
3965 	flow->ipaddr_rule.fs_ipsrc_offset =
3966 		IP_ADDRESS_OFFSET_INVALID;
3967 	flow->ipaddr_rule.fs_ipdst_offset =
3968 		IP_ADDRESS_OFFSET_INVALID;
3969 
3970 	ret = dpaa2_generic_flow_set(flow, dev, attr, pattern,
3971 			actions, error);
3972 	if (ret < 0) {
3973 		if (error->type > RTE_FLOW_ERROR_TYPE_ACTION)
3974 			rte_flow_error_set(error, EPERM,
3975 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3976 					attr, "unknown");
3977 		DPAA2_PMD_ERR("Failure to create flow, return code (%d)", ret);
3978 		goto creation_error;
3979 	}
3980 
3981 	return flow;
3982 mem_failure:
3983 	rte_flow_error_set(error, EPERM,
3984 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3985 			   NULL, "memory alloc");
3986 creation_error:
	/* Release QoS rule buffers that key_iova/mask_iova no longer point to,
	 * so a partially built flow does not leak them.
	 */
	if (flow) {
		if (flow->qos_rule.key_iova != (uint64_t)key_iova)
			rte_free((void *)(size_t)flow->qos_rule.key_iova);
		if (flow->qos_rule.mask_iova != (uint64_t)mask_iova)
			rte_free((void *)(size_t)flow->qos_rule.mask_iova);
	}
3987 	rte_free((void *)flow);
3988 	rte_free((void *)key_iova);
3989 	rte_free((void *)mask_iova);
3990 
3991 	return NULL;
3992 }
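
/*
 * Usage note (sketch): the two environment variables read above are the
 * runtime knobs of this path.  DPAA2_FLOW_CONTROL_LOG enables additional
 * flow-control logging and DPAA2_FLOW_CONTROL_MISS_FLOW selects the flow ID
 * used for packets that match no FS entry (it must stay below the number of
 * distribution queues; the default is DPNI_FS_MISS_DROP).  They can be set
 * by the application before the first rule is created, for example:
 *
 *	setenv("DPAA2_FLOW_CONTROL_LOG", "1", 1);
 *	setenv("DPAA2_FLOW_CONTROL_MISS_FLOW", "0", 1);
 */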
3993 
3994 static
3995 int dpaa2_flow_destroy(struct rte_eth_dev *dev,
3996 		       struct rte_flow *flow,
3997 		       struct rte_flow_error *error)
3998 {
3999 	int ret = 0;
4000 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4001 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
4002 
4003 	switch (flow->action) {
4004 	case RTE_FLOW_ACTION_TYPE_QUEUE:
4005 		if (priv->num_rx_tc > 1) {
4006 			/* Remove entry from QoS table first */
4007 			ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
4008 					&flow->qos_rule);
4009 			if (ret < 0) {
4010 				DPAA2_PMD_ERR(
4011 					"Error in removing entry from QoS table (%d)", ret);
4012 				goto error;
4013 			}
4014 		}
4015 
4016 		/* Then remove entry from FS table */
4017 		ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token,
4018 					   flow->tc_id, &flow->fs_rule);
4019 		if (ret < 0) {
4020 			DPAA2_PMD_ERR(
4021 				"Error in removing entry from FS table (%d)", ret);
4022 			goto error;
4023 		}
4024 		break;
4025 	case RTE_FLOW_ACTION_TYPE_RSS:
4026 		if (priv->num_rx_tc > 1) {
4027 			ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
4028 					&flow->qos_rule);
4029 			if (ret < 0) {
4030 				DPAA2_PMD_ERR(
4031 					"Error in removing entry from QoS table (%d)", ret);
4032 				goto error;
4033 			}
4034 		}
4035 		break;
4036 	default:
4037 		DPAA2_PMD_ERR(
4038 		"Action type (%d) is not supported", flow->action);
4039 		ret = -ENOTSUP;
4040 		break;
4041 	}
4042 
4043 	LIST_REMOVE(flow, next);
4044 	rte_free((void *)(size_t)flow->qos_rule.key_iova);
4045 	rte_free((void *)(size_t)flow->qos_rule.mask_iova);
4046 	rte_free((void *)(size_t)flow->fs_rule.key_iova);
4047 	rte_free((void *)(size_t)flow->fs_rule.mask_iova);
4048 	/* Now free the flow */
4049 	rte_free(flow);
4050 
4051 error:
4052 	if (ret)
4053 		rte_flow_error_set(error, EPERM,
4054 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4055 				   NULL, "unknown");
4056 	return ret;
4057 }
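
/*
 * Illustrative sketch: tearing down a single rule through the generic API.
 * port_id is an assumption and flow must be a handle previously returned by
 * rte_flow_create() on the same port.
 *
 *	struct rte_flow_error err;
 *	if (rte_flow_destroy(port_id, flow, &err))
 *		printf("destroy failed: %s\n",
 *		       err.message ? err.message : "unknown");
 */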
4058 
4059 /**
4060  * Destroy user-configured flow rules.
4061  *
4062  * This function skips internal flow rules.
4063  *
4064  * @see rte_flow_flush()
4065  * @see rte_flow_ops
4066  */
4067 static int
4068 dpaa2_flow_flush(struct rte_eth_dev *dev,
4069 		struct rte_flow_error *error)
4070 {
4071 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4072 	struct rte_flow *flow = LIST_FIRST(&priv->flows);
4073 
4074 	while (flow) {
4075 		struct rte_flow *next = LIST_NEXT(flow, next);
4076 
4077 		dpaa2_flow_destroy(dev, flow, error);
4078 		flow = next;
4079 	}
4080 	return 0;
4081 }
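
/*
 * Illustrative sketch: removing every user-configured rule on a port with a
 * single call; port_id is an assumption.
 *
 *	struct rte_flow_error err;
 *	int rc = rte_flow_flush(port_id, &err);
 */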
4082 
4083 static int
4084 dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused,
4085 		struct rte_flow *flow __rte_unused,
4086 		const struct rte_flow_action *actions __rte_unused,
4087 		void *data __rte_unused,
4088 		struct rte_flow_error *error __rte_unused)
4089 {
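	/* Flow query is not implemented for this PMD; the callback is a stub
	 * that reports success without returning any data.
	 */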
4090 	return 0;
4091 }
4092 
4093 /**
4094  * Clean up all flow rules.
4095  *
4096  * Unlike dpaa2_flow_flush(), this function takes care of all remaining flow
4097  * rules regardless of whether they are internal or user-configured.
4098  *
4099  * @param dev
4100  *   Pointer to the Ethernet device structure.
4101  */
4102 void
4103 dpaa2_flow_clean(struct rte_eth_dev *dev)
4104 {
4105 	struct rte_flow *flow;
4106 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4107 
4108 	while ((flow = LIST_FIRST(&priv->flows)))
4109 		dpaa2_flow_destroy(dev, flow, NULL);
4110 }
4111 
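/* rte_flow driver operations exposed by the DPAA2 PMD. */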
4112 const struct rte_flow_ops dpaa2_flow_ops = {
4113 	.create	= dpaa2_flow_create,
4114 	.validate = dpaa2_flow_validate,
4115 	.destroy = dpaa2_flow_destroy,
4116 	.flush	= dpaa2_flow_flush,
4117 	.query	= dpaa2_flow_query,
4118 };
4119