xref: /dpdk/drivers/net/dpaa2/dpaa2_flow.c (revision 10b71caecbe1cddcbb65c050ca775fba575e88db)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2020 NXP
3  */
4 
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 
13 #include <rte_ethdev.h>
14 #include <rte_log.h>
15 #include <rte_malloc.h>
16 #include <rte_flow_driver.h>
17 #include <rte_tailq.h>
18 
19 #include <fsl_dpni.h>
20 #include <fsl_dpkg.h>
21 
22 #include <dpaa2_ethdev.h>
23 #include <dpaa2_pmd_logs.h>
24 
25 /* Workaround to discriminate UDP/TCP/SCTP flows
26  * by the next-protocol field of L3, because
27  * MC/WRIOP are not able to identify
28  * the L4 protocol from the L4 ports alone.
29  */
30 int mc_l4_port_identification;
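
/*
 * Illustrative example (assumed behaviour, based on the checks in the
 * UDP/TCP/SCTP handlers below): when mc_l4_port_identification stays 0,
 * a pattern such as "ipv4 / udp / end" is discriminated by matching the
 * IP next-protocol field (IPPROTO_UDP) in addition to any masked UDP
 * ports, since the L4 protocol cannot be classified by ports alone.
 */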
31 
32 static char *dpaa2_flow_control_log;
33 static int dpaa2_flow_miss_flow_id =
34 	DPNI_FS_MISS_DROP;
35 
36 #define FIXED_ENTRY_SIZE 54
37 
38 enum flow_rule_ipaddr_type {
39 	FLOW_NONE_IPADDR,
40 	FLOW_IPV4_ADDR,
41 	FLOW_IPV6_ADDR
42 };
43 
44 struct flow_rule_ipaddr {
45 	enum flow_rule_ipaddr_type ipaddr_type;
46 	int qos_ipsrc_offset;
47 	int qos_ipdst_offset;
48 	int fs_ipsrc_offset;
49 	int fs_ipdst_offset;
50 };
51 
52 struct rte_flow {
53 	LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
54 	struct dpni_rule_cfg qos_rule;
55 	struct dpni_rule_cfg fs_rule;
56 	uint8_t qos_real_key_size;
57 	uint8_t fs_real_key_size;
58 	uint8_t tc_id; /**< Traffic Class ID. */
59 	uint8_t tc_index; /**< Index within this Traffic Class. */
60 	enum rte_flow_action_type action;
61 	/* Special for IP address to specify the offset
62 	 * in key/mask.
63 	 */
64 	struct flow_rule_ipaddr ipaddr_rule;
65 	struct dpni_fs_action_cfg action_cfg;
66 };
67 
68 static const
69 enum rte_flow_item_type dpaa2_supported_pattern_type[] = {
70 	RTE_FLOW_ITEM_TYPE_END,
71 	RTE_FLOW_ITEM_TYPE_ETH,
72 	RTE_FLOW_ITEM_TYPE_VLAN,
73 	RTE_FLOW_ITEM_TYPE_IPV4,
74 	RTE_FLOW_ITEM_TYPE_IPV6,
75 	RTE_FLOW_ITEM_TYPE_ICMP,
76 	RTE_FLOW_ITEM_TYPE_UDP,
77 	RTE_FLOW_ITEM_TYPE_TCP,
78 	RTE_FLOW_ITEM_TYPE_SCTP,
79 	RTE_FLOW_ITEM_TYPE_GRE,
80 };
81 
82 static const
83 enum rte_flow_action_type dpaa2_supported_action_type[] = {
84 	RTE_FLOW_ACTION_TYPE_END,
85 	RTE_FLOW_ACTION_TYPE_QUEUE,
86 	RTE_FLOW_ACTION_TYPE_RSS
87 };
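
/*
 * Illustrative testpmd commands (assumed syntax, for reference only)
 * matching the item/action lists above:
 *   flow create 0 ingress pattern eth / ipv4 / udp / end actions queue index 1 / end
 *   flow create 0 ingress pattern ipv4 / end actions rss types ipv4 end / end
 */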
88 
89 /* Max of enum rte_flow_item_type + 1, used for both IPv4 and IPv6 */
90 #define DPAA2_FLOW_ITEM_TYPE_GENERIC_IP (RTE_FLOW_ITEM_TYPE_META + 1)
91 
92 enum rte_filter_type dpaa2_filter_type = RTE_ETH_FILTER_NONE;
93 
94 #ifndef __cplusplus
95 static const struct rte_flow_item_eth dpaa2_flow_item_eth_mask = {
96 	.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
97 	.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
98 	.type = RTE_BE16(0xffff),
99 };
100 
101 static const struct rte_flow_item_vlan dpaa2_flow_item_vlan_mask = {
102 	.tci = RTE_BE16(0xffff),
103 };
104 
105 static const struct rte_flow_item_ipv4 dpaa2_flow_item_ipv4_mask = {
106 	.hdr.src_addr = RTE_BE32(0xffffffff),
107 	.hdr.dst_addr = RTE_BE32(0xffffffff),
108 	.hdr.next_proto_id = 0xff,
109 };
110 
111 static const struct rte_flow_item_ipv6 dpaa2_flow_item_ipv6_mask = {
112 	.hdr = {
113 		.src_addr =
114 			"\xff\xff\xff\xff\xff\xff\xff\xff"
115 			"\xff\xff\xff\xff\xff\xff\xff\xff",
116 		.dst_addr =
117 			"\xff\xff\xff\xff\xff\xff\xff\xff"
118 			"\xff\xff\xff\xff\xff\xff\xff\xff",
119 		.proto = 0xff
120 	},
121 };
122 
123 static const struct rte_flow_item_icmp dpaa2_flow_item_icmp_mask = {
124 	.hdr.icmp_type = 0xff,
125 	.hdr.icmp_code = 0xff,
126 };
127 
128 static const struct rte_flow_item_udp dpaa2_flow_item_udp_mask = {
129 	.hdr = {
130 		.src_port = RTE_BE16(0xffff),
131 		.dst_port = RTE_BE16(0xffff),
132 	},
133 };
134 
135 static const struct rte_flow_item_tcp dpaa2_flow_item_tcp_mask = {
136 	.hdr = {
137 		.src_port = RTE_BE16(0xffff),
138 		.dst_port = RTE_BE16(0xffff),
139 	},
140 };
141 
142 static const struct rte_flow_item_sctp dpaa2_flow_item_sctp_mask = {
143 	.hdr = {
144 		.src_port = RTE_BE16(0xffff),
145 		.dst_port = RTE_BE16(0xffff),
146 	},
147 };
148 
149 static const struct rte_flow_item_gre dpaa2_flow_item_gre_mask = {
150 	.protocol = RTE_BE16(0xffff),
151 };
152 
153 #endif
154 
155 static inline void dpaa2_prot_field_string(
156 	enum net_prot prot, uint32_t field,
157 	char *string)
158 {
159 	if (!dpaa2_flow_control_log)
160 		return;
161 
162 	if (prot == NET_PROT_ETH) {
163 		strcpy(string, "eth");
164 		if (field == NH_FLD_ETH_DA)
165 			strcat(string, ".dst");
166 		else if (field == NH_FLD_ETH_SA)
167 			strcat(string, ".src");
168 		else if (field == NH_FLD_ETH_TYPE)
169 			strcat(string, ".type");
170 		else
171 			strcat(string, ".unknown field");
172 	} else if (prot == NET_PROT_VLAN) {
173 		strcpy(string, "vlan");
174 		if (field == NH_FLD_VLAN_TCI)
175 			strcat(string, ".tci");
176 		else
177 			strcat(string, ".unknown field");
178 	} else if (prot == NET_PROT_IP) {
179 		strcpy(string, "ip");
180 		if (field == NH_FLD_IP_SRC)
181 			strcat(string, ".src");
182 		else if (field == NH_FLD_IP_DST)
183 			strcat(string, ".dst");
184 		else if (field == NH_FLD_IP_PROTO)
185 			strcat(string, ".proto");
186 		else
187 			strcat(string, ".unknown field");
188 	} else if (prot == NET_PROT_TCP) {
189 		strcpy(string, "tcp");
190 		if (field == NH_FLD_TCP_PORT_SRC)
191 			strcat(string, ".src");
192 		else if (field == NH_FLD_TCP_PORT_DST)
193 			strcat(string, ".dst");
194 		else
195 			strcat(string, ".unknown field");
196 	} else if (prot == NET_PROT_UDP) {
197 		strcpy(string, "udp");
198 		if (field == NH_FLD_UDP_PORT_SRC)
199 			strcat(string, ".src");
200 		else if (field == NH_FLD_UDP_PORT_DST)
201 			strcat(string, ".dst");
202 		else
203 			strcat(string, ".unknown field");
204 	} else if (prot == NET_PROT_ICMP) {
205 		strcpy(string, "icmp");
206 		if (field == NH_FLD_ICMP_TYPE)
207 			strcat(string, ".type");
208 		else if (field == NH_FLD_ICMP_CODE)
209 			strcat(string, ".code");
210 		else
211 			strcat(string, ".unknown field");
212 	} else if (prot == NET_PROT_SCTP) {
213 		strcpy(string, "sctp");
214 		if (field == NH_FLD_SCTP_PORT_SRC)
215 			strcat(string, ".src");
216 		else if (field == NH_FLD_SCTP_PORT_DST)
217 			strcat(string, ".dst");
218 		else
219 			strcat(string, ".unknown field");
220 	} else if (prot == NET_PROT_GRE) {
221 		strcpy(string, "gre");
222 		if (field == NH_FLD_GRE_TYPE)
223 			strcat(string, ".type");
224 		else
225 			strcat(string, ".unknown field");
226 	} else {
227 		strcpy(string, "unknown protocol");
228 	}
229 }
230 
231 static inline void dpaa2_flow_qos_table_extracts_log(
232 	const struct dpaa2_dev_priv *priv)
233 {
234 	int idx;
235 	char string[32];
236 
237 	if (!dpaa2_flow_control_log)
238 		return;
239 
240 	printf("Setup QoS table: number of extracts: %d\r\n",
241 			priv->extract.qos_key_extract.dpkg.num_extracts);
242 	for (idx = 0; idx < priv->extract.qos_key_extract.dpkg.num_extracts;
243 		idx++) {
244 		dpaa2_prot_field_string(priv->extract.qos_key_extract.dpkg
245 			.extracts[idx].extract.from_hdr.prot,
246 			priv->extract.qos_key_extract.dpkg.extracts[idx]
247 			.extract.from_hdr.field,
248 			string);
249 		printf("%s", string);
250 		if ((idx + 1) < priv->extract.qos_key_extract.dpkg.num_extracts)
251 			printf(" / ");
252 	}
253 	printf("\r\n");
254 }
255 
256 static inline void dpaa2_flow_fs_table_extracts_log(
257 	const struct dpaa2_dev_priv *priv, int tc_id)
258 {
259 	int idx;
260 	char string[32];
261 
262 	if (!dpaa2_flow_control_log)
263 		return;
264 
265 	printf("Setup FS table: number of extracts of TC[%d]: %d\r\n",
266 			tc_id, priv->extract.tc_key_extract[tc_id]
267 			.dpkg.num_extracts);
268 	for (idx = 0; idx < priv->extract.tc_key_extract[tc_id]
269 		.dpkg.num_extracts; idx++) {
270 		dpaa2_prot_field_string(priv->extract.tc_key_extract[tc_id]
271 			.dpkg.extracts[idx].extract.from_hdr.prot,
272 			priv->extract.tc_key_extract[tc_id].dpkg.extracts[idx]
273 			.extract.from_hdr.field,
274 			string);
275 		printf("%s", string);
276 		if ((idx + 1) < priv->extract.tc_key_extract[tc_id]
277 			.dpkg.num_extracts)
278 			printf(" / ");
279 	}
280 	printf("\r\n");
281 }
282 
283 static inline void dpaa2_flow_qos_entry_log(
284 	const char *log_info, const struct rte_flow *flow, int qos_index)
285 {
286 	int idx;
287 	uint8_t *key, *mask;
288 
289 	if (!dpaa2_flow_control_log)
290 		return;
291 
292 	printf("\r\n%s QoS entry[%d] for TC[%d], extracts size is %d\r\n",
293 		log_info, qos_index, flow->tc_id, flow->qos_real_key_size);
294 
295 	key = (uint8_t *)(size_t)flow->qos_rule.key_iova;
296 	mask = (uint8_t *)(size_t)flow->qos_rule.mask_iova;
297 
298 	printf("key:\r\n");
299 	for (idx = 0; idx < flow->qos_real_key_size; idx++)
300 		printf("%02x ", key[idx]);
301 
302 	printf("\r\nmask:\r\n");
303 	for (idx = 0; idx < flow->qos_real_key_size; idx++)
304 		printf("%02x ", mask[idx]);
305 
306 	printf("\r\n%s QoS ipsrc: %d, ipdst: %d\r\n", log_info,
307 		flow->ipaddr_rule.qos_ipsrc_offset,
308 		flow->ipaddr_rule.qos_ipdst_offset);
309 }
310 
311 static inline void dpaa2_flow_fs_entry_log(
312 	const char *log_info, const struct rte_flow *flow)
313 {
314 	int idx;
315 	uint8_t *key, *mask;
316 
317 	if (!dpaa2_flow_control_log)
318 		return;
319 
320 	printf("\r\n%s FS/TC entry[%d] of TC[%d], extracts size is %d\r\n",
321 		log_info, flow->tc_index, flow->tc_id, flow->fs_real_key_size);
322 
323 	key = (uint8_t *)(size_t)flow->fs_rule.key_iova;
324 	mask = (uint8_t *)(size_t)flow->fs_rule.mask_iova;
325 
326 	printf("key:\r\n");
327 	for (idx = 0; idx < flow->fs_real_key_size; idx++)
328 		printf("%02x ", key[idx]);
329 
330 	printf("\r\nmask:\r\n");
331 	for (idx = 0; idx < flow->fs_real_key_size; idx++)
332 		printf("%02x ", mask[idx]);
333 
334 	printf("\r\n%s FS ipsrc: %d, ipdst: %d\r\n", log_info,
335 		flow->ipaddr_rule.fs_ipsrc_offset,
336 		flow->ipaddr_rule.fs_ipdst_offset);
337 }
338 
339 static inline void dpaa2_flow_extract_key_set(
340 	struct dpaa2_key_info *key_info, int index, uint8_t size)
341 {
342 	key_info->key_size[index] = size;
343 	if (index > 0) {
344 		key_info->key_offset[index] =
345 			key_info->key_offset[index - 1] +
346 			key_info->key_size[index - 1];
347 	} else {
348 		key_info->key_offset[index] = 0;
349 	}
350 	key_info->key_total_size += size;
351 }
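
/*
 * Example of the bookkeeping above (illustrative sizes): starting from
 * an empty key, adding ETH_TYPE (2 bytes) and then IP_PROTO (1 byte)
 * gives
 *   key_offset[0] = 0, key_size[0] = 2
 *   key_offset[1] = 2, key_size[1] = 1
 *   key_total_size = 3
 */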
352 
353 static int dpaa2_flow_extract_add(
354 	struct dpaa2_key_extract *key_extract,
355 	enum net_prot prot,
356 	uint32_t field, uint8_t field_size)
357 {
358 	int index, ip_src = -1, ip_dst = -1;
359 	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
360 	struct dpaa2_key_info *key_info = &key_extract->key_info;
361 
362 	if (dpkg->num_extracts >=
363 		DPKG_MAX_NUM_OF_EXTRACTS) {
364 		DPAA2_PMD_WARN("Number of extracts overflows");
365 		return -1;
366 	}
367 	/* Before reordering, the IP SRC and IP DST extracts,
368 	 * if present, are already the last extract(s).
369 	 */
370 	for (index = 0; index < dpkg->num_extracts; index++) {
371 		if (dpkg->extracts[index].extract.from_hdr.prot ==
372 			NET_PROT_IP) {
373 			if (dpkg->extracts[index].extract.from_hdr.field ==
374 				NH_FLD_IP_SRC) {
375 				ip_src = index;
376 			}
377 			if (dpkg->extracts[index].extract.from_hdr.field ==
378 				NH_FLD_IP_DST) {
379 				ip_dst = index;
380 			}
381 		}
382 	}
383 
384 	if (ip_src >= 0)
385 		RTE_ASSERT((ip_src + 2) >= dpkg->num_extracts);
386 
387 	if (ip_dst >= 0)
388 		RTE_ASSERT((ip_dst + 2) >= dpkg->num_extracts);
389 
390 	if (prot == NET_PROT_IP &&
391 		(field == NH_FLD_IP_SRC ||
392 		field == NH_FLD_IP_DST)) {
393 		index = dpkg->num_extracts;
394 	} else {
395 		if (ip_src >= 0 && ip_dst >= 0)
396 			index = dpkg->num_extracts - 2;
397 		else if (ip_src >= 0 || ip_dst >= 0)
398 			index = dpkg->num_extracts - 1;
399 		else
400 			index = dpkg->num_extracts;
401 	}
402 
403 	dpkg->extracts[index].type = DPKG_EXTRACT_FROM_HDR;
404 	dpkg->extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
405 	dpkg->extracts[index].extract.from_hdr.prot = prot;
406 	dpkg->extracts[index].extract.from_hdr.field = field;
407 	if (prot == NET_PROT_IP &&
408 		(field == NH_FLD_IP_SRC ||
409 		field == NH_FLD_IP_DST)) {
410 		dpaa2_flow_extract_key_set(key_info, index, 0);
411 	} else {
412 		dpaa2_flow_extract_key_set(key_info, index, field_size);
413 	}
414 
415 	if (prot == NET_PROT_IP) {
416 		if (field == NH_FLD_IP_SRC) {
417 			if (key_info->ipv4_dst_offset >= 0) {
418 				key_info->ipv4_src_offset =
419 					key_info->ipv4_dst_offset +
420 					NH_FLD_IPV4_ADDR_SIZE;
421 			} else {
422 				key_info->ipv4_src_offset =
423 					key_info->key_offset[index - 1] +
424 						key_info->key_size[index - 1];
425 			}
426 			if (key_info->ipv6_dst_offset >= 0) {
427 				key_info->ipv6_src_offset =
428 					key_info->ipv6_dst_offset +
429 					NH_FLD_IPV6_ADDR_SIZE;
430 			} else {
431 				key_info->ipv6_src_offset =
432 					key_info->key_offset[index - 1] +
433 						key_info->key_size[index - 1];
434 			}
435 		} else if (field == NH_FLD_IP_DST) {
436 			if (key_info->ipv4_src_offset >= 0) {
437 				key_info->ipv4_dst_offset =
438 					key_info->ipv4_src_offset +
439 					NH_FLD_IPV4_ADDR_SIZE;
440 			} else {
441 				key_info->ipv4_dst_offset =
442 					key_info->key_offset[index - 1] +
443 						key_info->key_size[index - 1];
444 			}
445 			if (key_info->ipv6_src_offset >= 0) {
446 				key_info->ipv6_dst_offset =
447 					key_info->ipv6_src_offset +
448 					NH_FLD_IPV6_ADDR_SIZE;
449 			} else {
450 				key_info->ipv6_dst_offset =
451 					key_info->key_offset[index - 1] +
452 						key_info->key_size[index - 1];
453 			}
454 		}
455 	}
456 
457 	if (index == dpkg->num_extracts) {
458 		dpkg->num_extracts++;
459 		return 0;
460 	}
461 
462 	if (ip_src >= 0) {
463 		ip_src++;
464 		dpkg->extracts[ip_src].type =
465 			DPKG_EXTRACT_FROM_HDR;
466 		dpkg->extracts[ip_src].extract.from_hdr.type =
467 			DPKG_FULL_FIELD;
468 		dpkg->extracts[ip_src].extract.from_hdr.prot =
469 			NET_PROT_IP;
470 		dpkg->extracts[ip_src].extract.from_hdr.field =
471 			NH_FLD_IP_SRC;
472 		dpaa2_flow_extract_key_set(key_info, ip_src, 0);
473 		key_info->ipv4_src_offset += field_size;
474 		key_info->ipv6_src_offset += field_size;
475 	}
476 	if (ip_dst >= 0) {
477 		ip_dst++;
478 		dpkg->extracts[ip_dst].type =
479 			DPKG_EXTRACT_FROM_HDR;
480 		dpkg->extracts[ip_dst].extract.from_hdr.type =
481 			DPKG_FULL_FIELD;
482 		dpkg->extracts[ip_dst].extract.from_hdr.prot =
483 			NET_PROT_IP;
484 		dpkg->extracts[ip_dst].extract.from_hdr.field =
485 			NH_FLD_IP_DST;
486 		dpaa2_flow_extract_key_set(key_info, ip_dst, 0);
487 		key_info->ipv4_dst_offset += field_size;
488 		key_info->ipv6_dst_offset += field_size;
489 	}
490 
491 	dpkg->num_extracts++;
492 
493 	return 0;
494 }
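
/*
 * Reordering example for the function above (illustrative layout):
 * with existing extracts [ETH_TYPE][IP_SRC][IP_DST], adding
 * UDP_PORT_DST places the new field before the IP address extracts so
 * they stay last:
 *   [ETH_TYPE][UDP_PORT_DST][IP_SRC][IP_DST]
 * and the cached ipv4/ipv6 src/dst offsets are advanced by the size of
 * the inserted field.
 */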
495 
496 static int dpaa2_flow_extract_add_raw(struct dpaa2_key_extract *key_extract,
497 				      int size)
498 {
499 	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
500 	struct dpaa2_key_info *key_info = &key_extract->key_info;
501 	int last_extract_size, index;
502 
503 	if (dpkg->num_extracts != 0 && dpkg->extracts[0].type !=
504 	    DPKG_EXTRACT_FROM_DATA) {
505 		DPAA2_PMD_WARN("RAW extract cannot be combined with others");
506 		return -1;
507 	}
508 
509 	last_extract_size = (size % DPAA2_FLOW_MAX_KEY_SIZE);
510 	dpkg->num_extracts = (size / DPAA2_FLOW_MAX_KEY_SIZE);
511 	if (last_extract_size)
512 		dpkg->num_extracts++;
513 	else
514 		last_extract_size = DPAA2_FLOW_MAX_KEY_SIZE;
515 
516 	for (index = 0; index < dpkg->num_extracts; index++) {
517 		dpkg->extracts[index].type = DPKG_EXTRACT_FROM_DATA;
518 		if (index == dpkg->num_extracts - 1)
519 			dpkg->extracts[index].extract.from_data.size =
520 				last_extract_size;
521 		else
522 			dpkg->extracts[index].extract.from_data.size =
523 				DPAA2_FLOW_MAX_KEY_SIZE;
524 		dpkg->extracts[index].extract.from_data.offset =
525 			DPAA2_FLOW_MAX_KEY_SIZE * index;
526 	}
527 
528 	key_info->key_total_size = size;
529 	return 0;
530 }
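
/*
 * Raw extract split example (illustrative, assuming
 * DPAA2_FLOW_MAX_KEY_SIZE is 16): a 40-byte raw key becomes three
 * DPKG_EXTRACT_FROM_DATA entries of 16, 16 and 8 bytes at offsets
 * 0, 16 and 32 respectively.
 */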
531 
532 /* Protocol discrimination.
533  * Discriminate IPv4/IPv6/VLAN by the Eth type.
534  * Discriminate UDP/TCP/ICMP by the next-proto field of IP.
535  */
536 static inline int
537 dpaa2_flow_proto_discrimination_extract(
538 	struct dpaa2_key_extract *key_extract,
539 	enum rte_flow_item_type type)
540 {
541 	if (type == RTE_FLOW_ITEM_TYPE_ETH) {
542 		return dpaa2_flow_extract_add(
543 				key_extract, NET_PROT_ETH,
544 				NH_FLD_ETH_TYPE,
545 				sizeof(rte_be16_t));
546 	} else if (type == (enum rte_flow_item_type)
547 		DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
548 		return dpaa2_flow_extract_add(
549 				key_extract, NET_PROT_IP,
550 				NH_FLD_IP_PROTO,
551 				NH_FLD_IP_PROTO_SIZE);
552 	}
553 
554 	return -1;
555 }
556 
557 static inline int dpaa2_flow_extract_search(
558 	struct dpkg_profile_cfg *dpkg,
559 	enum net_prot prot, uint32_t field)
560 {
561 	int i;
562 
563 	for (i = 0; i < dpkg->num_extracts; i++) {
564 		if (dpkg->extracts[i].extract.from_hdr.prot == prot &&
565 			dpkg->extracts[i].extract.from_hdr.field == field) {
566 			return i;
567 		}
568 	}
569 
570 	return -1;
571 }
572 
573 static inline int dpaa2_flow_extract_key_offset(
574 	struct dpaa2_key_extract *key_extract,
575 	enum net_prot prot, uint32_t field)
576 {
577 	int i;
578 	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
579 	struct dpaa2_key_info *key_info = &key_extract->key_info;
580 
581 	if (prot == NET_PROT_IPV4 ||
582 		prot == NET_PROT_IPV6)
583 		i = dpaa2_flow_extract_search(dpkg, NET_PROT_IP, field);
584 	else
585 		i = dpaa2_flow_extract_search(dpkg, prot, field);
586 
587 	if (i >= 0) {
588 		if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_SRC)
589 			return key_info->ipv4_src_offset;
590 		else if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_DST)
591 			return key_info->ipv4_dst_offset;
592 		else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_SRC)
593 			return key_info->ipv6_src_offset;
594 		else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_DST)
595 			return key_info->ipv6_dst_offset;
596 		else
597 			return key_info->key_offset[i];
598 	} else {
599 		return -1;
600 	}
601 }
602 
603 struct proto_discrimination {
604 	enum rte_flow_item_type type;
605 	union {
606 		rte_be16_t eth_type;
607 		uint8_t ip_proto;
608 	};
609 };
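
/*
 * Usage sketch (values as used later in this file): a VLAN-only
 * pattern fills
 *   { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *     .eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) }
 * while an ICMP-only pattern fills
 *   { .type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP,
 *     .ip_proto = IPPROTO_ICMP }
 */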
610 
611 static int
612 dpaa2_flow_proto_discrimination_rule(
613 	struct dpaa2_dev_priv *priv, struct rte_flow *flow,
614 	struct proto_discrimination proto, int group)
615 {
616 	enum net_prot prot;
617 	uint32_t field;
618 	int offset;
619 	size_t key_iova;
620 	size_t mask_iova;
621 	rte_be16_t eth_type;
622 	uint8_t ip_proto;
623 
624 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
625 		prot = NET_PROT_ETH;
626 		field = NH_FLD_ETH_TYPE;
627 	} else if (proto.type == DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
628 		prot = NET_PROT_IP;
629 		field = NH_FLD_IP_PROTO;
630 	} else {
631 		DPAA2_PMD_ERR(
632 			"Only Eth and IP are supported to discriminate the next proto.");
633 		return -1;
634 	}
635 
636 	offset = dpaa2_flow_extract_key_offset(&priv->extract.qos_key_extract,
637 			prot, field);
638 	if (offset < 0) {
639 		DPAA2_PMD_ERR("QoS prot %d field %d extract failed",
640 				prot, field);
641 		return -1;
642 	}
643 	key_iova = flow->qos_rule.key_iova + offset;
644 	mask_iova = flow->qos_rule.mask_iova + offset;
645 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
646 		eth_type = proto.eth_type;
647 		memcpy((void *)key_iova, (const void *)(&eth_type),
648 			sizeof(rte_be16_t));
649 		eth_type = 0xffff;
650 		memcpy((void *)mask_iova, (const void *)(&eth_type),
651 			sizeof(rte_be16_t));
652 	} else {
653 		ip_proto = proto.ip_proto;
654 		memcpy((void *)key_iova, (const void *)(&ip_proto),
655 			sizeof(uint8_t));
656 		ip_proto = 0xff;
657 		memcpy((void *)mask_iova, (const void *)(&ip_proto),
658 			sizeof(uint8_t));
659 	}
660 
661 	offset = dpaa2_flow_extract_key_offset(
662 			&priv->extract.tc_key_extract[group],
663 			prot, field);
664 	if (offset < 0) {
665 		DPAA2_PMD_ERR("FS prot %d field %d extract failed",
666 				prot, field);
667 		return -1;
668 	}
669 	key_iova = flow->fs_rule.key_iova + offset;
670 	mask_iova = flow->fs_rule.mask_iova + offset;
671 
672 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
673 		eth_type = proto.eth_type;
674 		memcpy((void *)key_iova, (const void *)(&eth_type),
675 			sizeof(rte_be16_t));
676 		eth_type = 0xffff;
677 		memcpy((void *)mask_iova, (const void *)(&eth_type),
678 			sizeof(rte_be16_t));
679 	} else {
680 		ip_proto = proto.ip_proto;
681 		memcpy((void *)key_iova, (const void *)(&ip_proto),
682 			sizeof(uint8_t));
683 		ip_proto = 0xff;
684 		memcpy((void *)mask_iova, (const void *)(&ip_proto),
685 			sizeof(uint8_t));
686 	}
687 
688 	return 0;
689 }
690 
691 static inline int
692 dpaa2_flow_rule_data_set(
693 	struct dpaa2_key_extract *key_extract,
694 	struct dpni_rule_cfg *rule,
695 	enum net_prot prot, uint32_t field,
696 	const void *key, const void *mask, int size)
697 {
698 	int offset = dpaa2_flow_extract_key_offset(key_extract,
699 				prot, field);
700 
701 	if (offset < 0) {
702 		DPAA2_PMD_ERR("prot %d, field %d extract failed",
703 			prot, field);
704 		return -1;
705 	}
706 
707 	memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
708 	memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
709 
710 	return 0;
711 }
712 
713 static inline int
714 dpaa2_flow_rule_data_set_raw(struct dpni_rule_cfg *rule,
715 			     const void *key, const void *mask, int size)
716 {
717 	int offset = 0;
718 
719 	memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
720 	memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
721 
722 	return 0;
723 }
724 
725 static inline int
726 _dpaa2_flow_rule_move_ipaddr_tail(
727 	struct dpaa2_key_extract *key_extract,
728 	struct dpni_rule_cfg *rule, int src_offset,
729 	uint32_t field, bool ipv4)
730 {
731 	size_t key_src;
732 	size_t mask_src;
733 	size_t key_dst;
734 	size_t mask_dst;
735 	int dst_offset, len;
736 	enum net_prot prot;
737 	char tmp[NH_FLD_IPV6_ADDR_SIZE];
738 
739 	if (field != NH_FLD_IP_SRC &&
740 		field != NH_FLD_IP_DST) {
741 		DPAA2_PMD_ERR("Field of IP addr reorder must be IP SRC/DST");
742 		return -1;
743 	}
744 	if (ipv4)
745 		prot = NET_PROT_IPV4;
746 	else
747 		prot = NET_PROT_IPV6;
748 	dst_offset = dpaa2_flow_extract_key_offset(key_extract,
749 				prot, field);
750 	if (dst_offset < 0) {
751 		DPAA2_PMD_ERR("Field %d reorder extract failed", field);
752 		return -1;
753 	}
754 	key_src = rule->key_iova + src_offset;
755 	mask_src = rule->mask_iova + src_offset;
756 	key_dst = rule->key_iova + dst_offset;
757 	mask_dst = rule->mask_iova + dst_offset;
758 	if (ipv4)
759 		len = sizeof(rte_be32_t);
760 	else
761 		len = NH_FLD_IPV6_ADDR_SIZE;
762 
763 	memcpy(tmp, (char *)key_src, len);
764 	memset((char *)key_src, 0, len);
765 	memcpy((char *)key_dst, tmp, len);
766 
767 	memcpy(tmp, (char *)mask_src, len);
768 	memset((char *)mask_src, 0, len);
769 	memcpy((char *)mask_dst, tmp, len);
770 
771 	return 0;
772 }
773 
774 static inline int
775 dpaa2_flow_rule_move_ipaddr_tail(
776 	struct rte_flow *flow, struct dpaa2_dev_priv *priv,
777 	int fs_group)
778 {
779 	int ret;
780 	enum net_prot prot;
781 
782 	if (flow->ipaddr_rule.ipaddr_type == FLOW_NONE_IPADDR)
783 		return 0;
784 
785 	if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR)
786 		prot = NET_PROT_IPV4;
787 	else
788 		prot = NET_PROT_IPV6;
789 
790 	if (flow->ipaddr_rule.qos_ipsrc_offset >= 0) {
791 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
792 				&priv->extract.qos_key_extract,
793 				&flow->qos_rule,
794 				flow->ipaddr_rule.qos_ipsrc_offset,
795 				NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
796 		if (ret) {
797 			DPAA2_PMD_ERR("QoS src address reorder failed");
798 			return -1;
799 		}
800 		flow->ipaddr_rule.qos_ipsrc_offset =
801 			dpaa2_flow_extract_key_offset(
802 				&priv->extract.qos_key_extract,
803 				prot, NH_FLD_IP_SRC);
804 	}
805 
806 	if (flow->ipaddr_rule.qos_ipdst_offset >= 0) {
807 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
808 				&priv->extract.qos_key_extract,
809 				&flow->qos_rule,
810 				flow->ipaddr_rule.qos_ipdst_offset,
811 				NH_FLD_IP_DST, prot == NET_PROT_IPV4);
812 		if (ret) {
813 			DPAA2_PMD_ERR("QoS dst address reorder failed");
814 			return -1;
815 		}
816 		flow->ipaddr_rule.qos_ipdst_offset =
817 			dpaa2_flow_extract_key_offset(
818 				&priv->extract.qos_key_extract,
819 				prot, NH_FLD_IP_DST);
820 	}
821 
822 	if (flow->ipaddr_rule.fs_ipsrc_offset >= 0) {
823 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
824 				&priv->extract.tc_key_extract[fs_group],
825 				&flow->fs_rule,
826 				flow->ipaddr_rule.fs_ipsrc_offset,
827 				NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
828 		if (ret) {
829 			DPAA2_PMD_ERR("FS src address reorder failed");
830 			return -1;
831 		}
832 		flow->ipaddr_rule.fs_ipsrc_offset =
833 			dpaa2_flow_extract_key_offset(
834 				&priv->extract.tc_key_extract[fs_group],
835 				prot, NH_FLD_IP_SRC);
836 	}
837 	if (flow->ipaddr_rule.fs_ipdst_offset >= 0) {
838 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
839 				&priv->extract.tc_key_extract[fs_group],
840 				&flow->fs_rule,
841 				flow->ipaddr_rule.fs_ipdst_offset,
842 				NH_FLD_IP_DST, prot == NET_PROT_IPV4);
843 		if (ret) {
844 			DPAA2_PMD_ERR("FS dst address reorder failed");
845 			return -1;
846 		}
847 		flow->ipaddr_rule.fs_ipdst_offset =
848 			dpaa2_flow_extract_key_offset(
849 				&priv->extract.tc_key_extract[fs_group],
850 				prot, NH_FLD_IP_DST);
851 	}
852 
853 	return 0;
854 }
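
/*
 * Move-to-tail example (illustrative offsets): if the IPv4 source
 * address was first written at key offset 2 and a later extract pushed
 * the IP source slot to offset 4, the address bytes in both key and
 * mask are copied from offset 2 to offset 4, the old bytes are zeroed,
 * and the recorded qos/fs ipsrc offset is refreshed.
 */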
855 
856 static int
857 dpaa2_flow_extract_support(
858 	const uint8_t *mask_src,
859 	enum rte_flow_item_type type)
860 {
861 	char mask[64];
862 	int i, size = 0;
863 	const char *mask_support = 0;
864 
865 	switch (type) {
866 	case RTE_FLOW_ITEM_TYPE_ETH:
867 		mask_support = (const char *)&dpaa2_flow_item_eth_mask;
868 		size = sizeof(struct rte_flow_item_eth);
869 		break;
870 	case RTE_FLOW_ITEM_TYPE_VLAN:
871 		mask_support = (const char *)&dpaa2_flow_item_vlan_mask;
872 		size = sizeof(struct rte_flow_item_vlan);
873 		break;
874 	case RTE_FLOW_ITEM_TYPE_IPV4:
875 		mask_support = (const char *)&dpaa2_flow_item_ipv4_mask;
876 		size = sizeof(struct rte_flow_item_ipv4);
877 		break;
878 	case RTE_FLOW_ITEM_TYPE_IPV6:
879 		mask_support = (const char *)&dpaa2_flow_item_ipv6_mask;
880 		size = sizeof(struct rte_flow_item_ipv6);
881 		break;
882 	case RTE_FLOW_ITEM_TYPE_ICMP:
883 		mask_support = (const char *)&dpaa2_flow_item_icmp_mask;
884 		size = sizeof(struct rte_flow_item_icmp);
885 		break;
886 	case RTE_FLOW_ITEM_TYPE_UDP:
887 		mask_support = (const char *)&dpaa2_flow_item_udp_mask;
888 		size = sizeof(struct rte_flow_item_udp);
889 		break;
890 	case RTE_FLOW_ITEM_TYPE_TCP:
891 		mask_support = (const char *)&dpaa2_flow_item_tcp_mask;
892 		size = sizeof(struct rte_flow_item_tcp);
893 		break;
894 	case RTE_FLOW_ITEM_TYPE_SCTP:
895 		mask_support = (const char *)&dpaa2_flow_item_sctp_mask;
896 		size = sizeof(struct rte_flow_item_sctp);
897 		break;
898 	case RTE_FLOW_ITEM_TYPE_GRE:
899 		mask_support = (const char *)&dpaa2_flow_item_gre_mask;
900 		size = sizeof(struct rte_flow_item_gre);
901 		break;
902 	default:
903 		return -1;
904 	}
905 
906 	memcpy(mask, mask_support, size);
907 
908 	for (i = 0; i < size; i++)
909 		mask[i] = (mask[i] | mask_src[i]);
910 
911 	if (memcmp(mask, mask_support, size))
912 		return -1;
913 
914 	return 0;
915 }
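
/*
 * The check above ORs the user's mask into the supported mask; any bit
 * set outside the supported fields changes the result and the item is
 * rejected. For example, an eth item masking only dst, src and type is
 * accepted, while a mask covering additional header fields is not.
 */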
916 
917 static int
918 dpaa2_configure_flow_eth(struct rte_flow *flow,
919 			 struct rte_eth_dev *dev,
920 			 const struct rte_flow_attr *attr,
921 			 const struct rte_flow_item *pattern,
922 			 const struct rte_flow_action actions[] __rte_unused,
923 			 struct rte_flow_error *error __rte_unused,
924 			 int *device_configured)
925 {
926 	int index, ret;
927 	int local_cfg = 0;
928 	uint32_t group;
929 	const struct rte_flow_item_eth *spec, *mask;
930 
931 	/* TODO: The upper bound of the range parameter is currently not implemented. */
932 	const struct rte_flow_item_eth *last __rte_unused;
933 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
934 	const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0};
935 
936 	group = attr->group;
937 
938 	/* Parse pattern list to get the matching parameters */
939 	spec    = (const struct rte_flow_item_eth *)pattern->spec;
940 	last    = (const struct rte_flow_item_eth *)pattern->last;
941 	mask    = (const struct rte_flow_item_eth *)
942 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_eth_mask);
943 	if (!spec) {
944 		/* No particular field of the eth header matters here,
945 		 * only the eth protocol itself.
946 		 */
947 		DPAA2_PMD_WARN("No pattern spec for Eth flow, just skip");
948 		return 0;
949 	}
950 
951 	/* Get traffic class index and flow id to be configured */
952 	flow->tc_id = group;
953 	flow->tc_index = attr->priority;
954 
955 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
956 		RTE_FLOW_ITEM_TYPE_ETH)) {
957 		DPAA2_PMD_WARN("Extract field(s) of ethernet not supported.");
958 
959 		return -1;
960 	}
961 
962 	if (memcmp((const char *)&mask->src, zero_cmp, RTE_ETHER_ADDR_LEN)) {
963 		index = dpaa2_flow_extract_search(
964 				&priv->extract.qos_key_extract.dpkg,
965 				NET_PROT_ETH, NH_FLD_ETH_SA);
966 		if (index < 0) {
967 			ret = dpaa2_flow_extract_add(
968 					&priv->extract.qos_key_extract,
969 					NET_PROT_ETH, NH_FLD_ETH_SA,
970 					RTE_ETHER_ADDR_LEN);
971 			if (ret) {
972 				DPAA2_PMD_ERR("QoS Extract add ETH_SA failed.");
973 
974 				return -1;
975 			}
976 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
977 		}
978 		index = dpaa2_flow_extract_search(
979 				&priv->extract.tc_key_extract[group].dpkg,
980 				NET_PROT_ETH, NH_FLD_ETH_SA);
981 		if (index < 0) {
982 			ret = dpaa2_flow_extract_add(
983 					&priv->extract.tc_key_extract[group],
984 					NET_PROT_ETH, NH_FLD_ETH_SA,
985 					RTE_ETHER_ADDR_LEN);
986 			if (ret) {
987 				DPAA2_PMD_ERR("FS Extract add ETH_SA failed.");
988 				return -1;
989 			}
990 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
991 		}
992 
993 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
994 		if (ret) {
995 			DPAA2_PMD_ERR(
996 				"Move ipaddr before ETH_SA rule set failed");
997 			return -1;
998 		}
999 
1000 		ret = dpaa2_flow_rule_data_set(
1001 				&priv->extract.qos_key_extract,
1002 				&flow->qos_rule,
1003 				NET_PROT_ETH,
1004 				NH_FLD_ETH_SA,
1005 				&spec->src.addr_bytes,
1006 				&mask->src.addr_bytes,
1007 				sizeof(struct rte_ether_addr));
1008 		if (ret) {
1009 			DPAA2_PMD_ERR("QoS NH_FLD_ETH_SA rule data set failed");
1010 			return -1;
1011 		}
1012 
1013 		ret = dpaa2_flow_rule_data_set(
1014 				&priv->extract.tc_key_extract[group],
1015 				&flow->fs_rule,
1016 				NET_PROT_ETH,
1017 				NH_FLD_ETH_SA,
1018 				&spec->src.addr_bytes,
1019 				&mask->src.addr_bytes,
1020 				sizeof(struct rte_ether_addr));
1021 		if (ret) {
1022 			DPAA2_PMD_ERR("FS NH_FLD_ETH_SA rule data set failed");
1023 			return -1;
1024 		}
1025 	}
1026 
1027 	if (memcmp((const char *)&mask->dst, zero_cmp, RTE_ETHER_ADDR_LEN)) {
1028 		index = dpaa2_flow_extract_search(
1029 				&priv->extract.qos_key_extract.dpkg,
1030 				NET_PROT_ETH, NH_FLD_ETH_DA);
1031 		if (index < 0) {
1032 			ret = dpaa2_flow_extract_add(
1033 					&priv->extract.qos_key_extract,
1034 					NET_PROT_ETH, NH_FLD_ETH_DA,
1035 					RTE_ETHER_ADDR_LEN);
1036 			if (ret) {
1037 				DPAA2_PMD_ERR("QoS Extract add ETH_DA failed.");
1038 
1039 				return -1;
1040 			}
1041 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1042 		}
1043 
1044 		index = dpaa2_flow_extract_search(
1045 				&priv->extract.tc_key_extract[group].dpkg,
1046 				NET_PROT_ETH, NH_FLD_ETH_DA);
1047 		if (index < 0) {
1048 			ret = dpaa2_flow_extract_add(
1049 					&priv->extract.tc_key_extract[group],
1050 					NET_PROT_ETH, NH_FLD_ETH_DA,
1051 					RTE_ETHER_ADDR_LEN);
1052 			if (ret) {
1053 				DPAA2_PMD_ERR("FS Extract add ETH_DA failed.");
1054 
1055 				return -1;
1056 			}
1057 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1058 		}
1059 
1060 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1061 		if (ret) {
1062 			DPAA2_PMD_ERR(
1063 				"Move ipaddr before ETH DA rule set failed");
1064 			return -1;
1065 		}
1066 
1067 		ret = dpaa2_flow_rule_data_set(
1068 				&priv->extract.qos_key_extract,
1069 				&flow->qos_rule,
1070 				NET_PROT_ETH,
1071 				NH_FLD_ETH_DA,
1072 				&spec->dst.addr_bytes,
1073 				&mask->dst.addr_bytes,
1074 				sizeof(struct rte_ether_addr));
1075 		if (ret) {
1076 			DPAA2_PMD_ERR("QoS NH_FLD_ETH_DA rule data set failed");
1077 			return -1;
1078 		}
1079 
1080 		ret = dpaa2_flow_rule_data_set(
1081 				&priv->extract.tc_key_extract[group],
1082 				&flow->fs_rule,
1083 				NET_PROT_ETH,
1084 				NH_FLD_ETH_DA,
1085 				&spec->dst.addr_bytes,
1086 				&mask->dst.addr_bytes,
1087 				sizeof(struct rte_ether_addr));
1088 		if (ret) {
1089 			DPAA2_PMD_ERR("FS NH_FLD_ETH_DA rule data set failed");
1090 			return -1;
1091 		}
1092 	}
1093 
1094 	if (memcmp((const char *)&mask->type, zero_cmp, sizeof(rte_be16_t))) {
1095 		index = dpaa2_flow_extract_search(
1096 				&priv->extract.qos_key_extract.dpkg,
1097 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1098 		if (index < 0) {
1099 			ret = dpaa2_flow_extract_add(
1100 					&priv->extract.qos_key_extract,
1101 					NET_PROT_ETH, NH_FLD_ETH_TYPE,
1102 					RTE_ETHER_TYPE_LEN);
1103 			if (ret) {
1104 				DPAA2_PMD_ERR("QoS Extract add ETH_TYPE failed.");
1105 
1106 				return -1;
1107 			}
1108 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1109 		}
1110 		index = dpaa2_flow_extract_search(
1111 				&priv->extract.tc_key_extract[group].dpkg,
1112 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1113 		if (index < 0) {
1114 			ret = dpaa2_flow_extract_add(
1115 					&priv->extract.tc_key_extract[group],
1116 					NET_PROT_ETH, NH_FLD_ETH_TYPE,
1117 					RTE_ETHER_TYPE_LEN);
1118 			if (ret) {
1119 				DPAA2_PMD_ERR("FS Extract add ETH_TYPE failed.");
1120 
1121 				return -1;
1122 			}
1123 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1124 		}
1125 
1126 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1127 		if (ret) {
1128 			DPAA2_PMD_ERR(
1129 				"Move ipaddr before ETH TYPE rule set failed");
1130 			return -1;
1131 		}
1132 
1133 		ret = dpaa2_flow_rule_data_set(
1134 				&priv->extract.qos_key_extract,
1135 				&flow->qos_rule,
1136 				NET_PROT_ETH,
1137 				NH_FLD_ETH_TYPE,
1138 				&spec->type,
1139 				&mask->type,
1140 				sizeof(rte_be16_t));
1141 		if (ret) {
1142 			DPAA2_PMD_ERR("QoS NH_FLD_ETH_TYPE rule data set failed");
1143 			return -1;
1144 		}
1145 
1146 		ret = dpaa2_flow_rule_data_set(
1147 				&priv->extract.tc_key_extract[group],
1148 				&flow->fs_rule,
1149 				NET_PROT_ETH,
1150 				NH_FLD_ETH_TYPE,
1151 				&spec->type,
1152 				&mask->type,
1153 				sizeof(rte_be16_t));
1154 		if (ret) {
1155 			DPAA2_PMD_ERR("FS NH_FLD_ETH_TYPE rule data set failed");
1156 			return -1;
1157 		}
1158 	}
1159 
1160 	(*device_configured) |= local_cfg;
1161 
1162 	return 0;
1163 }
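
/*
 * Illustrative testpmd command handled by the function above
 * (assumed syntax):
 *   flow create 0 ingress pattern eth src is 00:11:22:33:44:55 / end actions queue index 2 / end
 * It adds ETH_SA extracts to the QoS and FS tables when missing and
 * writes the address into the rule key/mask.
 */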
1164 
1165 static int
1166 dpaa2_configure_flow_vlan(struct rte_flow *flow,
1167 			  struct rte_eth_dev *dev,
1168 			  const struct rte_flow_attr *attr,
1169 			  const struct rte_flow_item *pattern,
1170 			  const struct rte_flow_action actions[] __rte_unused,
1171 			  struct rte_flow_error *error __rte_unused,
1172 			  int *device_configured)
1173 {
1174 	int index, ret;
1175 	int local_cfg = 0;
1176 	uint32_t group;
1177 	const struct rte_flow_item_vlan *spec, *mask;
1178 
1179 	const struct rte_flow_item_vlan *last __rte_unused;
1180 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1181 
1182 	group = attr->group;
1183 
1184 	/* Parse pattern list to get the matching parameters */
1185 	spec    = (const struct rte_flow_item_vlan *)pattern->spec;
1186 	last    = (const struct rte_flow_item_vlan *)pattern->last;
1187 	mask    = (const struct rte_flow_item_vlan *)
1188 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_vlan_mask);
1189 
1190 	/* Get traffic class index and flow id to be configured */
1191 	flow->tc_id = group;
1192 	flow->tc_index = attr->priority;
1193 
1194 	if (!spec) {
1195 		/* No particular field of the vlan header matters here,
1196 		 * only the vlan protocol itself.
1197 		 */
1198 		/* The Eth type is actually used for VLAN classification.
1199 		 */
1200 		struct proto_discrimination proto;
1201 
1202 		index = dpaa2_flow_extract_search(
1203 				&priv->extract.qos_key_extract.dpkg,
1204 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1205 		if (index < 0) {
1206 			ret = dpaa2_flow_proto_discrimination_extract(
1207 						&priv->extract.qos_key_extract,
1208 						RTE_FLOW_ITEM_TYPE_ETH);
1209 			if (ret) {
1210 				DPAA2_PMD_ERR(
1211 				"QoS Ext ETH_TYPE to discriminate VLAN failed");
1212 
1213 				return -1;
1214 			}
1215 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1216 		}
1217 
1218 		index = dpaa2_flow_extract_search(
1219 				&priv->extract.tc_key_extract[group].dpkg,
1220 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1221 		if (index < 0) {
1222 			ret = dpaa2_flow_proto_discrimination_extract(
1223 					&priv->extract.tc_key_extract[group],
1224 					RTE_FLOW_ITEM_TYPE_ETH);
1225 			if (ret) {
1226 				DPAA2_PMD_ERR(
1227 				"FS Ext ETH_TYPE to discriminate VLAN failed.");
1228 
1229 				return -1;
1230 			}
1231 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1232 		}
1233 
1234 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1235 		if (ret) {
1236 			DPAA2_PMD_ERR(
1237 			"Move ipaddr before VLAN discrimination set failed");
1238 			return -1;
1239 		}
1240 
1241 		proto.type = RTE_FLOW_ITEM_TYPE_ETH;
1242 		proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
1243 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1244 							proto, group);
1245 		if (ret) {
1246 			DPAA2_PMD_ERR("VLAN discrimination rule set failed");
1247 			return -1;
1248 		}
1249 
1250 		(*device_configured) |= local_cfg;
1251 
1252 		return 0;
1253 	}
1254 
1255 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
1256 		RTE_FLOW_ITEM_TYPE_VLAN)) {
1257 		DPAA2_PMD_WARN("Extract field(s) of vlan not supported.");
1258 
1259 		return -1;
1260 	}
1261 
1262 	if (!mask->tci)
1263 		return 0;
1264 
1265 	index = dpaa2_flow_extract_search(
1266 				&priv->extract.qos_key_extract.dpkg,
1267 				NET_PROT_VLAN, NH_FLD_VLAN_TCI);
1268 	if (index < 0) {
1269 		ret = dpaa2_flow_extract_add(
1270 						&priv->extract.qos_key_extract,
1271 						NET_PROT_VLAN,
1272 						NH_FLD_VLAN_TCI,
1273 						sizeof(rte_be16_t));
1274 		if (ret) {
1275 			DPAA2_PMD_ERR("QoS Extract add VLAN_TCI failed.");
1276 
1277 			return -1;
1278 		}
1279 		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1280 	}
1281 
1282 	index = dpaa2_flow_extract_search(
1283 			&priv->extract.tc_key_extract[group].dpkg,
1284 			NET_PROT_VLAN, NH_FLD_VLAN_TCI);
1285 	if (index < 0) {
1286 		ret = dpaa2_flow_extract_add(
1287 				&priv->extract.tc_key_extract[group],
1288 				NET_PROT_VLAN,
1289 				NH_FLD_VLAN_TCI,
1290 				sizeof(rte_be16_t));
1291 		if (ret) {
1292 			DPAA2_PMD_ERR("FS Extract add VLAN_TCI failed.");
1293 
1294 			return -1;
1295 		}
1296 		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1297 	}
1298 
1299 	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1300 	if (ret) {
1301 		DPAA2_PMD_ERR(
1302 			"Move ipaddr before VLAN TCI rule set failed");
1303 		return -1;
1304 	}
1305 
1306 	ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
1307 				&flow->qos_rule,
1308 				NET_PROT_VLAN,
1309 				NH_FLD_VLAN_TCI,
1310 				&spec->tci,
1311 				&mask->tci,
1312 				sizeof(rte_be16_t));
1313 	if (ret) {
1314 		DPAA2_PMD_ERR("QoS NH_FLD_VLAN_TCI rule data set failed");
1315 		return -1;
1316 	}
1317 
1318 	ret = dpaa2_flow_rule_data_set(
1319 			&priv->extract.tc_key_extract[group],
1320 			&flow->fs_rule,
1321 			NET_PROT_VLAN,
1322 			NH_FLD_VLAN_TCI,
1323 			&spec->tci,
1324 			&mask->tci,
1325 			sizeof(rte_be16_t));
1326 	if (ret) {
1327 		DPAA2_PMD_ERR("FS NH_FLD_VLAN_TCI rule data set failed");
1328 		return -1;
1329 	}
1330 
1331 	(*device_configured) |= local_cfg;
1332 
1333 	return 0;
1334 }
1335 
1336 static int
1337 dpaa2_configure_flow_ip_discrimation(
1338 	struct dpaa2_dev_priv *priv, struct rte_flow *flow,
1339 	const struct rte_flow_item *pattern,
1340 	int *local_cfg,	int *device_configured,
1341 	uint32_t group)
1342 {
1343 	int index, ret;
1344 	struct proto_discrimination proto;
1345 
1346 	index = dpaa2_flow_extract_search(
1347 			&priv->extract.qos_key_extract.dpkg,
1348 			NET_PROT_ETH, NH_FLD_ETH_TYPE);
1349 	if (index < 0) {
1350 		ret = dpaa2_flow_proto_discrimination_extract(
1351 				&priv->extract.qos_key_extract,
1352 				RTE_FLOW_ITEM_TYPE_ETH);
1353 		if (ret) {
1354 			DPAA2_PMD_ERR(
1355 			"QoS Extract ETH_TYPE to discriminate IP failed.");
1356 			return -1;
1357 		}
1358 		(*local_cfg) |= DPAA2_QOS_TABLE_RECONFIGURE;
1359 	}
1360 
1361 	index = dpaa2_flow_extract_search(
1362 			&priv->extract.tc_key_extract[group].dpkg,
1363 			NET_PROT_ETH, NH_FLD_ETH_TYPE);
1364 	if (index < 0) {
1365 		ret = dpaa2_flow_proto_discrimination_extract(
1366 				&priv->extract.tc_key_extract[group],
1367 				RTE_FLOW_ITEM_TYPE_ETH);
1368 		if (ret) {
1369 			DPAA2_PMD_ERR(
1370 			"FS Extract ETH_TYPE to discriminate IP failed.");
1371 			return -1;
1372 		}
1373 		(*local_cfg) |= DPAA2_FS_TABLE_RECONFIGURE;
1374 	}
1375 
1376 	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1377 	if (ret) {
1378 		DPAA2_PMD_ERR(
1379 			"Move ipaddr before IP discrimination set failed");
1380 		return -1;
1381 	}
1382 
1383 	proto.type = RTE_FLOW_ITEM_TYPE_ETH;
1384 	if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4)
1385 		proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
1386 	else
1387 		proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
1388 	ret = dpaa2_flow_proto_discrimination_rule(priv, flow, proto, group);
1389 	if (ret) {
1390 		DPAA2_PMD_ERR("IP discrimination rule set failed");
1391 		return -1;
1392 	}
1393 
1394 	(*device_configured) |= (*local_cfg);
1395 
1396 	return 0;
1397 }
1398 
1399 
1400 static int
1401 dpaa2_configure_flow_generic_ip(
1402 	struct rte_flow *flow,
1403 	struct rte_eth_dev *dev,
1404 	const struct rte_flow_attr *attr,
1405 	const struct rte_flow_item *pattern,
1406 	const struct rte_flow_action actions[] __rte_unused,
1407 	struct rte_flow_error *error __rte_unused,
1408 	int *device_configured)
1409 {
1410 	int index, ret;
1411 	int local_cfg = 0;
1412 	uint32_t group;
1413 	const struct rte_flow_item_ipv4 *spec_ipv4 = 0,
1414 		*mask_ipv4 = 0;
1415 	const struct rte_flow_item_ipv6 *spec_ipv6 = 0,
1416 		*mask_ipv6 = 0;
1417 	const void *key, *mask;
1418 	enum net_prot prot;
1419 
1420 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1421 	const char zero_cmp[NH_FLD_IPV6_ADDR_SIZE] = {0};
1422 	int size;
1423 
1424 	group = attr->group;
1425 
1426 	/* Parse pattern list to get the matching parameters */
1427 	if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1428 		spec_ipv4 = (const struct rte_flow_item_ipv4 *)pattern->spec;
1429 		mask_ipv4 = (const struct rte_flow_item_ipv4 *)
1430 			(pattern->mask ? pattern->mask :
1431 					&dpaa2_flow_item_ipv4_mask);
1432 	} else {
1433 		spec_ipv6 = (const struct rte_flow_item_ipv6 *)pattern->spec;
1434 		mask_ipv6 = (const struct rte_flow_item_ipv6 *)
1435 			(pattern->mask ? pattern->mask :
1436 					&dpaa2_flow_item_ipv6_mask);
1437 	}
1438 
1439 	/* Get traffic class index and flow id to be configured */
1440 	flow->tc_id = group;
1441 	flow->tc_index = attr->priority;
1442 
1443 	ret = dpaa2_configure_flow_ip_discrimation(priv,
1444 			flow, pattern, &local_cfg,
1445 			device_configured, group);
1446 	if (ret) {
1447 		DPAA2_PMD_ERR("IP discrimination failed!");
1448 		return -1;
1449 	}
1450 
1451 	if (!spec_ipv4 && !spec_ipv6)
1452 		return 0;
1453 
1454 	if (mask_ipv4) {
1455 		if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv4,
1456 			RTE_FLOW_ITEM_TYPE_IPV4)) {
1457 			DPAA2_PMD_WARN("Extract field(s) of IPv4 not supported.");
1458 
1459 			return -1;
1460 		}
1461 	}
1462 
1463 	if (mask_ipv6) {
1464 		if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv6,
1465 			RTE_FLOW_ITEM_TYPE_IPV6)) {
1466 			DPAA2_PMD_WARN("Extract field(s) of IPv6 not supported.");
1467 
1468 			return -1;
1469 		}
1470 	}
1471 
1472 	if (mask_ipv4 && (mask_ipv4->hdr.src_addr ||
1473 		mask_ipv4->hdr.dst_addr)) {
1474 		flow->ipaddr_rule.ipaddr_type = FLOW_IPV4_ADDR;
1475 	} else if (mask_ipv6 &&
1476 		(memcmp((const char *)mask_ipv6->hdr.src_addr,
1477 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE) ||
1478 		memcmp((const char *)mask_ipv6->hdr.dst_addr,
1479 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1480 		flow->ipaddr_rule.ipaddr_type = FLOW_IPV6_ADDR;
1481 	}
1482 
1483 	if ((mask_ipv4 && mask_ipv4->hdr.src_addr) ||
1484 		(mask_ipv6 &&
1485 			memcmp((const char *)mask_ipv6->hdr.src_addr,
1486 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1487 		index = dpaa2_flow_extract_search(
1488 				&priv->extract.qos_key_extract.dpkg,
1489 				NET_PROT_IP, NH_FLD_IP_SRC);
1490 		if (index < 0) {
1491 			ret = dpaa2_flow_extract_add(
1492 					&priv->extract.qos_key_extract,
1493 					NET_PROT_IP,
1494 					NH_FLD_IP_SRC,
1495 					0);
1496 			if (ret) {
1497 				DPAA2_PMD_ERR("QoS Extract add IP_SRC failed.");
1498 
1499 				return -1;
1500 			}
1501 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1502 		}
1503 
1504 		index = dpaa2_flow_extract_search(
1505 				&priv->extract.tc_key_extract[group].dpkg,
1506 				NET_PROT_IP, NH_FLD_IP_SRC);
1507 		if (index < 0) {
1508 			ret = dpaa2_flow_extract_add(
1509 					&priv->extract.tc_key_extract[group],
1510 					NET_PROT_IP,
1511 					NH_FLD_IP_SRC,
1512 					0);
1513 			if (ret) {
1514 				DPAA2_PMD_ERR("FS Extract add IP_SRC failed.");
1515 
1516 				return -1;
1517 			}
1518 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1519 		}
1520 
1521 		if (spec_ipv4)
1522 			key = &spec_ipv4->hdr.src_addr;
1523 		else
1524 			key = &spec_ipv6->hdr.src_addr[0];
1525 		if (mask_ipv4) {
1526 			mask = &mask_ipv4->hdr.src_addr;
1527 			size = NH_FLD_IPV4_ADDR_SIZE;
1528 			prot = NET_PROT_IPV4;
1529 		} else {
1530 			mask = &mask_ipv6->hdr.src_addr[0];
1531 			size = NH_FLD_IPV6_ADDR_SIZE;
1532 			prot = NET_PROT_IPV6;
1533 		}
1534 
1535 		ret = dpaa2_flow_rule_data_set(
1536 				&priv->extract.qos_key_extract,
1537 				&flow->qos_rule,
1538 				prot, NH_FLD_IP_SRC,
1539 				key, mask, size);
1540 		if (ret) {
1541 			DPAA2_PMD_ERR("QoS NH_FLD_IP_SRC rule data set failed");
1542 			return -1;
1543 		}
1544 
1545 		ret = dpaa2_flow_rule_data_set(
1546 				&priv->extract.tc_key_extract[group],
1547 				&flow->fs_rule,
1548 				prot, NH_FLD_IP_SRC,
1549 				key, mask, size);
1550 		if (ret) {
1551 			DPAA2_PMD_ERR("FS NH_FLD_IP_SRC rule data set failed");
1552 			return -1;
1553 		}
1554 
1555 		flow->ipaddr_rule.qos_ipsrc_offset =
1556 			dpaa2_flow_extract_key_offset(
1557 				&priv->extract.qos_key_extract,
1558 				prot, NH_FLD_IP_SRC);
1559 		flow->ipaddr_rule.fs_ipsrc_offset =
1560 			dpaa2_flow_extract_key_offset(
1561 				&priv->extract.tc_key_extract[group],
1562 				prot, NH_FLD_IP_SRC);
1563 	}
1564 
1565 	if ((mask_ipv4 && mask_ipv4->hdr.dst_addr) ||
1566 		(mask_ipv6 &&
1567 			memcmp((const char *)mask_ipv6->hdr.dst_addr,
1568 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1569 		index = dpaa2_flow_extract_search(
1570 				&priv->extract.qos_key_extract.dpkg,
1571 				NET_PROT_IP, NH_FLD_IP_DST);
1572 		if (index < 0) {
1573 			if (mask_ipv4)
1574 				size = NH_FLD_IPV4_ADDR_SIZE;
1575 			else
1576 				size = NH_FLD_IPV6_ADDR_SIZE;
1577 			ret = dpaa2_flow_extract_add(
1578 					&priv->extract.qos_key_extract,
1579 					NET_PROT_IP,
1580 					NH_FLD_IP_DST,
1581 					size);
1582 			if (ret) {
1583 				DPAA2_PMD_ERR("QoS Extract add IP_DST failed.");
1584 
1585 				return -1;
1586 			}
1587 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1588 		}
1589 
1590 		index = dpaa2_flow_extract_search(
1591 				&priv->extract.tc_key_extract[group].dpkg,
1592 				NET_PROT_IP, NH_FLD_IP_DST);
1593 		if (index < 0) {
1594 			if (mask_ipv4)
1595 				size = NH_FLD_IPV4_ADDR_SIZE;
1596 			else
1597 				size = NH_FLD_IPV6_ADDR_SIZE;
1598 			ret = dpaa2_flow_extract_add(
1599 					&priv->extract.tc_key_extract[group],
1600 					NET_PROT_IP,
1601 					NH_FLD_IP_DST,
1602 					size);
1603 			if (ret) {
1604 				DPAA2_PMD_ERR("FS Extract add IP_DST failed.");
1605 
1606 				return -1;
1607 			}
1608 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1609 		}
1610 
1611 		if (spec_ipv4)
1612 			key = &spec_ipv4->hdr.dst_addr;
1613 		else
1614 			key = spec_ipv6->hdr.dst_addr;
1615 		if (mask_ipv4) {
1616 			mask = &mask_ipv4->hdr.dst_addr;
1617 			size = NH_FLD_IPV4_ADDR_SIZE;
1618 			prot = NET_PROT_IPV4;
1619 		} else {
1620 			mask = &mask_ipv6->hdr.dst_addr[0];
1621 			size = NH_FLD_IPV6_ADDR_SIZE;
1622 			prot = NET_PROT_IPV6;
1623 		}
1624 
1625 		ret = dpaa2_flow_rule_data_set(
1626 				&priv->extract.qos_key_extract,
1627 				&flow->qos_rule,
1628 				prot, NH_FLD_IP_DST,
1629 				key, mask, size);
1630 		if (ret) {
1631 			DPAA2_PMD_ERR("QoS NH_FLD_IP_DST rule data set failed");
1632 			return -1;
1633 		}
1634 
1635 		ret = dpaa2_flow_rule_data_set(
1636 				&priv->extract.tc_key_extract[group],
1637 				&flow->fs_rule,
1638 				prot, NH_FLD_IP_DST,
1639 				key, mask, size);
1640 		if (ret) {
1641 			DPAA2_PMD_ERR("FS NH_FLD_IP_DST rule data set failed");
1642 			return -1;
1643 		}
1644 		flow->ipaddr_rule.qos_ipdst_offset =
1645 			dpaa2_flow_extract_key_offset(
1646 				&priv->extract.qos_key_extract,
1647 				prot, NH_FLD_IP_DST);
1648 		flow->ipaddr_rule.fs_ipdst_offset =
1649 			dpaa2_flow_extract_key_offset(
1650 				&priv->extract.tc_key_extract[group],
1651 				prot, NH_FLD_IP_DST);
1652 	}
1653 
1654 	if ((mask_ipv4 && mask_ipv4->hdr.next_proto_id) ||
1655 		(mask_ipv6 && mask_ipv6->hdr.proto)) {
1656 		index = dpaa2_flow_extract_search(
1657 				&priv->extract.qos_key_extract.dpkg,
1658 				NET_PROT_IP, NH_FLD_IP_PROTO);
1659 		if (index < 0) {
1660 			ret = dpaa2_flow_extract_add(
1661 				&priv->extract.qos_key_extract,
1662 				NET_PROT_IP,
1663 				NH_FLD_IP_PROTO,
1664 				NH_FLD_IP_PROTO_SIZE);
1665 			if (ret) {
1666 				DPAA2_PMD_ERR("QoS Extract add IP_PROTO failed.");
1667 
1668 				return -1;
1669 			}
1670 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1671 		}
1672 
1673 		index = dpaa2_flow_extract_search(
1674 				&priv->extract.tc_key_extract[group].dpkg,
1675 				NET_PROT_IP, NH_FLD_IP_PROTO);
1676 		if (index < 0) {
1677 			ret = dpaa2_flow_extract_add(
1678 					&priv->extract.tc_key_extract[group],
1679 					NET_PROT_IP,
1680 					NH_FLD_IP_PROTO,
1681 					NH_FLD_IP_PROTO_SIZE);
1682 			if (ret) {
1683 				DPAA2_PMD_ERR("FS Extract add IP_PROTO failed.");
1684 
1685 				return -1;
1686 			}
1687 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1688 		}
1689 
1690 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1691 		if (ret) {
1692 			DPAA2_PMD_ERR(
1693 				"Move ipaddr before NH_FLD_IP_PROTO rule set failed");
1694 			return -1;
1695 		}
1696 
1697 		if (spec_ipv4)
1698 			key = &spec_ipv4->hdr.next_proto_id;
1699 		else
1700 			key = &spec_ipv6->hdr.proto;
1701 		if (mask_ipv4)
1702 			mask = &mask_ipv4->hdr.next_proto_id;
1703 		else
1704 			mask = &mask_ipv6->hdr.proto;
1705 
1706 		ret = dpaa2_flow_rule_data_set(
1707 				&priv->extract.qos_key_extract,
1708 				&flow->qos_rule,
1709 				NET_PROT_IP,
1710 				NH_FLD_IP_PROTO,
1711 				key, mask, NH_FLD_IP_PROTO_SIZE);
1712 		if (ret) {
1713 			DPAA2_PMD_ERR("QoS NH_FLD_IP_PROTO rule data set failed");
1714 			return -1;
1715 		}
1716 
1717 		ret = dpaa2_flow_rule_data_set(
1718 				&priv->extract.tc_key_extract[group],
1719 				&flow->fs_rule,
1720 				NET_PROT_IP,
1721 				NH_FLD_IP_PROTO,
1722 				key, mask, NH_FLD_IP_PROTO_SIZE);
1723 		if (ret) {
1724 			DPAA2_PMD_ERR("FS NH_FLD_IP_PROTO rule data set failed");
1725 			return -1;
1726 		}
1727 	}
1728 
1729 	(*device_configured) |= local_cfg;
1730 
1731 	return 0;
1732 }
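
/*
 * Illustrative testpmd command handled by the function above
 * (assumed syntax):
 *   flow create 0 ingress pattern ipv4 src is 192.168.1.1 / end actions queue index 3 / end
 * The ETH_TYPE discrimination rule is set first, then the IPv4 source
 * address rule; the IP address bytes are kept at the tail of the key.
 */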
1733 
1734 static int
1735 dpaa2_configure_flow_icmp(struct rte_flow *flow,
1736 			  struct rte_eth_dev *dev,
1737 			  const struct rte_flow_attr *attr,
1738 			  const struct rte_flow_item *pattern,
1739 			  const struct rte_flow_action actions[] __rte_unused,
1740 			  struct rte_flow_error *error __rte_unused,
1741 			  int *device_configured)
1742 {
1743 	int index, ret;
1744 	int local_cfg = 0;
1745 	uint32_t group;
1746 	const struct rte_flow_item_icmp *spec, *mask;
1747 
1748 	const struct rte_flow_item_icmp *last __rte_unused;
1749 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1750 
1751 	group = attr->group;
1752 
1753 	/* Parse pattern list to get the matching parameters */
1754 	spec    = (const struct rte_flow_item_icmp *)pattern->spec;
1755 	last    = (const struct rte_flow_item_icmp *)pattern->last;
1756 	mask    = (const struct rte_flow_item_icmp *)
1757 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_icmp_mask);
1758 
1759 	/* Get traffic class index and flow id to be configured */
1760 	flow->tc_id = group;
1761 	flow->tc_index = attr->priority;
1762 
1763 	if (!spec) {
1764 		/* No particular field of the ICMP header matters here,
1765 		 * only the ICMP protocol itself.
1766 		 * Example: flow create 0 ingress pattern icmp /
1767 		 */
1768 		/* The next-proto field of the generic IP header is actually
1769 		 * used for ICMP identification.
1770 		 */
1771 		struct proto_discrimination proto;
1772 
1773 		index = dpaa2_flow_extract_search(
1774 				&priv->extract.qos_key_extract.dpkg,
1775 				NET_PROT_IP, NH_FLD_IP_PROTO);
1776 		if (index < 0) {
1777 			ret = dpaa2_flow_proto_discrimination_extract(
1778 					&priv->extract.qos_key_extract,
1779 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1780 			if (ret) {
1781 				DPAA2_PMD_ERR(
1782 					"QoS Extract IP protocol to discriminate ICMP failed.");
1783 
1784 				return -1;
1785 			}
1786 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1787 		}
1788 
1789 		index = dpaa2_flow_extract_search(
1790 				&priv->extract.tc_key_extract[group].dpkg,
1791 				NET_PROT_IP, NH_FLD_IP_PROTO);
1792 		if (index < 0) {
1793 			ret = dpaa2_flow_proto_discrimination_extract(
1794 					&priv->extract.tc_key_extract[group],
1795 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1796 			if (ret) {
1797 				DPAA2_PMD_ERR(
1798 					"FS Extract IP protocol to discriminate ICMP failed.");
1799 
1800 				return -1;
1801 			}
1802 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1803 		}
1804 
1805 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1806 		if (ret) {
1807 			DPAA2_PMD_ERR(
1808 				"Move IP addr before ICMP discrimination set failed");
1809 			return -1;
1810 		}
1811 
1812 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
1813 		proto.ip_proto = IPPROTO_ICMP;
1814 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1815 							proto, group);
1816 		if (ret) {
1817 			DPAA2_PMD_ERR("ICMP discrimination rule set failed");
1818 			return -1;
1819 		}
1820 
1821 		(*device_configured) |= local_cfg;
1822 
1823 		return 0;
1824 	}
1825 
1826 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
1827 		RTE_FLOW_ITEM_TYPE_ICMP)) {
1828 		DPAA2_PMD_WARN("Extract field(s) of ICMP not supported.");
1829 
1830 		return -1;
1831 	}
1832 
1833 	if (mask->hdr.icmp_type) {
1834 		index = dpaa2_flow_extract_search(
1835 				&priv->extract.qos_key_extract.dpkg,
1836 				NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
1837 		if (index < 0) {
1838 			ret = dpaa2_flow_extract_add(
1839 					&priv->extract.qos_key_extract,
1840 					NET_PROT_ICMP,
1841 					NH_FLD_ICMP_TYPE,
1842 					NH_FLD_ICMP_TYPE_SIZE);
1843 			if (ret) {
1844 				DPAA2_PMD_ERR("QoS Extract add ICMP_TYPE failed.");
1845 
1846 				return -1;
1847 			}
1848 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1849 		}
1850 
1851 		index = dpaa2_flow_extract_search(
1852 				&priv->extract.tc_key_extract[group].dpkg,
1853 				NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
1854 		if (index < 0) {
1855 			ret = dpaa2_flow_extract_add(
1856 					&priv->extract.tc_key_extract[group],
1857 					NET_PROT_ICMP,
1858 					NH_FLD_ICMP_TYPE,
1859 					NH_FLD_ICMP_TYPE_SIZE);
1860 			if (ret) {
1861 				DPAA2_PMD_ERR("FS Extract add ICMP_TYPE failed.");
1862 
1863 				return -1;
1864 			}
1865 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1866 		}
1867 
1868 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1869 		if (ret) {
1870 			DPAA2_PMD_ERR(
1871 				"Move ipaddr before ICMP TYPE set failed");
1872 			return -1;
1873 		}
1874 
1875 		ret = dpaa2_flow_rule_data_set(
1876 				&priv->extract.qos_key_extract,
1877 				&flow->qos_rule,
1878 				NET_PROT_ICMP,
1879 				NH_FLD_ICMP_TYPE,
1880 				&spec->hdr.icmp_type,
1881 				&mask->hdr.icmp_type,
1882 				NH_FLD_ICMP_TYPE_SIZE);
1883 		if (ret) {
1884 			DPAA2_PMD_ERR("QoS NH_FLD_ICMP_TYPE rule data set failed");
1885 			return -1;
1886 		}
1887 
1888 		ret = dpaa2_flow_rule_data_set(
1889 				&priv->extract.tc_key_extract[group],
1890 				&flow->fs_rule,
1891 				NET_PROT_ICMP,
1892 				NH_FLD_ICMP_TYPE,
1893 				&spec->hdr.icmp_type,
1894 				&mask->hdr.icmp_type,
1895 				NH_FLD_ICMP_TYPE_SIZE);
1896 		if (ret) {
1897 			DPAA2_PMD_ERR("FS NH_FLD_ICMP_TYPE rule data set failed");
1898 			return -1;
1899 		}
1900 	}
1901 
1902 	if (mask->hdr.icmp_code) {
1903 		index = dpaa2_flow_extract_search(
1904 				&priv->extract.qos_key_extract.dpkg,
1905 				NET_PROT_ICMP, NH_FLD_ICMP_CODE);
1906 		if (index < 0) {
1907 			ret = dpaa2_flow_extract_add(
1908 					&priv->extract.qos_key_extract,
1909 					NET_PROT_ICMP,
1910 					NH_FLD_ICMP_CODE,
1911 					NH_FLD_ICMP_CODE_SIZE);
1912 			if (ret) {
1913 				DPAA2_PMD_ERR("QoS Extract add ICMP_CODE failed.");
1914 
1915 				return -1;
1916 			}
1917 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1918 		}
1919 
1920 		index = dpaa2_flow_extract_search(
1921 				&priv->extract.tc_key_extract[group].dpkg,
1922 				NET_PROT_ICMP, NH_FLD_ICMP_CODE);
1923 		if (index < 0) {
1924 			ret = dpaa2_flow_extract_add(
1925 					&priv->extract.tc_key_extract[group],
1926 					NET_PROT_ICMP,
1927 					NH_FLD_ICMP_CODE,
1928 					NH_FLD_ICMP_CODE_SIZE);
1929 			if (ret) {
1930 				DPAA2_PMD_ERR("FS Extract add ICMP_CODE failed.");
1931 
1932 				return -1;
1933 			}
1934 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1935 		}
1936 
1937 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1938 		if (ret) {
1939 			DPAA2_PMD_ERR(
1940 				"Move ipaddr before ICMP CODE set failed");
1941 			return -1;
1942 		}
1943 
1944 		ret = dpaa2_flow_rule_data_set(
1945 				&priv->extract.qos_key_extract,
1946 				&flow->qos_rule,
1947 				NET_PROT_ICMP,
1948 				NH_FLD_ICMP_CODE,
1949 				&spec->hdr.icmp_code,
1950 				&mask->hdr.icmp_code,
1951 				NH_FLD_ICMP_CODE_SIZE);
1952 		if (ret) {
1953 			DPAA2_PMD_ERR("QoS NH_FLD_ICMP_CODE rule data set failed");
1954 			return -1;
1955 		}
1956 
1957 		ret = dpaa2_flow_rule_data_set(
1958 				&priv->extract.tc_key_extract[group],
1959 				&flow->fs_rule,
1960 				NET_PROT_ICMP,
1961 				NH_FLD_ICMP_CODE,
1962 				&spec->hdr.icmp_code,
1963 				&mask->hdr.icmp_code,
1964 				NH_FLD_ICMP_CODE_SIZE);
1965 		if (ret) {
1966 			DPAA2_PMD_ERR("FS NH_FLD_ICMP_CODE rule data set failed");
1967 			return -1;
1968 		}
1969 	}
1970 
1971 	(*device_configured) |= local_cfg;
1972 
1973 	return 0;
1974 }
1975 
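/* Build the QoS/FS extracts and rule data for a UDP pattern item.
 * Without a spec, or when MC/WRIOP cannot identify L4 by ports
 * (mc_l4_port_identification unset), the flow falls back to matching
 * the IP next-protocol field against IPPROTO_UDP. Otherwise the UDP
 * source/destination ports selected by the mask are added to the QoS
 * and FS key extracts and copied into the flow's rule data.
 */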
1976 static int
1977 dpaa2_configure_flow_udp(struct rte_flow *flow,
1978 			 struct rte_eth_dev *dev,
1979 			  const struct rte_flow_attr *attr,
1980 			  const struct rte_flow_item *pattern,
1981 			  const struct rte_flow_action actions[] __rte_unused,
1982 			  struct rte_flow_error *error __rte_unused,
1983 			  int *device_configured)
1984 {
1985 	int index, ret;
1986 	int local_cfg = 0;
1987 	uint32_t group;
1988 	const struct rte_flow_item_udp *spec, *mask;
1989 
1990 	const struct rte_flow_item_udp *last __rte_unused;
1991 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1992 
1993 	group = attr->group;
1994 
1995 	/* Parse pattern list to get the matching parameters */
1996 	spec    = (const struct rte_flow_item_udp *)pattern->spec;
1997 	last    = (const struct rte_flow_item_udp *)pattern->last;
1998 	mask    = (const struct rte_flow_item_udp *)
1999 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_udp_mask);
2000 
2001 	/* Get traffic class index and flow id to be configured */
2002 	flow->tc_id = group;
2003 	flow->tc_index = attr->priority;
2004 
2005 	if (!spec || !mc_l4_port_identification) {
2006 		struct proto_discrimination proto;
2007 
2008 		index = dpaa2_flow_extract_search(
2009 				&priv->extract.qos_key_extract.dpkg,
2010 				NET_PROT_IP, NH_FLD_IP_PROTO);
2011 		if (index < 0) {
2012 			ret = dpaa2_flow_proto_discrimination_extract(
2013 					&priv->extract.qos_key_extract,
2014 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2015 			if (ret) {
2016 				DPAA2_PMD_ERR(
2017 					"QoS Extract IP protocol to discriminate UDP failed.");
2018 
2019 				return -1;
2020 			}
2021 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2022 		}
2023 
2024 		index = dpaa2_flow_extract_search(
2025 				&priv->extract.tc_key_extract[group].dpkg,
2026 				NET_PROT_IP, NH_FLD_IP_PROTO);
2027 		if (index < 0) {
2028 			ret = dpaa2_flow_proto_discrimination_extract(
2029 				&priv->extract.tc_key_extract[group],
2030 				DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2031 			if (ret) {
2032 				DPAA2_PMD_ERR(
2033 					"FS Extract IP protocol to discriminate UDP failed.");
2034 
2035 				return -1;
2036 			}
2037 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2038 		}
2039 
2040 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2041 		if (ret) {
2042 			DPAA2_PMD_ERR(
2043 				"Move IP addr before UDP discrimination set failed");
2044 			return -1;
2045 		}
2046 
2047 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2048 		proto.ip_proto = IPPROTO_UDP;
2049 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2050 							proto, group);
2051 		if (ret) {
2052 			DPAA2_PMD_ERR("UDP discrimination rule set failed");
2053 			return -1;
2054 		}
2055 
2056 		(*device_configured) |= local_cfg;
2057 
2058 		if (!spec)
2059 			return 0;
2060 	}
2061 
2062 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2063 		RTE_FLOW_ITEM_TYPE_UDP)) {
2064 		DPAA2_PMD_WARN("Extract field(s) of UDP not supported.");
2065 
2066 		return -1;
2067 	}
2068 
2069 	if (mask->hdr.src_port) {
2070 		index = dpaa2_flow_extract_search(
2071 				&priv->extract.qos_key_extract.dpkg,
2072 				NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
2073 		if (index < 0) {
2074 			ret = dpaa2_flow_extract_add(
2075 					&priv->extract.qos_key_extract,
2076 				NET_PROT_UDP,
2077 				NH_FLD_UDP_PORT_SRC,
2078 				NH_FLD_UDP_PORT_SIZE);
2079 			if (ret) {
2080 				DPAA2_PMD_ERR("QoS Extract add UDP_SRC failed.");
2081 
2082 				return -1;
2083 			}
2084 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2085 		}
2086 
2087 		index = dpaa2_flow_extract_search(
2088 				&priv->extract.tc_key_extract[group].dpkg,
2089 				NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
2090 		if (index < 0) {
2091 			ret = dpaa2_flow_extract_add(
2092 					&priv->extract.tc_key_extract[group],
2093 					NET_PROT_UDP,
2094 					NH_FLD_UDP_PORT_SRC,
2095 					NH_FLD_UDP_PORT_SIZE);
2096 			if (ret) {
2097 				DPAA2_PMD_ERR("FS Extract add UDP_SRC failed.");
2098 
2099 				return -1;
2100 			}
2101 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2102 		}
2103 
2104 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2105 		if (ret) {
2106 			DPAA2_PMD_ERR(
2107 				"Move ipaddr before UDP_PORT_SRC set failed");
2108 			return -1;
2109 		}
2110 
2111 		ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
2112 				&flow->qos_rule,
2113 				NET_PROT_UDP,
2114 				NH_FLD_UDP_PORT_SRC,
2115 				&spec->hdr.src_port,
2116 				&mask->hdr.src_port,
2117 				NH_FLD_UDP_PORT_SIZE);
2118 		if (ret) {
2119 			DPAA2_PMD_ERR(
2120 				"QoS NH_FLD_UDP_PORT_SRC rule data set failed");
2121 			return -1;
2122 		}
2123 
2124 		ret = dpaa2_flow_rule_data_set(
2125 				&priv->extract.tc_key_extract[group],
2126 				&flow->fs_rule,
2127 				NET_PROT_UDP,
2128 				NH_FLD_UDP_PORT_SRC,
2129 				&spec->hdr.src_port,
2130 				&mask->hdr.src_port,
2131 				NH_FLD_UDP_PORT_SIZE);
2132 		if (ret) {
2133 			DPAA2_PMD_ERR(
2134 				"FS NH_FLD_UDP_PORT_SRC rule data set failed");
2135 			return -1;
2136 		}
2137 	}
2138 
2139 	if (mask->hdr.dst_port) {
2140 		index = dpaa2_flow_extract_search(
2141 				&priv->extract.qos_key_extract.dpkg,
2142 				NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
2143 		if (index < 0) {
2144 			ret = dpaa2_flow_extract_add(
2145 					&priv->extract.qos_key_extract,
2146 					NET_PROT_UDP,
2147 					NH_FLD_UDP_PORT_DST,
2148 					NH_FLD_UDP_PORT_SIZE);
2149 			if (ret) {
2150 				DPAA2_PMD_ERR("QoS Extract add UDP_DST failed.");
2151 
2152 				return -1;
2153 			}
2154 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2155 		}
2156 
2157 		index = dpaa2_flow_extract_search(
2158 				&priv->extract.tc_key_extract[group].dpkg,
2159 				NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
2160 		if (index < 0) {
2161 			ret = dpaa2_flow_extract_add(
2162 					&priv->extract.tc_key_extract[group],
2163 					NET_PROT_UDP,
2164 					NH_FLD_UDP_PORT_DST,
2165 					NH_FLD_UDP_PORT_SIZE);
2166 			if (ret) {
2167 				DPAA2_PMD_ERR("FS Extract add UDP_DST failed.");
2168 
2169 				return -1;
2170 			}
2171 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2172 		}
2173 
2174 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2175 		if (ret) {
2176 			DPAA2_PMD_ERR(
2177 				"Move ipaddr before UDP_PORT_DST set failed");
2178 			return -1;
2179 		}
2180 
2181 		ret = dpaa2_flow_rule_data_set(
2182 				&priv->extract.qos_key_extract,
2183 				&flow->qos_rule,
2184 				NET_PROT_UDP,
2185 				NH_FLD_UDP_PORT_DST,
2186 				&spec->hdr.dst_port,
2187 				&mask->hdr.dst_port,
2188 				NH_FLD_UDP_PORT_SIZE);
2189 		if (ret) {
2190 			DPAA2_PMD_ERR(
2191 				"QoS NH_FLD_UDP_PORT_DST rule data set failed");
2192 			return -1;
2193 		}
2194 
2195 		ret = dpaa2_flow_rule_data_set(
2196 				&priv->extract.tc_key_extract[group],
2197 				&flow->fs_rule,
2198 				NET_PROT_UDP,
2199 				NH_FLD_UDP_PORT_DST,
2200 				&spec->hdr.dst_port,
2201 				&mask->hdr.dst_port,
2202 				NH_FLD_UDP_PORT_SIZE);
2203 		if (ret) {
2204 			DPAA2_PMD_ERR(
2205 				"FS NH_FLD_UDP_PORT_DST rule data set failed");
2206 			return -1;
2207 		}
2208 	}
2209 
2210 	(*device_configured) |= local_cfg;
2211 
2212 	return 0;
2213 }
2214 
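/* Build the QoS/FS extracts and rule data for a TCP pattern item.
 * The logic mirrors the UDP handler above: discriminate on the IP
 * next-protocol field (IPPROTO_TCP) when no spec is given or when the
 * MC cannot identify L4 by ports, otherwise match the TCP source
 * and/or destination port selected by the mask.
 */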
2215 static int
2216 dpaa2_configure_flow_tcp(struct rte_flow *flow,
2217 			 struct rte_eth_dev *dev,
2218 			 const struct rte_flow_attr *attr,
2219 			 const struct rte_flow_item *pattern,
2220 			 const struct rte_flow_action actions[] __rte_unused,
2221 			 struct rte_flow_error *error __rte_unused,
2222 			 int *device_configured)
2223 {
2224 	int index, ret;
2225 	int local_cfg = 0;
2226 	uint32_t group;
2227 	const struct rte_flow_item_tcp *spec, *mask;
2228 
2229 	const struct rte_flow_item_tcp *last __rte_unused;
2230 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2231 
2232 	group = attr->group;
2233 
2234 	/* Parse pattern list to get the matching parameters */
2235 	spec    = (const struct rte_flow_item_tcp *)pattern->spec;
2236 	last    = (const struct rte_flow_item_tcp *)pattern->last;
2237 	mask    = (const struct rte_flow_item_tcp *)
2238 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_tcp_mask);
2239 
2240 	/* Get traffic class index and flow id to be configured */
2241 	flow->tc_id = group;
2242 	flow->tc_index = attr->priority;
2243 
2244 	if (!spec || !mc_l4_port_identification) {
2245 		struct proto_discrimination proto;
2246 
2247 		index = dpaa2_flow_extract_search(
2248 				&priv->extract.qos_key_extract.dpkg,
2249 				NET_PROT_IP, NH_FLD_IP_PROTO);
2250 		if (index < 0) {
2251 			ret = dpaa2_flow_proto_discrimination_extract(
2252 					&priv->extract.qos_key_extract,
2253 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2254 			if (ret) {
2255 				DPAA2_PMD_ERR(
2256 					"QoS Extract IP protocol to discriminate TCP failed.");
2257 
2258 				return -1;
2259 			}
2260 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2261 		}
2262 
2263 		index = dpaa2_flow_extract_search(
2264 				&priv->extract.tc_key_extract[group].dpkg,
2265 				NET_PROT_IP, NH_FLD_IP_PROTO);
2266 		if (index < 0) {
2267 			ret = dpaa2_flow_proto_discrimination_extract(
2268 				&priv->extract.tc_key_extract[group],
2269 				DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2270 			if (ret) {
2271 				DPAA2_PMD_ERR(
2272 					"FS Extract IP protocol to discriminate TCP failed.");
2273 
2274 				return -1;
2275 			}
2276 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2277 		}
2278 
2279 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2280 		if (ret) {
2281 			DPAA2_PMD_ERR(
2282 				"Move IP addr before TCP discrimination set failed");
2283 			return -1;
2284 		}
2285 
2286 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2287 		proto.ip_proto = IPPROTO_TCP;
2288 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2289 							proto, group);
2290 		if (ret) {
2291 			DPAA2_PMD_ERR("TCP discrimination rule set failed");
2292 			return -1;
2293 		}
2294 
2295 		(*device_configured) |= local_cfg;
2296 
2297 		if (!spec)
2298 			return 0;
2299 	}
2300 
2301 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2302 		RTE_FLOW_ITEM_TYPE_TCP)) {
2303 		DPAA2_PMD_WARN("Extract field(s) of TCP not supported.");
2304 
2305 		return -1;
2306 	}
2307 
2308 	if (mask->hdr.src_port) {
2309 		index = dpaa2_flow_extract_search(
2310 				&priv->extract.qos_key_extract.dpkg,
2311 				NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
2312 		if (index < 0) {
2313 			ret = dpaa2_flow_extract_add(
2314 					&priv->extract.qos_key_extract,
2315 					NET_PROT_TCP,
2316 					NH_FLD_TCP_PORT_SRC,
2317 					NH_FLD_TCP_PORT_SIZE);
2318 			if (ret) {
2319 				DPAA2_PMD_ERR("QoS Extract add TCP_SRC failed.");
2320 
2321 				return -1;
2322 			}
2323 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2324 		}
2325 
2326 		index = dpaa2_flow_extract_search(
2327 				&priv->extract.tc_key_extract[group].dpkg,
2328 				NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
2329 		if (index < 0) {
2330 			ret = dpaa2_flow_extract_add(
2331 					&priv->extract.tc_key_extract[group],
2332 					NET_PROT_TCP,
2333 					NH_FLD_TCP_PORT_SRC,
2334 					NH_FLD_TCP_PORT_SIZE);
2335 			if (ret) {
2336 				DPAA2_PMD_ERR("FS Extract add TCP_SRC failed.");
2337 
2338 				return -1;
2339 			}
2340 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2341 		}
2342 
2343 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2344 		if (ret) {
2345 			DPAA2_PMD_ERR(
2346 				"Move ipaddr before TCP_PORT_SRC set failed");
2347 			return -1;
2348 		}
2349 
2350 		ret = dpaa2_flow_rule_data_set(
2351 				&priv->extract.qos_key_extract,
2352 				&flow->qos_rule,
2353 				NET_PROT_TCP,
2354 				NH_FLD_TCP_PORT_SRC,
2355 				&spec->hdr.src_port,
2356 				&mask->hdr.src_port,
2357 				NH_FLD_TCP_PORT_SIZE);
2358 		if (ret) {
2359 			DPAA2_PMD_ERR(
2360 				"QoS NH_FLD_TCP_PORT_SRC rule data set failed");
2361 			return -1;
2362 		}
2363 
2364 		ret = dpaa2_flow_rule_data_set(
2365 				&priv->extract.tc_key_extract[group],
2366 				&flow->fs_rule,
2367 				NET_PROT_TCP,
2368 				NH_FLD_TCP_PORT_SRC,
2369 				&spec->hdr.src_port,
2370 				&mask->hdr.src_port,
2371 				NH_FLD_TCP_PORT_SIZE);
2372 		if (ret) {
2373 			DPAA2_PMD_ERR(
2374 				"FS NH_FLD_TCP_PORT_SRC rule data set failed");
2375 			return -1;
2376 		}
2377 	}
2378 
2379 	if (mask->hdr.dst_port) {
2380 		index = dpaa2_flow_extract_search(
2381 				&priv->extract.qos_key_extract.dpkg,
2382 				NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
2383 		if (index < 0) {
2384 			ret = dpaa2_flow_extract_add(
2385 					&priv->extract.qos_key_extract,
2386 					NET_PROT_TCP,
2387 					NH_FLD_TCP_PORT_DST,
2388 					NH_FLD_TCP_PORT_SIZE);
2389 			if (ret) {
2390 				DPAA2_PMD_ERR("QoS Extract add TCP_DST failed.");
2391 
2392 				return -1;
2393 			}
2394 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2395 		}
2396 
2397 		index = dpaa2_flow_extract_search(
2398 				&priv->extract.tc_key_extract[group].dpkg,
2399 				NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
2400 		if (index < 0) {
2401 			ret = dpaa2_flow_extract_add(
2402 					&priv->extract.tc_key_extract[group],
2403 					NET_PROT_TCP,
2404 					NH_FLD_TCP_PORT_DST,
2405 					NH_FLD_TCP_PORT_SIZE);
2406 			if (ret) {
2407 				DPAA2_PMD_ERR("FS Extract add TCP_DST failed.");
2408 
2409 				return -1;
2410 			}
2411 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2412 		}
2413 
2414 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2415 		if (ret) {
2416 			DPAA2_PMD_ERR(
2417 				"Move ipaddr before TCP_PORT_DST set failed");
2418 			return -1;
2419 		}
2420 
2421 		ret = dpaa2_flow_rule_data_set(
2422 				&priv->extract.qos_key_extract,
2423 				&flow->qos_rule,
2424 				NET_PROT_TCP,
2425 				NH_FLD_TCP_PORT_DST,
2426 				&spec->hdr.dst_port,
2427 				&mask->hdr.dst_port,
2428 				NH_FLD_TCP_PORT_SIZE);
2429 		if (ret) {
2430 			DPAA2_PMD_ERR(
2431 				"QoS NH_FLD_TCP_PORT_DST rule data set failed");
2432 			return -1;
2433 		}
2434 
2435 		ret = dpaa2_flow_rule_data_set(
2436 				&priv->extract.tc_key_extract[group],
2437 				&flow->fs_rule,
2438 				NET_PROT_TCP,
2439 				NH_FLD_TCP_PORT_DST,
2440 				&spec->hdr.dst_port,
2441 				&mask->hdr.dst_port,
2442 				NH_FLD_TCP_PORT_SIZE);
2443 		if (ret) {
2444 			DPAA2_PMD_ERR(
2445 				"FS NH_FLD_TCP_PORT_DST rule data set failed");
2446 			return -1;
2447 		}
2448 	}
2449 
2450 	(*device_configured) |= local_cfg;
2451 
2452 	return 0;
2453 }
2454 
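/* Build the QoS/FS extracts and rule data for an SCTP pattern item,
 * following the same scheme as the UDP/TCP handlers above with
 * IPPROTO_SCTP and the SCTP source/destination ports.
 */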
2455 static int
2456 dpaa2_configure_flow_sctp(struct rte_flow *flow,
2457 			  struct rte_eth_dev *dev,
2458 			  const struct rte_flow_attr *attr,
2459 			  const struct rte_flow_item *pattern,
2460 			  const struct rte_flow_action actions[] __rte_unused,
2461 			  struct rte_flow_error *error __rte_unused,
2462 			  int *device_configured)
2463 {
2464 	int index, ret;
2465 	int local_cfg = 0;
2466 	uint32_t group;
2467 	const struct rte_flow_item_sctp *spec, *mask;
2468 
2469 	const struct rte_flow_item_sctp *last __rte_unused;
2470 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2471 
2472 	group = attr->group;
2473 
2474 	/* Parse pattern list to get the matching parameters */
2475 	spec    = (const struct rte_flow_item_sctp *)pattern->spec;
2476 	last    = (const struct rte_flow_item_sctp *)pattern->last;
2477 	mask    = (const struct rte_flow_item_sctp *)
2478 			(pattern->mask ? pattern->mask :
2479 				&dpaa2_flow_item_sctp_mask);
2480 
2481 	/* Get traffic class index and flow id to be configured */
2482 	flow->tc_id = group;
2483 	flow->tc_index = attr->priority;
2484 
2485 	if (!spec || !mc_l4_port_identification) {
2486 		struct proto_discrimination proto;
2487 
2488 		index = dpaa2_flow_extract_search(
2489 				&priv->extract.qos_key_extract.dpkg,
2490 				NET_PROT_IP, NH_FLD_IP_PROTO);
2491 		if (index < 0) {
2492 			ret = dpaa2_flow_proto_discrimination_extract(
2493 					&priv->extract.qos_key_extract,
2494 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2495 			if (ret) {
2496 				DPAA2_PMD_ERR(
2497 					"QoS Extract IP protocol to discriminate SCTP failed.");
2498 
2499 				return -1;
2500 			}
2501 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2502 		}
2503 
2504 		index = dpaa2_flow_extract_search(
2505 				&priv->extract.tc_key_extract[group].dpkg,
2506 				NET_PROT_IP, NH_FLD_IP_PROTO);
2507 		if (index < 0) {
2508 			ret = dpaa2_flow_proto_discrimination_extract(
2509 					&priv->extract.tc_key_extract[group],
2510 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2511 			if (ret) {
2512 				DPAA2_PMD_ERR(
2513 					"FS Extract IP protocol to discriminate SCTP failed.");
2514 
2515 				return -1;
2516 			}
2517 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2518 		}
2519 
2520 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2521 		if (ret) {
2522 			DPAA2_PMD_ERR(
2523 				"Move ipaddr before SCTP discrimination set failed");
2524 			return -1;
2525 		}
2526 
2527 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2528 		proto.ip_proto = IPPROTO_SCTP;
2529 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2530 							proto, group);
2531 		if (ret) {
2532 			DPAA2_PMD_ERR("SCTP discrimination rule set failed");
2533 			return -1;
2534 		}
2535 
2536 		(*device_configured) |= local_cfg;
2537 
2538 		if (!spec)
2539 			return 0;
2540 	}
2541 
2542 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2543 		RTE_FLOW_ITEM_TYPE_SCTP)) {
2544 		DPAA2_PMD_WARN("Extract field(s) of SCTP not supported.");
2545 
2546 		return -1;
2547 	}
2548 
2549 	if (mask->hdr.src_port) {
2550 		index = dpaa2_flow_extract_search(
2551 				&priv->extract.qos_key_extract.dpkg,
2552 				NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
2553 		if (index < 0) {
2554 			ret = dpaa2_flow_extract_add(
2555 					&priv->extract.qos_key_extract,
2556 					NET_PROT_SCTP,
2557 					NH_FLD_SCTP_PORT_SRC,
2558 					NH_FLD_SCTP_PORT_SIZE);
2559 			if (ret) {
2560 				DPAA2_PMD_ERR("QoS Extract add SCTP_SRC failed.");
2561 
2562 				return -1;
2563 			}
2564 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2565 		}
2566 
2567 		index = dpaa2_flow_extract_search(
2568 				&priv->extract.tc_key_extract[group].dpkg,
2569 				NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
2570 		if (index < 0) {
2571 			ret = dpaa2_flow_extract_add(
2572 					&priv->extract.tc_key_extract[group],
2573 					NET_PROT_SCTP,
2574 					NH_FLD_SCTP_PORT_SRC,
2575 					NH_FLD_SCTP_PORT_SIZE);
2576 			if (ret) {
2577 				DPAA2_PMD_ERR("FS Extract add SCTP_SRC failed.");
2578 
2579 				return -1;
2580 			}
2581 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2582 		}
2583 
2584 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2585 		if (ret) {
2586 			DPAA2_PMD_ERR(
2587 				"Move ipaddr before SCTP_PORT_SRC set failed");
2588 			return -1;
2589 		}
2590 
2591 		ret = dpaa2_flow_rule_data_set(
2592 				&priv->extract.qos_key_extract,
2593 				&flow->qos_rule,
2594 				NET_PROT_SCTP,
2595 				NH_FLD_SCTP_PORT_SRC,
2596 				&spec->hdr.src_port,
2597 				&mask->hdr.src_port,
2598 				NH_FLD_SCTP_PORT_SIZE);
2599 		if (ret) {
2600 			DPAA2_PMD_ERR(
2601 				"QoS NH_FLD_SCTP_PORT_SRC rule data set failed");
2602 			return -1;
2603 		}
2604 
2605 		ret = dpaa2_flow_rule_data_set(
2606 				&priv->extract.tc_key_extract[group],
2607 				&flow->fs_rule,
2608 				NET_PROT_SCTP,
2609 				NH_FLD_SCTP_PORT_SRC,
2610 				&spec->hdr.src_port,
2611 				&mask->hdr.src_port,
2612 				NH_FLD_SCTP_PORT_SIZE);
2613 		if (ret) {
2614 			DPAA2_PMD_ERR(
2615 				"FS NH_FLD_SCTP_PORT_SRC rule data set failed");
2616 			return -1;
2617 		}
2618 	}
2619 
2620 	if (mask->hdr.dst_port) {
2621 		index = dpaa2_flow_extract_search(
2622 				&priv->extract.qos_key_extract.dpkg,
2623 				NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
2624 		if (index < 0) {
2625 			ret = dpaa2_flow_extract_add(
2626 					&priv->extract.qos_key_extract,
2627 					NET_PROT_SCTP,
2628 					NH_FLD_SCTP_PORT_DST,
2629 					NH_FLD_SCTP_PORT_SIZE);
2630 			if (ret) {
2631 				DPAA2_PMD_ERR("QoS Extract add SCTP_DST failed.");
2632 
2633 				return -1;
2634 			}
2635 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2636 		}
2637 
2638 		index = dpaa2_flow_extract_search(
2639 				&priv->extract.tc_key_extract[group].dpkg,
2640 				NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
2641 		if (index < 0) {
2642 			ret = dpaa2_flow_extract_add(
2643 					&priv->extract.tc_key_extract[group],
2644 					NET_PROT_SCTP,
2645 					NH_FLD_SCTP_PORT_DST,
2646 					NH_FLD_SCTP_PORT_SIZE);
2647 			if (ret) {
2648 				DPAA2_PMD_ERR("FS Extract add SCTP_DST failed.");
2649 
2650 				return -1;
2651 			}
2652 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2653 		}
2654 
2655 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2656 		if (ret) {
2657 			DPAA2_PMD_ERR(
2658 				"Move ipaddr before SCTP_PORT_DST set failed");
2659 			return -1;
2660 		}
2661 
2662 		ret = dpaa2_flow_rule_data_set(
2663 				&priv->extract.qos_key_extract,
2664 				&flow->qos_rule,
2665 				NET_PROT_SCTP,
2666 				NH_FLD_SCTP_PORT_DST,
2667 				&spec->hdr.dst_port,
2668 				&mask->hdr.dst_port,
2669 				NH_FLD_SCTP_PORT_SIZE);
2670 		if (ret) {
2671 			DPAA2_PMD_ERR(
2672 				"QoS NH_FLD_SCTP_PORT_DST rule data set failed");
2673 			return -1;
2674 		}
2675 
2676 		ret = dpaa2_flow_rule_data_set(
2677 				&priv->extract.tc_key_extract[group],
2678 				&flow->fs_rule,
2679 				NET_PROT_SCTP,
2680 				NH_FLD_SCTP_PORT_DST,
2681 				&spec->hdr.dst_port,
2682 				&mask->hdr.dst_port,
2683 				NH_FLD_SCTP_PORT_SIZE);
2684 		if (ret) {
2685 			DPAA2_PMD_ERR(
2686 				"FS NH_FLD_SCTP_PORT_DST rule data set failed");
2687 			return -1;
2688 		}
2689 	}
2690 
2691 	(*device_configured) |= local_cfg;
2692 
2693 	return 0;
2694 }
2695 
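/* Build the QoS/FS extracts and rule data for a GRE pattern item.
 * Without a spec the flow only discriminates on the IP next-protocol
 * field (IPPROTO_GRE); with a spec, only the GRE protocol type field
 * is supported as a match field.
 */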
2696 static int
2697 dpaa2_configure_flow_gre(struct rte_flow *flow,
2698 			 struct rte_eth_dev *dev,
2699 			 const struct rte_flow_attr *attr,
2700 			 const struct rte_flow_item *pattern,
2701 			 const struct rte_flow_action actions[] __rte_unused,
2702 			 struct rte_flow_error *error __rte_unused,
2703 			 int *device_configured)
2704 {
2705 	int index, ret;
2706 	int local_cfg = 0;
2707 	uint32_t group;
2708 	const struct rte_flow_item_gre *spec, *mask;
2709 
2710 	const struct rte_flow_item_gre *last __rte_unused;
2711 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2712 
2713 	group = attr->group;
2714 
2715 	/* Parse pattern list to get the matching parameters */
2716 	spec    = (const struct rte_flow_item_gre *)pattern->spec;
2717 	last    = (const struct rte_flow_item_gre *)pattern->last;
2718 	mask    = (const struct rte_flow_item_gre *)
2719 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_gre_mask);
2720 
2721 	/* Get traffic class index and flow id to be configured */
2722 	flow->tc_id = group;
2723 	flow->tc_index = attr->priority;
2724 
2725 	if (!spec) {
2726 		struct proto_discrimination proto;
2727 
2728 		index = dpaa2_flow_extract_search(
2729 				&priv->extract.qos_key_extract.dpkg,
2730 				NET_PROT_IP, NH_FLD_IP_PROTO);
2731 		if (index < 0) {
2732 			ret = dpaa2_flow_proto_discrimination_extract(
2733 					&priv->extract.qos_key_extract,
2734 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2735 			if (ret) {
2736 				DPAA2_PMD_ERR(
2737 					"QoS Extract IP protocol to discriminate GRE failed.");
2738 
2739 				return -1;
2740 			}
2741 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2742 		}
2743 
2744 		index = dpaa2_flow_extract_search(
2745 				&priv->extract.tc_key_extract[group].dpkg,
2746 				NET_PROT_IP, NH_FLD_IP_PROTO);
2747 		if (index < 0) {
2748 			ret = dpaa2_flow_proto_discrimination_extract(
2749 					&priv->extract.tc_key_extract[group],
2750 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2751 			if (ret) {
2752 				DPAA2_PMD_ERR(
2753 					"FS Extract IP protocol to discriminate GRE failed.");
2754 
2755 				return -1;
2756 			}
2757 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2758 		}
2759 
2760 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2761 		if (ret) {
2762 			DPAA2_PMD_ERR(
2763 				"Move IP addr before GRE discrimination set failed");
2764 			return -1;
2765 		}
2766 
2767 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2768 		proto.ip_proto = IPPROTO_GRE;
2769 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2770 							proto, group);
2771 		if (ret) {
2772 			DPAA2_PMD_ERR("GRE discrimination rule set failed");
2773 			return -1;
2774 		}
2775 
2776 		(*device_configured) |= local_cfg;
2777 
2778 		return 0;
2779 	}
2780 
2781 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2782 		RTE_FLOW_ITEM_TYPE_GRE)) {
2783 		DPAA2_PMD_WARN("Extract field(s) of GRE not supported.");
2784 
2785 		return -1;
2786 	}
2787 
2788 	if (!mask->protocol)
2789 		return 0;
2790 
2791 	index = dpaa2_flow_extract_search(
2792 			&priv->extract.qos_key_extract.dpkg,
2793 			NET_PROT_GRE, NH_FLD_GRE_TYPE);
2794 	if (index < 0) {
2795 		ret = dpaa2_flow_extract_add(
2796 				&priv->extract.qos_key_extract,
2797 				NET_PROT_GRE,
2798 				NH_FLD_GRE_TYPE,
2799 				sizeof(rte_be16_t));
2800 		if (ret) {
2801 			DPAA2_PMD_ERR("QoS Extract add GRE_TYPE failed.");
2802 
2803 			return -1;
2804 		}
2805 		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2806 	}
2807 
2808 	index = dpaa2_flow_extract_search(
2809 			&priv->extract.tc_key_extract[group].dpkg,
2810 			NET_PROT_GRE, NH_FLD_GRE_TYPE);
2811 	if (index < 0) {
2812 		ret = dpaa2_flow_extract_add(
2813 				&priv->extract.tc_key_extract[group],
2814 				NET_PROT_GRE,
2815 				NH_FLD_GRE_TYPE,
2816 				sizeof(rte_be16_t));
2817 		if (ret) {
2818 			DPAA2_PMD_ERR("FS Extract add GRE_TYPE failed.");
2819 
2820 			return -1;
2821 		}
2822 		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2823 	}
2824 
2825 	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2826 	if (ret) {
2827 		DPAA2_PMD_ERR(
2828 			"Move ipaddr before GRE_TYPE set failed");
2829 		return -1;
2830 	}
2831 
2832 	ret = dpaa2_flow_rule_data_set(
2833 				&priv->extract.qos_key_extract,
2834 				&flow->qos_rule,
2835 				NET_PROT_GRE,
2836 				NH_FLD_GRE_TYPE,
2837 				&spec->protocol,
2838 				&mask->protocol,
2839 				sizeof(rte_be16_t));
2840 	if (ret) {
2841 		DPAA2_PMD_ERR(
2842 			"QoS NH_FLD_GRE_TYPE rule data set failed");
2843 		return -1;
2844 	}
2845 
2846 	ret = dpaa2_flow_rule_data_set(
2847 			&priv->extract.tc_key_extract[group],
2848 			&flow->fs_rule,
2849 			NET_PROT_GRE,
2850 			NH_FLD_GRE_TYPE,
2851 			&spec->protocol,
2852 			&mask->protocol,
2853 			sizeof(rte_be16_t));
2854 	if (ret) {
2855 		DPAA2_PMD_ERR(
2856 			"FS NH_FLD_GRE_TYPE rule data set failed");
2857 		return -1;
2858 	}
2859 
2860 	(*device_configured) |= local_cfg;
2861 
2862 	return 0;
2863 }
2864 
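/* Build the QoS/FS rule data for a RAW pattern item. Only non-relative
 * patterns at offset 0 with equal spec and mask lengths are accepted;
 * the pattern/mask bytes are copied into the QoS and FS rule data, and
 * the key extracts are grown first if the current key is shorter than
 * the requested pattern length.
 */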
2865 static int
2866 dpaa2_configure_flow_raw(struct rte_flow *flow,
2867 			 struct rte_eth_dev *dev,
2868 			 const struct rte_flow_attr *attr,
2869 			 const struct rte_flow_item *pattern,
2870 			 const struct rte_flow_action actions[] __rte_unused,
2871 			 struct rte_flow_error *error __rte_unused,
2872 			 int *device_configured)
2873 {
2874 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2875 	const struct rte_flow_item_raw *spec = pattern->spec;
2876 	const struct rte_flow_item_raw *mask = pattern->mask;
2877 	int prev_key_size =
2878 		priv->extract.qos_key_extract.key_info.key_total_size;
2879 	int local_cfg = 0, ret;
2880 	uint32_t group;
2881 
2882 	/* Need both spec and mask */
2883 	if (!spec || !mask) {
2884 		DPAA2_PMD_ERR("spec or mask not present.");
2885 		return -EINVAL;
2886 	}
2887 	/* Only supports non-relative with offset 0 */
2888 	if (spec->relative || spec->offset != 0 ||
2889 	    spec->search || spec->limit) {
2890 		DPAA2_PMD_ERR("relative and non-zero offset not supported.");
2891 		return -EINVAL;
2892 	}
2893 	/* Spec length and mask length must be the same */
2894 	if (spec->length != mask->length) {
2895 		DPAA2_PMD_ERR("Spec len and mask len mismatch.");
2896 		return -EINVAL;
2897 	}
2898 
2899 	/* Get traffic class index and flow id to be configured */
2900 	group = attr->group;
2901 	flow->tc_id = group;
2902 	flow->tc_index = attr->priority;
2903 
2904 	if (prev_key_size < spec->length) {
2905 		ret = dpaa2_flow_extract_add_raw(&priv->extract.qos_key_extract,
2906 						 spec->length);
2907 		if (ret) {
2908 			DPAA2_PMD_ERR("QoS Extract RAW add failed.");
2909 			return -1;
2910 		}
2911 		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2912 
2913 		ret = dpaa2_flow_extract_add_raw(
2914 					&priv->extract.tc_key_extract[group],
2915 					spec->length);
2916 		if (ret) {
2917 			DPAA2_PMD_ERR("FS Extract RAW add failed.");
2918 			return -1;
2919 		}
2920 		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2921 	}
2922 
2923 	ret = dpaa2_flow_rule_data_set_raw(&flow->qos_rule, spec->pattern,
2924 					   mask->pattern, spec->length);
2925 	if (ret) {
2926 		DPAA2_PMD_ERR("QoS RAW rule data set failed");
2927 		return -1;
2928 	}
2929 
2930 	ret = dpaa2_flow_rule_data_set_raw(&flow->fs_rule, spec->pattern,
2931 					   mask->pattern, spec->length);
2932 	if (ret) {
2933 		DPAA2_PMD_ERR("FS RAW rule data set failed");
2934 		return -1;
2935 	}
2936 
2937 	(*device_configured) |= local_cfg;
2938 
2939 	return 0;
2940 }
2941 
2942 /* Existing QoS/FS entries that contain IP address(es)
2943  * need to be updated after new extract(s) are inserted
2944  * ahead of the IP address extract(s), because the IP
2945  * address offsets within the key shift.
2946  */
2947 static int
2948 dpaa2_flow_entry_update(
2949 	struct dpaa2_dev_priv *priv, uint8_t tc_id)
2950 {
2951 	struct rte_flow *curr = LIST_FIRST(&priv->flows);
2952 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
2953 	int ret;
2954 	int qos_ipsrc_offset = -1, qos_ipdst_offset = -1;
2955 	int fs_ipsrc_offset = -1, fs_ipdst_offset = -1;
2956 	struct dpaa2_key_extract *qos_key_extract =
2957 		&priv->extract.qos_key_extract;
2958 	struct dpaa2_key_extract *tc_key_extract =
2959 		&priv->extract.tc_key_extract[tc_id];
2960 	char ipsrc_key[NH_FLD_IPV6_ADDR_SIZE];
2961 	char ipdst_key[NH_FLD_IPV6_ADDR_SIZE];
2962 	char ipsrc_mask[NH_FLD_IPV6_ADDR_SIZE];
2963 	char ipdst_mask[NH_FLD_IPV6_ADDR_SIZE];
2964 	int extend = -1, extend1, size = -1;
2965 	uint16_t qos_index;
2966 
2967 	while (curr) {
2968 		if (curr->ipaddr_rule.ipaddr_type ==
2969 			FLOW_NONE_IPADDR) {
2970 			curr = LIST_NEXT(curr, next);
2971 			continue;
2972 		}
2973 
2974 		if (curr->ipaddr_rule.ipaddr_type ==
2975 			FLOW_IPV4_ADDR) {
2976 			qos_ipsrc_offset =
2977 				qos_key_extract->key_info.ipv4_src_offset;
2978 			qos_ipdst_offset =
2979 				qos_key_extract->key_info.ipv4_dst_offset;
2980 			fs_ipsrc_offset =
2981 				tc_key_extract->key_info.ipv4_src_offset;
2982 			fs_ipdst_offset =
2983 				tc_key_extract->key_info.ipv4_dst_offset;
2984 			size = NH_FLD_IPV4_ADDR_SIZE;
2985 		} else {
2986 			qos_ipsrc_offset =
2987 				qos_key_extract->key_info.ipv6_src_offset;
2988 			qos_ipdst_offset =
2989 				qos_key_extract->key_info.ipv6_dst_offset;
2990 			fs_ipsrc_offset =
2991 				tc_key_extract->key_info.ipv6_src_offset;
2992 			fs_ipdst_offset =
2993 				tc_key_extract->key_info.ipv6_dst_offset;
2994 			size = NH_FLD_IPV6_ADDR_SIZE;
2995 		}
2996 
2997 		qos_index = curr->tc_id * priv->fs_entries +
2998 			curr->tc_index;
2999 
3000 		dpaa2_flow_qos_entry_log("Before update", curr, qos_index);
3001 
3002 		if (priv->num_rx_tc > 1) {
3003 			ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW,
3004 					priv->token, &curr->qos_rule);
3005 			if (ret) {
3006 				DPAA2_PMD_ERR("Qos entry remove failed.");
3007 				return -1;
3008 			}
3009 		}
3010 
3011 		extend = -1;
3012 
3013 		if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
3014 			RTE_ASSERT(qos_ipsrc_offset >=
3015 				curr->ipaddr_rule.qos_ipsrc_offset);
3016 			extend1 = qos_ipsrc_offset -
3017 				curr->ipaddr_rule.qos_ipsrc_offset;
3018 			if (extend >= 0)
3019 				RTE_ASSERT(extend == extend1);
3020 			else
3021 				extend = extend1;
3022 
3023 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3024 				(size == NH_FLD_IPV6_ADDR_SIZE));
3025 
3026 			memcpy(ipsrc_key,
3027 				(char *)(size_t)curr->qos_rule.key_iova +
3028 				curr->ipaddr_rule.qos_ipsrc_offset,
3029 				size);
3030 			memset((char *)(size_t)curr->qos_rule.key_iova +
3031 				curr->ipaddr_rule.qos_ipsrc_offset,
3032 				0, size);
3033 
3034 			memcpy(ipsrc_mask,
3035 				(char *)(size_t)curr->qos_rule.mask_iova +
3036 				curr->ipaddr_rule.qos_ipsrc_offset,
3037 				size);
3038 			memset((char *)(size_t)curr->qos_rule.mask_iova +
3039 				curr->ipaddr_rule.qos_ipsrc_offset,
3040 				0, size);
3041 
3042 			curr->ipaddr_rule.qos_ipsrc_offset = qos_ipsrc_offset;
3043 		}
3044 
3045 		if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
3046 			RTE_ASSERT(qos_ipdst_offset >=
3047 				curr->ipaddr_rule.qos_ipdst_offset);
3048 			extend1 = qos_ipdst_offset -
3049 				curr->ipaddr_rule.qos_ipdst_offset;
3050 			if (extend >= 0)
3051 				RTE_ASSERT(extend == extend1);
3052 			else
3053 				extend = extend1;
3054 
3055 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3056 				(size == NH_FLD_IPV6_ADDR_SIZE));
3057 
3058 			memcpy(ipdst_key,
3059 				(char *)(size_t)curr->qos_rule.key_iova +
3060 				curr->ipaddr_rule.qos_ipdst_offset,
3061 				size);
3062 			memset((char *)(size_t)curr->qos_rule.key_iova +
3063 				curr->ipaddr_rule.qos_ipdst_offset,
3064 				0, size);
3065 
3066 			memcpy(ipdst_mask,
3067 				(char *)(size_t)curr->qos_rule.mask_iova +
3068 				curr->ipaddr_rule.qos_ipdst_offset,
3069 				size);
3070 			memset((char *)(size_t)curr->qos_rule.mask_iova +
3071 				curr->ipaddr_rule.qos_ipdst_offset,
3072 				0, size);
3073 
3074 			curr->ipaddr_rule.qos_ipdst_offset = qos_ipdst_offset;
3075 		}
3076 
3077 		if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
3078 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3079 				(size == NH_FLD_IPV6_ADDR_SIZE));
3080 			memcpy((char *)(size_t)curr->qos_rule.key_iova +
3081 				curr->ipaddr_rule.qos_ipsrc_offset,
3082 				ipsrc_key,
3083 				size);
3084 			memcpy((char *)(size_t)curr->qos_rule.mask_iova +
3085 				curr->ipaddr_rule.qos_ipsrc_offset,
3086 				ipsrc_mask,
3087 				size);
3088 		}
3089 		if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
3090 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3091 				(size == NH_FLD_IPV6_ADDR_SIZE));
3092 			memcpy((char *)(size_t)curr->qos_rule.key_iova +
3093 				curr->ipaddr_rule.qos_ipdst_offset,
3094 				ipdst_key,
3095 				size);
3096 			memcpy((char *)(size_t)curr->qos_rule.mask_iova +
3097 				curr->ipaddr_rule.qos_ipdst_offset,
3098 				ipdst_mask,
3099 				size);
3100 		}
3101 
3102 		if (extend >= 0)
3103 			curr->qos_real_key_size += extend;
3104 
3105 		curr->qos_rule.key_size = FIXED_ENTRY_SIZE;
3106 
3107 		dpaa2_flow_qos_entry_log("Start update", curr, qos_index);
3108 
3109 		if (priv->num_rx_tc > 1) {
3110 			ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
3111 					priv->token, &curr->qos_rule,
3112 					curr->tc_id, qos_index,
3113 					0, 0);
3114 			if (ret) {
3115 				DPAA2_PMD_ERR("Qos entry update failed.");
3116 				return -1;
3117 			}
3118 		}
3119 
3120 		if (curr->action != RTE_FLOW_ACTION_TYPE_QUEUE) {
3121 			curr = LIST_NEXT(curr, next);
3122 			continue;
3123 		}
3124 
3125 		dpaa2_flow_fs_entry_log("Before update", curr);
3126 		extend = -1;
3127 
3128 		ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW,
3129 				priv->token, curr->tc_id, &curr->fs_rule);
3130 		if (ret) {
3131 			DPAA2_PMD_ERR("FS entry remove failed.");
3132 			return -1;
3133 		}
3134 
3135 		if (curr->ipaddr_rule.fs_ipsrc_offset >= 0 &&
3136 			tc_id == curr->tc_id) {
3137 			RTE_ASSERT(fs_ipsrc_offset >=
3138 				curr->ipaddr_rule.fs_ipsrc_offset);
3139 			extend1 = fs_ipsrc_offset -
3140 				curr->ipaddr_rule.fs_ipsrc_offset;
3141 			if (extend >= 0)
3142 				RTE_ASSERT(extend == extend1);
3143 			else
3144 				extend = extend1;
3145 
3146 			memcpy(ipsrc_key,
3147 				(char *)(size_t)curr->fs_rule.key_iova +
3148 				curr->ipaddr_rule.fs_ipsrc_offset,
3149 				size);
3150 			memset((char *)(size_t)curr->fs_rule.key_iova +
3151 				curr->ipaddr_rule.fs_ipsrc_offset,
3152 				0, size);
3153 
3154 			memcpy(ipsrc_mask,
3155 				(char *)(size_t)curr->fs_rule.mask_iova +
3156 				curr->ipaddr_rule.fs_ipsrc_offset,
3157 				size);
3158 			memset((char *)(size_t)curr->fs_rule.mask_iova +
3159 				curr->ipaddr_rule.fs_ipsrc_offset,
3160 				0, size);
3161 
3162 			curr->ipaddr_rule.fs_ipsrc_offset = fs_ipsrc_offset;
3163 		}
3164 
3165 		if (curr->ipaddr_rule.fs_ipdst_offset >= 0 &&
3166 			tc_id == curr->tc_id) {
3167 			RTE_ASSERT(fs_ipdst_offset >=
3168 				curr->ipaddr_rule.fs_ipdst_offset);
3169 			extend1 = fs_ipdst_offset -
3170 				curr->ipaddr_rule.fs_ipdst_offset;
3171 			if (extend >= 0)
3172 				RTE_ASSERT(extend == extend1);
3173 			else
3174 				extend = extend1;
3175 
3176 			memcpy(ipdst_key,
3177 				(char *)(size_t)curr->fs_rule.key_iova +
3178 				curr->ipaddr_rule.fs_ipdst_offset,
3179 				size);
3180 			memset((char *)(size_t)curr->fs_rule.key_iova +
3181 				curr->ipaddr_rule.fs_ipdst_offset,
3182 				0, size);
3183 
3184 			memcpy(ipdst_mask,
3185 				(char *)(size_t)curr->fs_rule.mask_iova +
3186 				curr->ipaddr_rule.fs_ipdst_offset,
3187 				size);
3188 			memset((char *)(size_t)curr->fs_rule.mask_iova +
3189 				curr->ipaddr_rule.fs_ipdst_offset,
3190 				0, size);
3191 
3192 			curr->ipaddr_rule.fs_ipdst_offset = fs_ipdst_offset;
3193 		}
3194 
3195 		if (curr->ipaddr_rule.fs_ipsrc_offset >= 0) {
3196 			memcpy((char *)(size_t)curr->fs_rule.key_iova +
3197 				curr->ipaddr_rule.fs_ipsrc_offset,
3198 				ipsrc_key,
3199 				size);
3200 			memcpy((char *)(size_t)curr->fs_rule.mask_iova +
3201 				curr->ipaddr_rule.fs_ipsrc_offset,
3202 				ipsrc_mask,
3203 				size);
3204 		}
3205 		if (curr->ipaddr_rule.fs_ipdst_offset >= 0) {
3206 			memcpy((char *)(size_t)curr->fs_rule.key_iova +
3207 				curr->ipaddr_rule.fs_ipdst_offset,
3208 				ipdst_key,
3209 				size);
3210 			memcpy((char *)(size_t)curr->fs_rule.mask_iova +
3211 				curr->ipaddr_rule.fs_ipdst_offset,
3212 				ipdst_mask,
3213 				size);
3214 		}
3215 
3216 		if (extend >= 0)
3217 			curr->fs_real_key_size += extend;
3218 		curr->fs_rule.key_size = FIXED_ENTRY_SIZE;
3219 
3220 		dpaa2_flow_fs_entry_log("Start update", curr);
3221 
3222 		ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW,
3223 				priv->token, curr->tc_id, curr->tc_index,
3224 				&curr->fs_rule, &curr->action_cfg);
3225 		if (ret) {
3226 			DPAA2_PMD_ERR("FS entry update failed.");
3227 			return -1;
3228 		}
3229 
3230 		curr = LIST_NEXT(curr, next);
3231 	}
3232 
3233 	return 0;
3234 }
3235 
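/* Reject a flow whose group/priority pair is already used by an
 * existing flow: the pair maps directly to a TC id and an index
 * within that TC, so it must be unique per device.
 */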
3236 static inline int
3237 dpaa2_flow_verify_attr(
3238 	struct dpaa2_dev_priv *priv,
3239 	const struct rte_flow_attr *attr)
3240 {
3241 	struct rte_flow *curr = LIST_FIRST(&priv->flows);
3242 
3243 	while (curr) {
3244 		if (curr->tc_id == attr->group &&
3245 			curr->tc_index == attr->priority) {
3246 			DPAA2_PMD_ERR(
3247 				"Flow with group %d and priority %d already exists.",
3248 				attr->group, attr->priority);
3249 
3250 			return -1;
3251 		}
3252 		curr = LIST_NEXT(curr, next);
3253 	}
3254 
3255 	return 0;
3256 }
3257 
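/* Sanity-check the action list: a QUEUE action must target an RX
 * queue belonging to the requested group (TC), and an RSS action may
 * neither reference more queues than the distribution size nor queues
 * outside the group.
 */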
3258 static inline int
3259 dpaa2_flow_verify_action(
3260 	struct dpaa2_dev_priv *priv,
3261 	const struct rte_flow_attr *attr,
3262 	const struct rte_flow_action actions[])
3263 {
3264 	int end_of_list = 0, i, j = 0;
3265 	const struct rte_flow_action_queue *dest_queue;
3266 	const struct rte_flow_action_rss *rss_conf;
3267 	struct dpaa2_queue *rxq;
3268 
3269 	while (!end_of_list) {
3270 		switch (actions[j].type) {
3271 		case RTE_FLOW_ACTION_TYPE_QUEUE:
3272 			dest_queue = (const struct rte_flow_action_queue *)
3273 					(actions[j].conf);
3274 			rxq = priv->rx_vq[dest_queue->index];
3275 			if (attr->group != rxq->tc_index) {
3276 				DPAA2_PMD_ERR(
3277 					"RXQ[%d] does not belong to the group %d",
3278 					dest_queue->index, attr->group);
3279 
3280 				return -1;
3281 			}
3282 			break;
3283 		case RTE_FLOW_ACTION_TYPE_RSS:
3284 			rss_conf = (const struct rte_flow_action_rss *)
3285 					(actions[j].conf);
3286 			if (rss_conf->queue_num > priv->dist_queues) {
3287 				DPAA2_PMD_ERR(
3288 					"RSS number exceeds the distribution size");
3289 				return -ENOTSUP;
3290 			}
3291 			for (i = 0; i < (int)rss_conf->queue_num; i++) {
3292 				if (rss_conf->queue[i] >= priv->nb_rx_queues) {
3293 					DPAA2_PMD_ERR(
3294 						"RSS queue index exceeds the number of RXQs");
3295 					return -ENOTSUP;
3296 				}
3297 				rxq = priv->rx_vq[rss_conf->queue[i]];
3298 				if (rxq->tc_index != attr->group) {
3299 					DPAA2_PMD_ERR(
3300 						"Queue/Group combination is not supported\n");
3301 					return -ENOTSUP;
3302 				}
3303 			}
3304 
3305 			break;
3306 		case RTE_FLOW_ACTION_TYPE_END:
3307 			end_of_list = 1;
3308 			break;
3309 		default:
3310 			DPAA2_PMD_ERR("Invalid action type");
3311 			return -ENOTSUP;
3312 		}
3313 		j++;
3314 	}
3315 
3316 	return 0;
3317 }
3318 
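/* Translate a flow into hardware configuration. The pattern items are
 * walked first to build the QoS/FS key extracts and rule data. The
 * action is then applied: the FS table (and, when multiple TCs are in
 * use, the QoS table) is programmed for QUEUE, or the hash
 * distribution is set up for RSS. Finally the new flow is appended to
 * the device's flow list.
 */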
3319 static int
3320 dpaa2_generic_flow_set(struct rte_flow *flow,
3321 		       struct rte_eth_dev *dev,
3322 		       const struct rte_flow_attr *attr,
3323 		       const struct rte_flow_item pattern[],
3324 		       const struct rte_flow_action actions[],
3325 		       struct rte_flow_error *error)
3326 {
3327 	const struct rte_flow_action_queue *dest_queue;
3328 	const struct rte_flow_action_rss *rss_conf;
3329 	int is_keycfg_configured = 0, end_of_list = 0;
3330 	int ret = 0, i = 0, j = 0;
3331 	struct dpni_rx_dist_cfg tc_cfg;
3332 	struct dpni_qos_tbl_cfg qos_cfg;
3333 	struct dpni_fs_action_cfg action;
3334 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3335 	struct dpaa2_queue *rxq;
3336 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3337 	size_t param;
3338 	struct rte_flow *curr = LIST_FIRST(&priv->flows);
3339 	uint16_t qos_index;
3340 
3341 	ret = dpaa2_flow_verify_attr(priv, attr);
3342 	if (ret)
3343 		return ret;
3344 
3345 	ret = dpaa2_flow_verify_action(priv, attr, actions);
3346 	if (ret)
3347 		return ret;
3348 
3349 	/* Parse pattern list to get the matching parameters */
3350 	while (!end_of_list) {
3351 		switch (pattern[i].type) {
3352 		case RTE_FLOW_ITEM_TYPE_ETH:
3353 			ret = dpaa2_configure_flow_eth(flow,
3354 					dev, attr, &pattern[i], actions, error,
3355 					&is_keycfg_configured);
3356 			if (ret) {
3357 				DPAA2_PMD_ERR("ETH flow configuration failed!");
3358 				return ret;
3359 			}
3360 			break;
3361 		case RTE_FLOW_ITEM_TYPE_VLAN:
3362 			ret = dpaa2_configure_flow_vlan(flow,
3363 					dev, attr, &pattern[i], actions, error,
3364 					&is_keycfg_configured);
3365 			if (ret) {
3366 				DPAA2_PMD_ERR("VLAN flow configuration failed!");
3367 				return ret;
3368 			}
3369 			break;
3370 		case RTE_FLOW_ITEM_TYPE_IPV4:
3371 		case RTE_FLOW_ITEM_TYPE_IPV6:
3372 			ret = dpaa2_configure_flow_generic_ip(flow,
3373 					dev, attr, &pattern[i], actions, error,
3374 					&is_keycfg_configured);
3375 			if (ret) {
3376 				DPAA2_PMD_ERR("IP flow configuration failed!");
3377 				return ret;
3378 			}
3379 			break;
3380 		case RTE_FLOW_ITEM_TYPE_ICMP:
3381 			ret = dpaa2_configure_flow_icmp(flow,
3382 					dev, attr, &pattern[i], actions, error,
3383 					&is_keycfg_configured);
3384 			if (ret) {
3385 				DPAA2_PMD_ERR("ICMP flow configuration failed!");
3386 				return ret;
3387 			}
3388 			break;
3389 		case RTE_FLOW_ITEM_TYPE_UDP:
3390 			ret = dpaa2_configure_flow_udp(flow,
3391 					dev, attr, &pattern[i], actions, error,
3392 					&is_keycfg_configured);
3393 			if (ret) {
3394 				DPAA2_PMD_ERR("UDP flow configuration failed!");
3395 				return ret;
3396 			}
3397 			break;
3398 		case RTE_FLOW_ITEM_TYPE_TCP:
3399 			ret = dpaa2_configure_flow_tcp(flow,
3400 					dev, attr, &pattern[i], actions, error,
3401 					&is_keycfg_configured);
3402 			if (ret) {
3403 				DPAA2_PMD_ERR("TCP flow configuration failed!");
3404 				return ret;
3405 			}
3406 			break;
3407 		case RTE_FLOW_ITEM_TYPE_SCTP:
3408 			ret = dpaa2_configure_flow_sctp(flow,
3409 					dev, attr, &pattern[i], actions, error,
3410 					&is_keycfg_configured);
3411 			if (ret) {
3412 				DPAA2_PMD_ERR("SCTP flow configuration failed!");
3413 				return ret;
3414 			}
3415 			break;
3416 		case RTE_FLOW_ITEM_TYPE_GRE:
3417 			ret = dpaa2_configure_flow_gre(flow,
3418 					dev, attr, &pattern[i], actions, error,
3419 					&is_keycfg_configured);
3420 			if (ret) {
3421 				DPAA2_PMD_ERR("GRE flow configuration failed!");
3422 				return ret;
3423 			}
3424 			break;
3425 		case RTE_FLOW_ITEM_TYPE_RAW:
3426 			ret = dpaa2_configure_flow_raw(flow,
3427 						       dev, attr, &pattern[i],
3428 						       actions, error,
3429 						       &is_keycfg_configured);
3430 			if (ret) {
3431 				DPAA2_PMD_ERR("RAW flow configuration failed!");
3432 				return ret;
3433 			}
3434 			break;
3435 		case RTE_FLOW_ITEM_TYPE_END:
3436 			end_of_list = 1;
3437 			break; /*End of List*/
3438 		default:
3439 			DPAA2_PMD_ERR("Invalid pattern item type");
3440 			ret = -ENOTSUP;
3441 			break;
3442 		}
3443 		i++;
3444 	}
3445 
3446 	/* Now parse the actions to be applied on matching traffic */
3447 	end_of_list = 0;
3448 	while (!end_of_list) {
3449 		switch (actions[j].type) {
3450 		case RTE_FLOW_ACTION_TYPE_QUEUE:
3451 			dest_queue =
3452 				(const struct rte_flow_action_queue *)(actions[j].conf);
3453 			rxq = priv->rx_vq[dest_queue->index];
3454 			flow->action = RTE_FLOW_ACTION_TYPE_QUEUE;
3455 			memset(&action, 0, sizeof(struct dpni_fs_action_cfg));
3456 			action.flow_id = rxq->flow_id;
3457 
3458 			/* Configure the FS table first. */
3459 			if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
3460 				dpaa2_flow_fs_table_extracts_log(priv, flow->tc_id);
3461 				if (dpkg_prepare_key_cfg(
3462 				&priv->extract.tc_key_extract[flow->tc_id].dpkg,
3463 				(uint8_t *)(size_t)priv->extract
3464 				.tc_extract_param[flow->tc_id]) < 0) {
3465 					DPAA2_PMD_ERR(
3466 					"Unable to prepare extract parameters");
3467 					return -1;
3468 				}
3469 
3470 				memset(&tc_cfg, 0,
3471 					sizeof(struct dpni_rx_dist_cfg));
3472 				tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc;
3473 				tc_cfg.key_cfg_iova =
3474 					(uint64_t)priv->extract.tc_extract_param[flow->tc_id];
3475 				tc_cfg.tc = flow->tc_id;
3476 				tc_cfg.enable = false;
3477 				ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW,
3478 						priv->token, &tc_cfg);
3479 				if (ret < 0) {
3480 					DPAA2_PMD_ERR(
3481 						"TC hash cannot be disabled (%d)",
3482 						ret);
3483 					return -1;
3484 				}
3485 				tc_cfg.enable = true;
3486 				tc_cfg.fs_miss_flow_id =
3487 					dpaa2_flow_miss_flow_id;
3488 				ret = dpni_set_rx_fs_dist(dpni, CMD_PRI_LOW,
3489 							 priv->token, &tc_cfg);
3490 				if (ret < 0) {
3491 					DPAA2_PMD_ERR(
3492 						"TC distribution cannot be configured (%d)",
3493 						ret);
3494 					return -1;
3495 				}
3496 			}
3497 
3498 			/* Then configure the QoS table. */
3499 			if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
3500 				dpaa2_flow_qos_table_extracts_log(priv);
3501 				if (dpkg_prepare_key_cfg(
3502 					&priv->extract.qos_key_extract.dpkg,
3503 					(uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
3504 					DPAA2_PMD_ERR(
3505 						"Unable to prepare extract parameters");
3506 					return -1;
3507 				}
3508 
3509 				memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
3510 				qos_cfg.discard_on_miss = false;
3511 				qos_cfg.default_tc = 0;
3512 				qos_cfg.keep_entries = true;
3513 				qos_cfg.key_cfg_iova =
3514 					(size_t)priv->extract.qos_extract_param;
3515 				/* QoS table is effective only when multiple TCs are in use. */
3516 				if (priv->num_rx_tc > 1) {
3517 					ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
3518 						priv->token, &qos_cfg);
3519 					if (ret < 0) {
3520 						DPAA2_PMD_ERR(
3521 						"RSS QoS table cannot be configured (%d)\n",
3522 							ret);
3523 						return -1;
3524 					}
3525 				}
3526 			}
3527 
3528 			flow->qos_real_key_size = priv->extract
3529 				.qos_key_extract.key_info.key_total_size;
3530 			if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR) {
3531 				if (flow->ipaddr_rule.qos_ipdst_offset >=
3532 					flow->ipaddr_rule.qos_ipsrc_offset) {
3533 					flow->qos_real_key_size =
3534 						flow->ipaddr_rule.qos_ipdst_offset +
3535 						NH_FLD_IPV4_ADDR_SIZE;
3536 				} else {
3537 					flow->qos_real_key_size =
3538 						flow->ipaddr_rule.qos_ipsrc_offset +
3539 						NH_FLD_IPV4_ADDR_SIZE;
3540 				}
3541 			} else if (flow->ipaddr_rule.ipaddr_type ==
3542 				FLOW_IPV6_ADDR) {
3543 				if (flow->ipaddr_rule.qos_ipdst_offset >=
3544 					flow->ipaddr_rule.qos_ipsrc_offset) {
3545 					flow->qos_real_key_size =
3546 						flow->ipaddr_rule.qos_ipdst_offset +
3547 						NH_FLD_IPV6_ADDR_SIZE;
3548 				} else {
3549 					flow->qos_real_key_size =
3550 						flow->ipaddr_rule.qos_ipsrc_offset +
3551 						NH_FLD_IPV6_ADDR_SIZE;
3552 				}
3553 			}
3554 
3555 			/* An added QoS entry is only effective when multiple TCs are in use. */
3556 			if (priv->num_rx_tc > 1) {
3557 				qos_index = flow->tc_id * priv->fs_entries +
3558 					flow->tc_index;
3559 				if (qos_index >= priv->qos_entries) {
3560 					DPAA2_PMD_ERR("QoS table with %d entries full",
3561 						priv->qos_entries);
3562 					return -1;
3563 				}
3564 				flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
3565 
3566 				dpaa2_flow_qos_entry_log("Start add", flow, qos_index);
3567 
3568 				ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
3569 						priv->token, &flow->qos_rule,
3570 						flow->tc_id, qos_index,
3571 						0, 0);
3572 				if (ret < 0) {
3573 					DPAA2_PMD_ERR(
3574 						"Error in adding entry to QoS table (%d)", ret);
3575 					return ret;
3576 				}
3577 			}
3578 
3579 			if (flow->tc_index >= priv->fs_entries) {
3580 				DPAA2_PMD_ERR("FS table with %d entries full",
3581 					priv->fs_entries);
3582 				return -1;
3583 			}
3584 
3585 			flow->fs_real_key_size =
3586 				priv->extract.tc_key_extract[flow->tc_id]
3587 				.key_info.key_total_size;
3588 
3589 			if (flow->ipaddr_rule.ipaddr_type ==
3590 				FLOW_IPV4_ADDR) {
3591 				if (flow->ipaddr_rule.fs_ipdst_offset >=
3592 					flow->ipaddr_rule.fs_ipsrc_offset) {
3593 					flow->fs_real_key_size =
3594 						flow->ipaddr_rule.fs_ipdst_offset +
3595 						NH_FLD_IPV4_ADDR_SIZE;
3596 				} else {
3597 					flow->fs_real_key_size =
3598 						flow->ipaddr_rule.fs_ipsrc_offset +
3599 						NH_FLD_IPV4_ADDR_SIZE;
3600 				}
3601 			} else if (flow->ipaddr_rule.ipaddr_type ==
3602 				FLOW_IPV6_ADDR) {
3603 				if (flow->ipaddr_rule.fs_ipdst_offset >=
3604 					flow->ipaddr_rule.fs_ipsrc_offset) {
3605 					flow->fs_real_key_size =
3606 						flow->ipaddr_rule.fs_ipdst_offset +
3607 						NH_FLD_IPV6_ADDR_SIZE;
3608 				} else {
3609 					flow->fs_real_key_size =
3610 						flow->ipaddr_rule.fs_ipsrc_offset +
3611 						NH_FLD_IPV6_ADDR_SIZE;
3612 				}
3613 			}
3614 
3615 			flow->fs_rule.key_size = FIXED_ENTRY_SIZE;
3616 
3617 			dpaa2_flow_fs_entry_log("Start add", flow);
3618 
3619 			ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token,
3620 						flow->tc_id, flow->tc_index,
3621 						&flow->fs_rule, &action);
3622 			if (ret < 0) {
3623 				DPAA2_PMD_ERR(
3624 				"Error in adding entry to FS table (%d)", ret);
3625 				return ret;
3626 			}
3627 			memcpy(&flow->action_cfg, &action,
3628 				sizeof(struct dpni_fs_action_cfg));
3629 			break;
3630 		case RTE_FLOW_ACTION_TYPE_RSS:
3631 			rss_conf = (const struct rte_flow_action_rss *)(actions[j].conf);
3632 
3633 			flow->action = RTE_FLOW_ACTION_TYPE_RSS;
3634 			ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types,
3635 					&priv->extract.tc_key_extract[flow->tc_id].dpkg);
3636 			if (ret < 0) {
3637 				DPAA2_PMD_ERR(
3638 				"Unable to set flow distribution, please check queue config\n");
3639 				return ret;
3640 			}
3641 
3642 			/* Allocate DMA'ble memory to write the rules */
3643 			param = (size_t)rte_malloc(NULL, 256, 64);
3644 			if (!param) {
3645 				DPAA2_PMD_ERR("Memory allocation failure\n");
3646 				return -1;
3647 			}
3648 
3649 			if (dpkg_prepare_key_cfg(
3650 				&priv->extract.tc_key_extract[flow->tc_id].dpkg,
3651 				(uint8_t *)param) < 0) {
3652 				DPAA2_PMD_ERR(
3653 				"Unable to prepare extract parameters");
3654 				rte_free((void *)param);
3655 				return -1;
3656 			}
3657 
3658 			memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));
3659 			tc_cfg.dist_size = rss_conf->queue_num;
3660 			tc_cfg.key_cfg_iova = (size_t)param;
3661 			tc_cfg.enable = true;
3662 			tc_cfg.tc = flow->tc_id;
3663 			ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW,
3664 						 priv->token, &tc_cfg);
3665 			if (ret < 0) {
3666 				DPAA2_PMD_ERR(
3667 					"RSS TC table cannot be configured: %d\n",
3668 					ret);
3669 				rte_free((void *)param);
3670 				return -1;
3671 			}
3672 
3673 			rte_free((void *)param);
3674 			if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
3675 				if (dpkg_prepare_key_cfg(
3676 					&priv->extract.qos_key_extract.dpkg,
3677 					(uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
3678 					DPAA2_PMD_ERR(
3679 					"Unable to prepare extract parameters");
3680 					return -1;
3681 				}
3682 				memset(&qos_cfg, 0,
3683 					sizeof(struct dpni_qos_tbl_cfg));
3684 				qos_cfg.discard_on_miss = true;
3685 				qos_cfg.keep_entries = true;
3686 				qos_cfg.key_cfg_iova =
3687 					(size_t)priv->extract.qos_extract_param;
3688 				ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
3689 							 priv->token, &qos_cfg);
3690 				if (ret < 0) {
3691 					DPAA2_PMD_ERR(
3692 					"RSS QoS distribution cannot be configured (%d)\n",
3693 					ret);
3694 					return -1;
3695 				}
3696 			}
3697 
3698 			/* Add Rule into QoS table */
3699 			qos_index = flow->tc_id * priv->fs_entries +
3700 				flow->tc_index;
3701 			if (qos_index >= priv->qos_entries) {
3702 				DPAA2_PMD_ERR("QoS table with %d entries full",
3703 					priv->qos_entries);
3704 				return -1;
3705 			}
3706 
3707 			flow->qos_real_key_size =
3708 			  priv->extract.qos_key_extract.key_info.key_total_size;
3709 			flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
3710 			ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token,
3711 						&flow->qos_rule, flow->tc_id,
3712 						qos_index, 0, 0);
3713 			if (ret < 0) {
3714 				DPAA2_PMD_ERR(
3715 				"Error in adding entry to QoS table (%d)",
3716 				ret);
3717 				return ret;
3718 			}
3719 			break;
3720 		case RTE_FLOW_ACTION_TYPE_END:
3721 			end_of_list = 1;
3722 			break;
3723 		default:
3724 			DPAA2_PMD_ERR("Invalid action type");
3725 			ret = -ENOTSUP;
3726 			break;
3727 		}
3728 		j++;
3729 	}
3730 
3731 	if (!ret) {
3732 		if (is_keycfg_configured &
3733 			(DPAA2_QOS_TABLE_RECONFIGURE |
3734 			DPAA2_FS_TABLE_RECONFIGURE)) {
3735 			ret = dpaa2_flow_entry_update(priv, flow->tc_id);
3736 			if (ret) {
3737 				DPAA2_PMD_ERR("Flow entry update failed.");
3738 
3739 				return -1;
3740 			}
3741 		}
3742 		/* Insert the new rule at the tail of the flow list. */
3743 		if (!curr) {
3744 			LIST_INSERT_HEAD(&priv->flows, flow, next);
3745 		} else {
3746 			while (LIST_NEXT(curr, next))
3747 				curr = LIST_NEXT(curr, next);
3748 			LIST_INSERT_AFTER(curr, flow, next);
3749 		}
3750 	}
3751 	return ret;
3752 }
3753 
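/**
 * Check the flow attributes against the DPNI capabilities: the group selects
 * a traffic class and the priority an FS entry within it, and only ingress
 * rules are supported.
 */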
3754 static inline int
3755 dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr,
3756 		      const struct rte_flow_attr *attr)
3757 {
3758 	int ret = 0;
3759 
3760 	if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) {
3761 		DPAA2_PMD_ERR("Priority group is out of range\n");
3762 		ret = -ENOTSUP;
3763 	}
3764 	if (unlikely(attr->priority >= dpni_attr->fs_entries)) {
3765 		DPAA2_PMD_ERR("Priority within the group is out of range\n");
3766 		ret = -ENOTSUP;
3767 	}
3768 	if (unlikely(attr->egress)) {
3769 		DPAA2_PMD_ERR(
3770 			"Flow configuration is not supported on egress side\n");
3771 		ret = -ENOTSUP;
3772 	}
3773 	if (unlikely(!attr->ingress)) {
3774 		DPAA2_PMD_ERR("Ingress flag must be configured\n");
3775 		ret = -EINVAL;
3776 	}
3777 	return ret;
3778 }
3779 
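/**
 * Check that every pattern item is of a type supported by this PMD and that
 * each item carries a spec.
 */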
3780 static inline int
3781 dpaa2_dev_verify_patterns(const struct rte_flow_item pattern[])
3782 {
3783 	unsigned int i, j, is_found = 0;
3784 	int ret = 0;
3785 
3786 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3787 		for (i = 0; i < RTE_DIM(dpaa2_supported_pattern_type); i++) {
3788 			if (dpaa2_supported_pattern_type[i]
3789 					== pattern[j].type) {
3790 				is_found = 1;
3791 				break;
3792 			}
3793 		}
3794 		if (!is_found) {
3795 			ret = -ENOTSUP;
3796 			break;
3797 		}
3798 	}
3799 	/* Each pattern item must also provide a spec. */
3800 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3801 		if (!pattern[j].spec) {
3802 			ret = -EINVAL;
3803 			break;
3804 		}
3805 	}
3806 
3807 	return ret;
3808 }
3809 
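/**
 * Check that every action is of a type supported by this PMD and that each
 * action other than DROP carries a configuration.
 */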
3810 static inline int
3811 dpaa2_dev_verify_actions(const struct rte_flow_action actions[])
3812 {
3813 	unsigned int i, j, is_found = 0;
3814 	int ret = 0;
3815 
3816 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3817 		for (i = 0; i < RTE_DIM(dpaa2_supported_action_type); i++) {
3818 			if (dpaa2_supported_action_type[i] == actions[j].type) {
3819 				is_found = 1;
3820 				break;
3821 			}
3822 		}
3823 		if (!is_found) {
3824 			ret = -ENOTSUP;
3825 			break;
3826 		}
3827 	}
3828 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3829 		if (actions[j].type != RTE_FLOW_ACTION_TYPE_DROP &&
3830 				!actions[j].conf)
3831 			ret = -EINVAL;
3832 	}
3833 	return ret;
3834 }
3835 
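/**
 * Validate a flow rule against the DPNI attributes and the pattern/action
 * types supported by this PMD.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */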
3836 static
3837 int dpaa2_flow_validate(struct rte_eth_dev *dev,
3838 			const struct rte_flow_attr *flow_attr,
3839 			const struct rte_flow_item pattern[],
3840 			const struct rte_flow_action actions[],
3841 			struct rte_flow_error *error)
3842 {
3843 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3844 	struct dpni_attr dpni_attr;
3845 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3846 	uint16_t token = priv->token;
3847 	int ret = 0;
3848 
3849 	memset(&dpni_attr, 0, sizeof(struct dpni_attr));
3850 	ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr);
3851 	if (ret < 0) {
3852 		DPAA2_PMD_ERR(
3853 			"Failure to get dpni@%p attribute, err code %d\n",
3854 			dpni, ret);
3855 		rte_flow_error_set(error, EPERM,
3856 			   RTE_FLOW_ERROR_TYPE_ATTR,
3857 			   flow_attr, "invalid");
3858 		return ret;
3859 	}
3860 
3861 	/* Verify input attributes */
3862 	ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr);
3863 	if (ret < 0) {
3864 		DPAA2_PMD_ERR(
3865 			"Invalid attributes are given\n");
3866 		rte_flow_error_set(error, EPERM,
3867 			   RTE_FLOW_ERROR_TYPE_ATTR,
3868 			   flow_attr, "invalid");
3869 		goto not_valid_params;
3870 	}
3871 	/* Verify input pattern list */
3872 	ret = dpaa2_dev_verify_patterns(pattern);
3873 	if (ret < 0) {
3874 		DPAA2_PMD_ERR(
3875 			"Invalid pattern list is given\n");
3876 		rte_flow_error_set(error, EPERM,
3877 			   RTE_FLOW_ERROR_TYPE_ITEM,
3878 			   pattern, "invalid");
3879 		goto not_valid_params;
3880 	}
3881 	/* Verify input action list */
3882 	ret = dpaa2_dev_verify_actions(actions);
3883 	if (ret < 0) {
3884 		DPAA2_PMD_ERR(
3885 			"Invalid action list is given\n");
3886 		rte_flow_error_set(error, EPERM,
3887 			   RTE_FLOW_ERROR_TYPE_ACTION,
3888 			   actions, "invalid");
3889 		goto not_valid_params;
3890 	}
3891 not_valid_params:
3892 	return ret;
3893 }
3894 
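/**
 * Create a flow rule.
 *
 * Allocates the rule structure plus DMA'ble key/mask buffers for both the
 * QoS and FS tables, then builds and programs the rule through
 * dpaa2_generic_flow_set().
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */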
3895 static
3896 struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev,
3897 				   const struct rte_flow_attr *attr,
3898 				   const struct rte_flow_item pattern[],
3899 				   const struct rte_flow_action actions[],
3900 				   struct rte_flow_error *error)
3901 {
3902 	struct rte_flow *flow = NULL;
3903 	size_t key_iova = 0, mask_iova = 0;
3904 	int ret;
3905 
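	/* Optional tuning knobs read from the environment.
	 * DPAA2_FLOW_CONTROL_LOG turns on dumping of the generated extracts
	 * and rules (checked elsewhere in this file), and
	 * DPAA2_FLOW_CONTROL_MISS_FLOW selects the flow ID that receives
	 * packets matching no FS entry.  Illustrative invocation:
	 *   DPAA2_FLOW_CONTROL_MISS_FLOW=0 ./dpdk-testpmd ...
	 */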
3906 	dpaa2_flow_control_log =
3907 		getenv("DPAA2_FLOW_CONTROL_LOG");
3908 
3909 	if (getenv("DPAA2_FLOW_CONTROL_MISS_FLOW")) {
3910 		struct dpaa2_dev_priv *priv = dev->data->dev_private;
3911 
3912 		dpaa2_flow_miss_flow_id =
3913 			atoi(getenv("DPAA2_FLOW_CONTROL_MISS_FLOW"));
3914 		if (dpaa2_flow_miss_flow_id >= priv->dist_queues) {
3915 			DPAA2_PMD_ERR(
3916 				"The miss flow ID %d exceeds the maximum flow ID %d",
3917 				dpaa2_flow_miss_flow_id,
3918 				priv->dist_queues - 1);
3919 			return NULL;
3920 		}
3921 	}
3922 
3923 	flow = rte_zmalloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE);
3924 	if (!flow) {
3925 		DPAA2_PMD_ERR("Failure to allocate memory for flow");
3926 		goto mem_failure;
3927 	}
3928 	/* Allocate DMA'ble memory to write the rules */
3929 	key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3930 	if (!key_iova) {
3931 		DPAA2_PMD_ERR(
3932 			"Memory allocation failure for rule configuration\n");
3933 		goto mem_failure;
3934 	}
3935 	mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3936 	if (!mask_iova) {
3937 		DPAA2_PMD_ERR(
3938 			"Memory allocation failure for rule configuration\n");
3939 		goto mem_failure;
3940 	}
3941 
3942 	flow->qos_rule.key_iova = key_iova;
3943 	flow->qos_rule.mask_iova = mask_iova;
3944 
3945 	/* Allocate DMA'ble memory to write the rules */
3946 	key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3947 	if (!key_iova) {
3948 		DPAA2_PMD_ERR(
3949 			"Memory allocation failure for rule configuration\n");
3950 		goto mem_failure;
3951 	}
3952 	mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3953 	if (!mask_iova) {
3954 		DPAA2_PMD_ERR(
3955 			"Memory allocation failure for rule configuration\n");
3956 		goto mem_failure;
3957 	}
3958 
3959 	flow->fs_rule.key_iova = key_iova;
3960 	flow->fs_rule.mask_iova = mask_iova;
3961 
3962 	flow->ipaddr_rule.ipaddr_type = FLOW_NONE_IPADDR;
3963 	flow->ipaddr_rule.qos_ipsrc_offset =
3964 		IP_ADDRESS_OFFSET_INVALID;
3965 	flow->ipaddr_rule.qos_ipdst_offset =
3966 		IP_ADDRESS_OFFSET_INVALID;
3967 	flow->ipaddr_rule.fs_ipsrc_offset =
3968 		IP_ADDRESS_OFFSET_INVALID;
3969 	flow->ipaddr_rule.fs_ipdst_offset =
3970 		IP_ADDRESS_OFFSET_INVALID;
3971 
3972 	switch (dpaa2_filter_type) {
3973 	case RTE_ETH_FILTER_GENERIC:
3974 		ret = dpaa2_generic_flow_set(flow, dev, attr, pattern,
3975 					     actions, error);
3976 		if (ret < 0) {
3977 			if (error->type > RTE_FLOW_ERROR_TYPE_ACTION)
3978 				rte_flow_error_set(error, EPERM,
3979 						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3980 						attr, "unknown");
3981 			DPAA2_PMD_ERR(
3982 			"Failure to create flow, return code (%d)", ret);
3983 			goto creation_error;
3984 		}
3985 		break;
3986 	default:
3987 		DPAA2_PMD_ERR("Filter type (%d) not supported",
3988 		dpaa2_filter_type);
3989 		break;
3990 	}
3991 
3992 	return flow;
3993 mem_failure:
3994 	rte_flow_error_set(error, EPERM,
3995 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3996 			   NULL, "memory alloc");
3997 creation_error:
3998 	rte_free((void *)flow);
3999 	rte_free((void *)key_iova);
4000 	rte_free((void *)mask_iova);
4001 
4002 	return NULL;
4003 }
4004 
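/**
 * Destroy a flow rule.
 *
 * Removes the rule from the hardware tables (the QoS entry only exists when
 * more than one Rx traffic class is configured) and releases the rule memory.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */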
4005 static
4006 int dpaa2_flow_destroy(struct rte_eth_dev *dev,
4007 		       struct rte_flow *flow,
4008 		       struct rte_flow_error *error)
4009 {
4010 	int ret = 0;
4011 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4012 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
4013 
4014 	switch (flow->action) {
4015 	case RTE_FLOW_ACTION_TYPE_QUEUE:
4016 		if (priv->num_rx_tc > 1) {
4017 			/* Remove entry from QoS table first */
4018 			ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
4019 					&flow->qos_rule);
4020 			if (ret < 0) {
4021 				DPAA2_PMD_ERR(
4022 					"Error in removing entry from QoS table(%d)", ret);
4023 				goto error;
4024 			}
4025 		}
4026 
4027 		/* Then remove entry from FS table */
4028 		ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token,
4029 					   flow->tc_id, &flow->fs_rule);
4030 		if (ret < 0) {
4031 			DPAA2_PMD_ERR(
4032 				"Error in removing entry from FS table(%d)", ret);
4033 			goto error;
4034 		}
4035 		break;
4036 	case RTE_FLOW_ACTION_TYPE_RSS:
4037 		if (priv->num_rx_tc > 1) {
4038 			ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
4039 					&flow->qos_rule);
4040 			if (ret < 0) {
4041 				DPAA2_PMD_ERR(
4042 					"Error in removing entry from QoS table(%d)", ret);
4043 				goto error;
4044 			}
4045 		}
4046 		break;
4047 	default:
4048 		DPAA2_PMD_ERR(
4049 		"Action type (%d) is not supported", flow->action);
4050 		ret = -ENOTSUP;
4051 		break;
4052 	}
4053 
4054 	LIST_REMOVE(flow, next);
4055 	rte_free((void *)(size_t)flow->qos_rule.key_iova);
4056 	rte_free((void *)(size_t)flow->qos_rule.mask_iova);
4057 	rte_free((void *)(size_t)flow->fs_rule.key_iova);
4058 	rte_free((void *)(size_t)flow->fs_rule.mask_iova);
4059 	/* Now free the flow */
4060 	rte_free(flow);
4061 
4062 error:
4063 	if (ret)
4064 		rte_flow_error_set(error, EPERM,
4065 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4066 				   NULL, "unknown");
4067 	return ret;
4068 }
4069 
4070 /**
4071  * Destroy user-configured flow rules.
4072  *
4073  * This function skips internal flow rules.
4074  *
4075  * @see rte_flow_flush()
4076  * @see rte_flow_ops
4077  */
4078 static int
4079 dpaa2_flow_flush(struct rte_eth_dev *dev,
4080 		struct rte_flow_error *error)
4081 {
4082 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4083 	struct rte_flow *flow = LIST_FIRST(&priv->flows);
4084 
4085 	while (flow) {
4086 		struct rte_flow *next = LIST_NEXT(flow, next);
4087 
4088 		dpaa2_flow_destroy(dev, flow, error);
4089 		flow = next;
4090 	}
4091 	return 0;
4092 }
4093 
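/**
 * Flow query is not implemented by this PMD; the callback is a stub that
 * reports success.
 */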
4094 static int
4095 dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused,
4096 		struct rte_flow *flow __rte_unused,
4097 		const struct rte_flow_action *actions __rte_unused,
4098 		void *data __rte_unused,
4099 		struct rte_flow_error *error __rte_unused)
4100 {
4101 	return 0;
4102 }
4103 
4104 /**
4105  * Clean up all flow rules.
4106  *
4107  * Unlike dpaa2_flow_flush(), this function takes care of all remaining flow
4108  * rules regardless of whether they are internal or user-configured.
4109  *
4110  * @param priv
4111  *   Pointer to private structure.
4112  */
4113 void
4114 dpaa2_flow_clean(struct rte_eth_dev *dev)
4115 {
4116 	struct rte_flow *flow;
4117 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4118 
4119 	while ((flow = LIST_FIRST(&priv->flows)))
4120 		dpaa2_flow_destroy(dev, flow, NULL);
4121 }
4122 
4123 const struct rte_flow_ops dpaa2_flow_ops = {
4124 	.create	= dpaa2_flow_create,
4125 	.validate = dpaa2_flow_validate,
4126 	.destroy = dpaa2_flow_destroy,
4127 	.flush	= dpaa2_flow_flush,
4128 	.query	= dpaa2_flow_query,
4129 };
4130
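/*
 * Example (illustrative only, not part of the driver): an application reaches
 * the callbacks above through the generic rte_flow API.  A minimal sketch,
 * assuming port 0 is a dpaa2 device and Rx queue 1 exists; note that this PMD
 * requires a spec for every pattern item:
 *
 *	struct rte_flow_error err;
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.dst_port = RTE_BE16(4789),
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr.dst_port = RTE_BE16(0xffff),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow *flow;
 *
 *	if (rte_flow_validate(0, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(0, &attr, pattern, actions, &err);
 */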