xref: /dpdk/drivers/net/dpaa2/dpaa2_flow.c (revision daa02b5cddbb8e11b31d41e2bf7bb1ae64dcae2f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2021 NXP
3  */
4 
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 
13 #include <rte_ethdev.h>
14 #include <rte_log.h>
15 #include <rte_malloc.h>
16 #include <rte_flow_driver.h>
17 #include <rte_tailq.h>
18 
19 #include <fsl_dpni.h>
20 #include <fsl_dpkg.h>
21 
22 #include <dpaa2_ethdev.h>
23 #include <dpaa2_pmd_logs.h>
24 
25 /* Workaround to discriminate UDP/TCP/SCTP flows
26  * by the next-protocol field of L3, because
27  * MC/WRIOP cannot identify the L4 protocol
28  * from the L4 ports alone.
29  */
30 int mc_l4_port_identification;
31 
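/* dpaa2_flow_control_log enables the key/mask dump helpers below when set;
 * dpaa2_flow_miss_flow_id defaults to DPNI_FS_MISS_DROP so that traffic
 * matching no flow rule is dropped.
 */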
32 static char *dpaa2_flow_control_log;
33 static uint16_t dpaa2_flow_miss_flow_id =
34 	DPNI_FS_MISS_DROP;
35 
36 #define FIXED_ENTRY_SIZE DPNI_MAX_KEY_SIZE
37 
38 enum flow_rule_ipaddr_type {
39 	FLOW_NONE_IPADDR,
40 	FLOW_IPV4_ADDR,
41 	FLOW_IPV6_ADDR
42 };
43 
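/* Byte offsets of the IP source/destination address fields within the QoS
 * and FS rule keys; negative when the address is not part of the rule.
 */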
44 struct flow_rule_ipaddr {
45 	enum flow_rule_ipaddr_type ipaddr_type;
46 	int qos_ipsrc_offset;
47 	int qos_ipdst_offset;
48 	int fs_ipsrc_offset;
49 	int fs_ipdst_offset;
50 };
51 
52 struct rte_flow {
53 	LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
54 	struct dpni_rule_cfg qos_rule;
55 	struct dpni_rule_cfg fs_rule;
56 	uint8_t qos_real_key_size;
57 	uint8_t fs_real_key_size;
58 	uint8_t tc_id; /**< Traffic Class ID. */
59 	uint8_t tc_index; /**< Index within this Traffic Class. */
60 	enum rte_flow_action_type action;
61 	/* IP addresses are handled specially: record their
62 	 * offsets within the key/mask.
63 	 */
64 	struct flow_rule_ipaddr ipaddr_rule;
65 	struct dpni_fs_action_cfg action_cfg;
66 };
67 
68 static const
69 enum rte_flow_item_type dpaa2_supported_pattern_type[] = {
70 	RTE_FLOW_ITEM_TYPE_END,
71 	RTE_FLOW_ITEM_TYPE_ETH,
72 	RTE_FLOW_ITEM_TYPE_VLAN,
73 	RTE_FLOW_ITEM_TYPE_IPV4,
74 	RTE_FLOW_ITEM_TYPE_IPV6,
75 	RTE_FLOW_ITEM_TYPE_ICMP,
76 	RTE_FLOW_ITEM_TYPE_UDP,
77 	RTE_FLOW_ITEM_TYPE_TCP,
78 	RTE_FLOW_ITEM_TYPE_SCTP,
79 	RTE_FLOW_ITEM_TYPE_GRE,
80 };
81 
82 static const
83 enum rte_flow_action_type dpaa2_supported_action_type[] = {
84 	RTE_FLOW_ACTION_TYPE_END,
85 	RTE_FLOW_ACTION_TYPE_QUEUE,
86 	RTE_FLOW_ACTION_TYPE_PHY_PORT,
87 	RTE_FLOW_ACTION_TYPE_PORT_ID,
88 	RTE_FLOW_ACTION_TYPE_RSS
89 };
90 
91 static const
92 enum rte_flow_action_type dpaa2_supported_fs_action_type[] = {
93 	RTE_FLOW_ACTION_TYPE_QUEUE,
94 	RTE_FLOW_ACTION_TYPE_PHY_PORT,
95 	RTE_FLOW_ACTION_TYPE_PORT_ID
96 };
97 
98 /* Max of enum rte_flow_item_type + 1; generic IP item for IPv4 and IPv6 */
99 #define DPAA2_FLOW_ITEM_TYPE_GENERIC_IP (RTE_FLOW_ITEM_TYPE_META + 1)
100 
101 #ifndef __cplusplus
102 static const struct rte_flow_item_eth dpaa2_flow_item_eth_mask = {
103 	.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
104 	.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
105 	.type = RTE_BE16(0xffff),
106 };
107 
108 static const struct rte_flow_item_vlan dpaa2_flow_item_vlan_mask = {
109 	.tci = RTE_BE16(0xffff),
110 };
111 
112 static const struct rte_flow_item_ipv4 dpaa2_flow_item_ipv4_mask = {
113 	.hdr.src_addr = RTE_BE32(0xffffffff),
114 	.hdr.dst_addr = RTE_BE32(0xffffffff),
115 	.hdr.next_proto_id = 0xff,
116 };
117 
118 static const struct rte_flow_item_ipv6 dpaa2_flow_item_ipv6_mask = {
119 	.hdr = {
120 		.src_addr =
121 			"\xff\xff\xff\xff\xff\xff\xff\xff"
122 			"\xff\xff\xff\xff\xff\xff\xff\xff",
123 		.dst_addr =
124 			"\xff\xff\xff\xff\xff\xff\xff\xff"
125 			"\xff\xff\xff\xff\xff\xff\xff\xff",
126 		.proto = 0xff
127 	},
128 };
129 
130 static const struct rte_flow_item_icmp dpaa2_flow_item_icmp_mask = {
131 	.hdr.icmp_type = 0xff,
132 	.hdr.icmp_code = 0xff,
133 };
134 
135 static const struct rte_flow_item_udp dpaa2_flow_item_udp_mask = {
136 	.hdr = {
137 		.src_port = RTE_BE16(0xffff),
138 		.dst_port = RTE_BE16(0xffff),
139 	},
140 };
141 
142 static const struct rte_flow_item_tcp dpaa2_flow_item_tcp_mask = {
143 	.hdr = {
144 		.src_port = RTE_BE16(0xffff),
145 		.dst_port = RTE_BE16(0xffff),
146 	},
147 };
148 
149 static const struct rte_flow_item_sctp dpaa2_flow_item_sctp_mask = {
150 	.hdr = {
151 		.src_port = RTE_BE16(0xffff),
152 		.dst_port = RTE_BE16(0xffff),
153 	},
154 };
155 
156 static const struct rte_flow_item_gre dpaa2_flow_item_gre_mask = {
157 	.protocol = RTE_BE16(0xffff),
158 };
159 
160 #endif
161 
162 static inline void dpaa2_prot_field_string(
163 	enum net_prot prot, uint32_t field,
164 	char *string)
165 {
166 	if (!dpaa2_flow_control_log)
167 		return;
168 
169 	if (prot == NET_PROT_ETH) {
170 		strcpy(string, "eth");
171 		if (field == NH_FLD_ETH_DA)
172 			strcat(string, ".dst");
173 		else if (field == NH_FLD_ETH_SA)
174 			strcat(string, ".src");
175 		else if (field == NH_FLD_ETH_TYPE)
176 			strcat(string, ".type");
177 		else
178 			strcat(string, ".unknown field");
179 	} else if (prot == NET_PROT_VLAN) {
180 		strcpy(string, "vlan");
181 		if (field == NH_FLD_VLAN_TCI)
182 			strcat(string, ".tci");
183 		else
184 			strcat(string, ".unknown field");
185 	} else if (prot == NET_PROT_IP) {
186 		strcpy(string, "ip");
187 		if (field == NH_FLD_IP_SRC)
188 			strcat(string, ".src");
189 		else if (field == NH_FLD_IP_DST)
190 			strcat(string, ".dst");
191 		else if (field == NH_FLD_IP_PROTO)
192 			strcat(string, ".proto");
193 		else
194 			strcat(string, ".unknown field");
195 	} else if (prot == NET_PROT_TCP) {
196 		strcpy(string, "tcp");
197 		if (field == NH_FLD_TCP_PORT_SRC)
198 			strcat(string, ".src");
199 		else if (field == NH_FLD_TCP_PORT_DST)
200 			strcat(string, ".dst");
201 		else
202 			strcat(string, ".unknown field");
203 	} else if (prot == NET_PROT_UDP) {
204 		strcpy(string, "udp");
205 		if (field == NH_FLD_UDP_PORT_SRC)
206 			strcat(string, ".src");
207 		else if (field == NH_FLD_UDP_PORT_DST)
208 			strcat(string, ".dst");
209 		else
210 			strcat(string, ".unknown field");
211 	} else if (prot == NET_PROT_ICMP) {
212 		strcpy(string, "icmp");
213 		if (field == NH_FLD_ICMP_TYPE)
214 			strcat(string, ".type");
215 		else if (field == NH_FLD_ICMP_CODE)
216 			strcat(string, ".code");
217 		else
218 			strcat(string, ".unknown field");
219 	} else if (prot == NET_PROT_SCTP) {
220 		strcpy(string, "sctp");
221 		if (field == NH_FLD_SCTP_PORT_SRC)
222 			strcat(string, ".src");
223 		else if (field == NH_FLD_SCTP_PORT_DST)
224 			strcat(string, ".dst");
225 		else
226 			strcat(string, ".unknown field");
227 	} else if (prot == NET_PROT_GRE) {
228 		strcpy(string, "gre");
229 		if (field == NH_FLD_GRE_TYPE)
230 			strcat(string, ".type");
231 		else
232 			strcat(string, ".unknown field");
233 	} else {
234 		strcpy(string, "unknown protocol");
235 	}
236 }
237 
238 static inline void dpaa2_flow_qos_table_extracts_log(
239 	const struct dpaa2_dev_priv *priv)
240 {
241 	int idx;
242 	char string[32];
243 
244 	if (!dpaa2_flow_control_log)
245 		return;
246 
247 	printf("Setup QoS table: number of extracts: %d\r\n",
248 			priv->extract.qos_key_extract.dpkg.num_extracts);
249 	for (idx = 0; idx < priv->extract.qos_key_extract.dpkg.num_extracts;
250 		idx++) {
251 		dpaa2_prot_field_string(priv->extract.qos_key_extract.dpkg
252 			.extracts[idx].extract.from_hdr.prot,
253 			priv->extract.qos_key_extract.dpkg.extracts[idx]
254 			.extract.from_hdr.field,
255 			string);
256 		printf("%s", string);
257 		if ((idx + 1) < priv->extract.qos_key_extract.dpkg.num_extracts)
258 			printf(" / ");
259 	}
260 	printf("\r\n");
261 }
262 
263 static inline void dpaa2_flow_fs_table_extracts_log(
264 	const struct dpaa2_dev_priv *priv, int tc_id)
265 {
266 	int idx;
267 	char string[32];
268 
269 	if (!dpaa2_flow_control_log)
270 		return;
271 
272 	printf("Setup FS table: number of extracts of TC[%d]: %d\r\n",
273 			tc_id, priv->extract.tc_key_extract[tc_id]
274 			.dpkg.num_extracts);
275 	for (idx = 0; idx < priv->extract.tc_key_extract[tc_id]
276 		.dpkg.num_extracts; idx++) {
277 		dpaa2_prot_field_string(priv->extract.tc_key_extract[tc_id]
278 			.dpkg.extracts[idx].extract.from_hdr.prot,
279 			priv->extract.tc_key_extract[tc_id].dpkg.extracts[idx]
280 			.extract.from_hdr.field,
281 			string);
282 		printf("%s", string);
283 		if ((idx + 1) < priv->extract.tc_key_extract[tc_id]
284 			.dpkg.num_extracts)
285 			printf(" / ");
286 	}
287 	printf("\r\n");
288 }
289 
290 static inline void dpaa2_flow_qos_entry_log(
291 	const char *log_info, const struct rte_flow *flow, int qos_index)
292 {
293 	int idx;
294 	uint8_t *key, *mask;
295 
296 	if (!dpaa2_flow_control_log)
297 		return;
298 
299 	printf("\r\n%s QoS entry[%d] for TC[%d], extracts size is %d\r\n",
300 		log_info, qos_index, flow->tc_id, flow->qos_real_key_size);
301 
302 	key = (uint8_t *)(size_t)flow->qos_rule.key_iova;
303 	mask = (uint8_t *)(size_t)flow->qos_rule.mask_iova;
304 
305 	printf("key:\r\n");
306 	for (idx = 0; idx < flow->qos_real_key_size; idx++)
307 		printf("%02x ", key[idx]);
308 
309 	printf("\r\nmask:\r\n");
310 	for (idx = 0; idx < flow->qos_real_key_size; idx++)
311 		printf("%02x ", mask[idx]);
312 
313 	printf("\r\n%s QoS ipsrc: %d, ipdst: %d\r\n", log_info,
314 		flow->ipaddr_rule.qos_ipsrc_offset,
315 		flow->ipaddr_rule.qos_ipdst_offset);
316 }
317 
318 static inline void dpaa2_flow_fs_entry_log(
319 	const char *log_info, const struct rte_flow *flow)
320 {
321 	int idx;
322 	uint8_t *key, *mask;
323 
324 	if (!dpaa2_flow_control_log)
325 		return;
326 
327 	printf("\r\n%s FS/TC entry[%d] of TC[%d], extracts size is %d\r\n",
328 		log_info, flow->tc_index, flow->tc_id, flow->fs_real_key_size);
329 
330 	key = (uint8_t *)(size_t)flow->fs_rule.key_iova;
331 	mask = (uint8_t *)(size_t)flow->fs_rule.mask_iova;
332 
333 	printf("key:\r\n");
334 	for (idx = 0; idx < flow->fs_real_key_size; idx++)
335 		printf("%02x ", key[idx]);
336 
337 	printf("\r\nmask:\r\n");
338 	for (idx = 0; idx < flow->fs_real_key_size; idx++)
339 		printf("%02x ", mask[idx]);
340 
341 	printf("\r\n%s FS ipsrc: %d, ipdst: %d\r\n", log_info,
342 		flow->ipaddr_rule.fs_ipsrc_offset,
343 		flow->ipaddr_rule.fs_ipdst_offset);
344 }
345 
346 static inline void dpaa2_flow_extract_key_set(
347 	struct dpaa2_key_info *key_info, int index, uint8_t size)
348 {
349 	key_info->key_size[index] = size;
350 	if (index > 0) {
351 		key_info->key_offset[index] =
352 			key_info->key_offset[index - 1] +
353 			key_info->key_size[index - 1];
354 	} else {
355 		key_info->key_offset[index] = 0;
356 	}
357 	key_info->key_total_size += size;
358 }
359 
360 static int dpaa2_flow_extract_add(
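/* Append a header-field extract to the profile. IP SRC/DST extracts are kept
 * at the tail (their size differs between IPv4 and IPv6), so other extracts
 * are inserted just before them, the IP extracts are shifted back and the
 * recorded IPv4/IPv6 offsets are adjusted accordingly.
 */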
361 	struct dpaa2_key_extract *key_extract,
362 	enum net_prot prot,
363 	uint32_t field, uint8_t field_size)
364 {
365 	int index, ip_src = -1, ip_dst = -1;
366 	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
367 	struct dpaa2_key_info *key_info = &key_extract->key_info;
368 
369 	if (dpkg->num_extracts >=
370 		DPKG_MAX_NUM_OF_EXTRACTS) {
371 		DPAA2_PMD_WARN("Number of extracts overflows");
372 		return -1;
373 	}
374 	/* Before reordering, the IP SRC and IP DST extracts (if any)
375 	 * are already the last extract(s) in the profile.
376 	 */
377 	for (index = 0; index < dpkg->num_extracts; index++) {
378 		if (dpkg->extracts[index].extract.from_hdr.prot ==
379 			NET_PROT_IP) {
380 			if (dpkg->extracts[index].extract.from_hdr.field ==
381 				NH_FLD_IP_SRC) {
382 				ip_src = index;
383 			}
384 			if (dpkg->extracts[index].extract.from_hdr.field ==
385 				NH_FLD_IP_DST) {
386 				ip_dst = index;
387 			}
388 		}
389 	}
390 
391 	if (ip_src >= 0)
392 		RTE_ASSERT((ip_src + 2) >= dpkg->num_extracts);
393 
394 	if (ip_dst >= 0)
395 		RTE_ASSERT((ip_dst + 2) >= dpkg->num_extracts);
396 
397 	if (prot == NET_PROT_IP &&
398 		(field == NH_FLD_IP_SRC ||
399 		field == NH_FLD_IP_DST)) {
400 		index = dpkg->num_extracts;
401 	} else {
402 		if (ip_src >= 0 && ip_dst >= 0)
403 			index = dpkg->num_extracts - 2;
404 		else if (ip_src >= 0 || ip_dst >= 0)
405 			index = dpkg->num_extracts - 1;
406 		else
407 			index = dpkg->num_extracts;
408 	}
409 
410 	dpkg->extracts[index].type = DPKG_EXTRACT_FROM_HDR;
411 	dpkg->extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
412 	dpkg->extracts[index].extract.from_hdr.prot = prot;
413 	dpkg->extracts[index].extract.from_hdr.field = field;
414 	if (prot == NET_PROT_IP &&
415 		(field == NH_FLD_IP_SRC ||
416 		field == NH_FLD_IP_DST)) {
417 		dpaa2_flow_extract_key_set(key_info, index, 0);
418 	} else {
419 		dpaa2_flow_extract_key_set(key_info, index, field_size);
420 	}
421 
422 	if (prot == NET_PROT_IP) {
423 		if (field == NH_FLD_IP_SRC) {
424 			if (key_info->ipv4_dst_offset >= 0) {
425 				key_info->ipv4_src_offset =
426 					key_info->ipv4_dst_offset +
427 					NH_FLD_IPV4_ADDR_SIZE;
428 			} else {
429 				key_info->ipv4_src_offset =
430 					key_info->key_offset[index - 1] +
431 						key_info->key_size[index - 1];
432 			}
433 			if (key_info->ipv6_dst_offset >= 0) {
434 				key_info->ipv6_src_offset =
435 					key_info->ipv6_dst_offset +
436 					NH_FLD_IPV6_ADDR_SIZE;
437 			} else {
438 				key_info->ipv6_src_offset =
439 					key_info->key_offset[index - 1] +
440 						key_info->key_size[index - 1];
441 			}
442 		} else if (field == NH_FLD_IP_DST) {
443 			if (key_info->ipv4_src_offset >= 0) {
444 				key_info->ipv4_dst_offset =
445 					key_info->ipv4_src_offset +
446 					NH_FLD_IPV4_ADDR_SIZE;
447 			} else {
448 				key_info->ipv4_dst_offset =
449 					key_info->key_offset[index - 1] +
450 						key_info->key_size[index - 1];
451 			}
452 			if (key_info->ipv6_src_offset >= 0) {
453 				key_info->ipv6_dst_offset =
454 					key_info->ipv6_src_offset +
455 					NH_FLD_IPV6_ADDR_SIZE;
456 			} else {
457 				key_info->ipv6_dst_offset =
458 					key_info->key_offset[index - 1] +
459 						key_info->key_size[index - 1];
460 			}
461 		}
462 	}
463 
464 	if (index == dpkg->num_extracts) {
465 		dpkg->num_extracts++;
466 		return 0;
467 	}
468 
469 	if (ip_src >= 0) {
470 		ip_src++;
471 		dpkg->extracts[ip_src].type =
472 			DPKG_EXTRACT_FROM_HDR;
473 		dpkg->extracts[ip_src].extract.from_hdr.type =
474 			DPKG_FULL_FIELD;
475 		dpkg->extracts[ip_src].extract.from_hdr.prot =
476 			NET_PROT_IP;
477 		dpkg->extracts[ip_src].extract.from_hdr.field =
478 			NH_FLD_IP_SRC;
479 		dpaa2_flow_extract_key_set(key_info, ip_src, 0);
480 		key_info->ipv4_src_offset += field_size;
481 		key_info->ipv6_src_offset += field_size;
482 	}
483 	if (ip_dst >= 0) {
484 		ip_dst++;
485 		dpkg->extracts[ip_dst].type =
486 			DPKG_EXTRACT_FROM_HDR;
487 		dpkg->extracts[ip_dst].extract.from_hdr.type =
488 			DPKG_FULL_FIELD;
489 		dpkg->extracts[ip_dst].extract.from_hdr.prot =
490 			NET_PROT_IP;
491 		dpkg->extracts[ip_dst].extract.from_hdr.field =
492 			NH_FLD_IP_DST;
493 		dpaa2_flow_extract_key_set(key_info, ip_dst, 0);
494 		key_info->ipv4_dst_offset += field_size;
495 		key_info->ipv6_dst_offset += field_size;
496 	}
497 
498 	dpkg->num_extracts++;
499 
500 	return 0;
501 }
502 
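/* Build a raw (from-data) extract profile: the requested size is split into
 * DPAA2_FLOW_MAX_KEY_SIZE chunks, one extract per chunk. Raw extracts cannot
 * be combined with header extracts.
 */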
503 static int dpaa2_flow_extract_add_raw(struct dpaa2_key_extract *key_extract,
504 				      int size)
505 {
506 	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
507 	struct dpaa2_key_info *key_info = &key_extract->key_info;
508 	int last_extract_size, index;
509 
510 	if (dpkg->num_extracts != 0 && dpkg->extracts[0].type !=
511 	    DPKG_EXTRACT_FROM_DATA) {
512 		DPAA2_PMD_WARN("RAW extract cannot be combined with others");
513 		return -1;
514 	}
515 
516 	last_extract_size = (size % DPAA2_FLOW_MAX_KEY_SIZE);
517 	dpkg->num_extracts = (size / DPAA2_FLOW_MAX_KEY_SIZE);
518 	if (last_extract_size)
519 		dpkg->num_extracts++;
520 	else
521 		last_extract_size = DPAA2_FLOW_MAX_KEY_SIZE;
522 
523 	for (index = 0; index < dpkg->num_extracts; index++) {
524 		dpkg->extracts[index].type = DPKG_EXTRACT_FROM_DATA;
525 		if (index == dpkg->num_extracts - 1)
526 			dpkg->extracts[index].extract.from_data.size =
527 				last_extract_size;
528 		else
529 			dpkg->extracts[index].extract.from_data.size =
530 				DPAA2_FLOW_MAX_KEY_SIZE;
531 		dpkg->extracts[index].extract.from_data.offset =
532 			DPAA2_FLOW_MAX_KEY_SIZE * index;
533 	}
534 
535 	key_info->key_total_size = size;
536 	return 0;
537 }
538 
539 /* Protocol discrimination.
540  * Discriminate IPv4/IPv6/VLAN by the Ether type.
541  * Discriminate UDP/TCP/ICMP by the next protocol of IP.
542  */
543 static inline int
544 dpaa2_flow_proto_discrimination_extract(
545 	struct dpaa2_key_extract *key_extract,
546 	enum rte_flow_item_type type)
547 {
548 	if (type == RTE_FLOW_ITEM_TYPE_ETH) {
549 		return dpaa2_flow_extract_add(
550 				key_extract, NET_PROT_ETH,
551 				NH_FLD_ETH_TYPE,
552 				sizeof(rte_be16_t));
553 	} else if (type == (enum rte_flow_item_type)
554 		DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
555 		return dpaa2_flow_extract_add(
556 				key_extract, NET_PROT_IP,
557 				NH_FLD_IP_PROTO,
558 				NH_FLD_IP_PROTO_SIZE);
559 	}
560 
561 	return -1;
562 }
563 
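/* Return the index of the extract matching prot/field, or -1 if absent. */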
564 static inline int dpaa2_flow_extract_search(
565 	struct dpkg_profile_cfg *dpkg,
566 	enum net_prot prot, uint32_t field)
567 {
568 	int i;
569 
570 	for (i = 0; i < dpkg->num_extracts; i++) {
571 		if (dpkg->extracts[i].extract.from_hdr.prot == prot &&
572 			dpkg->extracts[i].extract.from_hdr.field == field) {
573 			return i;
574 		}
575 	}
576 
577 	return -1;
578 }
579 
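/* Return the byte offset of a field within the generated key. IPv4/IPv6
 * addresses use the dedicated offsets tracked in dpaa2_key_info.
 */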
580 static inline int dpaa2_flow_extract_key_offset(
581 	struct dpaa2_key_extract *key_extract,
582 	enum net_prot prot, uint32_t field)
583 {
584 	int i;
585 	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
586 	struct dpaa2_key_info *key_info = &key_extract->key_info;
587 
588 	if (prot == NET_PROT_IPV4 ||
589 		prot == NET_PROT_IPV6)
590 		i = dpaa2_flow_extract_search(dpkg, NET_PROT_IP, field);
591 	else
592 		i = dpaa2_flow_extract_search(dpkg, prot, field);
593 
594 	if (i >= 0) {
595 		if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_SRC)
596 			return key_info->ipv4_src_offset;
597 		else if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_DST)
598 			return key_info->ipv4_dst_offset;
599 		else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_SRC)
600 			return key_info->ipv6_src_offset;
601 		else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_DST)
602 			return key_info->ipv6_dst_offset;
603 		else
604 			return key_info->key_offset[i];
605 	} else {
606 		return -1;
607 	}
608 }
609 
610 struct proto_discrimination {
611 	enum rte_flow_item_type type;
612 	union {
613 		rte_be16_t eth_type;
614 		uint8_t ip_proto;
615 	};
616 };
617 
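/* Write the discriminator value (Ether type or IP next protocol) and an
 * all-ones mask into both the QoS rule and the FS rule of the given group.
 */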
618 static int
619 dpaa2_flow_proto_discrimination_rule(
620 	struct dpaa2_dev_priv *priv, struct rte_flow *flow,
621 	struct proto_discrimination proto, int group)
622 {
623 	enum net_prot prot;
624 	uint32_t field;
625 	int offset;
626 	size_t key_iova;
627 	size_t mask_iova;
628 	rte_be16_t eth_type;
629 	uint8_t ip_proto;
630 
631 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
632 		prot = NET_PROT_ETH;
633 		field = NH_FLD_ETH_TYPE;
634 	} else if (proto.type == DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
635 		prot = NET_PROT_IP;
636 		field = NH_FLD_IP_PROTO;
637 	} else {
638 		DPAA2_PMD_ERR(
639 			"Only Eth and IP are supported to discriminate next proto.");
640 		return -1;
641 	}
642 
643 	offset = dpaa2_flow_extract_key_offset(&priv->extract.qos_key_extract,
644 			prot, field);
645 	if (offset < 0) {
646 		DPAA2_PMD_ERR("QoS prot %d field %d extract failed",
647 				prot, field);
648 		return -1;
649 	}
650 	key_iova = flow->qos_rule.key_iova + offset;
651 	mask_iova = flow->qos_rule.mask_iova + offset;
652 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
653 		eth_type = proto.eth_type;
654 		memcpy((void *)key_iova, (const void *)(&eth_type),
655 			sizeof(rte_be16_t));
656 		eth_type = 0xffff;
657 		memcpy((void *)mask_iova, (const void *)(&eth_type),
658 			sizeof(rte_be16_t));
659 	} else {
660 		ip_proto = proto.ip_proto;
661 		memcpy((void *)key_iova, (const void *)(&ip_proto),
662 			sizeof(uint8_t));
663 		ip_proto = 0xff;
664 		memcpy((void *)mask_iova, (const void *)(&ip_proto),
665 			sizeof(uint8_t));
666 	}
667 
668 	offset = dpaa2_flow_extract_key_offset(
669 			&priv->extract.tc_key_extract[group],
670 			prot, field);
671 	if (offset < 0) {
672 		DPAA2_PMD_ERR("FS prot %d field %d extract failed",
673 				prot, field);
674 		return -1;
675 	}
676 	key_iova = flow->fs_rule.key_iova + offset;
677 	mask_iova = flow->fs_rule.mask_iova + offset;
678 
679 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
680 		eth_type = proto.eth_type;
681 		memcpy((void *)key_iova, (const void *)(&eth_type),
682 			sizeof(rte_be16_t));
683 		eth_type = 0xffff;
684 		memcpy((void *)mask_iova, (const void *)(&eth_type),
685 			sizeof(rte_be16_t));
686 	} else {
687 		ip_proto = proto.ip_proto;
688 		memcpy((void *)key_iova, (const void *)(&ip_proto),
689 			sizeof(uint8_t));
690 		ip_proto = 0xff;
691 		memcpy((void *)mask_iova, (const void *)(&ip_proto),
692 			sizeof(uint8_t));
693 	}
694 
695 	return 0;
696 }
697 
698 static inline int
699 dpaa2_flow_rule_data_set(
700 	struct dpaa2_key_extract *key_extract,
701 	struct dpni_rule_cfg *rule,
702 	enum net_prot prot, uint32_t field,
703 	const void *key, const void *mask, int size)
704 {
705 	int offset = dpaa2_flow_extract_key_offset(key_extract,
706 				prot, field);
707 
708 	if (offset < 0) {
709 		DPAA2_PMD_ERR("prot %d, field %d extract failed",
710 			prot, field);
711 		return -1;
712 	}
713 
714 	memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
715 	memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
716 
717 	return 0;
718 }
719 
720 static inline int
721 dpaa2_flow_rule_data_set_raw(struct dpni_rule_cfg *rule,
722 			     const void *key, const void *mask, int size)
723 {
724 	int offset = 0;
725 
726 	memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
727 	memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
728 
729 	return 0;
730 }
731 
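/* Move the IP address key/mask bytes of one rule from src_offset to the tail
 * position computed for the final extract layout, clearing the old location.
 */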
732 static inline int
733 _dpaa2_flow_rule_move_ipaddr_tail(
734 	struct dpaa2_key_extract *key_extract,
735 	struct dpni_rule_cfg *rule, int src_offset,
736 	uint32_t field, bool ipv4)
737 {
738 	size_t key_src;
739 	size_t mask_src;
740 	size_t key_dst;
741 	size_t mask_dst;
742 	int dst_offset, len;
743 	enum net_prot prot;
744 	char tmp[NH_FLD_IPV6_ADDR_SIZE];
745 
746 	if (field != NH_FLD_IP_SRC &&
747 		field != NH_FLD_IP_DST) {
748 		DPAA2_PMD_ERR("Field of IP addr reorder must be IP SRC/DST");
749 		return -1;
750 	}
751 	if (ipv4)
752 		prot = NET_PROT_IPV4;
753 	else
754 		prot = NET_PROT_IPV6;
755 	dst_offset = dpaa2_flow_extract_key_offset(key_extract,
756 				prot, field);
757 	if (dst_offset < 0) {
758 		DPAA2_PMD_ERR("Field %d reorder extract failed", field);
759 		return -1;
760 	}
761 	key_src = rule->key_iova + src_offset;
762 	mask_src = rule->mask_iova + src_offset;
763 	key_dst = rule->key_iova + dst_offset;
764 	mask_dst = rule->mask_iova + dst_offset;
765 	if (ipv4)
766 		len = sizeof(rte_be32_t);
767 	else
768 		len = NH_FLD_IPV6_ADDR_SIZE;
769 
770 	memcpy(tmp, (char *)key_src, len);
771 	memset((char *)key_src, 0, len);
772 	memcpy((char *)key_dst, tmp, len);
773 
774 	memcpy(tmp, (char *)mask_src, len);
775 	memset((char *)mask_src, 0, len);
776 	memcpy((char *)mask_dst, tmp, len);
777 
778 	return 0;
779 }
780 
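/* Re-position the IP address portions of both the QoS and FS rules after new
 * extracts have been added ahead of them, then refresh the recorded offsets.
 */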
781 static inline int
782 dpaa2_flow_rule_move_ipaddr_tail(
783 	struct rte_flow *flow, struct dpaa2_dev_priv *priv,
784 	int fs_group)
785 {
786 	int ret;
787 	enum net_prot prot;
788 
789 	if (flow->ipaddr_rule.ipaddr_type == FLOW_NONE_IPADDR)
790 		return 0;
791 
792 	if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR)
793 		prot = NET_PROT_IPV4;
794 	else
795 		prot = NET_PROT_IPV6;
796 
797 	if (flow->ipaddr_rule.qos_ipsrc_offset >= 0) {
798 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
799 				&priv->extract.qos_key_extract,
800 				&flow->qos_rule,
801 				flow->ipaddr_rule.qos_ipsrc_offset,
802 				NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
803 		if (ret) {
804 			DPAA2_PMD_ERR("QoS src address reorder failed");
805 			return -1;
806 		}
807 		flow->ipaddr_rule.qos_ipsrc_offset =
808 			dpaa2_flow_extract_key_offset(
809 				&priv->extract.qos_key_extract,
810 				prot, NH_FLD_IP_SRC);
811 	}
812 
813 	if (flow->ipaddr_rule.qos_ipdst_offset >= 0) {
814 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
815 				&priv->extract.qos_key_extract,
816 				&flow->qos_rule,
817 				flow->ipaddr_rule.qos_ipdst_offset,
818 				NH_FLD_IP_DST, prot == NET_PROT_IPV4);
819 		if (ret) {
820 			DPAA2_PMD_ERR("QoS dst address reorder failed");
821 			return -1;
822 		}
823 		flow->ipaddr_rule.qos_ipdst_offset =
824 			dpaa2_flow_extract_key_offset(
825 				&priv->extract.qos_key_extract,
826 				prot, NH_FLD_IP_DST);
827 	}
828 
829 	if (flow->ipaddr_rule.fs_ipsrc_offset >= 0) {
830 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
831 				&priv->extract.tc_key_extract[fs_group],
832 				&flow->fs_rule,
833 				flow->ipaddr_rule.fs_ipsrc_offset,
834 				NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
835 		if (ret) {
836 			DPAA2_PMD_ERR("FS src address reorder failed");
837 			return -1;
838 		}
839 		flow->ipaddr_rule.fs_ipsrc_offset =
840 			dpaa2_flow_extract_key_offset(
841 				&priv->extract.tc_key_extract[fs_group],
842 				prot, NH_FLD_IP_SRC);
843 	}
844 	if (flow->ipaddr_rule.fs_ipdst_offset >= 0) {
845 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
846 				&priv->extract.tc_key_extract[fs_group],
847 				&flow->fs_rule,
848 				flow->ipaddr_rule.fs_ipdst_offset,
849 				NH_FLD_IP_DST, prot == NET_PROT_IPV4);
850 		if (ret) {
851 			DPAA2_PMD_ERR("FS dst address reorder failed");
852 			return -1;
853 		}
854 		flow->ipaddr_rule.fs_ipdst_offset =
855 			dpaa2_flow_extract_key_offset(
856 				&priv->extract.tc_key_extract[fs_group],
857 				prot, NH_FLD_IP_DST);
858 	}
859 
860 	return 0;
861 }
862 
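/* Check that every bit set in the user-supplied mask is also set in the
 * driver's supported mask for this item type: OR-ing the two must leave the
 * supported mask unchanged.
 */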
863 static int
864 dpaa2_flow_extract_support(
865 	const uint8_t *mask_src,
866 	enum rte_flow_item_type type)
867 {
868 	char mask[64];
869 	int i, size = 0;
870 	const char *mask_support = 0;
871 
872 	switch (type) {
873 	case RTE_FLOW_ITEM_TYPE_ETH:
874 		mask_support = (const char *)&dpaa2_flow_item_eth_mask;
875 		size = sizeof(struct rte_flow_item_eth);
876 		break;
877 	case RTE_FLOW_ITEM_TYPE_VLAN:
878 		mask_support = (const char *)&dpaa2_flow_item_vlan_mask;
879 		size = sizeof(struct rte_flow_item_vlan);
880 		break;
881 	case RTE_FLOW_ITEM_TYPE_IPV4:
882 		mask_support = (const char *)&dpaa2_flow_item_ipv4_mask;
883 		size = sizeof(struct rte_flow_item_ipv4);
884 		break;
885 	case RTE_FLOW_ITEM_TYPE_IPV6:
886 		mask_support = (const char *)&dpaa2_flow_item_ipv6_mask;
887 		size = sizeof(struct rte_flow_item_ipv6);
888 		break;
889 	case RTE_FLOW_ITEM_TYPE_ICMP:
890 		mask_support = (const char *)&dpaa2_flow_item_icmp_mask;
891 		size = sizeof(struct rte_flow_item_icmp);
892 		break;
893 	case RTE_FLOW_ITEM_TYPE_UDP:
894 		mask_support = (const char *)&dpaa2_flow_item_udp_mask;
895 		size = sizeof(struct rte_flow_item_udp);
896 		break;
897 	case RTE_FLOW_ITEM_TYPE_TCP:
898 		mask_support = (const char *)&dpaa2_flow_item_tcp_mask;
899 		size = sizeof(struct rte_flow_item_tcp);
900 		break;
901 	case RTE_FLOW_ITEM_TYPE_SCTP:
902 		mask_support = (const char *)&dpaa2_flow_item_sctp_mask;
903 		size = sizeof(struct rte_flow_item_sctp);
904 		break;
905 	case RTE_FLOW_ITEM_TYPE_GRE:
906 		mask_support = (const char *)&dpaa2_flow_item_gre_mask;
907 		size = sizeof(struct rte_flow_item_gre);
908 		break;
909 	default:
910 		return -1;
911 	}
912 
913 	memcpy(mask, mask_support, size);
914 
915 	for (i = 0; i < size; i++)
916 		mask[i] = (mask[i] | mask_src[i]);
917 
918 	if (memcmp(mask, mask_support, size))
919 		return -1;
920 
921 	return 0;
922 }
923 
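/* Translate an ETH pattern item into QoS/FS extracts and rule data for the
 * source MAC, destination MAC and Ether type, adding extracts on demand and
 * moving any IP address extracts back to the tail first.
 * Example: flow create 0 ingress pattern eth type is 0x0800 / end
 *          actions queue index 1 / end
 */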
924 static int
925 dpaa2_configure_flow_eth(struct rte_flow *flow,
926 			 struct rte_eth_dev *dev,
927 			 const struct rte_flow_attr *attr,
928 			 const struct rte_flow_item *pattern,
929 			 const struct rte_flow_action actions[] __rte_unused,
930 			 struct rte_flow_error *error __rte_unused,
931 			 int *device_configured)
932 {
933 	int index, ret;
934 	int local_cfg = 0;
935 	uint32_t group;
936 	const struct rte_flow_item_eth *spec, *mask;
937 
938 	/* TODO: Currently upper bound of range parameter is not implemented */
939 	const struct rte_flow_item_eth *last __rte_unused;
940 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
941 	const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0};
942 
943 	group = attr->group;
944 
945 	/* Parse pattern list to get the matching parameters */
946 	spec    = (const struct rte_flow_item_eth *)pattern->spec;
947 	last    = (const struct rte_flow_item_eth *)pattern->last;
948 	mask    = (const struct rte_flow_item_eth *)
949 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_eth_mask);
950 	if (!spec) {
951 		/* No field of the eth header is specified;
952 		 * only the eth protocol itself matters.
953 		 */
954 		DPAA2_PMD_WARN("No pattern spec for Eth flow, just skip");
955 		return 0;
956 	}
957 
958 	/* Get traffic class index and flow id to be configured */
959 	flow->tc_id = group;
960 	flow->tc_index = attr->priority;
961 
962 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
963 		RTE_FLOW_ITEM_TYPE_ETH)) {
964 		DPAA2_PMD_WARN("Extract field(s) of ethernet not supported.");
965 
966 		return -1;
967 	}
968 
969 	if (memcmp((const char *)&mask->src, zero_cmp, RTE_ETHER_ADDR_LEN)) {
970 		index = dpaa2_flow_extract_search(
971 				&priv->extract.qos_key_extract.dpkg,
972 				NET_PROT_ETH, NH_FLD_ETH_SA);
973 		if (index < 0) {
974 			ret = dpaa2_flow_extract_add(
975 					&priv->extract.qos_key_extract,
976 					NET_PROT_ETH, NH_FLD_ETH_SA,
977 					RTE_ETHER_ADDR_LEN);
978 			if (ret) {
979 				DPAA2_PMD_ERR("QoS Extract add ETH_SA failed.");
980 
981 				return -1;
982 			}
983 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
984 		}
985 		index = dpaa2_flow_extract_search(
986 				&priv->extract.tc_key_extract[group].dpkg,
987 				NET_PROT_ETH, NH_FLD_ETH_SA);
988 		if (index < 0) {
989 			ret = dpaa2_flow_extract_add(
990 					&priv->extract.tc_key_extract[group],
991 					NET_PROT_ETH, NH_FLD_ETH_SA,
992 					RTE_ETHER_ADDR_LEN);
993 			if (ret) {
994 				DPAA2_PMD_ERR("FS Extract add ETH_SA failed.");
995 				return -1;
996 			}
997 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
998 		}
999 
1000 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1001 		if (ret) {
1002 			DPAA2_PMD_ERR(
1003 				"Move ipaddr before ETH_SA rule set failed");
1004 			return -1;
1005 		}
1006 
1007 		ret = dpaa2_flow_rule_data_set(
1008 				&priv->extract.qos_key_extract,
1009 				&flow->qos_rule,
1010 				NET_PROT_ETH,
1011 				NH_FLD_ETH_SA,
1012 				&spec->src.addr_bytes,
1013 				&mask->src.addr_bytes,
1014 				sizeof(struct rte_ether_addr));
1015 		if (ret) {
1016 			DPAA2_PMD_ERR("QoS NH_FLD_ETH_SA rule data set failed");
1017 			return -1;
1018 		}
1019 
1020 		ret = dpaa2_flow_rule_data_set(
1021 				&priv->extract.tc_key_extract[group],
1022 				&flow->fs_rule,
1023 				NET_PROT_ETH,
1024 				NH_FLD_ETH_SA,
1025 				&spec->src.addr_bytes,
1026 				&mask->src.addr_bytes,
1027 				sizeof(struct rte_ether_addr));
1028 		if (ret) {
1029 			DPAA2_PMD_ERR("FS NH_FLD_ETH_SA rule data set failed");
1030 			return -1;
1031 		}
1032 	}
1033 
1034 	if (memcmp((const char *)&mask->dst, zero_cmp, RTE_ETHER_ADDR_LEN)) {
1035 		index = dpaa2_flow_extract_search(
1036 				&priv->extract.qos_key_extract.dpkg,
1037 				NET_PROT_ETH, NH_FLD_ETH_DA);
1038 		if (index < 0) {
1039 			ret = dpaa2_flow_extract_add(
1040 					&priv->extract.qos_key_extract,
1041 					NET_PROT_ETH, NH_FLD_ETH_DA,
1042 					RTE_ETHER_ADDR_LEN);
1043 			if (ret) {
1044 				DPAA2_PMD_ERR("QoS Extract add ETH_DA failed.");
1045 
1046 				return -1;
1047 			}
1048 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1049 		}
1050 
1051 		index = dpaa2_flow_extract_search(
1052 				&priv->extract.tc_key_extract[group].dpkg,
1053 				NET_PROT_ETH, NH_FLD_ETH_DA);
1054 		if (index < 0) {
1055 			ret = dpaa2_flow_extract_add(
1056 					&priv->extract.tc_key_extract[group],
1057 					NET_PROT_ETH, NH_FLD_ETH_DA,
1058 					RTE_ETHER_ADDR_LEN);
1059 			if (ret) {
1060 				DPAA2_PMD_ERR("FS Extract add ETH_DA failed.");
1061 
1062 				return -1;
1063 			}
1064 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1065 		}
1066 
1067 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1068 		if (ret) {
1069 			DPAA2_PMD_ERR(
1070 				"Move ipaddr before ETH DA rule set failed");
1071 			return -1;
1072 		}
1073 
1074 		ret = dpaa2_flow_rule_data_set(
1075 				&priv->extract.qos_key_extract,
1076 				&flow->qos_rule,
1077 				NET_PROT_ETH,
1078 				NH_FLD_ETH_DA,
1079 				&spec->dst.addr_bytes,
1080 				&mask->dst.addr_bytes,
1081 				sizeof(struct rte_ether_addr));
1082 		if (ret) {
1083 			DPAA2_PMD_ERR("QoS NH_FLD_ETH_DA rule data set failed");
1084 			return -1;
1085 		}
1086 
1087 		ret = dpaa2_flow_rule_data_set(
1088 				&priv->extract.tc_key_extract[group],
1089 				&flow->fs_rule,
1090 				NET_PROT_ETH,
1091 				NH_FLD_ETH_DA,
1092 				&spec->dst.addr_bytes,
1093 				&mask->dst.addr_bytes,
1094 				sizeof(struct rte_ether_addr));
1095 		if (ret) {
1096 			DPAA2_PMD_ERR("FS NH_FLD_ETH_DA rule data set failed");
1097 			return -1;
1098 		}
1099 	}
1100 
1101 	if (memcmp((const char *)&mask->type, zero_cmp, sizeof(rte_be16_t))) {
1102 		index = dpaa2_flow_extract_search(
1103 				&priv->extract.qos_key_extract.dpkg,
1104 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1105 		if (index < 0) {
1106 			ret = dpaa2_flow_extract_add(
1107 					&priv->extract.qos_key_extract,
1108 					NET_PROT_ETH, NH_FLD_ETH_TYPE,
1109 					RTE_ETHER_TYPE_LEN);
1110 			if (ret) {
1111 				DPAA2_PMD_ERR("QoS Extract add ETH_TYPE failed.");
1112 
1113 				return -1;
1114 			}
1115 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1116 		}
1117 		index = dpaa2_flow_extract_search(
1118 				&priv->extract.tc_key_extract[group].dpkg,
1119 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1120 		if (index < 0) {
1121 			ret = dpaa2_flow_extract_add(
1122 					&priv->extract.tc_key_extract[group],
1123 					NET_PROT_ETH, NH_FLD_ETH_TYPE,
1124 					RTE_ETHER_TYPE_LEN);
1125 			if (ret) {
1126 				DPAA2_PMD_ERR("FS Extract add ETH_TYPE failed.");
1127 
1128 				return -1;
1129 			}
1130 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1131 		}
1132 
1133 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1134 		if (ret) {
1135 			DPAA2_PMD_ERR(
1136 				"Move ipaddr before ETH TYPE rule set failed");
1137 			return -1;
1138 		}
1139 
1140 		ret = dpaa2_flow_rule_data_set(
1141 				&priv->extract.qos_key_extract,
1142 				&flow->qos_rule,
1143 				NET_PROT_ETH,
1144 				NH_FLD_ETH_TYPE,
1145 				&spec->type,
1146 				&mask->type,
1147 				sizeof(rte_be16_t));
1148 		if (ret) {
1149 			DPAA2_PMD_ERR("QoS NH_FLD_ETH_TYPE rule data set failed");
1150 			return -1;
1151 		}
1152 
1153 		ret = dpaa2_flow_rule_data_set(
1154 				&priv->extract.tc_key_extract[group],
1155 				&flow->fs_rule,
1156 				NET_PROT_ETH,
1157 				NH_FLD_ETH_TYPE,
1158 				&spec->type,
1159 				&mask->type,
1160 				sizeof(rte_be16_t));
1161 		if (ret) {
1162 			DPAA2_PMD_ERR("FS NH_FLD_ETH_TYPE rule data set failed");
1163 			return -1;
1164 		}
1165 	}
1166 
1167 	(*device_configured) |= local_cfg;
1168 
1169 	return 0;
1170 }
1171 
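/* Translate a VLAN pattern item. Without a spec, only an Ether-type match on
 * the VLAN TPID is programmed; with a spec, the TCI field is matched.
 */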
1172 static int
1173 dpaa2_configure_flow_vlan(struct rte_flow *flow,
1174 			  struct rte_eth_dev *dev,
1175 			  const struct rte_flow_attr *attr,
1176 			  const struct rte_flow_item *pattern,
1177 			  const struct rte_flow_action actions[] __rte_unused,
1178 			  struct rte_flow_error *error __rte_unused,
1179 			  int *device_configured)
1180 {
1181 	int index, ret;
1182 	int local_cfg = 0;
1183 	uint32_t group;
1184 	const struct rte_flow_item_vlan *spec, *mask;
1185 
1186 	const struct rte_flow_item_vlan *last __rte_unused;
1187 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1188 
1189 	group = attr->group;
1190 
1191 	/* Parse pattern list to get the matching parameters */
1192 	spec    = (const struct rte_flow_item_vlan *)pattern->spec;
1193 	last    = (const struct rte_flow_item_vlan *)pattern->last;
1194 	mask    = (const struct rte_flow_item_vlan *)
1195 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_vlan_mask);
1196 
1197 	/* Get traffic class index and flow id to be configured */
1198 	flow->tc_id = group;
1199 	flow->tc_index = attr->priority;
1200 
1201 	if (!spec) {
1202 		/* No field of the vlan header is specified;
1203 		 * only the vlan protocol itself matters.
1204 		 */
1205 		/* The Ether type is actually used for VLAN classification.
1206 		 */
1207 		struct proto_discrimination proto;
1208 
1209 		index = dpaa2_flow_extract_search(
1210 				&priv->extract.qos_key_extract.dpkg,
1211 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1212 		if (index < 0) {
1213 			ret = dpaa2_flow_proto_discrimination_extract(
1214 						&priv->extract.qos_key_extract,
1215 						RTE_FLOW_ITEM_TYPE_ETH);
1216 			if (ret) {
1217 				DPAA2_PMD_ERR(
1218 				"QoS Ext ETH_TYPE to discriminate vLan failed");
1219 
1220 				return -1;
1221 			}
1222 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1223 		}
1224 
1225 		index = dpaa2_flow_extract_search(
1226 				&priv->extract.tc_key_extract[group].dpkg,
1227 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1228 		if (index < 0) {
1229 			ret = dpaa2_flow_proto_discrimination_extract(
1230 					&priv->extract.tc_key_extract[group],
1231 					RTE_FLOW_ITEM_TYPE_ETH);
1232 			if (ret) {
1233 				DPAA2_PMD_ERR(
1234 				"FS Ext ETH_TYPE to discriminate vLan failed.");
1235 
1236 				return -1;
1237 			}
1238 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1239 		}
1240 
1241 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1242 		if (ret) {
1243 			DPAA2_PMD_ERR(
1244 			"Move ipaddr before vLan discrimination set failed");
1245 			return -1;
1246 		}
1247 
1248 		proto.type = RTE_FLOW_ITEM_TYPE_ETH;
1249 		proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
1250 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1251 							proto, group);
1252 		if (ret) {
1253 			DPAA2_PMD_ERR("vLan discrimination rule set failed");
1254 			return -1;
1255 		}
1256 
1257 		(*device_configured) |= local_cfg;
1258 
1259 		return 0;
1260 	}
1261 
1262 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
1263 		RTE_FLOW_ITEM_TYPE_VLAN)) {
1264 		DPAA2_PMD_WARN("Extract field(s) of vlan not supported.");
1265 
1266 		return -1;
1267 	}
1268 
1269 	if (!mask->tci)
1270 		return 0;
1271 
1272 	index = dpaa2_flow_extract_search(
1273 				&priv->extract.qos_key_extract.dpkg,
1274 				NET_PROT_VLAN, NH_FLD_VLAN_TCI);
1275 	if (index < 0) {
1276 		ret = dpaa2_flow_extract_add(
1277 						&priv->extract.qos_key_extract,
1278 						NET_PROT_VLAN,
1279 						NH_FLD_VLAN_TCI,
1280 						sizeof(rte_be16_t));
1281 		if (ret) {
1282 			DPAA2_PMD_ERR("QoS Extract add VLAN_TCI failed.");
1283 
1284 			return -1;
1285 		}
1286 		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1287 	}
1288 
1289 	index = dpaa2_flow_extract_search(
1290 			&priv->extract.tc_key_extract[group].dpkg,
1291 			NET_PROT_VLAN, NH_FLD_VLAN_TCI);
1292 	if (index < 0) {
1293 		ret = dpaa2_flow_extract_add(
1294 				&priv->extract.tc_key_extract[group],
1295 				NET_PROT_VLAN,
1296 				NH_FLD_VLAN_TCI,
1297 				sizeof(rte_be16_t));
1298 		if (ret) {
1299 			DPAA2_PMD_ERR("FS Extract add VLAN_TCI failed.");
1300 
1301 			return -1;
1302 		}
1303 		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1304 	}
1305 
1306 	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1307 	if (ret) {
1308 		DPAA2_PMD_ERR(
1309 			"Move ipaddr before VLAN TCI rule set failed");
1310 		return -1;
1311 	}
1312 
1313 	ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
1314 				&flow->qos_rule,
1315 				NET_PROT_VLAN,
1316 				NH_FLD_VLAN_TCI,
1317 				&spec->tci,
1318 				&mask->tci,
1319 				sizeof(rte_be16_t));
1320 	if (ret) {
1321 		DPAA2_PMD_ERR("QoS NH_FLD_VLAN_TCI rule data set failed");
1322 		return -1;
1323 	}
1324 
1325 	ret = dpaa2_flow_rule_data_set(
1326 			&priv->extract.tc_key_extract[group],
1327 			&flow->fs_rule,
1328 			NET_PROT_VLAN,
1329 			NH_FLD_VLAN_TCI,
1330 			&spec->tci,
1331 			&mask->tci,
1332 			sizeof(rte_be16_t));
1333 	if (ret) {
1334 		DPAA2_PMD_ERR("FS NH_FLD_VLAN_TCI rule data set failed");
1335 		return -1;
1336 	}
1337 
1338 	(*device_configured) |= local_cfg;
1339 
1340 	return 0;
1341 }
1342 
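/* Ensure an Ether-type extract exists in both the QoS and FS profiles and
 * program it to match the IPv4 or IPv6 Ether type of the pattern.
 */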
1343 static int
1344 dpaa2_configure_flow_ip_discrimation(
1345 	struct dpaa2_dev_priv *priv, struct rte_flow *flow,
1346 	const struct rte_flow_item *pattern,
1347 	int *local_cfg,	int *device_configured,
1348 	uint32_t group)
1349 {
1350 	int index, ret;
1351 	struct proto_discrimination proto;
1352 
1353 	index = dpaa2_flow_extract_search(
1354 			&priv->extract.qos_key_extract.dpkg,
1355 			NET_PROT_ETH, NH_FLD_ETH_TYPE);
1356 	if (index < 0) {
1357 		ret = dpaa2_flow_proto_discrimination_extract(
1358 				&priv->extract.qos_key_extract,
1359 				RTE_FLOW_ITEM_TYPE_ETH);
1360 		if (ret) {
1361 			DPAA2_PMD_ERR(
1362 			"QoS Extract ETH_TYPE to discriminate IP failed.");
1363 			return -1;
1364 		}
1365 		(*local_cfg) |= DPAA2_QOS_TABLE_RECONFIGURE;
1366 	}
1367 
1368 	index = dpaa2_flow_extract_search(
1369 			&priv->extract.tc_key_extract[group].dpkg,
1370 			NET_PROT_ETH, NH_FLD_ETH_TYPE);
1371 	if (index < 0) {
1372 		ret = dpaa2_flow_proto_discrimination_extract(
1373 				&priv->extract.tc_key_extract[group],
1374 				RTE_FLOW_ITEM_TYPE_ETH);
1375 		if (ret) {
1376 			DPAA2_PMD_ERR(
1377 			"FS Extract ETH_TYPE to discriminate IP failed.");
1378 			return -1;
1379 		}
1380 		(*local_cfg) |= DPAA2_FS_TABLE_RECONFIGURE;
1381 	}
1382 
1383 	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1384 	if (ret) {
1385 		DPAA2_PMD_ERR(
1386 			"Move ipaddr before IP discrimination set failed");
1387 		return -1;
1388 	}
1389 
1390 	proto.type = RTE_FLOW_ITEM_TYPE_ETH;
1391 	if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4)
1392 		proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
1393 	else
1394 		proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
1395 	ret = dpaa2_flow_proto_discrimination_rule(priv, flow, proto, group);
1396 	if (ret) {
1397 		DPAA2_PMD_ERR("IP discrimination rule set failed");
1398 		return -1;
1399 	}
1400 
1401 	(*device_configured) |= (*local_cfg);
1402 
1403 	return 0;
1404 }
1405 
1406 
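/* Handle both IPv4 and IPv6 pattern items: discriminate the IP version by
 * Ether type, then match the source address, destination address and next
 * protocol field as requested by the mask.
 */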
1407 static int
1408 dpaa2_configure_flow_generic_ip(
1409 	struct rte_flow *flow,
1410 	struct rte_eth_dev *dev,
1411 	const struct rte_flow_attr *attr,
1412 	const struct rte_flow_item *pattern,
1413 	const struct rte_flow_action actions[] __rte_unused,
1414 	struct rte_flow_error *error __rte_unused,
1415 	int *device_configured)
1416 {
1417 	int index, ret;
1418 	int local_cfg = 0;
1419 	uint32_t group;
1420 	const struct rte_flow_item_ipv4 *spec_ipv4 = 0,
1421 		*mask_ipv4 = 0;
1422 	const struct rte_flow_item_ipv6 *spec_ipv6 = 0,
1423 		*mask_ipv6 = 0;
1424 	const void *key, *mask;
1425 	enum net_prot prot;
1426 
1427 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1428 	const char zero_cmp[NH_FLD_IPV6_ADDR_SIZE] = {0};
1429 	int size;
1430 
1431 	group = attr->group;
1432 
1433 	/* Parse pattern list to get the matching parameters */
1434 	if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1435 		spec_ipv4 = (const struct rte_flow_item_ipv4 *)pattern->spec;
1436 		mask_ipv4 = (const struct rte_flow_item_ipv4 *)
1437 			(pattern->mask ? pattern->mask :
1438 					&dpaa2_flow_item_ipv4_mask);
1439 	} else {
1440 		spec_ipv6 = (const struct rte_flow_item_ipv6 *)pattern->spec;
1441 		mask_ipv6 = (const struct rte_flow_item_ipv6 *)
1442 			(pattern->mask ? pattern->mask :
1443 					&dpaa2_flow_item_ipv6_mask);
1444 	}
1445 
1446 	/* Get traffic class index and flow id to be configured */
1447 	flow->tc_id = group;
1448 	flow->tc_index = attr->priority;
1449 
1450 	ret = dpaa2_configure_flow_ip_discrimation(priv,
1451 			flow, pattern, &local_cfg,
1452 			device_configured, group);
1453 	if (ret) {
1454 		DPAA2_PMD_ERR("IP discrimination failed!");
1455 		return -1;
1456 	}
1457 
1458 	if (!spec_ipv4 && !spec_ipv6)
1459 		return 0;
1460 
1461 	if (mask_ipv4) {
1462 		if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv4,
1463 			RTE_FLOW_ITEM_TYPE_IPV4)) {
1464 			DPAA2_PMD_WARN("Extract field(s) of IPv4 not supported.");
1465 
1466 			return -1;
1467 		}
1468 	}
1469 
1470 	if (mask_ipv6) {
1471 		if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv6,
1472 			RTE_FLOW_ITEM_TYPE_IPV6)) {
1473 			DPAA2_PMD_WARN("Extract field(s) of IPv6 not supported.");
1474 
1475 			return -1;
1476 		}
1477 	}
1478 
1479 	if (mask_ipv4 && (mask_ipv4->hdr.src_addr ||
1480 		mask_ipv4->hdr.dst_addr)) {
1481 		flow->ipaddr_rule.ipaddr_type = FLOW_IPV4_ADDR;
1482 	} else if (mask_ipv6 &&
1483 		(memcmp((const char *)mask_ipv6->hdr.src_addr,
1484 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE) ||
1485 		memcmp((const char *)mask_ipv6->hdr.dst_addr,
1486 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1487 		flow->ipaddr_rule.ipaddr_type = FLOW_IPV6_ADDR;
1488 	}
1489 
1490 	if ((mask_ipv4 && mask_ipv4->hdr.src_addr) ||
1491 		(mask_ipv6 &&
1492 			memcmp((const char *)mask_ipv6->hdr.src_addr,
1493 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1494 		index = dpaa2_flow_extract_search(
1495 				&priv->extract.qos_key_extract.dpkg,
1496 				NET_PROT_IP, NH_FLD_IP_SRC);
1497 		if (index < 0) {
1498 			ret = dpaa2_flow_extract_add(
1499 					&priv->extract.qos_key_extract,
1500 					NET_PROT_IP,
1501 					NH_FLD_IP_SRC,
1502 					0);
1503 			if (ret) {
1504 				DPAA2_PMD_ERR("QoS Extract add IP_SRC failed.");
1505 
1506 				return -1;
1507 			}
1508 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1509 		}
1510 
1511 		index = dpaa2_flow_extract_search(
1512 				&priv->extract.tc_key_extract[group].dpkg,
1513 				NET_PROT_IP, NH_FLD_IP_SRC);
1514 		if (index < 0) {
1515 			ret = dpaa2_flow_extract_add(
1516 					&priv->extract.tc_key_extract[group],
1517 					NET_PROT_IP,
1518 					NH_FLD_IP_SRC,
1519 					0);
1520 			if (ret) {
1521 				DPAA2_PMD_ERR("FS Extract add IP_SRC failed.");
1522 
1523 				return -1;
1524 			}
1525 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1526 		}
1527 
1528 		if (spec_ipv4)
1529 			key = &spec_ipv4->hdr.src_addr;
1530 		else
1531 			key = &spec_ipv6->hdr.src_addr[0];
1532 		if (mask_ipv4) {
1533 			mask = &mask_ipv4->hdr.src_addr;
1534 			size = NH_FLD_IPV4_ADDR_SIZE;
1535 			prot = NET_PROT_IPV4;
1536 		} else {
1537 			mask = &mask_ipv6->hdr.src_addr[0];
1538 			size = NH_FLD_IPV6_ADDR_SIZE;
1539 			prot = NET_PROT_IPV6;
1540 		}
1541 
1542 		ret = dpaa2_flow_rule_data_set(
1543 				&priv->extract.qos_key_extract,
1544 				&flow->qos_rule,
1545 				prot, NH_FLD_IP_SRC,
1546 				key, mask, size);
1547 		if (ret) {
1548 			DPAA2_PMD_ERR("QoS NH_FLD_IP_SRC rule data set failed");
1549 			return -1;
1550 		}
1551 
1552 		ret = dpaa2_flow_rule_data_set(
1553 				&priv->extract.tc_key_extract[group],
1554 				&flow->fs_rule,
1555 				prot, NH_FLD_IP_SRC,
1556 				key, mask, size);
1557 		if (ret) {
1558 			DPAA2_PMD_ERR("FS NH_FLD_IP_SRC rule data set failed");
1559 			return -1;
1560 		}
1561 
1562 		flow->ipaddr_rule.qos_ipsrc_offset =
1563 			dpaa2_flow_extract_key_offset(
1564 				&priv->extract.qos_key_extract,
1565 				prot, NH_FLD_IP_SRC);
1566 		flow->ipaddr_rule.fs_ipsrc_offset =
1567 			dpaa2_flow_extract_key_offset(
1568 				&priv->extract.tc_key_extract[group],
1569 				prot, NH_FLD_IP_SRC);
1570 	}
1571 
1572 	if ((mask_ipv4 && mask_ipv4->hdr.dst_addr) ||
1573 		(mask_ipv6 &&
1574 			memcmp((const char *)mask_ipv6->hdr.dst_addr,
1575 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1576 		index = dpaa2_flow_extract_search(
1577 				&priv->extract.qos_key_extract.dpkg,
1578 				NET_PROT_IP, NH_FLD_IP_DST);
1579 		if (index < 0) {
1580 			if (mask_ipv4)
1581 				size = NH_FLD_IPV4_ADDR_SIZE;
1582 			else
1583 				size = NH_FLD_IPV6_ADDR_SIZE;
1584 			ret = dpaa2_flow_extract_add(
1585 					&priv->extract.qos_key_extract,
1586 					NET_PROT_IP,
1587 					NH_FLD_IP_DST,
1588 					size);
1589 			if (ret) {
1590 				DPAA2_PMD_ERR("QoS Extract add IP_DST failed.");
1591 
1592 				return -1;
1593 			}
1594 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1595 		}
1596 
1597 		index = dpaa2_flow_extract_search(
1598 				&priv->extract.tc_key_extract[group].dpkg,
1599 				NET_PROT_IP, NH_FLD_IP_DST);
1600 		if (index < 0) {
1601 			if (mask_ipv4)
1602 				size = NH_FLD_IPV4_ADDR_SIZE;
1603 			else
1604 				size = NH_FLD_IPV6_ADDR_SIZE;
1605 			ret = dpaa2_flow_extract_add(
1606 					&priv->extract.tc_key_extract[group],
1607 					NET_PROT_IP,
1608 					NH_FLD_IP_DST,
1609 					size);
1610 			if (ret) {
1611 				DPAA2_PMD_ERR("FS Extract add IP_DST failed.");
1612 
1613 				return -1;
1614 			}
1615 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1616 		}
1617 
1618 		if (spec_ipv4)
1619 			key = &spec_ipv4->hdr.dst_addr;
1620 		else
1621 			key = spec_ipv6->hdr.dst_addr;
1622 		if (mask_ipv4) {
1623 			mask = &mask_ipv4->hdr.dst_addr;
1624 			size = NH_FLD_IPV4_ADDR_SIZE;
1625 			prot = NET_PROT_IPV4;
1626 		} else {
1627 			mask = &mask_ipv6->hdr.dst_addr[0];
1628 			size = NH_FLD_IPV6_ADDR_SIZE;
1629 			prot = NET_PROT_IPV6;
1630 		}
1631 
1632 		ret = dpaa2_flow_rule_data_set(
1633 				&priv->extract.qos_key_extract,
1634 				&flow->qos_rule,
1635 				prot, NH_FLD_IP_DST,
1636 				key, mask, size);
1637 		if (ret) {
1638 			DPAA2_PMD_ERR("QoS NH_FLD_IP_DST rule data set failed");
1639 			return -1;
1640 		}
1641 
1642 		ret = dpaa2_flow_rule_data_set(
1643 				&priv->extract.tc_key_extract[group],
1644 				&flow->fs_rule,
1645 				prot, NH_FLD_IP_DST,
1646 				key, mask, size);
1647 		if (ret) {
1648 			DPAA2_PMD_ERR("FS NH_FLD_IP_DST rule data set failed");
1649 			return -1;
1650 		}
1651 		flow->ipaddr_rule.qos_ipdst_offset =
1652 			dpaa2_flow_extract_key_offset(
1653 				&priv->extract.qos_key_extract,
1654 				prot, NH_FLD_IP_DST);
1655 		flow->ipaddr_rule.fs_ipdst_offset =
1656 			dpaa2_flow_extract_key_offset(
1657 				&priv->extract.tc_key_extract[group],
1658 				prot, NH_FLD_IP_DST);
1659 	}
1660 
1661 	if ((mask_ipv4 && mask_ipv4->hdr.next_proto_id) ||
1662 		(mask_ipv6 && mask_ipv6->hdr.proto)) {
1663 		index = dpaa2_flow_extract_search(
1664 				&priv->extract.qos_key_extract.dpkg,
1665 				NET_PROT_IP, NH_FLD_IP_PROTO);
1666 		if (index < 0) {
1667 			ret = dpaa2_flow_extract_add(
1668 				&priv->extract.qos_key_extract,
1669 				NET_PROT_IP,
1670 				NH_FLD_IP_PROTO,
1671 				NH_FLD_IP_PROTO_SIZE);
1672 			if (ret) {
1673 				DPAA2_PMD_ERR("QoS Extract add IP_PROTO failed.");
1674 
1675 				return -1;
1676 			}
1677 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1678 		}
1679 
1680 		index = dpaa2_flow_extract_search(
1681 				&priv->extract.tc_key_extract[group].dpkg,
1682 				NET_PROT_IP, NH_FLD_IP_PROTO);
1683 		if (index < 0) {
1684 			ret = dpaa2_flow_extract_add(
1685 					&priv->extract.tc_key_extract[group],
1686 					NET_PROT_IP,
1687 					NH_FLD_IP_PROTO,
1688 					NH_FLD_IP_PROTO_SIZE);
1689 			if (ret) {
1690 				DPAA2_PMD_ERR("FS Extract add IP_PROTO failed.");
1691 
1692 				return -1;
1693 			}
1694 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1695 		}
1696 
1697 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1698 		if (ret) {
1699 			DPAA2_PMD_ERR(
1700 				"Move ipaddr before NH_FLD_IP_PROTO rule set failed");
1701 			return -1;
1702 		}
1703 
1704 		if (spec_ipv4)
1705 			key = &spec_ipv4->hdr.next_proto_id;
1706 		else
1707 			key = &spec_ipv6->hdr.proto;
1708 		if (mask_ipv4)
1709 			mask = &mask_ipv4->hdr.next_proto_id;
1710 		else
1711 			mask = &mask_ipv6->hdr.proto;
1712 
1713 		ret = dpaa2_flow_rule_data_set(
1714 				&priv->extract.qos_key_extract,
1715 				&flow->qos_rule,
1716 				NET_PROT_IP,
1717 				NH_FLD_IP_PROTO,
1718 				key, mask, NH_FLD_IP_PROTO_SIZE);
1719 		if (ret) {
1720 			DPAA2_PMD_ERR("QoS NH_FLD_IP_PROTO rule data set failed");
1721 			return -1;
1722 		}
1723 
1724 		ret = dpaa2_flow_rule_data_set(
1725 				&priv->extract.tc_key_extract[group],
1726 				&flow->fs_rule,
1727 				NET_PROT_IP,
1728 				NH_FLD_IP_PROTO,
1729 				key, mask, NH_FLD_IP_PROTO_SIZE);
1730 		if (ret) {
1731 			DPAA2_PMD_ERR("FS NH_FLD_IP_PROTO rule data set failed");
1732 			return -1;
1733 		}
1734 	}
1735 
1736 	(*device_configured) |= local_cfg;
1737 
1738 	return 0;
1739 }
1740 
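/* Translate an ICMP pattern item. Without a spec, only the IP next-protocol
 * field is matched against IPPROTO_ICMP; with a spec, the ICMP type and/or
 * code are matched.
 */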
1741 static int
1742 dpaa2_configure_flow_icmp(struct rte_flow *flow,
1743 			  struct rte_eth_dev *dev,
1744 			  const struct rte_flow_attr *attr,
1745 			  const struct rte_flow_item *pattern,
1746 			  const struct rte_flow_action actions[] __rte_unused,
1747 			  struct rte_flow_error *error __rte_unused,
1748 			  int *device_configured)
1749 {
1750 	int index, ret;
1751 	int local_cfg = 0;
1752 	uint32_t group;
1753 	const struct rte_flow_item_icmp *spec, *mask;
1754 
1755 	const struct rte_flow_item_icmp *last __rte_unused;
1756 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1757 
1758 	group = attr->group;
1759 
1760 	/* Parse pattern list to get the matching parameters */
1761 	spec    = (const struct rte_flow_item_icmp *)pattern->spec;
1762 	last    = (const struct rte_flow_item_icmp *)pattern->last;
1763 	mask    = (const struct rte_flow_item_icmp *)
1764 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_icmp_mask);
1765 
1766 	/* Get traffic class index and flow id to be configured */
1767 	flow->tc_id = group;
1768 	flow->tc_index = attr->priority;
1769 
1770 	if (!spec) {
1771 		/* No field of the ICMP header is specified;
1772 		 * only the ICMP protocol itself matters.
1773 		 * Example: flow create 0 ingress pattern icmp /
1774 		 */
1775 		/* The next protocol of the generic IP header is
1776 		 * actually used for ICMP identification.
1777 		 */
1778 		struct proto_discrimination proto;
1779 
1780 		index = dpaa2_flow_extract_search(
1781 				&priv->extract.qos_key_extract.dpkg,
1782 				NET_PROT_IP, NH_FLD_IP_PROTO);
1783 		if (index < 0) {
1784 			ret = dpaa2_flow_proto_discrimination_extract(
1785 					&priv->extract.qos_key_extract,
1786 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1787 			if (ret) {
1788 				DPAA2_PMD_ERR(
1789 					"QoS Extract IP protocol to discriminate ICMP failed.");
1790 
1791 				return -1;
1792 			}
1793 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1794 		}
1795 
1796 		index = dpaa2_flow_extract_search(
1797 				&priv->extract.tc_key_extract[group].dpkg,
1798 				NET_PROT_IP, NH_FLD_IP_PROTO);
1799 		if (index < 0) {
1800 			ret = dpaa2_flow_proto_discrimination_extract(
1801 					&priv->extract.tc_key_extract[group],
1802 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1803 			if (ret) {
1804 				DPAA2_PMD_ERR(
1805 					"FS Extract IP protocol to discriminate ICMP failed.");
1806 
1807 				return -1;
1808 			}
1809 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1810 		}
1811 
1812 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1813 		if (ret) {
1814 			DPAA2_PMD_ERR(
1815 				"Move IP addr before ICMP discrimination set failed");
1816 			return -1;
1817 		}
1818 
1819 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
1820 		proto.ip_proto = IPPROTO_ICMP;
1821 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1822 							proto, group);
1823 		if (ret) {
1824 			DPAA2_PMD_ERR("ICMP discrimination rule set failed");
1825 			return -1;
1826 		}
1827 
1828 		(*device_configured) |= local_cfg;
1829 
1830 		return 0;
1831 	}
1832 
1833 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
1834 		RTE_FLOW_ITEM_TYPE_ICMP)) {
1835 		DPAA2_PMD_WARN("Extract field(s) of ICMP not supported.");
1836 
1837 		return -1;
1838 	}
1839 
1840 	if (mask->hdr.icmp_type) {
1841 		index = dpaa2_flow_extract_search(
1842 				&priv->extract.qos_key_extract.dpkg,
1843 				NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
1844 		if (index < 0) {
1845 			ret = dpaa2_flow_extract_add(
1846 					&priv->extract.qos_key_extract,
1847 					NET_PROT_ICMP,
1848 					NH_FLD_ICMP_TYPE,
1849 					NH_FLD_ICMP_TYPE_SIZE);
1850 			if (ret) {
1851 				DPAA2_PMD_ERR("QoS Extract add ICMP_TYPE failed.");
1852 
1853 				return -1;
1854 			}
1855 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1856 		}
1857 
1858 		index = dpaa2_flow_extract_search(
1859 				&priv->extract.tc_key_extract[group].dpkg,
1860 				NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
1861 		if (index < 0) {
1862 			ret = dpaa2_flow_extract_add(
1863 					&priv->extract.tc_key_extract[group],
1864 					NET_PROT_ICMP,
1865 					NH_FLD_ICMP_TYPE,
1866 					NH_FLD_ICMP_TYPE_SIZE);
1867 			if (ret) {
1868 				DPAA2_PMD_ERR("FS Extract add ICMP_TYPE failed.");
1869 
1870 				return -1;
1871 			}
1872 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1873 		}
1874 
1875 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1876 		if (ret) {
1877 			DPAA2_PMD_ERR(
1878 				"Move ipaddr before ICMP TYPE set failed");
1879 			return -1;
1880 		}
1881 
1882 		ret = dpaa2_flow_rule_data_set(
1883 				&priv->extract.qos_key_extract,
1884 				&flow->qos_rule,
1885 				NET_PROT_ICMP,
1886 				NH_FLD_ICMP_TYPE,
1887 				&spec->hdr.icmp_type,
1888 				&mask->hdr.icmp_type,
1889 				NH_FLD_ICMP_TYPE_SIZE);
1890 		if (ret) {
1891 			DPAA2_PMD_ERR("QoS NH_FLD_ICMP_TYPE rule data set failed");
1892 			return -1;
1893 		}
1894 
1895 		ret = dpaa2_flow_rule_data_set(
1896 				&priv->extract.tc_key_extract[group],
1897 				&flow->fs_rule,
1898 				NET_PROT_ICMP,
1899 				NH_FLD_ICMP_TYPE,
1900 				&spec->hdr.icmp_type,
1901 				&mask->hdr.icmp_type,
1902 				NH_FLD_ICMP_TYPE_SIZE);
1903 		if (ret) {
1904 			DPAA2_PMD_ERR("FS NH_FLD_ICMP_TYPE rule data set failed");
1905 			return -1;
1906 		}
1907 	}
1908 
1909 	if (mask->hdr.icmp_code) {
1910 		index = dpaa2_flow_extract_search(
1911 				&priv->extract.qos_key_extract.dpkg,
1912 				NET_PROT_ICMP, NH_FLD_ICMP_CODE);
1913 		if (index < 0) {
1914 			ret = dpaa2_flow_extract_add(
1915 					&priv->extract.qos_key_extract,
1916 					NET_PROT_ICMP,
1917 					NH_FLD_ICMP_CODE,
1918 					NH_FLD_ICMP_CODE_SIZE);
1919 			if (ret) {
1920 				DPAA2_PMD_ERR("QoS Extract add ICMP_CODE failed.");
1921 
1922 				return -1;
1923 			}
1924 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1925 		}
1926 
1927 		index = dpaa2_flow_extract_search(
1928 				&priv->extract.tc_key_extract[group].dpkg,
1929 				NET_PROT_ICMP, NH_FLD_ICMP_CODE);
1930 		if (index < 0) {
1931 			ret = dpaa2_flow_extract_add(
1932 					&priv->extract.tc_key_extract[group],
1933 					NET_PROT_ICMP,
1934 					NH_FLD_ICMP_CODE,
1935 					NH_FLD_ICMP_CODE_SIZE);
1936 			if (ret) {
1937 				DPAA2_PMD_ERR("FS Extract add ICMP_CODE failed.");
1938 
1939 				return -1;
1940 			}
1941 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1942 		}
1943 
1944 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1945 		if (ret) {
1946 			DPAA2_PMD_ERR(
1947 				"Move ipaddr before ICMP CODE set failed");
1948 			return -1;
1949 		}
1950 
1951 		ret = dpaa2_flow_rule_data_set(
1952 				&priv->extract.qos_key_extract,
1953 				&flow->qos_rule,
1954 				NET_PROT_ICMP,
1955 				NH_FLD_ICMP_CODE,
1956 				&spec->hdr.icmp_code,
1957 				&mask->hdr.icmp_code,
1958 				NH_FLD_ICMP_CODE_SIZE);
1959 		if (ret) {
1960 			DPAA2_PMD_ERR("QoS NH_FLD_ICMP_CODE rule data set failed");
1961 			return -1;
1962 		}
1963 
1964 		ret = dpaa2_flow_rule_data_set(
1965 				&priv->extract.tc_key_extract[group],
1966 				&flow->fs_rule,
1967 				NET_PROT_ICMP,
1968 				NH_FLD_ICMP_CODE,
1969 				&spec->hdr.icmp_code,
1970 				&mask->hdr.icmp_code,
1971 				NH_FLD_ICMP_CODE_SIZE);
1972 		if (ret) {
1973 			DPAA2_PMD_ERR("FS NH_FLD_ICMP_CODE rule data set failed");
1974 			return -1;
1975 		}
1976 	}
1977 
1978 	(*device_configured) |= local_cfg;
1979 
1980 	return 0;
1981 }
1982 
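/* Configure a flow rule for the UDP pattern item.
 * With no spec, or when MC/WRIOP cannot identify L4 by ports
 * (mc_l4_port_identification is 0), the rule only discriminates
 * IP next-protocol == IPPROTO_UDP; otherwise the masked source
 * and/or destination ports are added to the QoS and FS keys.
 * Example (testpmd flow syntax, for illustration):
 *   flow create 0 ingress pattern udp dst is 2152 / end
 *     actions queue index 1 / end
 */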
1983 static int
1984 dpaa2_configure_flow_udp(struct rte_flow *flow,
1985 			 struct rte_eth_dev *dev,
1986 			  const struct rte_flow_attr *attr,
1987 			  const struct rte_flow_item *pattern,
1988 			  const struct rte_flow_action actions[] __rte_unused,
1989 			  struct rte_flow_error *error __rte_unused,
1990 			  int *device_configured)
1991 {
1992 	int index, ret;
1993 	int local_cfg = 0;
1994 	uint32_t group;
1995 	const struct rte_flow_item_udp *spec, *mask;
1996 
1997 	const struct rte_flow_item_udp *last __rte_unused;
1998 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1999 
2000 	group = attr->group;
2001 
2002 	/* Parse pattern list to get the matching parameters */
2003 	spec    = (const struct rte_flow_item_udp *)pattern->spec;
2004 	last    = (const struct rte_flow_item_udp *)pattern->last;
2005 	mask    = (const struct rte_flow_item_udp *)
2006 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_udp_mask);
2007 
2008 	/* Get traffic class index and flow id to be configured */
2009 	flow->tc_id = group;
2010 	flow->tc_index = attr->priority;
2011 
2012 	if (!spec || !mc_l4_port_identification) {
2013 		struct proto_discrimination proto;
2014 
2015 		index = dpaa2_flow_extract_search(
2016 				&priv->extract.qos_key_extract.dpkg,
2017 				NET_PROT_IP, NH_FLD_IP_PROTO);
2018 		if (index < 0) {
2019 			ret = dpaa2_flow_proto_discrimination_extract(
2020 					&priv->extract.qos_key_extract,
2021 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2022 			if (ret) {
2023 				DPAA2_PMD_ERR(
2024 					"QoS Extract IP protocol to discriminate UDP failed.");
2025 
2026 				return -1;
2027 			}
2028 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2029 		}
2030 
2031 		index = dpaa2_flow_extract_search(
2032 				&priv->extract.tc_key_extract[group].dpkg,
2033 				NET_PROT_IP, NH_FLD_IP_PROTO);
2034 		if (index < 0) {
2035 			ret = dpaa2_flow_proto_discrimination_extract(
2036 				&priv->extract.tc_key_extract[group],
2037 				DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2038 			if (ret) {
2039 				DPAA2_PMD_ERR(
2040 					"FS Extract IP protocol to discriminate UDP failed.");
2041 
2042 				return -1;
2043 			}
2044 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2045 		}
2046 
2047 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2048 		if (ret) {
2049 			DPAA2_PMD_ERR(
2050 				"Move IP addr before UDP discrimination set failed");
2051 			return -1;
2052 		}
2053 
2054 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2055 		proto.ip_proto = IPPROTO_UDP;
2056 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2057 							proto, group);
2058 		if (ret) {
2059 			DPAA2_PMD_ERR("UDP discrimination rule set failed");
2060 			return -1;
2061 		}
2062 
2063 		(*device_configured) |= local_cfg;
2064 
2065 		if (!spec)
2066 			return 0;
2067 	}
2068 
2069 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2070 		RTE_FLOW_ITEM_TYPE_UDP)) {
2071 		DPAA2_PMD_WARN("Extract field(s) of UDP not supported.");
2072 
2073 		return -1;
2074 	}
2075 
2076 	if (mask->hdr.src_port) {
2077 		index = dpaa2_flow_extract_search(
2078 				&priv->extract.qos_key_extract.dpkg,
2079 				NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
2080 		if (index < 0) {
2081 			ret = dpaa2_flow_extract_add(
2082 					&priv->extract.qos_key_extract,
2083 				NET_PROT_UDP,
2084 				NH_FLD_UDP_PORT_SRC,
2085 				NH_FLD_UDP_PORT_SIZE);
2086 			if (ret) {
2087 				DPAA2_PMD_ERR("QoS Extract add UDP_SRC failed.");
2088 
2089 				return -1;
2090 			}
2091 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2092 		}
2093 
2094 		index = dpaa2_flow_extract_search(
2095 				&priv->extract.tc_key_extract[group].dpkg,
2096 				NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
2097 		if (index < 0) {
2098 			ret = dpaa2_flow_extract_add(
2099 					&priv->extract.tc_key_extract[group],
2100 					NET_PROT_UDP,
2101 					NH_FLD_UDP_PORT_SRC,
2102 					NH_FLD_UDP_PORT_SIZE);
2103 			if (ret) {
2104 				DPAA2_PMD_ERR("FS Extract add UDP_SRC failed.");
2105 
2106 				return -1;
2107 			}
2108 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2109 		}
2110 
2111 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2112 		if (ret) {
2113 			DPAA2_PMD_ERR(
2114 				"Move ipaddr before UDP_PORT_SRC set failed");
2115 			return -1;
2116 		}
2117 
2118 		ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
2119 				&flow->qos_rule,
2120 				NET_PROT_UDP,
2121 				NH_FLD_UDP_PORT_SRC,
2122 				&spec->hdr.src_port,
2123 				&mask->hdr.src_port,
2124 				NH_FLD_UDP_PORT_SIZE);
2125 		if (ret) {
2126 			DPAA2_PMD_ERR(
2127 				"QoS NH_FLD_UDP_PORT_SRC rule data set failed");
2128 			return -1;
2129 		}
2130 
2131 		ret = dpaa2_flow_rule_data_set(
2132 				&priv->extract.tc_key_extract[group],
2133 				&flow->fs_rule,
2134 				NET_PROT_UDP,
2135 				NH_FLD_UDP_PORT_SRC,
2136 				&spec->hdr.src_port,
2137 				&mask->hdr.src_port,
2138 				NH_FLD_UDP_PORT_SIZE);
2139 		if (ret) {
2140 			DPAA2_PMD_ERR(
2141 				"FS NH_FLD_UDP_PORT_SRC rule data set failed");
2142 			return -1;
2143 		}
2144 	}
2145 
2146 	if (mask->hdr.dst_port) {
2147 		index = dpaa2_flow_extract_search(
2148 				&priv->extract.qos_key_extract.dpkg,
2149 				NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
2150 		if (index < 0) {
2151 			ret = dpaa2_flow_extract_add(
2152 					&priv->extract.qos_key_extract,
2153 					NET_PROT_UDP,
2154 					NH_FLD_UDP_PORT_DST,
2155 					NH_FLD_UDP_PORT_SIZE);
2156 			if (ret) {
2157 				DPAA2_PMD_ERR("QoS Extract add UDP_DST failed.");
2158 
2159 				return -1;
2160 			}
2161 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2162 		}
2163 
2164 		index = dpaa2_flow_extract_search(
2165 				&priv->extract.tc_key_extract[group].dpkg,
2166 				NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
2167 		if (index < 0) {
2168 			ret = dpaa2_flow_extract_add(
2169 					&priv->extract.tc_key_extract[group],
2170 					NET_PROT_UDP,
2171 					NH_FLD_UDP_PORT_DST,
2172 					NH_FLD_UDP_PORT_SIZE);
2173 			if (ret) {
2174 				DPAA2_PMD_ERR("FS Extract add UDP_DST failed.");
2175 
2176 				return -1;
2177 			}
2178 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2179 		}
2180 
2181 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2182 		if (ret) {
2183 			DPAA2_PMD_ERR(
2184 				"Move ipaddr before UDP_PORT_DST set failed");
2185 			return -1;
2186 		}
2187 
2188 		ret = dpaa2_flow_rule_data_set(
2189 				&priv->extract.qos_key_extract,
2190 				&flow->qos_rule,
2191 				NET_PROT_UDP,
2192 				NH_FLD_UDP_PORT_DST,
2193 				&spec->hdr.dst_port,
2194 				&mask->hdr.dst_port,
2195 				NH_FLD_UDP_PORT_SIZE);
2196 		if (ret) {
2197 			DPAA2_PMD_ERR(
2198 				"QoS NH_FLD_UDP_PORT_DST rule data set failed");
2199 			return -1;
2200 		}
2201 
2202 		ret = dpaa2_flow_rule_data_set(
2203 				&priv->extract.tc_key_extract[group],
2204 				&flow->fs_rule,
2205 				NET_PROT_UDP,
2206 				NH_FLD_UDP_PORT_DST,
2207 				&spec->hdr.dst_port,
2208 				&mask->hdr.dst_port,
2209 				NH_FLD_UDP_PORT_SIZE);
2210 		if (ret) {
2211 			DPAA2_PMD_ERR(
2212 				"FS NH_FLD_UDP_PORT_DST rule data set failed");
2213 			return -1;
2214 		}
2215 	}
2216 
2217 	(*device_configured) |= local_cfg;
2218 
2219 	return 0;
2220 }
2221 
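/* Configure a flow rule for the TCP pattern item.
 * Mirrors the UDP handler: with no spec, or when MC/WRIOP cannot
 * identify L4 by ports, only IP next-protocol == IPPROTO_TCP is
 * matched; otherwise the masked TCP source/destination ports are
 * added to the QoS and FS keys.
 */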
2222 static int
2223 dpaa2_configure_flow_tcp(struct rte_flow *flow,
2224 			 struct rte_eth_dev *dev,
2225 			 const struct rte_flow_attr *attr,
2226 			 const struct rte_flow_item *pattern,
2227 			 const struct rte_flow_action actions[] __rte_unused,
2228 			 struct rte_flow_error *error __rte_unused,
2229 			 int *device_configured)
2230 {
2231 	int index, ret;
2232 	int local_cfg = 0;
2233 	uint32_t group;
2234 	const struct rte_flow_item_tcp *spec, *mask;
2235 
2236 	const struct rte_flow_item_tcp *last __rte_unused;
2237 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2238 
2239 	group = attr->group;
2240 
2241 	/* Parse pattern list to get the matching parameters */
2242 	spec    = (const struct rte_flow_item_tcp *)pattern->spec;
2243 	last    = (const struct rte_flow_item_tcp *)pattern->last;
2244 	mask    = (const struct rte_flow_item_tcp *)
2245 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_tcp_mask);
2246 
2247 	/* Get traffic class index and flow id to be configured */
2248 	flow->tc_id = group;
2249 	flow->tc_index = attr->priority;
2250 
2251 	if (!spec || !mc_l4_port_identification) {
2252 		struct proto_discrimination proto;
2253 
2254 		index = dpaa2_flow_extract_search(
2255 				&priv->extract.qos_key_extract.dpkg,
2256 				NET_PROT_IP, NH_FLD_IP_PROTO);
2257 		if (index < 0) {
2258 			ret = dpaa2_flow_proto_discrimination_extract(
2259 					&priv->extract.qos_key_extract,
2260 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2261 			if (ret) {
2262 				DPAA2_PMD_ERR(
2263 					"QoS Extract IP protocol to discriminate TCP failed.");
2264 
2265 				return -1;
2266 			}
2267 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2268 		}
2269 
2270 		index = dpaa2_flow_extract_search(
2271 				&priv->extract.tc_key_extract[group].dpkg,
2272 				NET_PROT_IP, NH_FLD_IP_PROTO);
2273 		if (index < 0) {
2274 			ret = dpaa2_flow_proto_discrimination_extract(
2275 				&priv->extract.tc_key_extract[group],
2276 				DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2277 			if (ret) {
2278 				DPAA2_PMD_ERR(
2279 					"FS Extract IP protocol to discriminate TCP failed.");
2280 
2281 				return -1;
2282 			}
2283 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2284 		}
2285 
2286 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2287 		if (ret) {
2288 			DPAA2_PMD_ERR(
2289 				"Move IP addr before TCP discrimination set failed");
2290 			return -1;
2291 		}
2292 
2293 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2294 		proto.ip_proto = IPPROTO_TCP;
2295 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2296 							proto, group);
2297 		if (ret) {
2298 			DPAA2_PMD_ERR("TCP discrimination rule set failed");
2299 			return -1;
2300 		}
2301 
2302 		(*device_configured) |= local_cfg;
2303 
2304 		if (!spec)
2305 			return 0;
2306 	}
2307 
2308 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2309 		RTE_FLOW_ITEM_TYPE_TCP)) {
2310 		DPAA2_PMD_WARN("Extract field(s) of TCP not supported.");
2311 
2312 		return -1;
2313 	}
2314 
2315 	if (mask->hdr.src_port) {
2316 		index = dpaa2_flow_extract_search(
2317 				&priv->extract.qos_key_extract.dpkg,
2318 				NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
2319 		if (index < 0) {
2320 			ret = dpaa2_flow_extract_add(
2321 					&priv->extract.qos_key_extract,
2322 					NET_PROT_TCP,
2323 					NH_FLD_TCP_PORT_SRC,
2324 					NH_FLD_TCP_PORT_SIZE);
2325 			if (ret) {
2326 				DPAA2_PMD_ERR("QoS Extract add TCP_SRC failed.");
2327 
2328 				return -1;
2329 			}
2330 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2331 		}
2332 
2333 		index = dpaa2_flow_extract_search(
2334 				&priv->extract.tc_key_extract[group].dpkg,
2335 				NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
2336 		if (index < 0) {
2337 			ret = dpaa2_flow_extract_add(
2338 					&priv->extract.tc_key_extract[group],
2339 					NET_PROT_TCP,
2340 					NH_FLD_TCP_PORT_SRC,
2341 					NH_FLD_TCP_PORT_SIZE);
2342 			if (ret) {
2343 				DPAA2_PMD_ERR("FS Extract add TCP_SRC failed.");
2344 
2345 				return -1;
2346 			}
2347 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2348 		}
2349 
2350 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2351 		if (ret) {
2352 			DPAA2_PMD_ERR(
2353 				"Move ipaddr before TCP_PORT_SRC set failed");
2354 			return -1;
2355 		}
2356 
2357 		ret = dpaa2_flow_rule_data_set(
2358 				&priv->extract.qos_key_extract,
2359 				&flow->qos_rule,
2360 				NET_PROT_TCP,
2361 				NH_FLD_TCP_PORT_SRC,
2362 				&spec->hdr.src_port,
2363 				&mask->hdr.src_port,
2364 				NH_FLD_TCP_PORT_SIZE);
2365 		if (ret) {
2366 			DPAA2_PMD_ERR(
2367 				"QoS NH_FLD_TCP_PORT_SRC rule data set failed");
2368 			return -1;
2369 		}
2370 
2371 		ret = dpaa2_flow_rule_data_set(
2372 				&priv->extract.tc_key_extract[group],
2373 				&flow->fs_rule,
2374 				NET_PROT_TCP,
2375 				NH_FLD_TCP_PORT_SRC,
2376 				&spec->hdr.src_port,
2377 				&mask->hdr.src_port,
2378 				NH_FLD_TCP_PORT_SIZE);
2379 		if (ret) {
2380 			DPAA2_PMD_ERR(
2381 				"FS NH_FLD_TCP_PORT_SRC rule data set failed");
2382 			return -1;
2383 		}
2384 	}
2385 
2386 	if (mask->hdr.dst_port) {
2387 		index = dpaa2_flow_extract_search(
2388 				&priv->extract.qos_key_extract.dpkg,
2389 				NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
2390 		if (index < 0) {
2391 			ret = dpaa2_flow_extract_add(
2392 					&priv->extract.qos_key_extract,
2393 					NET_PROT_TCP,
2394 					NH_FLD_TCP_PORT_DST,
2395 					NH_FLD_TCP_PORT_SIZE);
2396 			if (ret) {
2397 				DPAA2_PMD_ERR("QoS Extract add TCP_DST failed.");
2398 
2399 				return -1;
2400 			}
2401 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2402 		}
2403 
2404 		index = dpaa2_flow_extract_search(
2405 				&priv->extract.tc_key_extract[group].dpkg,
2406 				NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
2407 		if (index < 0) {
2408 			ret = dpaa2_flow_extract_add(
2409 					&priv->extract.tc_key_extract[group],
2410 					NET_PROT_TCP,
2411 					NH_FLD_TCP_PORT_DST,
2412 					NH_FLD_TCP_PORT_SIZE);
2413 			if (ret) {
2414 				DPAA2_PMD_ERR("FS Extract add TCP_DST failed.");
2415 
2416 				return -1;
2417 			}
2418 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2419 		}
2420 
2421 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2422 		if (ret) {
2423 			DPAA2_PMD_ERR(
2424 				"Move ipaddr before TCP_PORT_DST set failed");
2425 			return -1;
2426 		}
2427 
2428 		ret = dpaa2_flow_rule_data_set(
2429 				&priv->extract.qos_key_extract,
2430 				&flow->qos_rule,
2431 				NET_PROT_TCP,
2432 				NH_FLD_TCP_PORT_DST,
2433 				&spec->hdr.dst_port,
2434 				&mask->hdr.dst_port,
2435 				NH_FLD_TCP_PORT_SIZE);
2436 		if (ret) {
2437 			DPAA2_PMD_ERR(
2438 				"QoS NH_FLD_TCP_PORT_DST rule data set failed");
2439 			return -1;
2440 		}
2441 
2442 		ret = dpaa2_flow_rule_data_set(
2443 				&priv->extract.tc_key_extract[group],
2444 				&flow->fs_rule,
2445 				NET_PROT_TCP,
2446 				NH_FLD_TCP_PORT_DST,
2447 				&spec->hdr.dst_port,
2448 				&mask->hdr.dst_port,
2449 				NH_FLD_TCP_PORT_SIZE);
2450 		if (ret) {
2451 			DPAA2_PMD_ERR(
2452 				"FS NH_FLD_TCP_PORT_DST rule data set failed");
2453 			return -1;
2454 		}
2455 	}
2456 
2457 	(*device_configured) |= local_cfg;
2458 
2459 	return 0;
2460 }
2461 
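/* Configure a flow rule for the SCTP pattern item.
 * Same scheme as UDP/TCP: protocol discrimination through
 * IP next-protocol == IPPROTO_SCTP, then optional matching on the
 * masked SCTP source/destination ports.
 */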
2462 static int
2463 dpaa2_configure_flow_sctp(struct rte_flow *flow,
2464 			  struct rte_eth_dev *dev,
2465 			  const struct rte_flow_attr *attr,
2466 			  const struct rte_flow_item *pattern,
2467 			  const struct rte_flow_action actions[] __rte_unused,
2468 			  struct rte_flow_error *error __rte_unused,
2469 			  int *device_configured)
2470 {
2471 	int index, ret;
2472 	int local_cfg = 0;
2473 	uint32_t group;
2474 	const struct rte_flow_item_sctp *spec, *mask;
2475 
2476 	const struct rte_flow_item_sctp *last __rte_unused;
2477 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2478 
2479 	group = attr->group;
2480 
2481 	/* Parse pattern list to get the matching parameters */
2482 	spec    = (const struct rte_flow_item_sctp *)pattern->spec;
2483 	last    = (const struct rte_flow_item_sctp *)pattern->last;
2484 	mask    = (const struct rte_flow_item_sctp *)
2485 			(pattern->mask ? pattern->mask :
2486 				&dpaa2_flow_item_sctp_mask);
2487 
2488 	/* Get traffic class index and flow id to be configured */
2489 	flow->tc_id = group;
2490 	flow->tc_index = attr->priority;
2491 
2492 	if (!spec || !mc_l4_port_identification) {
2493 		struct proto_discrimination proto;
2494 
2495 		index = dpaa2_flow_extract_search(
2496 				&priv->extract.qos_key_extract.dpkg,
2497 				NET_PROT_IP, NH_FLD_IP_PROTO);
2498 		if (index < 0) {
2499 			ret = dpaa2_flow_proto_discrimination_extract(
2500 					&priv->extract.qos_key_extract,
2501 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2502 			if (ret) {
2503 				DPAA2_PMD_ERR(
2504 					"QoS Extract IP protocol to discriminate SCTP failed.");
2505 
2506 				return -1;
2507 			}
2508 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2509 		}
2510 
2511 		index = dpaa2_flow_extract_search(
2512 				&priv->extract.tc_key_extract[group].dpkg,
2513 				NET_PROT_IP, NH_FLD_IP_PROTO);
2514 		if (index < 0) {
2515 			ret = dpaa2_flow_proto_discrimination_extract(
2516 					&priv->extract.tc_key_extract[group],
2517 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2518 			if (ret) {
2519 				DPAA2_PMD_ERR(
2520 					"FS Extract IP protocol to discriminate SCTP failed.");
2521 
2522 				return -1;
2523 			}
2524 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2525 		}
2526 
2527 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2528 		if (ret) {
2529 			DPAA2_PMD_ERR(
2530 				"Move ipaddr before SCTP discrimination set failed");
2531 			return -1;
2532 		}
2533 
2534 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2535 		proto.ip_proto = IPPROTO_SCTP;
2536 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2537 							proto, group);
2538 		if (ret) {
2539 			DPAA2_PMD_ERR("SCTP discrimination rule set failed");
2540 			return -1;
2541 		}
2542 
2543 		(*device_configured) |= local_cfg;
2544 
2545 		if (!spec)
2546 			return 0;
2547 	}
2548 
2549 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2550 		RTE_FLOW_ITEM_TYPE_SCTP)) {
2551 		DPAA2_PMD_WARN("Extract field(s) of SCTP not supported.");
2552 
2553 		return -1;
2554 	}
2555 
2556 	if (mask->hdr.src_port) {
2557 		index = dpaa2_flow_extract_search(
2558 				&priv->extract.qos_key_extract.dpkg,
2559 				NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
2560 		if (index < 0) {
2561 			ret = dpaa2_flow_extract_add(
2562 					&priv->extract.qos_key_extract,
2563 					NET_PROT_SCTP,
2564 					NH_FLD_SCTP_PORT_SRC,
2565 					NH_FLD_SCTP_PORT_SIZE);
2566 			if (ret) {
2567 				DPAA2_PMD_ERR("QoS Extract add SCTP_SRC failed.");
2568 
2569 				return -1;
2570 			}
2571 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2572 		}
2573 
2574 		index = dpaa2_flow_extract_search(
2575 				&priv->extract.tc_key_extract[group].dpkg,
2576 				NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
2577 		if (index < 0) {
2578 			ret = dpaa2_flow_extract_add(
2579 					&priv->extract.tc_key_extract[group],
2580 					NET_PROT_SCTP,
2581 					NH_FLD_SCTP_PORT_SRC,
2582 					NH_FLD_SCTP_PORT_SIZE);
2583 			if (ret) {
2584 				DPAA2_PMD_ERR("FS Extract add SCTP_SRC failed.");
2585 
2586 				return -1;
2587 			}
2588 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2589 		}
2590 
2591 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2592 		if (ret) {
2593 			DPAA2_PMD_ERR(
2594 				"Move ipaddr before SCTP_PORT_SRC set failed");
2595 			return -1;
2596 		}
2597 
2598 		ret = dpaa2_flow_rule_data_set(
2599 				&priv->extract.qos_key_extract,
2600 				&flow->qos_rule,
2601 				NET_PROT_SCTP,
2602 				NH_FLD_SCTP_PORT_SRC,
2603 				&spec->hdr.src_port,
2604 				&mask->hdr.src_port,
2605 				NH_FLD_SCTP_PORT_SIZE);
2606 		if (ret) {
2607 			DPAA2_PMD_ERR(
2608 				"QoS NH_FLD_SCTP_PORT_SRC rule data set failed");
2609 			return -1;
2610 		}
2611 
2612 		ret = dpaa2_flow_rule_data_set(
2613 				&priv->extract.tc_key_extract[group],
2614 				&flow->fs_rule,
2615 				NET_PROT_SCTP,
2616 				NH_FLD_SCTP_PORT_SRC,
2617 				&spec->hdr.src_port,
2618 				&mask->hdr.src_port,
2619 				NH_FLD_SCTP_PORT_SIZE);
2620 		if (ret) {
2621 			DPAA2_PMD_ERR(
2622 				"FS NH_FLD_SCTP_PORT_SRC rule data set failed");
2623 			return -1;
2624 		}
2625 	}
2626 
2627 	if (mask->hdr.dst_port) {
2628 		index = dpaa2_flow_extract_search(
2629 				&priv->extract.qos_key_extract.dpkg,
2630 				NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
2631 		if (index < 0) {
2632 			ret = dpaa2_flow_extract_add(
2633 					&priv->extract.qos_key_extract,
2634 					NET_PROT_SCTP,
2635 					NH_FLD_SCTP_PORT_DST,
2636 					NH_FLD_SCTP_PORT_SIZE);
2637 			if (ret) {
2638 				DPAA2_PMD_ERR("QoS Extract add SCTP_DST failed.");
2639 
2640 				return -1;
2641 			}
2642 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2643 		}
2644 
2645 		index = dpaa2_flow_extract_search(
2646 				&priv->extract.tc_key_extract[group].dpkg,
2647 				NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
2648 		if (index < 0) {
2649 			ret = dpaa2_flow_extract_add(
2650 					&priv->extract.tc_key_extract[group],
2651 					NET_PROT_SCTP,
2652 					NH_FLD_SCTP_PORT_DST,
2653 					NH_FLD_SCTP_PORT_SIZE);
2654 			if (ret) {
2655 				DPAA2_PMD_ERR("FS Extract add SCTP_DST failed.");
2656 
2657 				return -1;
2658 			}
2659 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2660 		}
2661 
2662 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2663 		if (ret) {
2664 			DPAA2_PMD_ERR(
2665 				"Move ipaddr before SCTP_PORT_DST set failed");
2666 			return -1;
2667 		}
2668 
2669 		ret = dpaa2_flow_rule_data_set(
2670 				&priv->extract.qos_key_extract,
2671 				&flow->qos_rule,
2672 				NET_PROT_SCTP,
2673 				NH_FLD_SCTP_PORT_DST,
2674 				&spec->hdr.dst_port,
2675 				&mask->hdr.dst_port,
2676 				NH_FLD_SCTP_PORT_SIZE);
2677 		if (ret) {
2678 			DPAA2_PMD_ERR(
2679 				"QoS NH_FLD_SCTP_PORT_DST rule data set failed");
2680 			return -1;
2681 		}
2682 
2683 		ret = dpaa2_flow_rule_data_set(
2684 				&priv->extract.tc_key_extract[group],
2685 				&flow->fs_rule,
2686 				NET_PROT_SCTP,
2687 				NH_FLD_SCTP_PORT_DST,
2688 				&spec->hdr.dst_port,
2689 				&mask->hdr.dst_port,
2690 				NH_FLD_SCTP_PORT_SIZE);
2691 		if (ret) {
2692 			DPAA2_PMD_ERR(
2693 				"FS NH_FLD_SCTP_PORT_DST rule data set failed");
2694 			return -1;
2695 		}
2696 	}
2697 
2698 	(*device_configured) |= local_cfg;
2699 
2700 	return 0;
2701 }
2702 
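/* Configure a flow rule for the GRE pattern item.
 * With no spec, only IP next-protocol == IPPROTO_GRE is matched.
 * Otherwise the sole supported field is the GRE protocol type
 * (NH_FLD_GRE_TYPE).
 */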
2703 static int
2704 dpaa2_configure_flow_gre(struct rte_flow *flow,
2705 			 struct rte_eth_dev *dev,
2706 			 const struct rte_flow_attr *attr,
2707 			 const struct rte_flow_item *pattern,
2708 			 const struct rte_flow_action actions[] __rte_unused,
2709 			 struct rte_flow_error *error __rte_unused,
2710 			 int *device_configured)
2711 {
2712 	int index, ret;
2713 	int local_cfg = 0;
2714 	uint32_t group;
2715 	const struct rte_flow_item_gre *spec, *mask;
2716 
2717 	const struct rte_flow_item_gre *last __rte_unused;
2718 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2719 
2720 	group = attr->group;
2721 
2722 	/* Parse pattern list to get the matching parameters */
2723 	spec    = (const struct rte_flow_item_gre *)pattern->spec;
2724 	last    = (const struct rte_flow_item_gre *)pattern->last;
2725 	mask    = (const struct rte_flow_item_gre *)
2726 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_gre_mask);
2727 
2728 	/* Get traffic class index and flow id to be configured */
2729 	flow->tc_id = group;
2730 	flow->tc_index = attr->priority;
2731 
2732 	if (!spec) {
2733 		struct proto_discrimination proto;
2734 
2735 		index = dpaa2_flow_extract_search(
2736 				&priv->extract.qos_key_extract.dpkg,
2737 				NET_PROT_IP, NH_FLD_IP_PROTO);
2738 		if (index < 0) {
2739 			ret = dpaa2_flow_proto_discrimination_extract(
2740 					&priv->extract.qos_key_extract,
2741 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2742 			if (ret) {
2743 				DPAA2_PMD_ERR(
2744 					"QoS Extract IP protocol to discriminate GRE failed.");
2745 
2746 				return -1;
2747 			}
2748 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2749 		}
2750 
2751 		index = dpaa2_flow_extract_search(
2752 				&priv->extract.tc_key_extract[group].dpkg,
2753 				NET_PROT_IP, NH_FLD_IP_PROTO);
2754 		if (index < 0) {
2755 			ret = dpaa2_flow_proto_discrimination_extract(
2756 					&priv->extract.tc_key_extract[group],
2757 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2758 			if (ret) {
2759 				DPAA2_PMD_ERR(
2760 					"FS Extract IP protocol to discriminate GRE failed.");
2761 
2762 				return -1;
2763 			}
2764 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2765 		}
2766 
2767 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2768 		if (ret) {
2769 			DPAA2_PMD_ERR(
2770 				"Move IP addr before GRE discrimination set failed");
2771 			return -1;
2772 		}
2773 
2774 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2775 		proto.ip_proto = IPPROTO_GRE;
2776 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2777 							proto, group);
2778 		if (ret) {
2779 			DPAA2_PMD_ERR("GRE discrimination rule set failed");
2780 			return -1;
2781 		}
2782 
2783 		(*device_configured) |= local_cfg;
2784 
2785 		return 0;
2786 	}
2787 
2788 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2789 		RTE_FLOW_ITEM_TYPE_GRE)) {
2790 		DPAA2_PMD_WARN("Extract field(s) of GRE not supported.");
2791 
2792 		return -1;
2793 	}
2794 
2795 	if (!mask->protocol)
2796 		return 0;
2797 
2798 	index = dpaa2_flow_extract_search(
2799 			&priv->extract.qos_key_extract.dpkg,
2800 			NET_PROT_GRE, NH_FLD_GRE_TYPE);
2801 	if (index < 0) {
2802 		ret = dpaa2_flow_extract_add(
2803 				&priv->extract.qos_key_extract,
2804 				NET_PROT_GRE,
2805 				NH_FLD_GRE_TYPE,
2806 				sizeof(rte_be16_t));
2807 		if (ret) {
2808 			DPAA2_PMD_ERR("QoS Extract add GRE_TYPE failed.");
2809 
2810 			return -1;
2811 		}
2812 		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2813 	}
2814 
2815 	index = dpaa2_flow_extract_search(
2816 			&priv->extract.tc_key_extract[group].dpkg,
2817 			NET_PROT_GRE, NH_FLD_GRE_TYPE);
2818 	if (index < 0) {
2819 		ret = dpaa2_flow_extract_add(
2820 				&priv->extract.tc_key_extract[group],
2821 				NET_PROT_GRE,
2822 				NH_FLD_GRE_TYPE,
2823 				sizeof(rte_be16_t));
2824 		if (ret) {
2825 			DPAA2_PMD_ERR("FS Extract add GRE_TYPE failed.");
2826 
2827 			return -1;
2828 		}
2829 		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2830 	}
2831 
2832 	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2833 	if (ret) {
2834 		DPAA2_PMD_ERR(
2835 			"Move ipaddr before GRE_TYPE set failed");
2836 		return -1;
2837 	}
2838 
2839 	ret = dpaa2_flow_rule_data_set(
2840 				&priv->extract.qos_key_extract,
2841 				&flow->qos_rule,
2842 				NET_PROT_GRE,
2843 				NH_FLD_GRE_TYPE,
2844 				&spec->protocol,
2845 				&mask->protocol,
2846 				sizeof(rte_be16_t));
2847 	if (ret) {
2848 		DPAA2_PMD_ERR(
2849 			"QoS NH_FLD_GRE_TYPE rule data set failed");
2850 		return -1;
2851 	}
2852 
2853 	ret = dpaa2_flow_rule_data_set(
2854 			&priv->extract.tc_key_extract[group],
2855 			&flow->fs_rule,
2856 			NET_PROT_GRE,
2857 			NH_FLD_GRE_TYPE,
2858 			&spec->protocol,
2859 			&mask->protocol,
2860 			sizeof(rte_be16_t));
2861 	if (ret) {
2862 		DPAA2_PMD_ERR(
2863 			"FS NH_FLD_GRE_TYPE rule data set failed");
2864 		return -1;
2865 	}
2866 
2867 	(*device_configured) |= local_cfg;
2868 
2869 	return 0;
2870 }
2871 
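/* Configure a flow rule for the RAW pattern item.
 * Both spec and mask are required; only a non-relative match at
 * offset 0 with equal spec/mask lengths is supported. The raw
 * pattern is copied into the QoS and FS keys as-is.
 */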
2872 static int
2873 dpaa2_configure_flow_raw(struct rte_flow *flow,
2874 			 struct rte_eth_dev *dev,
2875 			 const struct rte_flow_attr *attr,
2876 			 const struct rte_flow_item *pattern,
2877 			 const struct rte_flow_action actions[] __rte_unused,
2878 			 struct rte_flow_error *error __rte_unused,
2879 			 int *device_configured)
2880 {
2881 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2882 	const struct rte_flow_item_raw *spec = pattern->spec;
2883 	const struct rte_flow_item_raw *mask = pattern->mask;
2884 	int prev_key_size =
2885 		priv->extract.qos_key_extract.key_info.key_total_size;
2886 	int local_cfg = 0, ret;
2887 	uint32_t group;
2888 
2889 	/* Need both spec and mask */
2890 	if (!spec || !mask) {
2891 		DPAA2_PMD_ERR("spec or mask not present.");
2892 		return -EINVAL;
2893 	}
2894 	/* Only supports non-relative with offset 0 */
2895 	if (spec->relative || spec->offset != 0 ||
2896 	    spec->search || spec->limit) {
2897 		DPAA2_PMD_ERR("Relative match and non-zero offset are not supported.");
2898 		return -EINVAL;
2899 	}
2900 	/* Spec len and mask len should be same */
2901 	if (spec->length != mask->length) {
2902 		DPAA2_PMD_ERR("Spec len and mask len mismatch.");
2903 		return -EINVAL;
2904 	}
2905 
2906 	/* Get traffic class index and flow id to be configured */
2907 	group = attr->group;
2908 	flow->tc_id = group;
2909 	flow->tc_index = attr->priority;
2910 
2911 	if (prev_key_size <= spec->length) {
2912 		ret = dpaa2_flow_extract_add_raw(&priv->extract.qos_key_extract,
2913 						 spec->length);
2914 		if (ret) {
2915 			DPAA2_PMD_ERR("QoS Extract RAW add failed.");
2916 			return -1;
2917 		}
2918 		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2919 
2920 		ret = dpaa2_flow_extract_add_raw(
2921 					&priv->extract.tc_key_extract[group],
2922 					spec->length);
2923 		if (ret) {
2924 			DPAA2_PMD_ERR("FS Extract RAW add failed.");
2925 			return -1;
2926 		}
2927 		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2928 	}
2929 
2930 	ret = dpaa2_flow_rule_data_set_raw(&flow->qos_rule, spec->pattern,
2931 					   mask->pattern, spec->length);
2932 	if (ret) {
2933 		DPAA2_PMD_ERR("QoS RAW rule data set failed");
2934 		return -1;
2935 	}
2936 
2937 	ret = dpaa2_flow_rule_data_set_raw(&flow->fs_rule, spec->pattern,
2938 					   mask->pattern, spec->length);
2939 	if (ret) {
2940 		DPAA2_PMD_ERR("FS RAW rule data set failed");
2941 		return -1;
2942 	}
2943 
2944 	(*device_configured) |= local_cfg;
2945 
2946 	return 0;
2947 }
2948 
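/* Return 1 if the action type can be carried by an FS entry
 * (queue or port redirect), 0 otherwise.
 */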
2949 static inline int
2950 dpaa2_fs_action_supported(enum rte_flow_action_type action)
2951 {
2952 	int i;
2953 
2954 	for (i = 0; i < (int)(sizeof(dpaa2_supported_fs_action_type) /
2955 					sizeof(enum rte_flow_action_type)); i++) {
2956 		if (action == dpaa2_supported_fs_action_type[i])
2957 			return 1;
2958 	}
2959 
2960 	return 0;
2961 }
2962 /* Existing QoS/FS entries that match on IP address(es)
2963  * need to be updated after new extract(s) are inserted
2964  * ahead of the IP address extract(s), because the IP
2965  * address offsets within the key change.
2966  */
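/* In outline, the update below removes each affected QoS/FS entry,
 * shifts the IP address bytes of its key/mask to the new offsets,
 * then re-adds the entry with the fixed entry size.
 */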
2967 static int
2968 dpaa2_flow_entry_update(
2969 	struct dpaa2_dev_priv *priv, uint8_t tc_id)
2970 {
2971 	struct rte_flow *curr = LIST_FIRST(&priv->flows);
2972 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
2973 	int ret;
2974 	int qos_ipsrc_offset = -1, qos_ipdst_offset = -1;
2975 	int fs_ipsrc_offset = -1, fs_ipdst_offset = -1;
2976 	struct dpaa2_key_extract *qos_key_extract =
2977 		&priv->extract.qos_key_extract;
2978 	struct dpaa2_key_extract *tc_key_extract =
2979 		&priv->extract.tc_key_extract[tc_id];
2980 	char ipsrc_key[NH_FLD_IPV6_ADDR_SIZE];
2981 	char ipdst_key[NH_FLD_IPV6_ADDR_SIZE];
2982 	char ipsrc_mask[NH_FLD_IPV6_ADDR_SIZE];
2983 	char ipdst_mask[NH_FLD_IPV6_ADDR_SIZE];
2984 	int extend = -1, extend1, size = -1;
2985 	uint16_t qos_index;
2986 
2987 	while (curr) {
2988 		if (curr->ipaddr_rule.ipaddr_type ==
2989 			FLOW_NONE_IPADDR) {
2990 			curr = LIST_NEXT(curr, next);
2991 			continue;
2992 		}
2993 
2994 		if (curr->ipaddr_rule.ipaddr_type ==
2995 			FLOW_IPV4_ADDR) {
2996 			qos_ipsrc_offset =
2997 				qos_key_extract->key_info.ipv4_src_offset;
2998 			qos_ipdst_offset =
2999 				qos_key_extract->key_info.ipv4_dst_offset;
3000 			fs_ipsrc_offset =
3001 				tc_key_extract->key_info.ipv4_src_offset;
3002 			fs_ipdst_offset =
3003 				tc_key_extract->key_info.ipv4_dst_offset;
3004 			size = NH_FLD_IPV4_ADDR_SIZE;
3005 		} else {
3006 			qos_ipsrc_offset =
3007 				qos_key_extract->key_info.ipv6_src_offset;
3008 			qos_ipdst_offset =
3009 				qos_key_extract->key_info.ipv6_dst_offset;
3010 			fs_ipsrc_offset =
3011 				tc_key_extract->key_info.ipv6_src_offset;
3012 			fs_ipdst_offset =
3013 				tc_key_extract->key_info.ipv6_dst_offset;
3014 			size = NH_FLD_IPV6_ADDR_SIZE;
3015 		}
3016 
3017 		qos_index = curr->tc_id * priv->fs_entries +
3018 			curr->tc_index;
3019 
3020 		dpaa2_flow_qos_entry_log("Before update", curr, qos_index);
3021 
3022 		if (priv->num_rx_tc > 1) {
3023 			ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW,
3024 					priv->token, &curr->qos_rule);
3025 			if (ret) {
3026 				DPAA2_PMD_ERR("QoS entry remove failed.");
3027 				return -1;
3028 			}
3029 		}
3030 
3031 		extend = -1;
3032 
3033 		if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
3034 			RTE_ASSERT(qos_ipsrc_offset >=
3035 				curr->ipaddr_rule.qos_ipsrc_offset);
3036 			extend1 = qos_ipsrc_offset -
3037 				curr->ipaddr_rule.qos_ipsrc_offset;
3038 			if (extend >= 0)
3039 				RTE_ASSERT(extend == extend1);
3040 			else
3041 				extend = extend1;
3042 
3043 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3044 				(size == NH_FLD_IPV6_ADDR_SIZE));
3045 
3046 			memcpy(ipsrc_key,
3047 				(char *)(size_t)curr->qos_rule.key_iova +
3048 				curr->ipaddr_rule.qos_ipsrc_offset,
3049 				size);
3050 			memset((char *)(size_t)curr->qos_rule.key_iova +
3051 				curr->ipaddr_rule.qos_ipsrc_offset,
3052 				0, size);
3053 
3054 			memcpy(ipsrc_mask,
3055 				(char *)(size_t)curr->qos_rule.mask_iova +
3056 				curr->ipaddr_rule.qos_ipsrc_offset,
3057 				size);
3058 			memset((char *)(size_t)curr->qos_rule.mask_iova +
3059 				curr->ipaddr_rule.qos_ipsrc_offset,
3060 				0, size);
3061 
3062 			curr->ipaddr_rule.qos_ipsrc_offset = qos_ipsrc_offset;
3063 		}
3064 
3065 		if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
3066 			RTE_ASSERT(qos_ipdst_offset >=
3067 				curr->ipaddr_rule.qos_ipdst_offset);
3068 			extend1 = qos_ipdst_offset -
3069 				curr->ipaddr_rule.qos_ipdst_offset;
3070 			if (extend >= 0)
3071 				RTE_ASSERT(extend == extend1);
3072 			else
3073 				extend = extend1;
3074 
3075 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3076 				(size == NH_FLD_IPV6_ADDR_SIZE));
3077 
3078 			memcpy(ipdst_key,
3079 				(char *)(size_t)curr->qos_rule.key_iova +
3080 				curr->ipaddr_rule.qos_ipdst_offset,
3081 				size);
3082 			memset((char *)(size_t)curr->qos_rule.key_iova +
3083 				curr->ipaddr_rule.qos_ipdst_offset,
3084 				0, size);
3085 
3086 			memcpy(ipdst_mask,
3087 				(char *)(size_t)curr->qos_rule.mask_iova +
3088 				curr->ipaddr_rule.qos_ipdst_offset,
3089 				size);
3090 			memset((char *)(size_t)curr->qos_rule.mask_iova +
3091 				curr->ipaddr_rule.qos_ipdst_offset,
3092 				0, size);
3093 
3094 			curr->ipaddr_rule.qos_ipdst_offset = qos_ipdst_offset;
3095 		}
3096 
3097 		if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
3098 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3099 				(size == NH_FLD_IPV6_ADDR_SIZE));
3100 			memcpy((char *)(size_t)curr->qos_rule.key_iova +
3101 				curr->ipaddr_rule.qos_ipsrc_offset,
3102 				ipsrc_key,
3103 				size);
3104 			memcpy((char *)(size_t)curr->qos_rule.mask_iova +
3105 				curr->ipaddr_rule.qos_ipsrc_offset,
3106 				ipsrc_mask,
3107 				size);
3108 		}
3109 		if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
3110 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3111 				(size == NH_FLD_IPV6_ADDR_SIZE));
3112 			memcpy((char *)(size_t)curr->qos_rule.key_iova +
3113 				curr->ipaddr_rule.qos_ipdst_offset,
3114 				ipdst_key,
3115 				size);
3116 			memcpy((char *)(size_t)curr->qos_rule.mask_iova +
3117 				curr->ipaddr_rule.qos_ipdst_offset,
3118 				ipdst_mask,
3119 				size);
3120 		}
3121 
3122 		if (extend >= 0)
3123 			curr->qos_real_key_size += extend;
3124 
3125 		curr->qos_rule.key_size = FIXED_ENTRY_SIZE;
3126 
3127 		dpaa2_flow_qos_entry_log("Start update", curr, qos_index);
3128 
3129 		if (priv->num_rx_tc > 1) {
3130 			ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
3131 					priv->token, &curr->qos_rule,
3132 					curr->tc_id, qos_index,
3133 					0, 0);
3134 			if (ret) {
3135 				DPAA2_PMD_ERR("QoS entry update failed.");
3136 				return -1;
3137 			}
3138 		}
3139 
3140 		if (!dpaa2_fs_action_supported(curr->action)) {
3141 			curr = LIST_NEXT(curr, next);
3142 			continue;
3143 		}
3144 
3145 		dpaa2_flow_fs_entry_log("Before update", curr);
3146 		extend = -1;
3147 
3148 		ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW,
3149 				priv->token, curr->tc_id, &curr->fs_rule);
3150 		if (ret) {
3151 			DPAA2_PMD_ERR("FS entry remove failed.");
3152 			return -1;
3153 		}
3154 
3155 		if (curr->ipaddr_rule.fs_ipsrc_offset >= 0 &&
3156 			tc_id == curr->tc_id) {
3157 			RTE_ASSERT(fs_ipsrc_offset >=
3158 				curr->ipaddr_rule.fs_ipsrc_offset);
3159 			extend1 = fs_ipsrc_offset -
3160 				curr->ipaddr_rule.fs_ipsrc_offset;
3161 			if (extend >= 0)
3162 				RTE_ASSERT(extend == extend1);
3163 			else
3164 				extend = extend1;
3165 
3166 			memcpy(ipsrc_key,
3167 				(char *)(size_t)curr->fs_rule.key_iova +
3168 				curr->ipaddr_rule.fs_ipsrc_offset,
3169 				size);
3170 			memset((char *)(size_t)curr->fs_rule.key_iova +
3171 				curr->ipaddr_rule.fs_ipsrc_offset,
3172 				0, size);
3173 
3174 			memcpy(ipsrc_mask,
3175 				(char *)(size_t)curr->fs_rule.mask_iova +
3176 				curr->ipaddr_rule.fs_ipsrc_offset,
3177 				size);
3178 			memset((char *)(size_t)curr->fs_rule.mask_iova +
3179 				curr->ipaddr_rule.fs_ipsrc_offset,
3180 				0, size);
3181 
3182 			curr->ipaddr_rule.fs_ipsrc_offset = fs_ipsrc_offset;
3183 		}
3184 
3185 		if (curr->ipaddr_rule.fs_ipdst_offset >= 0 &&
3186 			tc_id == curr->tc_id) {
3187 			RTE_ASSERT(fs_ipdst_offset >=
3188 				curr->ipaddr_rule.fs_ipdst_offset);
3189 			extend1 = fs_ipdst_offset -
3190 				curr->ipaddr_rule.fs_ipdst_offset;
3191 			if (extend >= 0)
3192 				RTE_ASSERT(extend == extend1);
3193 			else
3194 				extend = extend1;
3195 
3196 			memcpy(ipdst_key,
3197 				(char *)(size_t)curr->fs_rule.key_iova +
3198 				curr->ipaddr_rule.fs_ipdst_offset,
3199 				size);
3200 			memset((char *)(size_t)curr->fs_rule.key_iova +
3201 				curr->ipaddr_rule.fs_ipdst_offset,
3202 				0, size);
3203 
3204 			memcpy(ipdst_mask,
3205 				(char *)(size_t)curr->fs_rule.mask_iova +
3206 				curr->ipaddr_rule.fs_ipdst_offset,
3207 				size);
3208 			memset((char *)(size_t)curr->fs_rule.mask_iova +
3209 				curr->ipaddr_rule.fs_ipdst_offset,
3210 				0, size);
3211 
3212 			curr->ipaddr_rule.fs_ipdst_offset = fs_ipdst_offset;
3213 		}
3214 
3215 		if (curr->ipaddr_rule.fs_ipsrc_offset >= 0) {
3216 			memcpy((char *)(size_t)curr->fs_rule.key_iova +
3217 				curr->ipaddr_rule.fs_ipsrc_offset,
3218 				ipsrc_key,
3219 				size);
3220 			memcpy((char *)(size_t)curr->fs_rule.mask_iova +
3221 				curr->ipaddr_rule.fs_ipsrc_offset,
3222 				ipsrc_mask,
3223 				size);
3224 		}
3225 		if (curr->ipaddr_rule.fs_ipdst_offset >= 0) {
3226 			memcpy((char *)(size_t)curr->fs_rule.key_iova +
3227 				curr->ipaddr_rule.fs_ipdst_offset,
3228 				ipdst_key,
3229 				size);
3230 			memcpy((char *)(size_t)curr->fs_rule.mask_iova +
3231 				curr->ipaddr_rule.fs_ipdst_offset,
3232 				ipdst_mask,
3233 				size);
3234 		}
3235 
3236 		if (extend >= 0)
3237 			curr->fs_real_key_size += extend;
3238 		curr->fs_rule.key_size = FIXED_ENTRY_SIZE;
3239 
3240 		dpaa2_flow_fs_entry_log("Start update", curr);
3241 
3242 		ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW,
3243 				priv->token, curr->tc_id, curr->tc_index,
3244 				&curr->fs_rule, &curr->action_cfg);
3245 		if (ret) {
3246 			DPAA2_PMD_ERR("FS entry update failed.");
3247 			return -1;
3248 		}
3249 
3250 		curr = LIST_NEXT(curr, next);
3251 	}
3252 
3253 	return 0;
3254 }
3255 
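/* Reject a flow whose (group, priority) pair duplicates an
 * existing one, since the pair maps directly to (TC id, TC index).
 */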
3256 static inline int
3257 dpaa2_flow_verify_attr(
3258 	struct dpaa2_dev_priv *priv,
3259 	const struct rte_flow_attr *attr)
3260 {
3261 	struct rte_flow *curr = LIST_FIRST(&priv->flows);
3262 
3263 	while (curr) {
3264 		if (curr->tc_id == attr->group &&
3265 			curr->tc_index == attr->priority) {
3266 			DPAA2_PMD_ERR(
3267 				"Flow with group %d and priority %d already exists.",
3268 				attr->group, attr->priority);
3269 
3270 			return -1;
3271 		}
3272 		curr = LIST_NEXT(curr, next);
3273 	}
3274 
3275 	return 0;
3276 }
3277 
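/* Resolve the destination ethdev for a PHY_PORT/PORT_ID action.
 * Returns NULL if the action type does not redirect, if the port
 * index is invalid, or if the target is not a DPAA2 device.
 */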
3278 static inline struct rte_eth_dev *
3279 dpaa2_flow_redirect_dev(struct dpaa2_dev_priv *priv,
3280 	const struct rte_flow_action *action)
3281 {
3282 	const struct rte_flow_action_phy_port *phy_port;
3283 	const struct rte_flow_action_port_id *port_id;
3284 	int idx = -1;
3285 	struct rte_eth_dev *dest_dev;
3286 
3287 	if (action->type == RTE_FLOW_ACTION_TYPE_PHY_PORT) {
3288 		phy_port = (const struct rte_flow_action_phy_port *)
3289 					action->conf;
3290 		if (!phy_port->original)
3291 			idx = phy_port->index;
3292 	} else if (action->type == RTE_FLOW_ACTION_TYPE_PORT_ID) {
3293 		port_id = (const struct rte_flow_action_port_id *)
3294 					action->conf;
3295 		if (!port_id->original)
3296 			idx = port_id->id;
3297 	} else {
3298 		return NULL;
3299 	}
3300 
3301 	if (idx >= 0) {
3302 		if (!rte_eth_dev_is_valid_port(idx))
3303 			return NULL;
3304 		dest_dev = &rte_eth_devices[idx];
3305 	} else {
3306 		dest_dev = priv->eth_dev;
3307 	}
3308 
3309 	if (!dpaa2_dev_is_dpaa2(dest_dev))
3310 		return NULL;
3311 
3312 	return dest_dev;
3313 }
3314 
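/* Validate the action list before programming the hardware:
 * a QUEUE must target an RXQ of the flow's group (TC), a
 * PHY_PORT/PORT_ID must resolve to a DPAA2 port, and an RSS spread
 * must stay within the distribution size and the flow's group.
 */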
3315 static inline int
3316 dpaa2_flow_verify_action(
3317 	struct dpaa2_dev_priv *priv,
3318 	const struct rte_flow_attr *attr,
3319 	const struct rte_flow_action actions[])
3320 {
3321 	int end_of_list = 0, i, j = 0;
3322 	const struct rte_flow_action_queue *dest_queue;
3323 	const struct rte_flow_action_rss *rss_conf;
3324 	struct dpaa2_queue *rxq;
3325 
3326 	while (!end_of_list) {
3327 		switch (actions[j].type) {
3328 		case RTE_FLOW_ACTION_TYPE_QUEUE:
3329 			dest_queue = (const struct rte_flow_action_queue *)
3330 					(actions[j].conf);
3331 			rxq = priv->rx_vq[dest_queue->index];
3332 			if (attr->group != rxq->tc_index) {
3333 				DPAA2_PMD_ERR(
3334 					"RXQ[%d] does not belong to the group %d",
3335 					dest_queue->index, attr->group);
3336 
3337 				return -1;
3338 			}
3339 			break;
3340 		case RTE_FLOW_ACTION_TYPE_PHY_PORT:
3341 		case RTE_FLOW_ACTION_TYPE_PORT_ID:
3342 			if (!dpaa2_flow_redirect_dev(priv, &actions[j])) {
3343 				DPAA2_PMD_ERR("Invalid port id of action");
3344 				return -ENOTSUP;
3345 			}
3346 			break;
3347 		case RTE_FLOW_ACTION_TYPE_RSS:
3348 			rss_conf = (const struct rte_flow_action_rss *)
3349 					(actions[j].conf);
3350 			if (rss_conf->queue_num > priv->dist_queues) {
3351 				DPAA2_PMD_ERR(
3352 					"RSS number exceeds the distribution size");
3353 				return -ENOTSUP;
3354 			}
3355 			for (i = 0; i < (int)rss_conf->queue_num; i++) {
3356 				if (rss_conf->queue[i] >= priv->nb_rx_queues) {
3357 					DPAA2_PMD_ERR(
3358 						"RSS queue index exceeds the number of RXQs");
3359 					return -ENOTSUP;
3360 				}
3361 				rxq = priv->rx_vq[rss_conf->queue[i]];
3362 				if (rxq->tc_index != attr->group) {
3363 					DPAA2_PMD_ERR(
3364 						"Queue/Group combination is not supported\n");
3365 					return -ENOTSUP;
3366 				}
3367 			}
3368 
3369 			break;
3370 		case RTE_FLOW_ACTION_TYPE_END:
3371 			end_of_list = 1;
3372 			break;
3373 		default:
3374 			DPAA2_PMD_ERR("Invalid action type");
3375 			return -ENOTSUP;
3376 		}
3377 		j++;
3378 	}
3379 
3380 	return 0;
3381 }
3382 
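/* Build and program one generic flow:
 * 1) walk the pattern items and grow the QoS/FS key extracts,
 * 2) walk the actions and program the QoS and FS (or RSS) tables,
 * 3) update earlier entries whose key layout shifted, then link
 *    the flow into the per-port list.
 * Example (testpmd flow syntax, for illustration):
 *   flow create 0 ingress pattern eth / ipv4 / udp / end
 *     actions queue index 1 / end
 */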
3383 static int
3384 dpaa2_generic_flow_set(struct rte_flow *flow,
3385 		       struct rte_eth_dev *dev,
3386 		       const struct rte_flow_attr *attr,
3387 		       const struct rte_flow_item pattern[],
3388 		       const struct rte_flow_action actions[],
3389 		       struct rte_flow_error *error)
3390 {
3391 	const struct rte_flow_action_queue *dest_queue;
3392 	const struct rte_flow_action_rss *rss_conf;
3393 	int is_keycfg_configured = 0, end_of_list = 0;
3394 	int ret = 0, i = 0, j = 0;
3395 	struct dpni_rx_dist_cfg tc_cfg;
3396 	struct dpni_qos_tbl_cfg qos_cfg;
3397 	struct dpni_fs_action_cfg action;
3398 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3399 	struct dpaa2_queue *dest_q;
3400 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3401 	size_t param;
3402 	struct rte_flow *curr = LIST_FIRST(&priv->flows);
3403 	uint16_t qos_index;
3404 	struct rte_eth_dev *dest_dev;
3405 	struct dpaa2_dev_priv *dest_priv;
3406 
3407 	ret = dpaa2_flow_verify_attr(priv, attr);
3408 	if (ret)
3409 		return ret;
3410 
3411 	ret = dpaa2_flow_verify_action(priv, attr, actions);
3412 	if (ret)
3413 		return ret;
3414 
3415 	/* Parse pattern list to get the matching parameters */
3416 	while (!end_of_list) {
3417 		switch (pattern[i].type) {
3418 		case RTE_FLOW_ITEM_TYPE_ETH:
3419 			ret = dpaa2_configure_flow_eth(flow,
3420 					dev, attr, &pattern[i], actions, error,
3421 					&is_keycfg_configured);
3422 			if (ret) {
3423 				DPAA2_PMD_ERR("ETH flow configuration failed!");
3424 				return ret;
3425 			}
3426 			break;
3427 		case RTE_FLOW_ITEM_TYPE_VLAN:
3428 			ret = dpaa2_configure_flow_vlan(flow,
3429 					dev, attr, &pattern[i], actions, error,
3430 					&is_keycfg_configured);
3431 			if (ret) {
3432 				DPAA2_PMD_ERR("VLAN flow configuration failed!");
3433 				return ret;
3434 			}
3435 			break;
3436 		case RTE_FLOW_ITEM_TYPE_IPV4:
3437 		case RTE_FLOW_ITEM_TYPE_IPV6:
3438 			ret = dpaa2_configure_flow_generic_ip(flow,
3439 					dev, attr, &pattern[i], actions, error,
3440 					&is_keycfg_configured);
3441 			if (ret) {
3442 				DPAA2_PMD_ERR("IP flow configuration failed!");
3443 				return ret;
3444 			}
3445 			break;
3446 		case RTE_FLOW_ITEM_TYPE_ICMP:
3447 			ret = dpaa2_configure_flow_icmp(flow,
3448 					dev, attr, &pattern[i], actions, error,
3449 					&is_keycfg_configured);
3450 			if (ret) {
3451 				DPAA2_PMD_ERR("ICMP flow configuration failed!");
3452 				return ret;
3453 			}
3454 			break;
3455 		case RTE_FLOW_ITEM_TYPE_UDP:
3456 			ret = dpaa2_configure_flow_udp(flow,
3457 					dev, attr, &pattern[i], actions, error,
3458 					&is_keycfg_configured);
3459 			if (ret) {
3460 				DPAA2_PMD_ERR("UDP flow configuration failed!");
3461 				return ret;
3462 			}
3463 			break;
3464 		case RTE_FLOW_ITEM_TYPE_TCP:
3465 			ret = dpaa2_configure_flow_tcp(flow,
3466 					dev, attr, &pattern[i], actions, error,
3467 					&is_keycfg_configured);
3468 			if (ret) {
3469 				DPAA2_PMD_ERR("TCP flow configuration failed!");
3470 				return ret;
3471 			}
3472 			break;
3473 		case RTE_FLOW_ITEM_TYPE_SCTP:
3474 			ret = dpaa2_configure_flow_sctp(flow,
3475 					dev, attr, &pattern[i], actions, error,
3476 					&is_keycfg_configured);
3477 			if (ret) {
3478 				DPAA2_PMD_ERR("SCTP flow configuration failed!");
3479 				return ret;
3480 			}
3481 			break;
3482 		case RTE_FLOW_ITEM_TYPE_GRE:
3483 			ret = dpaa2_configure_flow_gre(flow,
3484 					dev, attr, &pattern[i], actions, error,
3485 					&is_keycfg_configured);
3486 			if (ret) {
3487 				DPAA2_PMD_ERR("GRE flow configuration failed!");
3488 				return ret;
3489 			}
3490 			break;
3491 		case RTE_FLOW_ITEM_TYPE_RAW:
3492 			ret = dpaa2_configure_flow_raw(flow,
3493 						       dev, attr, &pattern[i],
3494 						       actions, error,
3495 						       &is_keycfg_configured);
3496 			if (ret) {
3497 				DPAA2_PMD_ERR("RAW flow configuration failed!");
3498 				return ret;
3499 			}
3500 			break;
3501 		case RTE_FLOW_ITEM_TYPE_END:
3502 			end_of_list = 1;
3503 			break; /*End of List*/
3504 		default:
3505 			DPAA2_PMD_ERR("Invalid pattern item type");
3506 			ret = -ENOTSUP;
3507 			break;
3508 		}
3509 		i++;
3510 	}
3511 
3512 	/* Let's parse action on matching traffic */
3513 	end_of_list = 0;
3514 	while (!end_of_list) {
3515 		switch (actions[j].type) {
3516 		case RTE_FLOW_ACTION_TYPE_QUEUE:
3517 		case RTE_FLOW_ACTION_TYPE_PHY_PORT:
3518 		case RTE_FLOW_ACTION_TYPE_PORT_ID:
3519 			memset(&action, 0, sizeof(struct dpni_fs_action_cfg));
3520 			flow->action = actions[j].type;
3521 
3522 			if (actions[j].type == RTE_FLOW_ACTION_TYPE_QUEUE) {
3523 				dest_queue = (const struct rte_flow_action_queue *)
3524 								(actions[j].conf);
3525 				dest_q = priv->rx_vq[dest_queue->index];
3526 				action.flow_id = dest_q->flow_id;
3527 			} else {
3528 				dest_dev = dpaa2_flow_redirect_dev(priv,
3529 								   &actions[j]);
3530 				if (!dest_dev) {
3531 					DPAA2_PMD_ERR("Invalid destination device to redirect!");
3532 					return -1;
3533 				}
3534 
3535 				dest_priv = dest_dev->data->dev_private;
3536 				dest_q = dest_priv->tx_vq[0];
3537 				action.options =
3538 						DPNI_FS_OPT_REDIRECT_TO_DPNI_TX;
3539 				action.redirect_obj_token = dest_priv->token;
3540 				action.flow_id = dest_q->flow_id;
3541 			}
3542 
3543 			/* Configure the FS table first. */
3544 			if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
3545 				dpaa2_flow_fs_table_extracts_log(priv, flow->tc_id);
3546 				if (dpkg_prepare_key_cfg(
3547 				&priv->extract.tc_key_extract[flow->tc_id].dpkg,
3548 				(uint8_t *)(size_t)priv->extract
3549 				.tc_extract_param[flow->tc_id]) < 0) {
3550 					DPAA2_PMD_ERR(
3551 					"Unable to prepare extract parameters");
3552 					return -1;
3553 				}
3554 
3555 				memset(&tc_cfg, 0,
3556 					sizeof(struct dpni_rx_dist_cfg));
3557 				tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc;
3558 				tc_cfg.key_cfg_iova =
3559 					(uint64_t)priv->extract.tc_extract_param[flow->tc_id];
3560 				tc_cfg.tc = flow->tc_id;
3561 				tc_cfg.enable = false;
3562 				ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW,
3563 						priv->token, &tc_cfg);
3564 				if (ret < 0) {
3565 					DPAA2_PMD_ERR(
3566 						"TC hash cannot be disabled.(%d)",
3567 						ret);
3568 					return -1;
3569 				}
3570 				tc_cfg.enable = true;
3571 				tc_cfg.fs_miss_flow_id = dpaa2_flow_miss_flow_id;
3572 				ret = dpni_set_rx_fs_dist(dpni, CMD_PRI_LOW,
3573 							 priv->token, &tc_cfg);
3574 				if (ret < 0) {
3575 					DPAA2_PMD_ERR(
3576 						"TC distribution cannot be configured.(%d)",
3577 						ret);
3578 					return -1;
3579 				}
3580 			}
3581 
3582 			/* Then configure the QoS table. */
3583 			if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
3584 				dpaa2_flow_qos_table_extracts_log(priv);
3585 				if (dpkg_prepare_key_cfg(
3586 					&priv->extract.qos_key_extract.dpkg,
3587 					(uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
3588 					DPAA2_PMD_ERR(
3589 						"Unable to prepare extract parameters");
3590 					return -1;
3591 				}
3592 
3593 				memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
3594 				qos_cfg.discard_on_miss = false;
3595 				qos_cfg.default_tc = 0;
3596 				qos_cfg.keep_entries = true;
3597 				qos_cfg.key_cfg_iova =
3598 					(size_t)priv->extract.qos_extract_param;
3599 				/* The QoS table is effective only when multiple TCs are in use. */
3600 				if (priv->num_rx_tc > 1) {
3601 					ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
3602 						priv->token, &qos_cfg);
3603 					if (ret < 0) {
3604 						DPAA2_PMD_ERR(
3605 						"RSS QoS table cannot be configured (%d)\n",
3606 							ret);
3607 						return -1;
3608 					}
3609 				}
3610 			}
3611 
3612 			flow->qos_real_key_size = priv->extract
3613 				.qos_key_extract.key_info.key_total_size;
3614 			if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR) {
3615 				if (flow->ipaddr_rule.qos_ipdst_offset >=
3616 					flow->ipaddr_rule.qos_ipsrc_offset) {
3617 					flow->qos_real_key_size =
3618 						flow->ipaddr_rule.qos_ipdst_offset +
3619 						NH_FLD_IPV4_ADDR_SIZE;
3620 				} else {
3621 					flow->qos_real_key_size =
3622 						flow->ipaddr_rule.qos_ipsrc_offset +
3623 						NH_FLD_IPV4_ADDR_SIZE;
3624 				}
3625 			} else if (flow->ipaddr_rule.ipaddr_type ==
3626 				FLOW_IPV6_ADDR) {
3627 				if (flow->ipaddr_rule.qos_ipdst_offset >=
3628 					flow->ipaddr_rule.qos_ipsrc_offset) {
3629 					flow->qos_real_key_size =
3630 						flow->ipaddr_rule.qos_ipdst_offset +
3631 						NH_FLD_IPV6_ADDR_SIZE;
3632 				} else {
3633 					flow->qos_real_key_size =
3634 						flow->ipaddr_rule.qos_ipsrc_offset +
3635 						NH_FLD_IPV6_ADDR_SIZE;
3636 				}
3637 			}
3638 
3639 			/* A QoS entry is only added when multiple TCs are in use. */
3640 			if (priv->num_rx_tc > 1) {
3641 				qos_index = flow->tc_id * priv->fs_entries +
3642 					flow->tc_index;
3643 				if (qos_index >= priv->qos_entries) {
3644 					DPAA2_PMD_ERR("QoS table with %d entries full",
3645 						priv->qos_entries);
3646 					return -1;
3647 				}
3648 				flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
3649 
3650 				dpaa2_flow_qos_entry_log("Start add", flow, qos_index);
3651 
3652 				ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
3653 						priv->token, &flow->qos_rule,
3654 						flow->tc_id, qos_index,
3655 						0, 0);
3656 				if (ret < 0) {
3657 					DPAA2_PMD_ERR(
3658 						"Error in adding entry to QoS table(%d)", ret);
3659 					return ret;
3660 				}
3661 			}
3662 
3663 			if (flow->tc_index >= priv->fs_entries) {
3664 				DPAA2_PMD_ERR("FS table with %d entries full",
3665 					priv->fs_entries);
3666 				return -1;
3667 			}
3668 
3669 			flow->fs_real_key_size =
3670 				priv->extract.tc_key_extract[flow->tc_id]
3671 				.key_info.key_total_size;
3672 
3673 			if (flow->ipaddr_rule.ipaddr_type ==
3674 				FLOW_IPV4_ADDR) {
3675 				if (flow->ipaddr_rule.fs_ipdst_offset >=
3676 					flow->ipaddr_rule.fs_ipsrc_offset) {
3677 					flow->fs_real_key_size =
3678 						flow->ipaddr_rule.fs_ipdst_offset +
3679 						NH_FLD_IPV4_ADDR_SIZE;
3680 				} else {
3681 					flow->fs_real_key_size =
3682 						flow->ipaddr_rule.fs_ipsrc_offset +
3683 						NH_FLD_IPV4_ADDR_SIZE;
3684 				}
3685 			} else if (flow->ipaddr_rule.ipaddr_type ==
3686 				FLOW_IPV6_ADDR) {
3687 				if (flow->ipaddr_rule.fs_ipdst_offset >=
3688 					flow->ipaddr_rule.fs_ipsrc_offset) {
3689 					flow->fs_real_key_size =
3690 						flow->ipaddr_rule.fs_ipdst_offset +
3691 						NH_FLD_IPV6_ADDR_SIZE;
3692 				} else {
3693 					flow->fs_real_key_size =
3694 						flow->ipaddr_rule.fs_ipsrc_offset +
3695 						NH_FLD_IPV6_ADDR_SIZE;
3696 				}
3697 			}
3698 
3699 			flow->fs_rule.key_size = FIXED_ENTRY_SIZE;
3700 
3701 			dpaa2_flow_fs_entry_log("Start add", flow);
3702 
3703 			ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token,
3704 						flow->tc_id, flow->tc_index,
3705 						&flow->fs_rule, &action);
3706 			if (ret < 0) {
3707 				DPAA2_PMD_ERR(
3708 				"Error in adding entry to FS table(%d)", ret);
3709 				return ret;
3710 			}
3711 			memcpy(&flow->action_cfg, &action,
3712 				sizeof(struct dpni_fs_action_cfg));
3713 			break;
3714 		case RTE_FLOW_ACTION_TYPE_RSS:
3715 			rss_conf = (const struct rte_flow_action_rss *)(actions[j].conf);
3716 
3717 			flow->action = RTE_FLOW_ACTION_TYPE_RSS;
3718 			ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types,
3719 					&priv->extract.tc_key_extract[flow->tc_id].dpkg);
3720 			if (ret < 0) {
3721 				DPAA2_PMD_ERR(
3722 				"Unable to set flow distribution. Please check queue config\n");
3723 				return ret;
3724 			}
3725 
3726 			/* Allocate DMA'ble memory to write the rules */
3727 			param = (size_t)rte_malloc(NULL, 256, 64);
3728 			if (!param) {
3729 				DPAA2_PMD_ERR("Memory allocation failure\n");
3730 				return -1;
3731 			}
3732 
3733 			if (dpkg_prepare_key_cfg(
3734 				&priv->extract.tc_key_extract[flow->tc_id].dpkg,
3735 				(uint8_t *)param) < 0) {
3736 				DPAA2_PMD_ERR(
3737 				"Unable to prepare extract parameters");
3738 				rte_free((void *)param);
3739 				return -1;
3740 			}
3741 
3742 			memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));
3743 			tc_cfg.dist_size = rss_conf->queue_num;
3744 			tc_cfg.key_cfg_iova = (size_t)param;
3745 			tc_cfg.enable = true;
3746 			tc_cfg.tc = flow->tc_id;
3747 			ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW,
3748 						 priv->token, &tc_cfg);
3749 			if (ret < 0) {
3750 				DPAA2_PMD_ERR(
3751 					"RSS TC table cannot be configured: %d\n",
3752 					ret);
3753 				rte_free((void *)param);
3754 				return -1;
3755 			}
3756 
3757 			rte_free((void *)param);
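			/* If the QoS key extract layout changed, rebuild and
			 * re-apply the QoS table configuration.
			 */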
3758 			if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
3759 				if (dpkg_prepare_key_cfg(
3760 					&priv->extract.qos_key_extract.dpkg,
3761 					(uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
3762 					DPAA2_PMD_ERR(
3763 					"Unable to prepare extract parameters");
3764 					return -1;
3765 				}
3766 				memset(&qos_cfg, 0,
3767 					sizeof(struct dpni_qos_tbl_cfg));
3768 				qos_cfg.discard_on_miss = true;
3769 				qos_cfg.keep_entries = true;
3770 				qos_cfg.key_cfg_iova =
3771 					(size_t)priv->extract.qos_extract_param;
3772 				ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
3773 							 priv->token, &qos_cfg);
3774 				if (ret < 0) {
3775 					DPAA2_PMD_ERR(
3776 					"RSS QoS dist can't be configured: %d\n",
3777 					ret);
3778 					return -1;
3779 				}
3780 			}
3781 
3782 			/* Add Rule into QoS table */
3783 			qos_index = flow->tc_id * priv->fs_entries +
3784 				flow->tc_index;
3785 			if (qos_index >= priv->qos_entries) {
3786 				DPAA2_PMD_ERR("QoS table with %d entries full",
3787 					priv->qos_entries);
3788 				return -1;
3789 			}
3790 
3791 			flow->qos_real_key_size =
3792 			  priv->extract.qos_key_extract.key_info.key_total_size;
3793 			flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
3794 			ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token,
3795 						&flow->qos_rule, flow->tc_id,
3796 						qos_index, 0, 0);
3797 			if (ret < 0) {
3798 				DPAA2_PMD_ERR(
3799 				"Error in adding entry to QoS table(%d)",
3800 				ret);
3801 				return ret;
3802 			}
3803 			break;
3804 		case RTE_FLOW_ACTION_TYPE_END:
3805 			end_of_list = 1;
3806 			break;
3807 		default:
3808 			DPAA2_PMD_ERR("Invalid action type");
3809 			ret = -ENOTSUP;
3810 			break;
3811 		}
3812 		j++;
3813 	}
3814 
3815 	if (!ret) {
3816 		if (is_keycfg_configured &
3817 			(DPAA2_QOS_TABLE_RECONFIGURE |
3818 			DPAA2_FS_TABLE_RECONFIGURE)) {
3819 			ret = dpaa2_flow_entry_update(priv, flow->tc_id);
3820 			if (ret) {
3821 				DPAA2_PMD_ERR("Flow entry update failed.");
3822 
3823 				return -1;
3824 			}
3825 		}
3826 		/* New rules are inserted. */
3827 		if (!curr) {
3828 			LIST_INSERT_HEAD(&priv->flows, flow, next);
3829 		} else {
3830 			while (LIST_NEXT(curr, next))
3831 				curr = LIST_NEXT(curr, next);
3832 			LIST_INSERT_AFTER(curr, flow, next);
3833 		}
3834 	}
3835 	return ret;
3836 }
3837 
3838 static inline int
3839 dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr,
3840 		      const struct rte_flow_attr *attr)
3841 {
3842 	int ret = 0;
3843 
3844 	if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) {
3845 		DPAA2_PMD_ERR("Priority group is out of range\n");
3846 		ret = -ENOTSUP;
3847 	}
3848 	if (unlikely(attr->priority >= dpni_attr->fs_entries)) {
3849 		DPAA2_PMD_ERR("Priority within the group is out of range\n");
3850 		ret = -ENOTSUP;
3851 	}
3852 	if (unlikely(attr->egress)) {
3853 		DPAA2_PMD_ERR(
3854 			"Flow configuration is not supported on egress side\n");
3855 		ret = -ENOTSUP;
3856 	}
3857 	if (unlikely(!attr->ingress)) {
3858 		DPAA2_PMD_ERR("Ingress flag must be configured\n");
3859 		ret = -EINVAL;
3860 	}
3861 	return ret;
3862 }
3863 
3864 static inline int
3865 dpaa2_dev_verify_patterns(const struct rte_flow_item pattern[])
3866 {
3867 	unsigned int i, j, is_found = 0;
3868 	int ret = 0;
3869 
3870 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3871 		for (i = 0; i < RTE_DIM(dpaa2_supported_pattern_type); i++) {
3872 			if (dpaa2_supported_pattern_type[i]
3873 					== pattern[j].type) {
3874 				is_found = 1;
3875 				break;
3876 			}
3877 		}
3878 		if (!is_found) {
3879 			ret = -ENOTSUP;
3880 			break;
3881 		}
3882 	}
3883 	/* Every pattern item must provide a spec. */
3884 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3885 		if (!pattern[j].spec) {
3886 			ret = -EINVAL;
3887 			break;
3888 		}
3889 	}
3890 
3891 	return ret;
3892 }
3893 
3894 static inline int
3895 dpaa2_dev_verify_actions(const struct rte_flow_action actions[])
3896 {
3897 	unsigned int i, j, is_found = 0;
3898 	int ret = 0;
3899 
3900 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3901 		for (i = 0; i < RTE_DIM(dpaa2_supported_action_type); i++) {
3902 			if (dpaa2_supported_action_type[i] == actions[j].type) {
3903 				is_found = 1;
3904 				break;
3905 			}
3906 		}
3907 		if (!is_found) {
3908 			ret = -ENOTSUP;
3909 			break;
3910 		}
3911 	}
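	/* Every action other than DROP must carry a configuration. */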
3912 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3913 		if (actions[j].type != RTE_FLOW_ACTION_TYPE_DROP &&
3914 				!actions[j].conf)
3915 			ret = -EINVAL;
3916 	}
3917 	return ret;
3918 }
3919 
3920 static
3921 int dpaa2_flow_validate(struct rte_eth_dev *dev,
3922 			const struct rte_flow_attr *flow_attr,
3923 			const struct rte_flow_item pattern[],
3924 			const struct rte_flow_action actions[],
3925 			struct rte_flow_error *error)
3926 {
3927 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3928 	struct dpni_attr dpni_attr;
3929 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3930 	uint16_t token = priv->token;
3931 	int ret = 0;
3932 
3933 	memset(&dpni_attr, 0, sizeof(struct dpni_attr));
3934 	ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr);
3935 	if (ret < 0) {
3936 		DPAA2_PMD_ERR(
3937 			"Failure to get dpni@%p attribute, err code %d\n",
3938 			dpni, ret);
3939 		rte_flow_error_set(error, EPERM,
3940 			   RTE_FLOW_ERROR_TYPE_ATTR,
3941 			   flow_attr, "invalid");
3942 		return ret;
3943 	}
3944 
3945 	/* Verify input attributes */
3946 	ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr);
3947 	if (ret < 0) {
3948 		DPAA2_PMD_ERR(
3949 			"Invalid attributes provided\n");
3950 		rte_flow_error_set(error, EPERM,
3951 			   RTE_FLOW_ERROR_TYPE_ATTR,
3952 			   flow_attr, "invalid");
3953 		goto not_valid_params;
3954 	}
3955 	/* Verify input pattern list */
3956 	ret = dpaa2_dev_verify_patterns(pattern);
3957 	if (ret < 0) {
3958 		DPAA2_PMD_ERR(
3959 			"Invalid pattern list provided\n");
3960 		rte_flow_error_set(error, EPERM,
3961 			   RTE_FLOW_ERROR_TYPE_ITEM,
3962 			   pattern, "invalid");
3963 		goto not_valid_params;
3964 	}
3965 	/* Verify input action list */
3966 	ret = dpaa2_dev_verify_actions(actions);
3967 	if (ret < 0) {
3968 		DPAA2_PMD_ERR(
3969 			"Invalid action list provided\n");
3970 		rte_flow_error_set(error, EPERM,
3971 			   RTE_FLOW_ERROR_TYPE_ACTION,
3972 			   actions, "invalid");
3973 		goto not_valid_params;
3974 	}
3975 not_valid_params:
3976 	return ret;
3977 }
3978 
3979 static
3980 struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev,
3981 				   const struct rte_flow_attr *attr,
3982 				   const struct rte_flow_item pattern[],
3983 				   const struct rte_flow_action actions[],
3984 				   struct rte_flow_error *error)
3985 {
3986 	struct rte_flow *flow = NULL;
3987 	size_t key_iova = 0, mask_iova = 0;
3988 	int ret;
3989 
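	/* Optional environment overrides: flow rule logging and the
	 * miss-flow ID.
	 */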
3990 	dpaa2_flow_control_log =
3991 		getenv("DPAA2_FLOW_CONTROL_LOG");
3992 
3993 	if (getenv("DPAA2_FLOW_CONTROL_MISS_FLOW")) {
3994 		struct dpaa2_dev_priv *priv = dev->data->dev_private;
3995 
3996 		dpaa2_flow_miss_flow_id =
3997 			atoi(getenv("DPAA2_FLOW_CONTROL_MISS_FLOW"));
3998 		if (dpaa2_flow_miss_flow_id >= priv->dist_queues) {
3999 			DPAA2_PMD_ERR(
4000 				"The miss flow ID %d exceeds the max flow ID %d",
4001 				dpaa2_flow_miss_flow_id,
4002 				priv->dist_queues - 1);
4003 			return NULL;
4004 		}
4005 	}
4006 
4007 	flow = rte_zmalloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE);
4008 	if (!flow) {
4009 		DPAA2_PMD_ERR("Failure to allocate memory for flow");
4010 		goto mem_failure;
4011 	}
4012 	/* Allocate DMA'ble memory for the QoS rule key and mask */
4013 	key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
4014 	if (!key_iova) {
4015 		DPAA2_PMD_ERR(
4016 			"Memory allocation failure for rule configuration\n");
4017 		goto mem_failure;
4018 	}
4019 	mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
4020 	if (!mask_iova) {
4021 		DPAA2_PMD_ERR(
4022 			"Memory allocation failure for rule configuration\n");
4023 		goto mem_failure;
4024 	}
4025 
4026 	flow->qos_rule.key_iova = key_iova;
4027 	flow->qos_rule.mask_iova = mask_iova;
4028 
4029 	/* Allocate DMA'ble memory for the FS rule key and mask */
4030 	key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
4031 	if (!key_iova) {
4032 		DPAA2_PMD_ERR(
4033 			"Memory allocation failure for rule configuration\n");
4034 		goto mem_failure;
4035 	}
4036 	mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
4037 	if (!mask_iova) {
4038 		DPAA2_PMD_ERR(
4039 			"Memory allocation failure for rule configuration\n");
4040 		goto mem_failure;
4041 	}
4042 
4043 	flow->fs_rule.key_iova = key_iova;
4044 	flow->fs_rule.mask_iova = mask_iova;
4045 
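	/* Start with no IP address match and invalid offsets. */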
4046 	flow->ipaddr_rule.ipaddr_type = FLOW_NONE_IPADDR;
4047 	flow->ipaddr_rule.qos_ipsrc_offset =
4048 		IP_ADDRESS_OFFSET_INVALID;
4049 	flow->ipaddr_rule.qos_ipdst_offset =
4050 		IP_ADDRESS_OFFSET_INVALID;
4051 	flow->ipaddr_rule.fs_ipsrc_offset =
4052 		IP_ADDRESS_OFFSET_INVALID;
4053 	flow->ipaddr_rule.fs_ipdst_offset =
4054 		IP_ADDRESS_OFFSET_INVALID;
4055 
4056 	ret = dpaa2_generic_flow_set(flow, dev, attr, pattern,
4057 			actions, error);
4058 	if (ret < 0) {
4059 		if (error && error->type > RTE_FLOW_ERROR_TYPE_ACTION)
4060 			rte_flow_error_set(error, EPERM,
4061 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4062 					attr, "unknown");
4063 		DPAA2_PMD_ERR("Failure to create flow, return code (%d)", ret);
4064 		goto creation_error;
4065 	}
4066 
4067 	return flow;
4068 mem_failure:
4069 	rte_flow_error_set(error, EPERM,
4070 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4071 			   NULL, "memory alloc");
4072 creation_error:
4073 	rte_free((void *)flow);
4074 	rte_free((void *)key_iova);
4075 	rte_free((void *)mask_iova);
4076 
4077 	return NULL;
4078 }
4079 
4080 static
4081 int dpaa2_flow_destroy(struct rte_eth_dev *dev,
4082 		       struct rte_flow *flow,
4083 		       struct rte_flow_error *error)
4084 {
4085 	int ret = 0;
4086 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4087 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
4088 
4089 	switch (flow->action) {
4090 	case RTE_FLOW_ACTION_TYPE_QUEUE:
4091 	case RTE_FLOW_ACTION_TYPE_PHY_PORT:
4092 	case RTE_FLOW_ACTION_TYPE_PORT_ID:
4093 		if (priv->num_rx_tc > 1) {
4094 			/* Remove entry from QoS table first */
4095 			ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
4096 					&flow->qos_rule);
4097 			if (ret < 0) {
4098 				DPAA2_PMD_ERR(
4099 					"Error in removing entry from QoS table(%d)", ret);
4100 				goto error;
4101 			}
4102 		}
4103 
4104 		/* Then remove entry from FS table */
4105 		ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token,
4106 					   flow->tc_id, &flow->fs_rule);
4107 		if (ret < 0) {
4108 			DPAA2_PMD_ERR(
4109 				"Error in removing entry from FS table(%d)", ret);
4110 			goto error;
4111 		}
4112 		break;
4113 	case RTE_FLOW_ACTION_TYPE_RSS:
4114 		if (priv->num_rx_tc > 1) {
4115 			ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
4116 					&flow->qos_rule);
4117 			if (ret < 0) {
4118 				DPAA2_PMD_ERR(
4119 					"Error in removing entry from QoS table(%d)", ret);
4120 				goto error;
4121 			}
4122 		}
4123 		break;
4124 	default:
4125 		DPAA2_PMD_ERR(
4126 		"Action type (%d) is not supported", flow->action);
4127 		ret = -ENOTSUP;
4128 		break;
4129 	}
4130 
4131 	LIST_REMOVE(flow, next);
4132 	rte_free((void *)(size_t)flow->qos_rule.key_iova);
4133 	rte_free((void *)(size_t)flow->qos_rule.mask_iova);
4134 	rte_free((void *)(size_t)flow->fs_rule.key_iova);
4135 	rte_free((void *)(size_t)flow->fs_rule.mask_iova);
4136 	/* Now free the flow */
4137 	rte_free(flow);
4138 
4139 error:
4140 	if (ret)
4141 		rte_flow_error_set(error, EPERM,
4142 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4143 				   NULL, "unknown");
4144 	return ret;
4145 }
4146 
4147 /**
4148  * Destroy user-configured flow rules.
4149  *
4150  * This function skips internal flow rules.
4151  *
4152  * @see rte_flow_flush()
4153  * @see rte_flow_ops
4154  */
4155 static int
4156 dpaa2_flow_flush(struct rte_eth_dev *dev,
4157 		struct rte_flow_error *error)
4158 {
4159 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4160 	struct rte_flow *flow = LIST_FIRST(&priv->flows);
4161 
4162 	while (flow) {
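		/* Save the next pointer first: destroying the flow unlinks
		 * and frees the current node.
		 */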
4163 		struct rte_flow *next = LIST_NEXT(flow, next);
4164 
4165 		dpaa2_flow_destroy(dev, flow, error);
4166 		flow = next;
4167 	}
4168 	return 0;
4169 }
4170 
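/* Flow query is not implemented; this handler is a stub that
 * accepts the call and reports success.
 */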
4171 static int
4172 dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused,
4173 		struct rte_flow *flow __rte_unused,
4174 		const struct rte_flow_action *actions __rte_unused,
4175 		void *data __rte_unused,
4176 		struct rte_flow_error *error __rte_unused)
4177 {
4178 	return 0;
4179 }
4180 
4181 /**
4182  * Clean up all flow rules.
4183  *
4184  * Unlike dpaa2_flow_flush(), this function takes care of all remaining flow
4185  * rules regardless of whether they are internal or user-configured.
4186  *
4187  * @param dev
4188  *   Pointer to the Ethernet device structure.
4189  */
4190 void
4191 dpaa2_flow_clean(struct rte_eth_dev *dev)
4192 {
4193 	struct rte_flow *flow;
4194 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4195 
4196 	while ((flow = LIST_FIRST(&priv->flows)))
4197 		dpaa2_flow_destroy(dev, flow, NULL);
4198 }
4199 
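/*
 * Flow operations registered with the generic rte_flow API.
 *
 * Minimal usage sketch (illustrative only; port_id and the matched
 * address are placeholders, not values used by this driver):
 *
 *	struct rte_flow_attr attr = { .group = 0, .priority = 0, .ingress = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr.dst_addr = RTE_BE32(0xffffffff),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &err);
 *
 * Note that dpaa2_dev_verify_patterns() requires a spec for every
 * non-END pattern item, and dpaa2_dev_verify_actions() requires a
 * conf for every action other than DROP.
 */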
4200 const struct rte_flow_ops dpaa2_flow_ops = {
4201 	.create	= dpaa2_flow_create,
4202 	.validate = dpaa2_flow_validate,
4203 	.destroy = dpaa2_flow_destroy,
4204 	.flush	= dpaa2_flow_flush,
4205 	.query	= dpaa2_flow_query,
4206 };
4207