xref: /dpdk/drivers/net/dpaa2/dpaa2_flow.c (revision 12dc2539f7b12b2ec4570197c1e8a16a973d71f6)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2021 NXP
3  */
4 
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 
13 #include <rte_ethdev.h>
14 #include <rte_log.h>
15 #include <rte_malloc.h>
16 #include <rte_flow_driver.h>
17 #include <rte_tailq.h>
18 
19 #include <fsl_dpni.h>
20 #include <fsl_dpkg.h>
21 
22 #include <dpaa2_ethdev.h>
23 #include <dpaa2_pmd_logs.h>
24 
25 /* Workaround to discriminate the UDP/TCP/SCTP
26  * with next protocol of l3.
27  * MC/WRIOP are not able to identify
28  * the l4 protocol with l4 ports.
29  */
int mc_l4_port_identification;

/* When non-NULL, the flow dump helpers below emit their logs.
 * NOTE(review): set outside this chunk - presumably from devargs; confirm.
 */
static char *dpaa2_flow_control_log;
/* Flow id used when FS lookup misses; defaults to drop. */
static uint16_t dpaa2_flow_miss_flow_id =
	DPNI_FS_MISS_DROP;

/* Fixed rule entry size: the maximal key size supported by DPNI. */
#define FIXED_ENTRY_SIZE DPNI_MAX_KEY_SIZE
37 
/* Which IP address family a rule's src/dst address match refers to. */
enum flow_rule_ipaddr_type {
	FLOW_NONE_IPADDR, /* rule carries no IP address match */
	FLOW_IPV4_ADDR,
	FLOW_IPV6_ADDR
};
43 
/* Byte offsets of the IP src/dst address fields inside the QoS and FS
 * rule keys; negative when the field is absent from the key.
 */
struct flow_rule_ipaddr {
	enum flow_rule_ipaddr_type ipaddr_type;
	int qos_ipsrc_offset;
	int qos_ipdst_offset;
	int fs_ipsrc_offset;
	int fs_ipdst_offset;
};
51 
/* PMD-private flow: one QoS rule (selects the traffic class) plus one
 * FS rule (steers within that traffic class).
 */
struct rte_flow {
	LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
	struct dpni_rule_cfg qos_rule; /** Key/mask for the QoS table. */
	struct dpni_rule_cfg fs_rule; /** Key/mask for the FS table. */
	uint8_t qos_real_key_size; /** Used bytes of the QoS key. */
	uint8_t fs_real_key_size; /** Used bytes of the FS key. */
	uint8_t tc_id; /** Traffic Class ID. */
	uint8_t tc_index; /** index within this Traffic Class. */
	enum rte_flow_action_type action;
	/* Special for IP address to specify the offset
	 * in key/mask.
	 */
	struct flow_rule_ipaddr ipaddr_rule;
	struct dpni_fs_action_cfg action_cfg;
};
67 
/* Pattern item types this driver can parse. */
static const
enum rte_flow_item_type dpaa2_supported_pattern_type[] = {
	RTE_FLOW_ITEM_TYPE_END,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_GRE,
};

/* Action types this driver can apply. */
static const
enum rte_flow_action_type dpaa2_supported_action_type[] = {
	RTE_FLOW_ACTION_TYPE_END,
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_PORT_ID,
	RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
	RTE_FLOW_ACTION_TYPE_RSS
};

/* Subset of the above that is programmed via the FS tables. */
static const
enum rte_flow_action_type dpaa2_supported_fs_action_type[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_PORT_ID,
	RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
};
97 
99 /* Max of enum rte_flow_item_type + 1, for both IPv4 and IPv6 */
99 #define DPAA2_FLOW_ITEM_TYPE_GENERIC_IP (RTE_FLOW_ITEM_TYPE_META + 1)
100 
#ifndef __cplusplus
/* Per-item "supported mask" constants: the header fields the driver can
 * extract and match on. dpaa2_flow_extract_support() rejects any user
 * mask that sets bits outside of these. Also used as the default mask
 * when a pattern item supplies none.
 */
static const struct rte_flow_item_eth dpaa2_flow_item_eth_mask = {
	.hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
	.hdr.src_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
	.hdr.ether_type = RTE_BE16(0xffff),
};

static const struct rte_flow_item_vlan dpaa2_flow_item_vlan_mask = {
	.hdr.vlan_tci = RTE_BE16(0xffff),
};

static const struct rte_flow_item_ipv4 dpaa2_flow_item_ipv4_mask = {
	.hdr.src_addr = RTE_BE32(0xffffffff),
	.hdr.dst_addr = RTE_BE32(0xffffffff),
	.hdr.next_proto_id = 0xff,
};

static const struct rte_flow_item_ipv6 dpaa2_flow_item_ipv6_mask = {
	.hdr = {
		.src_addr = RTE_IPV6_MASK_FULL,
		.dst_addr = RTE_IPV6_MASK_FULL,
		.proto = 0xff
	},
};

static const struct rte_flow_item_icmp dpaa2_flow_item_icmp_mask = {
	.hdr.icmp_type = 0xff,
	.hdr.icmp_code = 0xff,
};

static const struct rte_flow_item_udp dpaa2_flow_item_udp_mask = {
	.hdr = {
		.src_port = RTE_BE16(0xffff),
		.dst_port = RTE_BE16(0xffff),
	},
};

static const struct rte_flow_item_tcp dpaa2_flow_item_tcp_mask = {
	.hdr = {
		.src_port = RTE_BE16(0xffff),
		.dst_port = RTE_BE16(0xffff),
	},
};

static const struct rte_flow_item_sctp dpaa2_flow_item_sctp_mask = {
	.hdr = {
		.src_port = RTE_BE16(0xffff),
		.dst_port = RTE_BE16(0xffff),
	},
};

static const struct rte_flow_item_gre dpaa2_flow_item_gre_mask = {
	.protocol = RTE_BE16(0xffff),
};

#endif
157 
158 static inline void dpaa2_prot_field_string(
159 	enum net_prot prot, uint32_t field,
160 	char *string)
161 {
162 	if (!dpaa2_flow_control_log)
163 		return;
164 
165 	if (prot == NET_PROT_ETH) {
166 		strcpy(string, "eth");
167 		if (field == NH_FLD_ETH_DA)
168 			strcat(string, ".dst");
169 		else if (field == NH_FLD_ETH_SA)
170 			strcat(string, ".src");
171 		else if (field == NH_FLD_ETH_TYPE)
172 			strcat(string, ".type");
173 		else
174 			strcat(string, ".unknown field");
175 	} else if (prot == NET_PROT_VLAN) {
176 		strcpy(string, "vlan");
177 		if (field == NH_FLD_VLAN_TCI)
178 			strcat(string, ".tci");
179 		else
180 			strcat(string, ".unknown field");
181 	} else if (prot == NET_PROT_IP) {
182 		strcpy(string, "ip");
183 		if (field == NH_FLD_IP_SRC)
184 			strcat(string, ".src");
185 		else if (field == NH_FLD_IP_DST)
186 			strcat(string, ".dst");
187 		else if (field == NH_FLD_IP_PROTO)
188 			strcat(string, ".proto");
189 		else
190 			strcat(string, ".unknown field");
191 	} else if (prot == NET_PROT_TCP) {
192 		strcpy(string, "tcp");
193 		if (field == NH_FLD_TCP_PORT_SRC)
194 			strcat(string, ".src");
195 		else if (field == NH_FLD_TCP_PORT_DST)
196 			strcat(string, ".dst");
197 		else
198 			strcat(string, ".unknown field");
199 	} else if (prot == NET_PROT_UDP) {
200 		strcpy(string, "udp");
201 		if (field == NH_FLD_UDP_PORT_SRC)
202 			strcat(string, ".src");
203 		else if (field == NH_FLD_UDP_PORT_DST)
204 			strcat(string, ".dst");
205 		else
206 			strcat(string, ".unknown field");
207 	} else if (prot == NET_PROT_ICMP) {
208 		strcpy(string, "icmp");
209 		if (field == NH_FLD_ICMP_TYPE)
210 			strcat(string, ".type");
211 		else if (field == NH_FLD_ICMP_CODE)
212 			strcat(string, ".code");
213 		else
214 			strcat(string, ".unknown field");
215 	} else if (prot == NET_PROT_SCTP) {
216 		strcpy(string, "sctp");
217 		if (field == NH_FLD_SCTP_PORT_SRC)
218 			strcat(string, ".src");
219 		else if (field == NH_FLD_SCTP_PORT_DST)
220 			strcat(string, ".dst");
221 		else
222 			strcat(string, ".unknown field");
223 	} else if (prot == NET_PROT_GRE) {
224 		strcpy(string, "gre");
225 		if (field == NH_FLD_GRE_TYPE)
226 			strcat(string, ".type");
227 		else
228 			strcat(string, ".unknown field");
229 	} else {
230 		strcpy(string, "unknown protocol");
231 	}
232 }
233 
234 static inline void dpaa2_flow_qos_table_extracts_log(
235 	const struct dpaa2_dev_priv *priv, FILE *f)
236 {
237 	int idx;
238 	char string[32];
239 
240 	if (!dpaa2_flow_control_log)
241 		return;
242 
243 	fprintf(f, "Setup QoS table: number of extracts: %d\r\n",
244 			priv->extract.qos_key_extract.dpkg.num_extracts);
245 	for (idx = 0; idx < priv->extract.qos_key_extract.dpkg.num_extracts;
246 		idx++) {
247 		dpaa2_prot_field_string(priv->extract.qos_key_extract.dpkg
248 			.extracts[idx].extract.from_hdr.prot,
249 			priv->extract.qos_key_extract.dpkg.extracts[idx]
250 			.extract.from_hdr.field,
251 			string);
252 		fprintf(f, "%s", string);
253 		if ((idx + 1) < priv->extract.qos_key_extract.dpkg.num_extracts)
254 			fprintf(f, " / ");
255 	}
256 	fprintf(f, "\r\n");
257 }
258 
259 static inline void dpaa2_flow_fs_table_extracts_log(
260 	const struct dpaa2_dev_priv *priv, int tc_id, FILE *f)
261 {
262 	int idx;
263 	char string[32];
264 
265 	if (!dpaa2_flow_control_log)
266 		return;
267 
268 	fprintf(f, "Setup FS table: number of extracts of TC[%d]: %d\r\n",
269 			tc_id, priv->extract.tc_key_extract[tc_id]
270 			.dpkg.num_extracts);
271 	for (idx = 0; idx < priv->extract.tc_key_extract[tc_id]
272 		.dpkg.num_extracts; idx++) {
273 		dpaa2_prot_field_string(priv->extract.tc_key_extract[tc_id]
274 			.dpkg.extracts[idx].extract.from_hdr.prot,
275 			priv->extract.tc_key_extract[tc_id].dpkg.extracts[idx]
276 			.extract.from_hdr.field,
277 			string);
278 		fprintf(f, "%s", string);
279 		if ((idx + 1) < priv->extract.tc_key_extract[tc_id]
280 			.dpkg.num_extracts)
281 			fprintf(f, " / ");
282 	}
283 	fprintf(f, "\r\n");
284 }
285 
286 static inline void dpaa2_flow_qos_entry_log(
287 	const char *log_info, const struct rte_flow *flow, int qos_index, FILE *f)
288 {
289 	int idx;
290 	uint8_t *key, *mask;
291 
292 	if (!dpaa2_flow_control_log)
293 		return;
294 
295 	fprintf(f, "\r\n%s QoS entry[%d] for TC[%d], extracts size is %d\r\n",
296 		log_info, qos_index, flow->tc_id, flow->qos_real_key_size);
297 
298 	key = (uint8_t *)(size_t)flow->qos_rule.key_iova;
299 	mask = (uint8_t *)(size_t)flow->qos_rule.mask_iova;
300 
301 	fprintf(f, "key:\r\n");
302 	for (idx = 0; idx < flow->qos_real_key_size; idx++)
303 		fprintf(f, "%02x ", key[idx]);
304 
305 	fprintf(f, "\r\nmask:\r\n");
306 	for (idx = 0; idx < flow->qos_real_key_size; idx++)
307 		fprintf(f, "%02x ", mask[idx]);
308 
309 	fprintf(f, "\r\n%s QoS ipsrc: %d, ipdst: %d\r\n", log_info,
310 		flow->ipaddr_rule.qos_ipsrc_offset,
311 		flow->ipaddr_rule.qos_ipdst_offset);
312 }
313 
314 static inline void dpaa2_flow_fs_entry_log(
315 	const char *log_info, const struct rte_flow *flow, FILE *f)
316 {
317 	int idx;
318 	uint8_t *key, *mask;
319 
320 	if (!dpaa2_flow_control_log)
321 		return;
322 
323 	fprintf(f, "\r\n%s FS/TC entry[%d] of TC[%d], extracts size is %d\r\n",
324 		log_info, flow->tc_index, flow->tc_id, flow->fs_real_key_size);
325 
326 	key = (uint8_t *)(size_t)flow->fs_rule.key_iova;
327 	mask = (uint8_t *)(size_t)flow->fs_rule.mask_iova;
328 
329 	fprintf(f, "key:\r\n");
330 	for (idx = 0; idx < flow->fs_real_key_size; idx++)
331 		fprintf(f, "%02x ", key[idx]);
332 
333 	fprintf(f, "\r\nmask:\r\n");
334 	for (idx = 0; idx < flow->fs_real_key_size; idx++)
335 		fprintf(f, "%02x ", mask[idx]);
336 
337 	fprintf(f, "\r\n%s FS ipsrc: %d, ipdst: %d\r\n", log_info,
338 		flow->ipaddr_rule.fs_ipsrc_offset,
339 		flow->ipaddr_rule.fs_ipdst_offset);
340 }
341 
342 static inline void dpaa2_flow_extract_key_set(
343 	struct dpaa2_key_info *key_info, int index, uint8_t size)
344 {
345 	key_info->key_size[index] = size;
346 	if (index > 0) {
347 		key_info->key_offset[index] =
348 			key_info->key_offset[index - 1] +
349 			key_info->key_size[index - 1];
350 	} else {
351 		key_info->key_offset[index] = 0;
352 	}
353 	key_info->key_total_size += size;
354 }
355 
/*
 * Append a full-field header extract for (prot, field) to the extract
 * profile and update the key layout bookkeeping in key_info.
 *
 * The generic IP SRC/DST extracts must stay the last extract(s): their
 * contribution to the key depends on the address family actually
 * matched (NH_FLD_IPV4_ADDR_SIZE vs NH_FLD_IPV6_ADDR_SIZE), so any
 * other field is inserted before them and the IP address extract(s)
 * are shifted down by one slot.
 *
 * Returns 0 on success, -1 when the profile is already full.
 */
static int dpaa2_flow_extract_add(
	struct dpaa2_key_extract *key_extract,
	enum net_prot prot,
	uint32_t field, uint8_t field_size)
{
	int index, ip_src = -1, ip_dst = -1;
	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
	struct dpaa2_key_info *key_info = &key_extract->key_info;

	if (dpkg->num_extracts >=
		DPKG_MAX_NUM_OF_EXTRACTS) {
		DPAA2_PMD_WARN("Number of extracts overflows");
		return -1;
	}
	/* Before reorder, the IP SRC and IP DST are already last
	 * extract(s).
	 */
	for (index = 0; index < dpkg->num_extracts; index++) {
		if (dpkg->extracts[index].extract.from_hdr.prot ==
			NET_PROT_IP) {
			if (dpkg->extracts[index].extract.from_hdr.field ==
				NH_FLD_IP_SRC) {
				ip_src = index;
			}
			if (dpkg->extracts[index].extract.from_hdr.field ==
				NH_FLD_IP_DST) {
				ip_dst = index;
			}
		}
	}

	/* Sanity: IP SRC/DST may only occupy the last two slots. */
	if (ip_src >= 0)
		RTE_ASSERT((ip_src + 2) >= dpkg->num_extracts);

	if (ip_dst >= 0)
		RTE_ASSERT((ip_dst + 2) >= dpkg->num_extracts);

	/* Pick the insertion slot: IP addresses go at the very end,
	 * anything else goes before the trailing IP address extract(s).
	 */
	if (prot == NET_PROT_IP &&
		(field == NH_FLD_IP_SRC ||
		field == NH_FLD_IP_DST)) {
		index = dpkg->num_extracts;
	} else {
		if (ip_src >= 0 && ip_dst >= 0)
			index = dpkg->num_extracts - 2;
		else if (ip_src >= 0 || ip_dst >= 0)
			index = dpkg->num_extracts - 1;
		else
			index = dpkg->num_extracts;
	}

	dpkg->extracts[index].type =	DPKG_EXTRACT_FROM_HDR;
	dpkg->extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
	dpkg->extracts[index].extract.from_hdr.prot = prot;
	dpkg->extracts[index].extract.from_hdr.field = field;
	/* IP addresses contribute key size 0 here; their per-family
	 * offsets are tracked separately below.
	 */
	if (prot == NET_PROT_IP &&
		(field == NH_FLD_IP_SRC ||
		field == NH_FLD_IP_DST)) {
		dpaa2_flow_extract_key_set(key_info, index, 0);
	} else {
		dpaa2_flow_extract_key_set(key_info, index, field_size);
	}

	/* Place the new IPv4/IPv6 address bytes right after the other
	 * address of the same family (if present) or after the last
	 * fixed-size field.
	 */
	if (prot == NET_PROT_IP) {
		if (field == NH_FLD_IP_SRC) {
			if (key_info->ipv4_dst_offset >= 0) {
				key_info->ipv4_src_offset =
					key_info->ipv4_dst_offset +
					NH_FLD_IPV4_ADDR_SIZE;
			} else {
				key_info->ipv4_src_offset =
					key_info->key_offset[index - 1] +
						key_info->key_size[index - 1];
			}
			if (key_info->ipv6_dst_offset >= 0) {
				key_info->ipv6_src_offset =
					key_info->ipv6_dst_offset +
					NH_FLD_IPV6_ADDR_SIZE;
			} else {
				key_info->ipv6_src_offset =
					key_info->key_offset[index - 1] +
						key_info->key_size[index - 1];
			}
		} else if (field == NH_FLD_IP_DST) {
			if (key_info->ipv4_src_offset >= 0) {
				key_info->ipv4_dst_offset =
					key_info->ipv4_src_offset +
					NH_FLD_IPV4_ADDR_SIZE;
			} else {
				key_info->ipv4_dst_offset =
					key_info->key_offset[index - 1] +
						key_info->key_size[index - 1];
			}
			if (key_info->ipv6_src_offset >= 0) {
				key_info->ipv6_dst_offset =
					key_info->ipv6_src_offset +
					NH_FLD_IPV6_ADDR_SIZE;
			} else {
				key_info->ipv6_dst_offset =
					key_info->key_offset[index - 1] +
						key_info->key_size[index - 1];
			}
		}
	}

	/* Appended at the tail: no reorder needed. */
	if (index == dpkg->num_extracts) {
		dpkg->num_extracts++;
		return 0;
	}

	/* Inserted in the middle: shift the trailing IP SRC/DST
	 * extract(s) down one slot and move their key offsets past the
	 * newly inserted field.
	 */
	if (ip_src >= 0) {
		ip_src++;
		dpkg->extracts[ip_src].type =
			DPKG_EXTRACT_FROM_HDR;
		dpkg->extracts[ip_src].extract.from_hdr.type =
			DPKG_FULL_FIELD;
		dpkg->extracts[ip_src].extract.from_hdr.prot =
			NET_PROT_IP;
		dpkg->extracts[ip_src].extract.from_hdr.field =
			NH_FLD_IP_SRC;
		dpaa2_flow_extract_key_set(key_info, ip_src, 0);
		key_info->ipv4_src_offset += field_size;
		key_info->ipv6_src_offset += field_size;
	}
	if (ip_dst >= 0) {
		ip_dst++;
		dpkg->extracts[ip_dst].type =
			DPKG_EXTRACT_FROM_HDR;
		dpkg->extracts[ip_dst].extract.from_hdr.type =
			DPKG_FULL_FIELD;
		dpkg->extracts[ip_dst].extract.from_hdr.prot =
			NET_PROT_IP;
		dpkg->extracts[ip_dst].extract.from_hdr.field =
			NH_FLD_IP_DST;
		dpaa2_flow_extract_key_set(key_info, ip_dst, 0);
		key_info->ipv4_dst_offset += field_size;
		key_info->ipv6_dst_offset += field_size;
	}

	dpkg->num_extracts++;

	return 0;
}
498 
499 static int dpaa2_flow_extract_add_raw(struct dpaa2_key_extract *key_extract,
500 				      int size)
501 {
502 	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
503 	struct dpaa2_key_info *key_info = &key_extract->key_info;
504 	int last_extract_size, index;
505 
506 	if (dpkg->num_extracts != 0 && dpkg->extracts[0].type !=
507 	    DPKG_EXTRACT_FROM_DATA) {
508 		DPAA2_PMD_WARN("RAW extract cannot be combined with others");
509 		return -1;
510 	}
511 
512 	last_extract_size = (size % DPAA2_FLOW_MAX_KEY_SIZE);
513 	dpkg->num_extracts = (size / DPAA2_FLOW_MAX_KEY_SIZE);
514 	if (last_extract_size)
515 		dpkg->num_extracts++;
516 	else
517 		last_extract_size = DPAA2_FLOW_MAX_KEY_SIZE;
518 
519 	for (index = 0; index < dpkg->num_extracts; index++) {
520 		dpkg->extracts[index].type = DPKG_EXTRACT_FROM_DATA;
521 		if (index == dpkg->num_extracts - 1)
522 			dpkg->extracts[index].extract.from_data.size =
523 				last_extract_size;
524 		else
525 			dpkg->extracts[index].extract.from_data.size =
526 				DPAA2_FLOW_MAX_KEY_SIZE;
527 		dpkg->extracts[index].extract.from_data.offset =
528 			DPAA2_FLOW_MAX_KEY_SIZE * index;
529 	}
530 
531 	key_info->key_total_size = size;
532 	return 0;
533 }
534 
535 /* Protocol discrimination.
536  * Discriminate IPv4/IPv6/vLan by Eth type.
537  * Discriminate UDP/TCP/ICMP by next proto of IP.
538  */
539 static inline int
540 dpaa2_flow_proto_discrimination_extract(
541 	struct dpaa2_key_extract *key_extract,
542 	enum rte_flow_item_type type)
543 {
544 	if (type == RTE_FLOW_ITEM_TYPE_ETH) {
545 		return dpaa2_flow_extract_add(
546 				key_extract, NET_PROT_ETH,
547 				NH_FLD_ETH_TYPE,
548 				sizeof(rte_be16_t));
549 	} else if (type == (enum rte_flow_item_type)
550 		DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
551 		return dpaa2_flow_extract_add(
552 				key_extract, NET_PROT_IP,
553 				NH_FLD_IP_PROTO,
554 				NH_FLD_IP_PROTO_SIZE);
555 	}
556 
557 	return -1;
558 }
559 
560 static inline int dpaa2_flow_extract_search(
561 	struct dpkg_profile_cfg *dpkg,
562 	enum net_prot prot, uint32_t field)
563 {
564 	int i;
565 
566 	for (i = 0; i < dpkg->num_extracts; i++) {
567 		if (dpkg->extracts[i].extract.from_hdr.prot == prot &&
568 			dpkg->extracts[i].extract.from_hdr.field == field) {
569 			return i;
570 		}
571 	}
572 
573 	return -1;
574 }
575 
576 static inline int dpaa2_flow_extract_key_offset(
577 	struct dpaa2_key_extract *key_extract,
578 	enum net_prot prot, uint32_t field)
579 {
580 	int i;
581 	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
582 	struct dpaa2_key_info *key_info = &key_extract->key_info;
583 
584 	if (prot == NET_PROT_IPV4 ||
585 		prot == NET_PROT_IPV6)
586 		i = dpaa2_flow_extract_search(dpkg, NET_PROT_IP, field);
587 	else
588 		i = dpaa2_flow_extract_search(dpkg, prot, field);
589 
590 	if (i >= 0) {
591 		if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_SRC)
592 			return key_info->ipv4_src_offset;
593 		else if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_DST)
594 			return key_info->ipv4_dst_offset;
595 		else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_SRC)
596 			return key_info->ipv6_src_offset;
597 		else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_DST)
598 			return key_info->ipv6_dst_offset;
599 		else
600 			return key_info->key_offset[i];
601 	} else {
602 		return -1;
603 	}
604 }
605 
/* Next-protocol discriminator: an Eth type (for L2) or an IP protocol
 * number (for L3), selected by @type.
 */
struct proto_discrimination {
	enum rte_flow_item_type type;
	union {
		rte_be16_t eth_type;
		uint8_t ip_proto;
	};
};
613 
614 static int
615 dpaa2_flow_proto_discrimination_rule(
616 	struct dpaa2_dev_priv *priv, struct rte_flow *flow,
617 	struct proto_discrimination proto, int group)
618 {
619 	enum net_prot prot;
620 	uint32_t field;
621 	int offset;
622 	size_t key_iova;
623 	size_t mask_iova;
624 	rte_be16_t eth_type;
625 	uint8_t ip_proto;
626 
627 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
628 		prot = NET_PROT_ETH;
629 		field = NH_FLD_ETH_TYPE;
630 	} else if (proto.type == DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
631 		prot = NET_PROT_IP;
632 		field = NH_FLD_IP_PROTO;
633 	} else {
634 		DPAA2_PMD_ERR(
635 			"Only Eth and IP support to discriminate next proto.");
636 		return -1;
637 	}
638 
639 	offset = dpaa2_flow_extract_key_offset(&priv->extract.qos_key_extract,
640 			prot, field);
641 	if (offset < 0) {
642 		DPAA2_PMD_ERR("QoS prot %d field %d extract failed",
643 				prot, field);
644 		return -1;
645 	}
646 	key_iova = flow->qos_rule.key_iova + offset;
647 	mask_iova = flow->qos_rule.mask_iova + offset;
648 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
649 		eth_type = proto.eth_type;
650 		memcpy((void *)key_iova, (const void *)(&eth_type),
651 			sizeof(rte_be16_t));
652 		eth_type = 0xffff;
653 		memcpy((void *)mask_iova, (const void *)(&eth_type),
654 			sizeof(rte_be16_t));
655 	} else {
656 		ip_proto = proto.ip_proto;
657 		memcpy((void *)key_iova, (const void *)(&ip_proto),
658 			sizeof(uint8_t));
659 		ip_proto = 0xff;
660 		memcpy((void *)mask_iova, (const void *)(&ip_proto),
661 			sizeof(uint8_t));
662 	}
663 
664 	offset = dpaa2_flow_extract_key_offset(
665 			&priv->extract.tc_key_extract[group],
666 			prot, field);
667 	if (offset < 0) {
668 		DPAA2_PMD_ERR("FS prot %d field %d extract failed",
669 				prot, field);
670 		return -1;
671 	}
672 	key_iova = flow->fs_rule.key_iova + offset;
673 	mask_iova = flow->fs_rule.mask_iova + offset;
674 
675 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
676 		eth_type = proto.eth_type;
677 		memcpy((void *)key_iova, (const void *)(&eth_type),
678 			sizeof(rte_be16_t));
679 		eth_type = 0xffff;
680 		memcpy((void *)mask_iova, (const void *)(&eth_type),
681 			sizeof(rte_be16_t));
682 	} else {
683 		ip_proto = proto.ip_proto;
684 		memcpy((void *)key_iova, (const void *)(&ip_proto),
685 			sizeof(uint8_t));
686 		ip_proto = 0xff;
687 		memcpy((void *)mask_iova, (const void *)(&ip_proto),
688 			sizeof(uint8_t));
689 	}
690 
691 	return 0;
692 }
693 
694 static inline int
695 dpaa2_flow_rule_data_set(
696 	struct dpaa2_key_extract *key_extract,
697 	struct dpni_rule_cfg *rule,
698 	enum net_prot prot, uint32_t field,
699 	const void *key, const void *mask, int size)
700 {
701 	int offset = dpaa2_flow_extract_key_offset(key_extract,
702 				prot, field);
703 
704 	if (offset < 0) {
705 		DPAA2_PMD_ERR("prot %d, field %d extract failed",
706 			prot, field);
707 		return -1;
708 	}
709 
710 	memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
711 	memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
712 
713 	return 0;
714 }
715 
716 static inline int
717 dpaa2_flow_rule_data_set_raw(struct dpni_rule_cfg *rule,
718 			     const void *key, const void *mask, int size)
719 {
720 	int offset = 0;
721 
722 	memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
723 	memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
724 
725 	return 0;
726 }
727 
/*
 * Move one IP address (SRC or DST) inside a rule's key and mask from
 * @src_offset to the current tail offset of that address in the
 * extract layout: save the bytes, zero the old location, write them
 * at the new one. @ipv4 selects the address width
 * (sizeof(rte_be32_t) vs NH_FLD_IPV6_ADDR_SIZE).
 * Returns 0 on success, -1 on invalid @field or missing extract.
 */
static inline int
_dpaa2_flow_rule_move_ipaddr_tail(
	struct dpaa2_key_extract *key_extract,
	struct dpni_rule_cfg *rule, int src_offset,
	uint32_t field, bool ipv4)
{
	size_t key_src;
	size_t mask_src;
	size_t key_dst;
	size_t mask_dst;
	int dst_offset, len;
	enum net_prot prot;
	char tmp[NH_FLD_IPV6_ADDR_SIZE];

	if (field != NH_FLD_IP_SRC &&
		field != NH_FLD_IP_DST) {
		DPAA2_PMD_ERR("Field of IP addr reorder must be IP SRC/DST");
		return -1;
	}
	if (ipv4)
		prot = NET_PROT_IPV4;
	else
		prot = NET_PROT_IPV6;
	dst_offset = dpaa2_flow_extract_key_offset(key_extract,
				prot, field);
	if (dst_offset < 0) {
		DPAA2_PMD_ERR("Field %d reorder extract failed", field);
		return -1;
	}
	key_src = rule->key_iova + src_offset;
	mask_src = rule->mask_iova + src_offset;
	key_dst = rule->key_iova + dst_offset;
	mask_dst = rule->mask_iova + dst_offset;
	if (ipv4)
		len = sizeof(rte_be32_t);
	else
		len = NH_FLD_IPV6_ADDR_SIZE;

	/* Stage through tmp so a partially overlapping source and
	 * destination still end up with the correct bytes.
	 */
	memcpy(tmp, (char *)key_src, len);
	memset((char *)key_src, 0, len);
	memcpy((char *)key_dst, tmp, len);

	memcpy(tmp, (char *)mask_src, len);
	memset((char *)mask_src, 0, len);
	memcpy((char *)mask_dst, tmp, len);

	return 0;
}
776 
/*
 * After new extracts shifted the key layout, move the IP SRC/DST bytes
 * of this flow's QoS and FS rules to their current tail positions and
 * refresh the offsets cached in flow->ipaddr_rule. No-op for flows
 * without an IP address match. Returns 0 on success, -1 on any
 * reorder failure.
 */
static inline int
dpaa2_flow_rule_move_ipaddr_tail(
	struct rte_flow *flow, struct dpaa2_dev_priv *priv,
	int fs_group)
{
	int ret;
	enum net_prot prot;

	if (flow->ipaddr_rule.ipaddr_type == FLOW_NONE_IPADDR)
		return 0;

	if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR)
		prot = NET_PROT_IPV4;
	else
		prot = NET_PROT_IPV6;

	/* QoS rule: move source address, then re-cache its offset. */
	if (flow->ipaddr_rule.qos_ipsrc_offset >= 0) {
		ret = _dpaa2_flow_rule_move_ipaddr_tail(
				&priv->extract.qos_key_extract,
				&flow->qos_rule,
				flow->ipaddr_rule.qos_ipsrc_offset,
				NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
		if (ret) {
			DPAA2_PMD_ERR("QoS src address reorder failed");
			return -1;
		}
		flow->ipaddr_rule.qos_ipsrc_offset =
			dpaa2_flow_extract_key_offset(
				&priv->extract.qos_key_extract,
				prot, NH_FLD_IP_SRC);
	}

	/* QoS rule: destination address. */
	if (flow->ipaddr_rule.qos_ipdst_offset >= 0) {
		ret = _dpaa2_flow_rule_move_ipaddr_tail(
				&priv->extract.qos_key_extract,
				&flow->qos_rule,
				flow->ipaddr_rule.qos_ipdst_offset,
				NH_FLD_IP_DST, prot == NET_PROT_IPV4);
		if (ret) {
			DPAA2_PMD_ERR("QoS dst address reorder failed");
			return -1;
		}
		flow->ipaddr_rule.qos_ipdst_offset =
			dpaa2_flow_extract_key_offset(
				&priv->extract.qos_key_extract,
				prot, NH_FLD_IP_DST);
	}

	/* FS rule of traffic class fs_group: source address. */
	if (flow->ipaddr_rule.fs_ipsrc_offset >= 0) {
		ret = _dpaa2_flow_rule_move_ipaddr_tail(
				&priv->extract.tc_key_extract[fs_group],
				&flow->fs_rule,
				flow->ipaddr_rule.fs_ipsrc_offset,
				NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
		if (ret) {
			DPAA2_PMD_ERR("FS src address reorder failed");
			return -1;
		}
		flow->ipaddr_rule.fs_ipsrc_offset =
			dpaa2_flow_extract_key_offset(
				&priv->extract.tc_key_extract[fs_group],
				prot, NH_FLD_IP_SRC);
	}
	/* FS rule: destination address. */
	if (flow->ipaddr_rule.fs_ipdst_offset >= 0) {
		ret = _dpaa2_flow_rule_move_ipaddr_tail(
				&priv->extract.tc_key_extract[fs_group],
				&flow->fs_rule,
				flow->ipaddr_rule.fs_ipdst_offset,
				NH_FLD_IP_DST, prot == NET_PROT_IPV4);
		if (ret) {
			DPAA2_PMD_ERR("FS dst address reorder failed");
			return -1;
		}
		flow->ipaddr_rule.fs_ipdst_offset =
			dpaa2_flow_extract_key_offset(
				&priv->extract.tc_key_extract[fs_group],
				prot, NH_FLD_IP_DST);
	}

	return 0;
}
858 
859 static int
860 dpaa2_flow_extract_support(
861 	const uint8_t *mask_src,
862 	enum rte_flow_item_type type)
863 {
864 	char mask[64];
865 	int i, size = 0;
866 	const char *mask_support = 0;
867 
868 	switch (type) {
869 	case RTE_FLOW_ITEM_TYPE_ETH:
870 		mask_support = (const char *)&dpaa2_flow_item_eth_mask;
871 		size = sizeof(struct rte_flow_item_eth);
872 		break;
873 	case RTE_FLOW_ITEM_TYPE_VLAN:
874 		mask_support = (const char *)&dpaa2_flow_item_vlan_mask;
875 		size = sizeof(struct rte_flow_item_vlan);
876 		break;
877 	case RTE_FLOW_ITEM_TYPE_IPV4:
878 		mask_support = (const char *)&dpaa2_flow_item_ipv4_mask;
879 		size = sizeof(struct rte_flow_item_ipv4);
880 		break;
881 	case RTE_FLOW_ITEM_TYPE_IPV6:
882 		mask_support = (const char *)&dpaa2_flow_item_ipv6_mask;
883 		size = sizeof(struct rte_flow_item_ipv6);
884 		break;
885 	case RTE_FLOW_ITEM_TYPE_ICMP:
886 		mask_support = (const char *)&dpaa2_flow_item_icmp_mask;
887 		size = sizeof(struct rte_flow_item_icmp);
888 		break;
889 	case RTE_FLOW_ITEM_TYPE_UDP:
890 		mask_support = (const char *)&dpaa2_flow_item_udp_mask;
891 		size = sizeof(struct rte_flow_item_udp);
892 		break;
893 	case RTE_FLOW_ITEM_TYPE_TCP:
894 		mask_support = (const char *)&dpaa2_flow_item_tcp_mask;
895 		size = sizeof(struct rte_flow_item_tcp);
896 		break;
897 	case RTE_FLOW_ITEM_TYPE_SCTP:
898 		mask_support = (const char *)&dpaa2_flow_item_sctp_mask;
899 		size = sizeof(struct rte_flow_item_sctp);
900 		break;
901 	case RTE_FLOW_ITEM_TYPE_GRE:
902 		mask_support = (const char *)&dpaa2_flow_item_gre_mask;
903 		size = sizeof(struct rte_flow_item_gre);
904 		break;
905 	default:
906 		return -1;
907 	}
908 
909 	memcpy(mask, mask_support, size);
910 
911 	for (i = 0; i < size; i++)
912 		mask[i] = (mask[i] | mask_src[i]);
913 
914 	if (memcmp(mask, mask_support, size))
915 		return -1;
916 
917 	return 0;
918 }
919 
920 static int
921 dpaa2_configure_flow_eth(struct rte_flow *flow,
922 			 struct rte_eth_dev *dev,
923 			 const struct rte_flow_attr *attr,
924 			 const struct rte_flow_item *pattern,
925 			 const struct rte_flow_action actions[] __rte_unused,
926 			 struct rte_flow_error *error __rte_unused,
927 			 int *device_configured)
928 {
929 	int index, ret;
930 	int local_cfg = 0;
931 	uint32_t group;
932 	const struct rte_flow_item_eth *spec, *mask;
933 
934 	/* TODO: Currently upper bound of range parameter is not implemented */
935 	const struct rte_flow_item_eth *last __rte_unused;
936 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
937 	const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0};
938 
939 	group = attr->group;
940 
941 	/* Parse pattern list to get the matching parameters */
942 	spec    = (const struct rte_flow_item_eth *)pattern->spec;
943 	last    = (const struct rte_flow_item_eth *)pattern->last;
944 	mask    = (const struct rte_flow_item_eth *)
945 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_eth_mask);
946 	if (!spec) {
947 		/* Don't care any field of eth header,
948 		 * only care eth protocol.
949 		 */
950 		DPAA2_PMD_WARN("No pattern spec for Eth flow, just skip");
951 		return 0;
952 	}
953 
954 	/* Get traffic class index and flow id to be configured */
955 	flow->tc_id = group;
956 	flow->tc_index = attr->priority;
957 
958 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
959 		RTE_FLOW_ITEM_TYPE_ETH)) {
960 		DPAA2_PMD_WARN("Extract field(s) of ethernet not support.");
961 
962 		return -1;
963 	}
964 
965 	if (memcmp((const char *)&mask->hdr.src_addr, zero_cmp, RTE_ETHER_ADDR_LEN)) {
966 		index = dpaa2_flow_extract_search(
967 				&priv->extract.qos_key_extract.dpkg,
968 				NET_PROT_ETH, NH_FLD_ETH_SA);
969 		if (index < 0) {
970 			ret = dpaa2_flow_extract_add(
971 					&priv->extract.qos_key_extract,
972 					NET_PROT_ETH, NH_FLD_ETH_SA,
973 					RTE_ETHER_ADDR_LEN);
974 			if (ret) {
975 				DPAA2_PMD_ERR("QoS Extract add ETH_SA failed.");
976 
977 				return -1;
978 			}
979 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
980 		}
981 		index = dpaa2_flow_extract_search(
982 				&priv->extract.tc_key_extract[group].dpkg,
983 				NET_PROT_ETH, NH_FLD_ETH_SA);
984 		if (index < 0) {
985 			ret = dpaa2_flow_extract_add(
986 					&priv->extract.tc_key_extract[group],
987 					NET_PROT_ETH, NH_FLD_ETH_SA,
988 					RTE_ETHER_ADDR_LEN);
989 			if (ret) {
990 				DPAA2_PMD_ERR("FS Extract add ETH_SA failed.");
991 				return -1;
992 			}
993 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
994 		}
995 
996 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
997 		if (ret) {
998 			DPAA2_PMD_ERR(
999 				"Move ipaddr before ETH_SA rule set failed");
1000 			return -1;
1001 		}
1002 
1003 		ret = dpaa2_flow_rule_data_set(
1004 				&priv->extract.qos_key_extract,
1005 				&flow->qos_rule,
1006 				NET_PROT_ETH,
1007 				NH_FLD_ETH_SA,
1008 				&spec->hdr.src_addr.addr_bytes,
1009 				&mask->hdr.src_addr.addr_bytes,
1010 				sizeof(struct rte_ether_addr));
1011 		if (ret) {
1012 			DPAA2_PMD_ERR("QoS NH_FLD_ETH_SA rule data set failed");
1013 			return -1;
1014 		}
1015 
1016 		ret = dpaa2_flow_rule_data_set(
1017 				&priv->extract.tc_key_extract[group],
1018 				&flow->fs_rule,
1019 				NET_PROT_ETH,
1020 				NH_FLD_ETH_SA,
1021 				&spec->hdr.src_addr.addr_bytes,
1022 				&mask->hdr.src_addr.addr_bytes,
1023 				sizeof(struct rte_ether_addr));
1024 		if (ret) {
1025 			DPAA2_PMD_ERR("FS NH_FLD_ETH_SA rule data set failed");
1026 			return -1;
1027 		}
1028 	}
1029 
1030 	if (memcmp((const char *)&mask->hdr.dst_addr, zero_cmp, RTE_ETHER_ADDR_LEN)) {
1031 		index = dpaa2_flow_extract_search(
1032 				&priv->extract.qos_key_extract.dpkg,
1033 				NET_PROT_ETH, NH_FLD_ETH_DA);
1034 		if (index < 0) {
1035 			ret = dpaa2_flow_extract_add(
1036 					&priv->extract.qos_key_extract,
1037 					NET_PROT_ETH, NH_FLD_ETH_DA,
1038 					RTE_ETHER_ADDR_LEN);
1039 			if (ret) {
1040 				DPAA2_PMD_ERR("QoS Extract add ETH_DA failed.");
1041 
1042 				return -1;
1043 			}
1044 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1045 		}
1046 
1047 		index = dpaa2_flow_extract_search(
1048 				&priv->extract.tc_key_extract[group].dpkg,
1049 				NET_PROT_ETH, NH_FLD_ETH_DA);
1050 		if (index < 0) {
1051 			ret = dpaa2_flow_extract_add(
1052 					&priv->extract.tc_key_extract[group],
1053 					NET_PROT_ETH, NH_FLD_ETH_DA,
1054 					RTE_ETHER_ADDR_LEN);
1055 			if (ret) {
1056 				DPAA2_PMD_ERR("FS Extract add ETH_DA failed.");
1057 
1058 				return -1;
1059 			}
1060 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1061 		}
1062 
1063 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1064 		if (ret) {
1065 			DPAA2_PMD_ERR(
1066 				"Move ipaddr before ETH DA rule set failed");
1067 			return -1;
1068 		}
1069 
1070 		ret = dpaa2_flow_rule_data_set(
1071 				&priv->extract.qos_key_extract,
1072 				&flow->qos_rule,
1073 				NET_PROT_ETH,
1074 				NH_FLD_ETH_DA,
1075 				&spec->hdr.dst_addr.addr_bytes,
1076 				&mask->hdr.dst_addr.addr_bytes,
1077 				sizeof(struct rte_ether_addr));
1078 		if (ret) {
1079 			DPAA2_PMD_ERR("QoS NH_FLD_ETH_DA rule data set failed");
1080 			return -1;
1081 		}
1082 
1083 		ret = dpaa2_flow_rule_data_set(
1084 				&priv->extract.tc_key_extract[group],
1085 				&flow->fs_rule,
1086 				NET_PROT_ETH,
1087 				NH_FLD_ETH_DA,
1088 				&spec->hdr.dst_addr.addr_bytes,
1089 				&mask->hdr.dst_addr.addr_bytes,
1090 				sizeof(struct rte_ether_addr));
1091 		if (ret) {
1092 			DPAA2_PMD_ERR("FS NH_FLD_ETH_DA rule data set failed");
1093 			return -1;
1094 		}
1095 	}
1096 
1097 	if (memcmp((const char *)&mask->hdr.ether_type, zero_cmp, sizeof(rte_be16_t))) {
1098 		index = dpaa2_flow_extract_search(
1099 				&priv->extract.qos_key_extract.dpkg,
1100 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1101 		if (index < 0) {
1102 			ret = dpaa2_flow_extract_add(
1103 					&priv->extract.qos_key_extract,
1104 					NET_PROT_ETH, NH_FLD_ETH_TYPE,
1105 					RTE_ETHER_TYPE_LEN);
1106 			if (ret) {
1107 				DPAA2_PMD_ERR("QoS Extract add ETH_TYPE failed.");
1108 
1109 				return -1;
1110 			}
1111 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1112 		}
1113 		index = dpaa2_flow_extract_search(
1114 				&priv->extract.tc_key_extract[group].dpkg,
1115 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1116 		if (index < 0) {
1117 			ret = dpaa2_flow_extract_add(
1118 					&priv->extract.tc_key_extract[group],
1119 					NET_PROT_ETH, NH_FLD_ETH_TYPE,
1120 					RTE_ETHER_TYPE_LEN);
1121 			if (ret) {
1122 				DPAA2_PMD_ERR("FS Extract add ETH_TYPE failed.");
1123 
1124 				return -1;
1125 			}
1126 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1127 		}
1128 
1129 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1130 		if (ret) {
1131 			DPAA2_PMD_ERR(
1132 				"Move ipaddr before ETH TYPE rule set failed");
1133 				return -1;
1134 		}
1135 
1136 		ret = dpaa2_flow_rule_data_set(
1137 				&priv->extract.qos_key_extract,
1138 				&flow->qos_rule,
1139 				NET_PROT_ETH,
1140 				NH_FLD_ETH_TYPE,
1141 				&spec->hdr.ether_type,
1142 				&mask->hdr.ether_type,
1143 				sizeof(rte_be16_t));
1144 		if (ret) {
1145 			DPAA2_PMD_ERR("QoS NH_FLD_ETH_TYPE rule data set failed");
1146 			return -1;
1147 		}
1148 
1149 		ret = dpaa2_flow_rule_data_set(
1150 				&priv->extract.tc_key_extract[group],
1151 				&flow->fs_rule,
1152 				NET_PROT_ETH,
1153 				NH_FLD_ETH_TYPE,
1154 				&spec->hdr.ether_type,
1155 				&mask->hdr.ether_type,
1156 				sizeof(rte_be16_t));
1157 		if (ret) {
1158 			DPAA2_PMD_ERR("FS NH_FLD_ETH_TYPE rule data set failed");
1159 			return -1;
1160 		}
1161 	}
1162 
1163 	(*device_configured) |= local_cfg;
1164 
1165 	return 0;
1166 }
1167 
1168 static int
1169 dpaa2_configure_flow_vlan(struct rte_flow *flow,
1170 			  struct rte_eth_dev *dev,
1171 			  const struct rte_flow_attr *attr,
1172 			  const struct rte_flow_item *pattern,
1173 			  const struct rte_flow_action actions[] __rte_unused,
1174 			  struct rte_flow_error *error __rte_unused,
1175 			  int *device_configured)
1176 {
1177 	int index, ret;
1178 	int local_cfg = 0;
1179 	uint32_t group;
1180 	const struct rte_flow_item_vlan *spec, *mask;
1181 
1182 	const struct rte_flow_item_vlan *last __rte_unused;
1183 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1184 
1185 	group = attr->group;
1186 
1187 	/* Parse pattern list to get the matching parameters */
1188 	spec    = (const struct rte_flow_item_vlan *)pattern->spec;
1189 	last    = (const struct rte_flow_item_vlan *)pattern->last;
1190 	mask    = (const struct rte_flow_item_vlan *)
1191 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_vlan_mask);
1192 
1193 	/* Get traffic class index and flow id to be configured */
1194 	flow->tc_id = group;
1195 	flow->tc_index = attr->priority;
1196 
1197 	if (!spec) {
1198 		/* Don't care any field of vlan header,
1199 		 * only care vlan protocol.
1200 		 */
1201 		/* Eth type is actually used for vLan classification.
1202 		 */
1203 		struct proto_discrimination proto;
1204 
1205 		index = dpaa2_flow_extract_search(
1206 				&priv->extract.qos_key_extract.dpkg,
1207 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1208 		if (index < 0) {
1209 			ret = dpaa2_flow_proto_discrimination_extract(
1210 						&priv->extract.qos_key_extract,
1211 						RTE_FLOW_ITEM_TYPE_ETH);
1212 			if (ret) {
1213 				DPAA2_PMD_ERR(
1214 				"QoS Ext ETH_TYPE to discriminate vLan failed");
1215 
1216 				return -1;
1217 			}
1218 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1219 		}
1220 
1221 		index = dpaa2_flow_extract_search(
1222 				&priv->extract.tc_key_extract[group].dpkg,
1223 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1224 		if (index < 0) {
1225 			ret = dpaa2_flow_proto_discrimination_extract(
1226 					&priv->extract.tc_key_extract[group],
1227 					RTE_FLOW_ITEM_TYPE_ETH);
1228 			if (ret) {
1229 				DPAA2_PMD_ERR(
1230 				"FS Ext ETH_TYPE to discriminate vLan failed.");
1231 
1232 				return -1;
1233 			}
1234 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1235 		}
1236 
1237 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1238 		if (ret) {
1239 			DPAA2_PMD_ERR(
1240 			"Move ipaddr before vLan discrimination set failed");
1241 			return -1;
1242 		}
1243 
1244 		proto.type = RTE_FLOW_ITEM_TYPE_ETH;
1245 		proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
1246 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1247 							proto, group);
1248 		if (ret) {
1249 			DPAA2_PMD_ERR("vLan discrimination rule set failed");
1250 			return -1;
1251 		}
1252 
1253 		(*device_configured) |= local_cfg;
1254 
1255 		return 0;
1256 	}
1257 
1258 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
1259 		RTE_FLOW_ITEM_TYPE_VLAN)) {
1260 		DPAA2_PMD_WARN("Extract field(s) of vlan not support.");
1261 
1262 		return -1;
1263 	}
1264 
1265 	if (!mask->hdr.vlan_tci)
1266 		return 0;
1267 
1268 	index = dpaa2_flow_extract_search(
1269 				&priv->extract.qos_key_extract.dpkg,
1270 				NET_PROT_VLAN, NH_FLD_VLAN_TCI);
1271 	if (index < 0) {
1272 		ret = dpaa2_flow_extract_add(
1273 						&priv->extract.qos_key_extract,
1274 						NET_PROT_VLAN,
1275 						NH_FLD_VLAN_TCI,
1276 						sizeof(rte_be16_t));
1277 		if (ret) {
1278 			DPAA2_PMD_ERR("QoS Extract add VLAN_TCI failed.");
1279 
1280 			return -1;
1281 		}
1282 		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1283 	}
1284 
1285 	index = dpaa2_flow_extract_search(
1286 			&priv->extract.tc_key_extract[group].dpkg,
1287 			NET_PROT_VLAN, NH_FLD_VLAN_TCI);
1288 	if (index < 0) {
1289 		ret = dpaa2_flow_extract_add(
1290 				&priv->extract.tc_key_extract[group],
1291 				NET_PROT_VLAN,
1292 				NH_FLD_VLAN_TCI,
1293 				sizeof(rte_be16_t));
1294 		if (ret) {
1295 			DPAA2_PMD_ERR("FS Extract add VLAN_TCI failed.");
1296 
1297 			return -1;
1298 		}
1299 		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1300 	}
1301 
1302 	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1303 	if (ret) {
1304 		DPAA2_PMD_ERR(
1305 			"Move ipaddr before VLAN TCI rule set failed");
1306 		return -1;
1307 	}
1308 
1309 	ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
1310 				&flow->qos_rule,
1311 				NET_PROT_VLAN,
1312 				NH_FLD_VLAN_TCI,
1313 				&spec->hdr.vlan_tci,
1314 				&mask->hdr.vlan_tci,
1315 				sizeof(rte_be16_t));
1316 	if (ret) {
1317 		DPAA2_PMD_ERR("QoS NH_FLD_VLAN_TCI rule data set failed");
1318 		return -1;
1319 	}
1320 
1321 	ret = dpaa2_flow_rule_data_set(
1322 			&priv->extract.tc_key_extract[group],
1323 			&flow->fs_rule,
1324 			NET_PROT_VLAN,
1325 			NH_FLD_VLAN_TCI,
1326 			&spec->hdr.vlan_tci,
1327 			&mask->hdr.vlan_tci,
1328 			sizeof(rte_be16_t));
1329 	if (ret) {
1330 		DPAA2_PMD_ERR("FS NH_FLD_VLAN_TCI rule data set failed");
1331 		return -1;
1332 	}
1333 
1334 	(*device_configured) |= local_cfg;
1335 
1336 	return 0;
1337 }
1338 
1339 static int
1340 dpaa2_configure_flow_ip_discrimation(
1341 	struct dpaa2_dev_priv *priv, struct rte_flow *flow,
1342 	const struct rte_flow_item *pattern,
1343 	int *local_cfg,	int *device_configured,
1344 	uint32_t group)
1345 {
1346 	int index, ret;
1347 	struct proto_discrimination proto;
1348 
1349 	index = dpaa2_flow_extract_search(
1350 			&priv->extract.qos_key_extract.dpkg,
1351 			NET_PROT_ETH, NH_FLD_ETH_TYPE);
1352 	if (index < 0) {
1353 		ret = dpaa2_flow_proto_discrimination_extract(
1354 				&priv->extract.qos_key_extract,
1355 				RTE_FLOW_ITEM_TYPE_ETH);
1356 		if (ret) {
1357 			DPAA2_PMD_ERR(
1358 			"QoS Extract ETH_TYPE to discriminate IP failed.");
1359 			return -1;
1360 		}
1361 		(*local_cfg) |= DPAA2_QOS_TABLE_RECONFIGURE;
1362 	}
1363 
1364 	index = dpaa2_flow_extract_search(
1365 			&priv->extract.tc_key_extract[group].dpkg,
1366 			NET_PROT_ETH, NH_FLD_ETH_TYPE);
1367 	if (index < 0) {
1368 		ret = dpaa2_flow_proto_discrimination_extract(
1369 				&priv->extract.tc_key_extract[group],
1370 				RTE_FLOW_ITEM_TYPE_ETH);
1371 		if (ret) {
1372 			DPAA2_PMD_ERR(
1373 			"FS Extract ETH_TYPE to discriminate IP failed.");
1374 			return -1;
1375 		}
1376 		(*local_cfg) |= DPAA2_FS_TABLE_RECONFIGURE;
1377 	}
1378 
1379 	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1380 	if (ret) {
1381 		DPAA2_PMD_ERR(
1382 			"Move ipaddr before IP discrimination set failed");
1383 		return -1;
1384 	}
1385 
1386 	proto.type = RTE_FLOW_ITEM_TYPE_ETH;
1387 	if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4)
1388 		proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
1389 	else
1390 		proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
1391 	ret = dpaa2_flow_proto_discrimination_rule(priv, flow, proto, group);
1392 	if (ret) {
1393 		DPAA2_PMD_ERR("IP discrimination rule set failed");
1394 		return -1;
1395 	}
1396 
1397 	(*device_configured) |= (*local_cfg);
1398 
1399 	return 0;
1400 }
1401 
1402 
1403 static int
1404 dpaa2_configure_flow_generic_ip(
1405 	struct rte_flow *flow,
1406 	struct rte_eth_dev *dev,
1407 	const struct rte_flow_attr *attr,
1408 	const struct rte_flow_item *pattern,
1409 	const struct rte_flow_action actions[] __rte_unused,
1410 	struct rte_flow_error *error __rte_unused,
1411 	int *device_configured)
1412 {
1413 	int index, ret;
1414 	int local_cfg = 0;
1415 	uint32_t group;
1416 	const struct rte_flow_item_ipv4 *spec_ipv4 = 0,
1417 		*mask_ipv4 = 0;
1418 	const struct rte_flow_item_ipv6 *spec_ipv6 = 0,
1419 		*mask_ipv6 = 0;
1420 	const void *key, *mask;
1421 	enum net_prot prot;
1422 
1423 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1424 	const char zero_cmp[NH_FLD_IPV6_ADDR_SIZE] = {0};
1425 	int size;
1426 
1427 	group = attr->group;
1428 
1429 	/* Parse pattern list to get the matching parameters */
1430 	if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1431 		spec_ipv4 = (const struct rte_flow_item_ipv4 *)pattern->spec;
1432 		mask_ipv4 = (const struct rte_flow_item_ipv4 *)
1433 			(pattern->mask ? pattern->mask :
1434 					&dpaa2_flow_item_ipv4_mask);
1435 	} else {
1436 		spec_ipv6 = (const struct rte_flow_item_ipv6 *)pattern->spec;
1437 		mask_ipv6 = (const struct rte_flow_item_ipv6 *)
1438 			(pattern->mask ? pattern->mask :
1439 					&dpaa2_flow_item_ipv6_mask);
1440 	}
1441 
1442 	/* Get traffic class index and flow id to be configured */
1443 	flow->tc_id = group;
1444 	flow->tc_index = attr->priority;
1445 
1446 	ret = dpaa2_configure_flow_ip_discrimation(priv,
1447 			flow, pattern, &local_cfg,
1448 			device_configured, group);
1449 	if (ret) {
1450 		DPAA2_PMD_ERR("IP discrimination failed!");
1451 		return -1;
1452 	}
1453 
1454 	if (!spec_ipv4 && !spec_ipv6)
1455 		return 0;
1456 
1457 	if (mask_ipv4) {
1458 		if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv4,
1459 			RTE_FLOW_ITEM_TYPE_IPV4)) {
1460 			DPAA2_PMD_WARN("Extract field(s) of IPv4 not support.");
1461 
1462 			return -1;
1463 		}
1464 	}
1465 
1466 	if (mask_ipv6) {
1467 		if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv6,
1468 			RTE_FLOW_ITEM_TYPE_IPV6)) {
1469 			DPAA2_PMD_WARN("Extract field(s) of IPv6 not support.");
1470 
1471 			return -1;
1472 		}
1473 	}
1474 
1475 	if (mask_ipv4 && (mask_ipv4->hdr.src_addr ||
1476 		mask_ipv4->hdr.dst_addr)) {
1477 		flow->ipaddr_rule.ipaddr_type = FLOW_IPV4_ADDR;
1478 	} else if (mask_ipv6 &&
1479 		(memcmp(&mask_ipv6->hdr.src_addr,
1480 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE) ||
1481 		memcmp(&mask_ipv6->hdr.dst_addr,
1482 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1483 		flow->ipaddr_rule.ipaddr_type = FLOW_IPV6_ADDR;
1484 	}
1485 
1486 	if ((mask_ipv4 && mask_ipv4->hdr.src_addr) ||
1487 		(mask_ipv6 &&
1488 			memcmp(&mask_ipv6->hdr.src_addr,
1489 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1490 		index = dpaa2_flow_extract_search(
1491 				&priv->extract.qos_key_extract.dpkg,
1492 				NET_PROT_IP, NH_FLD_IP_SRC);
1493 		if (index < 0) {
1494 			ret = dpaa2_flow_extract_add(
1495 					&priv->extract.qos_key_extract,
1496 					NET_PROT_IP,
1497 					NH_FLD_IP_SRC,
1498 					0);
1499 			if (ret) {
1500 				DPAA2_PMD_ERR("QoS Extract add IP_SRC failed.");
1501 
1502 				return -1;
1503 			}
1504 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1505 		}
1506 
1507 		index = dpaa2_flow_extract_search(
1508 				&priv->extract.tc_key_extract[group].dpkg,
1509 				NET_PROT_IP, NH_FLD_IP_SRC);
1510 		if (index < 0) {
1511 			ret = dpaa2_flow_extract_add(
1512 					&priv->extract.tc_key_extract[group],
1513 					NET_PROT_IP,
1514 					NH_FLD_IP_SRC,
1515 					0);
1516 			if (ret) {
1517 				DPAA2_PMD_ERR("FS Extract add IP_SRC failed.");
1518 
1519 				return -1;
1520 			}
1521 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1522 		}
1523 
1524 		if (spec_ipv4)
1525 			key = &spec_ipv4->hdr.src_addr;
1526 		else
1527 			key = &spec_ipv6->hdr.src_addr;
1528 		if (mask_ipv4) {
1529 			mask = &mask_ipv4->hdr.src_addr;
1530 			size = NH_FLD_IPV4_ADDR_SIZE;
1531 			prot = NET_PROT_IPV4;
1532 		} else {
1533 			mask = &mask_ipv6->hdr.src_addr;
1534 			size = NH_FLD_IPV6_ADDR_SIZE;
1535 			prot = NET_PROT_IPV6;
1536 		}
1537 
1538 		ret = dpaa2_flow_rule_data_set(
1539 				&priv->extract.qos_key_extract,
1540 				&flow->qos_rule,
1541 				prot, NH_FLD_IP_SRC,
1542 				key,	mask, size);
1543 		if (ret) {
1544 			DPAA2_PMD_ERR("QoS NH_FLD_IP_SRC rule data set failed");
1545 			return -1;
1546 		}
1547 
1548 		ret = dpaa2_flow_rule_data_set(
1549 				&priv->extract.tc_key_extract[group],
1550 				&flow->fs_rule,
1551 				prot, NH_FLD_IP_SRC,
1552 				key,	mask, size);
1553 		if (ret) {
1554 			DPAA2_PMD_ERR("FS NH_FLD_IP_SRC rule data set failed");
1555 			return -1;
1556 		}
1557 
1558 		flow->ipaddr_rule.qos_ipsrc_offset =
1559 			dpaa2_flow_extract_key_offset(
1560 				&priv->extract.qos_key_extract,
1561 				prot, NH_FLD_IP_SRC);
1562 		flow->ipaddr_rule.fs_ipsrc_offset =
1563 			dpaa2_flow_extract_key_offset(
1564 				&priv->extract.tc_key_extract[group],
1565 				prot, NH_FLD_IP_SRC);
1566 	}
1567 
1568 	if ((mask_ipv4 && mask_ipv4->hdr.dst_addr) ||
1569 		(mask_ipv6 &&
1570 			memcmp(&mask_ipv6->hdr.dst_addr,
1571 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1572 		index = dpaa2_flow_extract_search(
1573 				&priv->extract.qos_key_extract.dpkg,
1574 				NET_PROT_IP, NH_FLD_IP_DST);
1575 		if (index < 0) {
1576 			if (mask_ipv4)
1577 				size = NH_FLD_IPV4_ADDR_SIZE;
1578 			else
1579 				size = NH_FLD_IPV6_ADDR_SIZE;
1580 			ret = dpaa2_flow_extract_add(
1581 					&priv->extract.qos_key_extract,
1582 					NET_PROT_IP,
1583 					NH_FLD_IP_DST,
1584 					size);
1585 			if (ret) {
1586 				DPAA2_PMD_ERR("QoS Extract add IP_DST failed.");
1587 
1588 				return -1;
1589 			}
1590 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1591 		}
1592 
1593 		index = dpaa2_flow_extract_search(
1594 				&priv->extract.tc_key_extract[group].dpkg,
1595 				NET_PROT_IP, NH_FLD_IP_DST);
1596 		if (index < 0) {
1597 			if (mask_ipv4)
1598 				size = NH_FLD_IPV4_ADDR_SIZE;
1599 			else
1600 				size = NH_FLD_IPV6_ADDR_SIZE;
1601 			ret = dpaa2_flow_extract_add(
1602 					&priv->extract.tc_key_extract[group],
1603 					NET_PROT_IP,
1604 					NH_FLD_IP_DST,
1605 					size);
1606 			if (ret) {
1607 				DPAA2_PMD_ERR("FS Extract add IP_DST failed.");
1608 
1609 				return -1;
1610 			}
1611 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1612 		}
1613 
1614 		if (spec_ipv4)
1615 			key = &spec_ipv4->hdr.dst_addr;
1616 		else
1617 			key = &spec_ipv6->hdr.dst_addr;
1618 		if (mask_ipv4) {
1619 			mask = &mask_ipv4->hdr.dst_addr;
1620 			size = NH_FLD_IPV4_ADDR_SIZE;
1621 			prot = NET_PROT_IPV4;
1622 		} else {
1623 			mask = &mask_ipv6->hdr.dst_addr;
1624 			size = NH_FLD_IPV6_ADDR_SIZE;
1625 			prot = NET_PROT_IPV6;
1626 		}
1627 
1628 		ret = dpaa2_flow_rule_data_set(
1629 				&priv->extract.qos_key_extract,
1630 				&flow->qos_rule,
1631 				prot, NH_FLD_IP_DST,
1632 				key,	mask, size);
1633 		if (ret) {
1634 			DPAA2_PMD_ERR("QoS NH_FLD_IP_DST rule data set failed");
1635 			return -1;
1636 		}
1637 
1638 		ret = dpaa2_flow_rule_data_set(
1639 				&priv->extract.tc_key_extract[group],
1640 				&flow->fs_rule,
1641 				prot, NH_FLD_IP_DST,
1642 				key,	mask, size);
1643 		if (ret) {
1644 			DPAA2_PMD_ERR("FS NH_FLD_IP_DST rule data set failed");
1645 			return -1;
1646 		}
1647 		flow->ipaddr_rule.qos_ipdst_offset =
1648 			dpaa2_flow_extract_key_offset(
1649 				&priv->extract.qos_key_extract,
1650 				prot, NH_FLD_IP_DST);
1651 		flow->ipaddr_rule.fs_ipdst_offset =
1652 			dpaa2_flow_extract_key_offset(
1653 				&priv->extract.tc_key_extract[group],
1654 				prot, NH_FLD_IP_DST);
1655 	}
1656 
1657 	if ((mask_ipv4 && mask_ipv4->hdr.next_proto_id) ||
1658 		(mask_ipv6 && mask_ipv6->hdr.proto)) {
1659 		index = dpaa2_flow_extract_search(
1660 				&priv->extract.qos_key_extract.dpkg,
1661 				NET_PROT_IP, NH_FLD_IP_PROTO);
1662 		if (index < 0) {
1663 			ret = dpaa2_flow_extract_add(
1664 				&priv->extract.qos_key_extract,
1665 				NET_PROT_IP,
1666 				NH_FLD_IP_PROTO,
1667 				NH_FLD_IP_PROTO_SIZE);
1668 			if (ret) {
1669 				DPAA2_PMD_ERR("QoS Extract add IP_DST failed.");
1670 
1671 				return -1;
1672 			}
1673 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1674 		}
1675 
1676 		index = dpaa2_flow_extract_search(
1677 				&priv->extract.tc_key_extract[group].dpkg,
1678 				NET_PROT_IP, NH_FLD_IP_PROTO);
1679 		if (index < 0) {
1680 			ret = dpaa2_flow_extract_add(
1681 					&priv->extract.tc_key_extract[group],
1682 					NET_PROT_IP,
1683 					NH_FLD_IP_PROTO,
1684 					NH_FLD_IP_PROTO_SIZE);
1685 			if (ret) {
1686 				DPAA2_PMD_ERR("FS Extract add IP_DST failed.");
1687 
1688 				return -1;
1689 			}
1690 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1691 		}
1692 
1693 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1694 		if (ret) {
1695 			DPAA2_PMD_ERR(
1696 				"Move ipaddr after NH_FLD_IP_PROTO rule set failed");
1697 			return -1;
1698 		}
1699 
1700 		if (spec_ipv4)
1701 			key = &spec_ipv4->hdr.next_proto_id;
1702 		else
1703 			key = &spec_ipv6->hdr.proto;
1704 		if (mask_ipv4)
1705 			mask = &mask_ipv4->hdr.next_proto_id;
1706 		else
1707 			mask = &mask_ipv6->hdr.proto;
1708 
1709 		ret = dpaa2_flow_rule_data_set(
1710 				&priv->extract.qos_key_extract,
1711 				&flow->qos_rule,
1712 				NET_PROT_IP,
1713 				NH_FLD_IP_PROTO,
1714 				key,	mask, NH_FLD_IP_PROTO_SIZE);
1715 		if (ret) {
1716 			DPAA2_PMD_ERR("QoS NH_FLD_IP_PROTO rule data set failed");
1717 			return -1;
1718 		}
1719 
1720 		ret = dpaa2_flow_rule_data_set(
1721 				&priv->extract.tc_key_extract[group],
1722 				&flow->fs_rule,
1723 				NET_PROT_IP,
1724 				NH_FLD_IP_PROTO,
1725 				key,	mask, NH_FLD_IP_PROTO_SIZE);
1726 		if (ret) {
1727 			DPAA2_PMD_ERR("FS NH_FLD_IP_PROTO rule data set failed");
1728 			return -1;
1729 		}
1730 	}
1731 
1732 	(*device_configured) |= local_cfg;
1733 
1734 	return 0;
1735 }
1736 
/* Configure QoS/FS classification for an RTE_FLOW_ITEM_TYPE_ICMP item.
 *
 * With no spec, only the ICMP protocol itself is matched: the IP
 * next-protocol field is used to discriminate ICMP.  With a spec, the
 * ICMP type and/or code fields enabled by the mask are added to both
 * the QoS and the per-TC FS extract keys and rule data.
 *
 * Returns 0 on success, -1 on any failure.
 */
static int
dpaa2_configure_flow_icmp(struct rte_flow *flow,
			  struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item *pattern,
			  const struct rte_flow_action actions[] __rte_unused,
			  struct rte_flow_error *error __rte_unused,
			  int *device_configured)
{
	int index, ret;
	int local_cfg = 0;
	uint32_t group;
	const struct rte_flow_item_icmp *spec, *mask;

	const struct rte_flow_item_icmp *last __rte_unused;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	group = attr->group;

	/* Parse pattern list to get the matching parameters */
	spec    = (const struct rte_flow_item_icmp *)pattern->spec;
	last    = (const struct rte_flow_item_icmp *)pattern->last;
	/* Fall back to the driver default mask when none was given. */
	mask    = (const struct rte_flow_item_icmp *)
		(pattern->mask ? pattern->mask : &dpaa2_flow_item_icmp_mask);

	/* Get traffic class index and flow id to be configured */
	flow->tc_id = group;
	flow->tc_index = attr->priority;

	if (!spec) {
		/* Don't care any field of ICMP header,
		 * only care ICMP protocol.
		 * Example: flow create 0 ingress pattern icmp /
		 */
		/* Next proto of Generical IP is actually used
		 * for ICMP identification.
		 */
		struct proto_discrimination proto;

		/* Ensure IP_PROTO is part of the QoS extract key. */
		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_IP, NH_FLD_IP_PROTO);
		if (index < 0) {
			ret = dpaa2_flow_proto_discrimination_extract(
					&priv->extract.qos_key_extract,
					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
			if (ret) {
				DPAA2_PMD_ERR(
					"QoS Extract IP protocol to discriminate ICMP failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		/* Likewise for the FS key of this traffic class. */
		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_IP, NH_FLD_IP_PROTO);
		if (index < 0) {
			ret = dpaa2_flow_proto_discrimination_extract(
					&priv->extract.tc_key_extract[group],
					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
			if (ret) {
				DPAA2_PMD_ERR(
					"FS Extract IP protocol to discriminate ICMP failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		/* IP addresses must stay at the key tail. */
		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move IP addr before ICMP discrimination set failed");
			return -1;
		}

		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
		proto.ip_proto = IPPROTO_ICMP;
		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
							proto, group);
		if (ret) {
			DPAA2_PMD_ERR("ICMP discrimination rule set failed");
			return -1;
		}

		(*device_configured) |= local_cfg;

		return 0;
	}

	if (dpaa2_flow_extract_support((const uint8_t *)mask,
		RTE_FLOW_ITEM_TYPE_ICMP)) {
		DPAA2_PMD_WARN("Extract field(s) of ICMP not support.");

		return -1;
	}

	if (mask->hdr.icmp_type) {
		/* Add ICMP type to the QoS extract key if missing. */
		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.qos_key_extract,
					NET_PROT_ICMP,
					NH_FLD_ICMP_TYPE,
					NH_FLD_ICMP_TYPE_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("QoS Extract add ICMP_TYPE failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		/* Likewise for the FS key of this traffic class. */
		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.tc_key_extract[group],
					NET_PROT_ICMP,
					NH_FLD_ICMP_TYPE,
					NH_FLD_ICMP_TYPE_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("FS Extract add ICMP_TYPE failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move ipaddr before ICMP TYPE set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.qos_key_extract,
				&flow->qos_rule,
				NET_PROT_ICMP,
				NH_FLD_ICMP_TYPE,
				&spec->hdr.icmp_type,
				&mask->hdr.icmp_type,
				NH_FLD_ICMP_TYPE_SIZE);
		if (ret) {
			DPAA2_PMD_ERR("QoS NH_FLD_ICMP_TYPE rule data set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.tc_key_extract[group],
				&flow->fs_rule,
				NET_PROT_ICMP,
				NH_FLD_ICMP_TYPE,
				&spec->hdr.icmp_type,
				&mask->hdr.icmp_type,
				NH_FLD_ICMP_TYPE_SIZE);
		if (ret) {
			DPAA2_PMD_ERR("FS NH_FLD_ICMP_TYPE rule data set failed");
			return -1;
		}
	}

	if (mask->hdr.icmp_code) {
		/* Same sequence as above for the ICMP code field. */
		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_ICMP, NH_FLD_ICMP_CODE);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.qos_key_extract,
					NET_PROT_ICMP,
					NH_FLD_ICMP_CODE,
					NH_FLD_ICMP_CODE_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("QoS Extract add ICMP_CODE failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_ICMP, NH_FLD_ICMP_CODE);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.tc_key_extract[group],
					NET_PROT_ICMP,
					NH_FLD_ICMP_CODE,
					NH_FLD_ICMP_CODE_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("FS Extract add ICMP_CODE failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move ipaddr after ICMP CODE set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.qos_key_extract,
				&flow->qos_rule,
				NET_PROT_ICMP,
				NH_FLD_ICMP_CODE,
				&spec->hdr.icmp_code,
				&mask->hdr.icmp_code,
				NH_FLD_ICMP_CODE_SIZE);
		if (ret) {
			DPAA2_PMD_ERR("QoS NH_FLD_ICMP_CODE rule data set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.tc_key_extract[group],
				&flow->fs_rule,
				NET_PROT_ICMP,
				NH_FLD_ICMP_CODE,
				&spec->hdr.icmp_code,
				&mask->hdr.icmp_code,
				NH_FLD_ICMP_CODE_SIZE);
		if (ret) {
			DPAA2_PMD_ERR("FS NH_FLD_ICMP_CODE rule data set failed");
			return -1;
		}
	}

	(*device_configured) |= local_cfg;

	return 0;
}
1978 
1979 static int
1980 dpaa2_configure_flow_udp(struct rte_flow *flow,
1981 			 struct rte_eth_dev *dev,
1982 			  const struct rte_flow_attr *attr,
1983 			  const struct rte_flow_item *pattern,
1984 			  const struct rte_flow_action actions[] __rte_unused,
1985 			  struct rte_flow_error *error __rte_unused,
1986 			  int *device_configured)
1987 {
1988 	int index, ret;
1989 	int local_cfg = 0;
1990 	uint32_t group;
1991 	const struct rte_flow_item_udp *spec, *mask;
1992 
1993 	const struct rte_flow_item_udp *last __rte_unused;
1994 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1995 
1996 	group = attr->group;
1997 
1998 	/* Parse pattern list to get the matching parameters */
1999 	spec    = (const struct rte_flow_item_udp *)pattern->spec;
2000 	last    = (const struct rte_flow_item_udp *)pattern->last;
2001 	mask    = (const struct rte_flow_item_udp *)
2002 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_udp_mask);
2003 
2004 	/* Get traffic class index and flow id to be configured */
2005 	flow->tc_id = group;
2006 	flow->tc_index = attr->priority;
2007 
2008 	if (!spec || !mc_l4_port_identification) {
2009 		struct proto_discrimination proto;
2010 
2011 		index = dpaa2_flow_extract_search(
2012 				&priv->extract.qos_key_extract.dpkg,
2013 				NET_PROT_IP, NH_FLD_IP_PROTO);
2014 		if (index < 0) {
2015 			ret = dpaa2_flow_proto_discrimination_extract(
2016 					&priv->extract.qos_key_extract,
2017 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2018 			if (ret) {
2019 				DPAA2_PMD_ERR(
2020 					"QoS Extract IP protocol to discriminate UDP failed.");
2021 
2022 				return -1;
2023 			}
2024 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2025 		}
2026 
2027 		index = dpaa2_flow_extract_search(
2028 				&priv->extract.tc_key_extract[group].dpkg,
2029 				NET_PROT_IP, NH_FLD_IP_PROTO);
2030 		if (index < 0) {
2031 			ret = dpaa2_flow_proto_discrimination_extract(
2032 				&priv->extract.tc_key_extract[group],
2033 				DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2034 			if (ret) {
2035 				DPAA2_PMD_ERR(
2036 					"FS Extract IP protocol to discriminate UDP failed.");
2037 
2038 				return -1;
2039 			}
2040 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2041 		}
2042 
2043 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2044 		if (ret) {
2045 			DPAA2_PMD_ERR(
2046 				"Move IP addr before UDP discrimination set failed");
2047 			return -1;
2048 		}
2049 
2050 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2051 		proto.ip_proto = IPPROTO_UDP;
2052 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2053 							proto, group);
2054 		if (ret) {
2055 			DPAA2_PMD_ERR("UDP discrimination rule set failed");
2056 			return -1;
2057 		}
2058 
2059 		(*device_configured) |= local_cfg;
2060 
2061 		if (!spec)
2062 			return 0;
2063 	}
2064 
2065 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2066 		RTE_FLOW_ITEM_TYPE_UDP)) {
2067 		DPAA2_PMD_WARN("Extract field(s) of UDP not support.");
2068 
2069 		return -1;
2070 	}
2071 
2072 	if (mask->hdr.src_port) {
2073 		index = dpaa2_flow_extract_search(
2074 				&priv->extract.qos_key_extract.dpkg,
2075 				NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
2076 		if (index < 0) {
2077 			ret = dpaa2_flow_extract_add(
2078 					&priv->extract.qos_key_extract,
2079 				NET_PROT_UDP,
2080 				NH_FLD_UDP_PORT_SRC,
2081 				NH_FLD_UDP_PORT_SIZE);
2082 			if (ret) {
2083 				DPAA2_PMD_ERR("QoS Extract add UDP_SRC failed.");
2084 
2085 				return -1;
2086 			}
2087 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2088 		}
2089 
2090 		index = dpaa2_flow_extract_search(
2091 				&priv->extract.tc_key_extract[group].dpkg,
2092 				NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
2093 		if (index < 0) {
2094 			ret = dpaa2_flow_extract_add(
2095 					&priv->extract.tc_key_extract[group],
2096 					NET_PROT_UDP,
2097 					NH_FLD_UDP_PORT_SRC,
2098 					NH_FLD_UDP_PORT_SIZE);
2099 			if (ret) {
2100 				DPAA2_PMD_ERR("FS Extract add UDP_SRC failed.");
2101 
2102 				return -1;
2103 			}
2104 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2105 		}
2106 
2107 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2108 		if (ret) {
2109 			DPAA2_PMD_ERR(
2110 				"Move ipaddr before UDP_PORT_SRC set failed");
2111 			return -1;
2112 		}
2113 
2114 		ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
2115 				&flow->qos_rule,
2116 				NET_PROT_UDP,
2117 				NH_FLD_UDP_PORT_SRC,
2118 				&spec->hdr.src_port,
2119 				&mask->hdr.src_port,
2120 				NH_FLD_UDP_PORT_SIZE);
2121 		if (ret) {
2122 			DPAA2_PMD_ERR(
2123 				"QoS NH_FLD_UDP_PORT_SRC rule data set failed");
2124 			return -1;
2125 		}
2126 
2127 		ret = dpaa2_flow_rule_data_set(
2128 				&priv->extract.tc_key_extract[group],
2129 				&flow->fs_rule,
2130 				NET_PROT_UDP,
2131 				NH_FLD_UDP_PORT_SRC,
2132 				&spec->hdr.src_port,
2133 				&mask->hdr.src_port,
2134 				NH_FLD_UDP_PORT_SIZE);
2135 		if (ret) {
2136 			DPAA2_PMD_ERR(
2137 				"FS NH_FLD_UDP_PORT_SRC rule data set failed");
2138 			return -1;
2139 		}
2140 	}
2141 
2142 	if (mask->hdr.dst_port) {
2143 		index = dpaa2_flow_extract_search(
2144 				&priv->extract.qos_key_extract.dpkg,
2145 				NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
2146 		if (index < 0) {
2147 			ret = dpaa2_flow_extract_add(
2148 					&priv->extract.qos_key_extract,
2149 					NET_PROT_UDP,
2150 					NH_FLD_UDP_PORT_DST,
2151 					NH_FLD_UDP_PORT_SIZE);
2152 			if (ret) {
2153 				DPAA2_PMD_ERR("QoS Extract add UDP_DST failed.");
2154 
2155 				return -1;
2156 			}
2157 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2158 		}
2159 
2160 		index = dpaa2_flow_extract_search(
2161 				&priv->extract.tc_key_extract[group].dpkg,
2162 				NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
2163 		if (index < 0) {
2164 			ret = dpaa2_flow_extract_add(
2165 					&priv->extract.tc_key_extract[group],
2166 					NET_PROT_UDP,
2167 					NH_FLD_UDP_PORT_DST,
2168 					NH_FLD_UDP_PORT_SIZE);
2169 			if (ret) {
2170 				DPAA2_PMD_ERR("FS Extract add UDP_DST failed.");
2171 
2172 				return -1;
2173 			}
2174 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2175 		}
2176 
2177 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2178 		if (ret) {
2179 			DPAA2_PMD_ERR(
2180 				"Move ipaddr before UDP_PORT_DST set failed");
2181 			return -1;
2182 		}
2183 
2184 		ret = dpaa2_flow_rule_data_set(
2185 				&priv->extract.qos_key_extract,
2186 				&flow->qos_rule,
2187 				NET_PROT_UDP,
2188 				NH_FLD_UDP_PORT_DST,
2189 				&spec->hdr.dst_port,
2190 				&mask->hdr.dst_port,
2191 				NH_FLD_UDP_PORT_SIZE);
2192 		if (ret) {
2193 			DPAA2_PMD_ERR(
2194 				"QoS NH_FLD_UDP_PORT_DST rule data set failed");
2195 			return -1;
2196 		}
2197 
2198 		ret = dpaa2_flow_rule_data_set(
2199 				&priv->extract.tc_key_extract[group],
2200 				&flow->fs_rule,
2201 				NET_PROT_UDP,
2202 				NH_FLD_UDP_PORT_DST,
2203 				&spec->hdr.dst_port,
2204 				&mask->hdr.dst_port,
2205 				NH_FLD_UDP_PORT_SIZE);
2206 		if (ret) {
2207 			DPAA2_PMD_ERR(
2208 				"FS NH_FLD_UDP_PORT_DST rule data set failed");
2209 			return -1;
2210 		}
2211 	}
2212 
2213 	(*device_configured) |= local_cfg;
2214 
2215 	return 0;
2216 }
2217 
/* Build QoS/FS table extracts and rule data for a TCP pattern item.
 *
 * When no spec is supplied, or when MC/WRIOP cannot identify the L4
 * protocol from the L4 ports (see mc_l4_port_identification), TCP is
 * discriminated by matching IPPROTO_TCP in the IP next-protocol field
 * instead.  With a spec present, the masked source/destination ports
 * are added to both the QoS key (traffic-class distribution) and the
 * per-TC FS key (flow steering).
 *
 * Returns 0 on success, -1 on failure.  Table reconfiguration flags
 * are OR-ed into *device_configured for the caller.
 */
static int
dpaa2_configure_flow_tcp(struct rte_flow *flow,
			 struct rte_eth_dev *dev,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_item *pattern,
			 const struct rte_flow_action actions[] __rte_unused,
			 struct rte_flow_error *error __rte_unused,
			 int *device_configured)
{
	int index, ret;
	int local_cfg = 0;
	uint32_t group;
	const struct rte_flow_item_tcp *spec, *mask;

	const struct rte_flow_item_tcp *last __rte_unused;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	group = attr->group;

	/* Parse pattern list to get the matching parameters */
	spec    = (const struct rte_flow_item_tcp *)pattern->spec;
	last    = (const struct rte_flow_item_tcp *)pattern->last;
	mask    = (const struct rte_flow_item_tcp *)
		(pattern->mask ? pattern->mask : &dpaa2_flow_item_tcp_mask);

	/* Get traffic class index and flow id to be configured */
	flow->tc_id = group;
	flow->tc_index = attr->priority;

	/* Discriminate TCP via the IP next-protocol field when there is
	 * no spec to match on, or when the hardware cannot classify by
	 * L4 ports alone (workaround, see mc_l4_port_identification).
	 */
	if (!spec || !mc_l4_port_identification) {
		struct proto_discrimination proto;

		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_IP, NH_FLD_IP_PROTO);
		if (index < 0) {
			ret = dpaa2_flow_proto_discrimination_extract(
					&priv->extract.qos_key_extract,
					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
			if (ret) {
				DPAA2_PMD_ERR(
					"QoS Extract IP protocol to discriminate TCP failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_IP, NH_FLD_IP_PROTO);
		if (index < 0) {
			ret = dpaa2_flow_proto_discrimination_extract(
				&priv->extract.tc_key_extract[group],
				DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
			if (ret) {
				DPAA2_PMD_ERR(
					"FS Extract IP protocol to discriminate TCP failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		/* IP address extract(s) are kept at the key tail; shift
		 * them behind any newly inserted extract(s).
		 */
		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move IP addr before TCP discrimination set failed");
			return -1;
		}

		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
		proto.ip_proto = IPPROTO_TCP;
		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
							proto, group);
		if (ret) {
			DPAA2_PMD_ERR("TCP discrimination rule set failed");
			return -1;
		}

		(*device_configured) |= local_cfg;

		/* Without a spec there are no port fields to match. */
		if (!spec)
			return 0;
	}

	/* Reject mask fields this driver cannot extract. */
	if (dpaa2_flow_extract_support((const uint8_t *)mask,
		RTE_FLOW_ITEM_TYPE_TCP)) {
		DPAA2_PMD_WARN("Extract field(s) of TCP not support.");

		return -1;
	}

	/* Match on TCP source port in both QoS and FS keys. */
	if (mask->hdr.src_port) {
		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.qos_key_extract,
					NET_PROT_TCP,
					NH_FLD_TCP_PORT_SRC,
					NH_FLD_TCP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("QoS Extract add TCP_SRC failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.tc_key_extract[group],
					NET_PROT_TCP,
					NH_FLD_TCP_PORT_SRC,
					NH_FLD_TCP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("FS Extract add TCP_SRC failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move ipaddr before TCP_PORT_SRC set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.qos_key_extract,
				&flow->qos_rule,
				NET_PROT_TCP,
				NH_FLD_TCP_PORT_SRC,
				&spec->hdr.src_port,
				&mask->hdr.src_port,
				NH_FLD_TCP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"QoS NH_FLD_TCP_PORT_SRC rule data set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.tc_key_extract[group],
				&flow->fs_rule,
				NET_PROT_TCP,
				NH_FLD_TCP_PORT_SRC,
				&spec->hdr.src_port,
				&mask->hdr.src_port,
				NH_FLD_TCP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"FS NH_FLD_TCP_PORT_SRC rule data set failed");
			return -1;
		}
	}

	/* Match on TCP destination port in both QoS and FS keys. */
	if (mask->hdr.dst_port) {
		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.qos_key_extract,
					NET_PROT_TCP,
					NH_FLD_TCP_PORT_DST,
					NH_FLD_TCP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("QoS Extract add TCP_DST failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.tc_key_extract[group],
					NET_PROT_TCP,
					NH_FLD_TCP_PORT_DST,
					NH_FLD_TCP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("FS Extract add TCP_DST failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move ipaddr before TCP_PORT_DST set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.qos_key_extract,
				&flow->qos_rule,
				NET_PROT_TCP,
				NH_FLD_TCP_PORT_DST,
				&spec->hdr.dst_port,
				&mask->hdr.dst_port,
				NH_FLD_TCP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"QoS NH_FLD_TCP_PORT_DST rule data set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.tc_key_extract[group],
				&flow->fs_rule,
				NET_PROT_TCP,
				NH_FLD_TCP_PORT_DST,
				&spec->hdr.dst_port,
				&mask->hdr.dst_port,
				NH_FLD_TCP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"FS NH_FLD_TCP_PORT_DST rule data set failed");
			return -1;
		}
	}

	(*device_configured) |= local_cfg;

	return 0;
}
2457 
/* Build QoS/FS table extracts and rule data for an SCTP pattern item.
 *
 * Mirrors dpaa2_configure_flow_tcp: without a spec, or when MC/WRIOP
 * cannot identify the L4 protocol from the ports (see
 * mc_l4_port_identification), SCTP is discriminated by matching
 * IPPROTO_SCTP in the IP next-protocol field.  With a spec present,
 * the masked source/destination ports are added to both the QoS key
 * (traffic-class distribution) and the per-TC FS key (flow steering).
 *
 * Returns 0 on success, -1 on failure.  Table reconfiguration flags
 * are OR-ed into *device_configured for the caller.
 */
static int
dpaa2_configure_flow_sctp(struct rte_flow *flow,
			  struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item *pattern,
			  const struct rte_flow_action actions[] __rte_unused,
			  struct rte_flow_error *error __rte_unused,
			  int *device_configured)
{
	int index, ret;
	int local_cfg = 0;
	uint32_t group;
	const struct rte_flow_item_sctp *spec, *mask;

	const struct rte_flow_item_sctp *last __rte_unused;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	group = attr->group;

	/* Parse pattern list to get the matching parameters */
	spec    = (const struct rte_flow_item_sctp *)pattern->spec;
	last    = (const struct rte_flow_item_sctp *)pattern->last;
	mask    = (const struct rte_flow_item_sctp *)
			(pattern->mask ? pattern->mask :
				&dpaa2_flow_item_sctp_mask);

	/* Get traffic class index and flow id to be configured */
	flow->tc_id = group;
	flow->tc_index = attr->priority;

	/* Discriminate SCTP via the IP next-protocol field when there is
	 * no spec to match on, or when the hardware cannot classify by
	 * L4 ports alone (workaround, see mc_l4_port_identification).
	 */
	if (!spec || !mc_l4_port_identification) {
		struct proto_discrimination proto;

		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_IP, NH_FLD_IP_PROTO);
		if (index < 0) {
			ret = dpaa2_flow_proto_discrimination_extract(
					&priv->extract.qos_key_extract,
					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
			if (ret) {
				DPAA2_PMD_ERR(
					"QoS Extract IP protocol to discriminate SCTP failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_IP, NH_FLD_IP_PROTO);
		if (index < 0) {
			ret = dpaa2_flow_proto_discrimination_extract(
					&priv->extract.tc_key_extract[group],
					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
			if (ret) {
				DPAA2_PMD_ERR(
					"FS Extract IP protocol to discriminate SCTP failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		/* IP address extract(s) are kept at the key tail; shift
		 * them behind any newly inserted extract(s).
		 */
		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move ipaddr before SCTP discrimination set failed");
			return -1;
		}

		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
		proto.ip_proto = IPPROTO_SCTP;
		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
							proto, group);
		if (ret) {
			DPAA2_PMD_ERR("SCTP discrimination rule set failed");
			return -1;
		}

		(*device_configured) |= local_cfg;

		/* Without a spec there are no port fields to match. */
		if (!spec)
			return 0;
	}

	/* Reject mask fields this driver cannot extract. */
	if (dpaa2_flow_extract_support((const uint8_t *)mask,
		RTE_FLOW_ITEM_TYPE_SCTP)) {
		DPAA2_PMD_WARN("Extract field(s) of SCTP not support.");

		return -1;
	}

	/* Match on SCTP source port in both QoS and FS keys. */
	if (mask->hdr.src_port) {
		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.qos_key_extract,
					NET_PROT_SCTP,
					NH_FLD_SCTP_PORT_SRC,
					NH_FLD_SCTP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("QoS Extract add SCTP_SRC failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.tc_key_extract[group],
					NET_PROT_SCTP,
					NH_FLD_SCTP_PORT_SRC,
					NH_FLD_SCTP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("FS Extract add SCTP_SRC failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move ipaddr before SCTP_PORT_SRC set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.qos_key_extract,
				&flow->qos_rule,
				NET_PROT_SCTP,
				NH_FLD_SCTP_PORT_SRC,
				&spec->hdr.src_port,
				&mask->hdr.src_port,
				NH_FLD_SCTP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"QoS NH_FLD_SCTP_PORT_SRC rule data set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.tc_key_extract[group],
				&flow->fs_rule,
				NET_PROT_SCTP,
				NH_FLD_SCTP_PORT_SRC,
				&spec->hdr.src_port,
				&mask->hdr.src_port,
				NH_FLD_SCTP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"FS NH_FLD_SCTP_PORT_SRC rule data set failed");
			return -1;
		}
	}

	/* Match on SCTP destination port in both QoS and FS keys. */
	if (mask->hdr.dst_port) {
		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.qos_key_extract,
					NET_PROT_SCTP,
					NH_FLD_SCTP_PORT_DST,
					NH_FLD_SCTP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("QoS Extract add SCTP_DST failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.tc_key_extract[group],
					NET_PROT_SCTP,
					NH_FLD_SCTP_PORT_DST,
					NH_FLD_SCTP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("FS Extract add SCTP_DST failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move ipaddr before SCTP_PORT_DST set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.qos_key_extract,
				&flow->qos_rule,
				NET_PROT_SCTP,
				NH_FLD_SCTP_PORT_DST,
				&spec->hdr.dst_port,
				&mask->hdr.dst_port,
				NH_FLD_SCTP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"QoS NH_FLD_SCTP_PORT_DST rule data set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.tc_key_extract[group],
				&flow->fs_rule,
				NET_PROT_SCTP,
				NH_FLD_SCTP_PORT_DST,
				&spec->hdr.dst_port,
				&mask->hdr.dst_port,
				NH_FLD_SCTP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"FS NH_FLD_SCTP_PORT_DST rule data set failed");
			return -1;
		}
	}

	(*device_configured) |= local_cfg;

	return 0;
}
2698 
/* Build QoS/FS table extracts and rule data for a GRE pattern item.
 *
 * Without a spec, GRE traffic is discriminated by matching IPPROTO_GRE
 * in the IP next-protocol field.  With a spec, only the GRE protocol
 * (encapsulated payload type) field is supported: the masked value is
 * added to both the QoS key (traffic-class distribution) and the
 * per-TC FS key (flow steering).
 *
 * Returns 0 on success, -1 on failure.  Table reconfiguration flags
 * are OR-ed into *device_configured for the caller.
 */
static int
dpaa2_configure_flow_gre(struct rte_flow *flow,
			 struct rte_eth_dev *dev,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_item *pattern,
			 const struct rte_flow_action actions[] __rte_unused,
			 struct rte_flow_error *error __rte_unused,
			 int *device_configured)
{
	int index, ret;
	int local_cfg = 0;
	uint32_t group;
	const struct rte_flow_item_gre *spec, *mask;

	const struct rte_flow_item_gre *last __rte_unused;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	group = attr->group;

	/* Parse pattern list to get the matching parameters */
	spec    = (const struct rte_flow_item_gre *)pattern->spec;
	last    = (const struct rte_flow_item_gre *)pattern->last;
	mask    = (const struct rte_flow_item_gre *)
		(pattern->mask ? pattern->mask : &dpaa2_flow_item_gre_mask);

	/* Get traffic class index and flow id to be configured */
	flow->tc_id = group;
	flow->tc_index = attr->priority;

	/* No spec: only discriminate GRE by the IP next-protocol field. */
	if (!spec) {
		struct proto_discrimination proto;

		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_IP, NH_FLD_IP_PROTO);
		if (index < 0) {
			ret = dpaa2_flow_proto_discrimination_extract(
					&priv->extract.qos_key_extract,
					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
			if (ret) {
				DPAA2_PMD_ERR(
					"QoS Extract IP protocol to discriminate GRE failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_IP, NH_FLD_IP_PROTO);
		if (index < 0) {
			ret = dpaa2_flow_proto_discrimination_extract(
					&priv->extract.tc_key_extract[group],
					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
			if (ret) {
				DPAA2_PMD_ERR(
					"FS Extract IP protocol to discriminate GRE failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		/* IP address extract(s) are kept at the key tail; shift
		 * them behind any newly inserted extract(s).
		 */
		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move IP addr before GRE discrimination set failed");
			return -1;
		}

		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
		proto.ip_proto = IPPROTO_GRE;
		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
							proto, group);
		if (ret) {
			DPAA2_PMD_ERR("GRE discrimination rule set failed");
			return -1;
		}

		(*device_configured) |= local_cfg;

		return 0;
	}

	/* Reject mask fields this driver cannot extract. */
	if (dpaa2_flow_extract_support((const uint8_t *)mask,
		RTE_FLOW_ITEM_TYPE_GRE)) {
		DPAA2_PMD_WARN("Extract field(s) of GRE not support.");

		return -1;
	}

	/* Nothing to match when the protocol field is fully masked out. */
	if (!mask->protocol)
		return 0;

	/* Match on the GRE protocol type in both QoS and FS keys. */
	index = dpaa2_flow_extract_search(
			&priv->extract.qos_key_extract.dpkg,
			NET_PROT_GRE, NH_FLD_GRE_TYPE);
	if (index < 0) {
		ret = dpaa2_flow_extract_add(
				&priv->extract.qos_key_extract,
				NET_PROT_GRE,
				NH_FLD_GRE_TYPE,
				sizeof(rte_be16_t));
		if (ret) {
			DPAA2_PMD_ERR("QoS Extract add GRE_TYPE failed.");

			return -1;
		}
		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
	}

	index = dpaa2_flow_extract_search(
			&priv->extract.tc_key_extract[group].dpkg,
			NET_PROT_GRE, NH_FLD_GRE_TYPE);
	if (index < 0) {
		ret = dpaa2_flow_extract_add(
				&priv->extract.tc_key_extract[group],
				NET_PROT_GRE,
				NH_FLD_GRE_TYPE,
				sizeof(rte_be16_t));
		if (ret) {
			DPAA2_PMD_ERR("FS Extract add GRE_TYPE failed.");

			return -1;
		}
		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
	}

	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
	if (ret) {
		DPAA2_PMD_ERR(
			"Move ipaddr before GRE_TYPE set failed");
		return -1;
	}

	ret = dpaa2_flow_rule_data_set(
				&priv->extract.qos_key_extract,
				&flow->qos_rule,
				NET_PROT_GRE,
				NH_FLD_GRE_TYPE,
				&spec->protocol,
				&mask->protocol,
				sizeof(rte_be16_t));
	if (ret) {
		DPAA2_PMD_ERR(
			"QoS NH_FLD_GRE_TYPE rule data set failed");
		return -1;
	}

	ret = dpaa2_flow_rule_data_set(
			&priv->extract.tc_key_extract[group],
			&flow->fs_rule,
			NET_PROT_GRE,
			NH_FLD_GRE_TYPE,
			&spec->protocol,
			&mask->protocol,
			sizeof(rte_be16_t));
	if (ret) {
		DPAA2_PMD_ERR(
			"FS NH_FLD_GRE_TYPE rule data set failed");
		return -1;
	}

	(*device_configured) |= local_cfg;

	return 0;
}
2867 
2868 static int
2869 dpaa2_configure_flow_raw(struct rte_flow *flow,
2870 			 struct rte_eth_dev *dev,
2871 			 const struct rte_flow_attr *attr,
2872 			 const struct rte_flow_item *pattern,
2873 			 const struct rte_flow_action actions[] __rte_unused,
2874 			 struct rte_flow_error *error __rte_unused,
2875 			 int *device_configured)
2876 {
2877 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2878 	const struct rte_flow_item_raw *spec = pattern->spec;
2879 	const struct rte_flow_item_raw *mask = pattern->mask;
2880 	int prev_key_size =
2881 		priv->extract.qos_key_extract.key_info.key_total_size;
2882 	int local_cfg = 0, ret;
2883 	uint32_t group;
2884 
2885 	/* Need both spec and mask */
2886 	if (!spec || !mask) {
2887 		DPAA2_PMD_ERR("spec or mask not present.");
2888 		return -EINVAL;
2889 	}
2890 	/* Only supports non-relative with offset 0 */
2891 	if (spec->relative || spec->offset != 0 ||
2892 	    spec->search || spec->limit) {
2893 		DPAA2_PMD_ERR("relative and non zero offset not supported.");
2894 		return -EINVAL;
2895 	}
2896 	/* Spec len and mask len should be same */
2897 	if (spec->length != mask->length) {
2898 		DPAA2_PMD_ERR("Spec len and mask len mismatch.");
2899 		return -EINVAL;
2900 	}
2901 
2902 	/* Get traffic class index and flow id to be configured */
2903 	group = attr->group;
2904 	flow->tc_id = group;
2905 	flow->tc_index = attr->priority;
2906 
2907 	if (prev_key_size <= spec->length) {
2908 		ret = dpaa2_flow_extract_add_raw(&priv->extract.qos_key_extract,
2909 						 spec->length);
2910 		if (ret) {
2911 			DPAA2_PMD_ERR("QoS Extract RAW add failed.");
2912 			return -1;
2913 		}
2914 		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2915 
2916 		ret = dpaa2_flow_extract_add_raw(
2917 					&priv->extract.tc_key_extract[group],
2918 					spec->length);
2919 		if (ret) {
2920 			DPAA2_PMD_ERR("FS Extract RAW add failed.");
2921 			return -1;
2922 		}
2923 		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2924 	}
2925 
2926 	ret = dpaa2_flow_rule_data_set_raw(&flow->qos_rule, spec->pattern,
2927 					   mask->pattern, spec->length);
2928 	if (ret) {
2929 		DPAA2_PMD_ERR("QoS RAW rule data set failed");
2930 		return -1;
2931 	}
2932 
2933 	ret = dpaa2_flow_rule_data_set_raw(&flow->fs_rule, spec->pattern,
2934 					   mask->pattern, spec->length);
2935 	if (ret) {
2936 		DPAA2_PMD_ERR("FS RAW rule data set failed");
2937 		return -1;
2938 	}
2939 
2940 	(*device_configured) |= local_cfg;
2941 
2942 	return 0;
2943 }
2944 
2945 static inline int
2946 dpaa2_fs_action_supported(enum rte_flow_action_type action)
2947 {
2948 	int i;
2949 
2950 	for (i = 0; i < (int)(sizeof(dpaa2_supported_fs_action_type) /
2951 					sizeof(enum rte_flow_action_type)); i++) {
2952 		if (action == dpaa2_supported_fs_action_type[i])
2953 			return 1;
2954 	}
2955 
2956 	return 0;
2957 }
/* Existing QoS/FS entries that match on IP address(es) must be
 * updated whenever new extract(s) are inserted ahead of the IP
 * address extract(s): the insertion shifts the IP address offsets
 * inside the key, so the stored key/mask bytes have to be moved
 * to the new offsets and the entries re-added to hardware.
 */
2963 static int
2964 dpaa2_flow_entry_update(
2965 	struct dpaa2_dev_priv *priv, uint8_t tc_id)
2966 {
2967 	struct rte_flow *curr = LIST_FIRST(&priv->flows);
2968 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
2969 	int ret;
2970 	int qos_ipsrc_offset = -1, qos_ipdst_offset = -1;
2971 	int fs_ipsrc_offset = -1, fs_ipdst_offset = -1;
2972 	struct dpaa2_key_extract *qos_key_extract =
2973 		&priv->extract.qos_key_extract;
2974 	struct dpaa2_key_extract *tc_key_extract =
2975 		&priv->extract.tc_key_extract[tc_id];
2976 	char ipsrc_key[NH_FLD_IPV6_ADDR_SIZE];
2977 	char ipdst_key[NH_FLD_IPV6_ADDR_SIZE];
2978 	char ipsrc_mask[NH_FLD_IPV6_ADDR_SIZE];
2979 	char ipdst_mask[NH_FLD_IPV6_ADDR_SIZE];
2980 	int extend = -1, extend1, size = -1;
2981 	uint16_t qos_index;
2982 
2983 	while (curr) {
2984 		if (curr->ipaddr_rule.ipaddr_type ==
2985 			FLOW_NONE_IPADDR) {
2986 			curr = LIST_NEXT(curr, next);
2987 			continue;
2988 		}
2989 
2990 		if (curr->ipaddr_rule.ipaddr_type ==
2991 			FLOW_IPV4_ADDR) {
2992 			qos_ipsrc_offset =
2993 				qos_key_extract->key_info.ipv4_src_offset;
2994 			qos_ipdst_offset =
2995 				qos_key_extract->key_info.ipv4_dst_offset;
2996 			fs_ipsrc_offset =
2997 				tc_key_extract->key_info.ipv4_src_offset;
2998 			fs_ipdst_offset =
2999 				tc_key_extract->key_info.ipv4_dst_offset;
3000 			size = NH_FLD_IPV4_ADDR_SIZE;
3001 		} else {
3002 			qos_ipsrc_offset =
3003 				qos_key_extract->key_info.ipv6_src_offset;
3004 			qos_ipdst_offset =
3005 				qos_key_extract->key_info.ipv6_dst_offset;
3006 			fs_ipsrc_offset =
3007 				tc_key_extract->key_info.ipv6_src_offset;
3008 			fs_ipdst_offset =
3009 				tc_key_extract->key_info.ipv6_dst_offset;
3010 			size = NH_FLD_IPV6_ADDR_SIZE;
3011 		}
3012 
3013 		qos_index = curr->tc_id * priv->fs_entries +
3014 			curr->tc_index;
3015 
3016 		dpaa2_flow_qos_entry_log("Before update", curr, qos_index, stdout);
3017 
3018 		if (priv->num_rx_tc > 1) {
3019 			ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW,
3020 					priv->token, &curr->qos_rule);
3021 			if (ret) {
3022 				DPAA2_PMD_ERR("Qos entry remove failed.");
3023 				return -1;
3024 			}
3025 		}
3026 
3027 		extend = -1;
3028 
3029 		if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
3030 			RTE_ASSERT(qos_ipsrc_offset >=
3031 				curr->ipaddr_rule.qos_ipsrc_offset);
3032 			extend1 = qos_ipsrc_offset -
3033 				curr->ipaddr_rule.qos_ipsrc_offset;
3034 			if (extend >= 0)
3035 				RTE_ASSERT(extend == extend1);
3036 			else
3037 				extend = extend1;
3038 
3039 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3040 				(size == NH_FLD_IPV6_ADDR_SIZE));
3041 
3042 			memcpy(ipsrc_key,
3043 				(char *)(size_t)curr->qos_rule.key_iova +
3044 				curr->ipaddr_rule.qos_ipsrc_offset,
3045 				size);
3046 			memset((char *)(size_t)curr->qos_rule.key_iova +
3047 				curr->ipaddr_rule.qos_ipsrc_offset,
3048 				0, size);
3049 
3050 			memcpy(ipsrc_mask,
3051 				(char *)(size_t)curr->qos_rule.mask_iova +
3052 				curr->ipaddr_rule.qos_ipsrc_offset,
3053 				size);
3054 			memset((char *)(size_t)curr->qos_rule.mask_iova +
3055 				curr->ipaddr_rule.qos_ipsrc_offset,
3056 				0, size);
3057 
3058 			curr->ipaddr_rule.qos_ipsrc_offset = qos_ipsrc_offset;
3059 		}
3060 
3061 		if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
3062 			RTE_ASSERT(qos_ipdst_offset >=
3063 				curr->ipaddr_rule.qos_ipdst_offset);
3064 			extend1 = qos_ipdst_offset -
3065 				curr->ipaddr_rule.qos_ipdst_offset;
3066 			if (extend >= 0)
3067 				RTE_ASSERT(extend == extend1);
3068 			else
3069 				extend = extend1;
3070 
3071 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3072 				(size == NH_FLD_IPV6_ADDR_SIZE));
3073 
3074 			memcpy(ipdst_key,
3075 				(char *)(size_t)curr->qos_rule.key_iova +
3076 				curr->ipaddr_rule.qos_ipdst_offset,
3077 				size);
3078 			memset((char *)(size_t)curr->qos_rule.key_iova +
3079 				curr->ipaddr_rule.qos_ipdst_offset,
3080 				0, size);
3081 
3082 			memcpy(ipdst_mask,
3083 				(char *)(size_t)curr->qos_rule.mask_iova +
3084 				curr->ipaddr_rule.qos_ipdst_offset,
3085 				size);
3086 			memset((char *)(size_t)curr->qos_rule.mask_iova +
3087 				curr->ipaddr_rule.qos_ipdst_offset,
3088 				0, size);
3089 
3090 			curr->ipaddr_rule.qos_ipdst_offset = qos_ipdst_offset;
3091 		}
3092 
3093 		if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
3094 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3095 				(size == NH_FLD_IPV6_ADDR_SIZE));
3096 			memcpy((char *)(size_t)curr->qos_rule.key_iova +
3097 				curr->ipaddr_rule.qos_ipsrc_offset,
3098 				ipsrc_key,
3099 				size);
3100 			memcpy((char *)(size_t)curr->qos_rule.mask_iova +
3101 				curr->ipaddr_rule.qos_ipsrc_offset,
3102 				ipsrc_mask,
3103 				size);
3104 		}
3105 		if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
3106 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3107 				(size == NH_FLD_IPV6_ADDR_SIZE));
3108 			memcpy((char *)(size_t)curr->qos_rule.key_iova +
3109 				curr->ipaddr_rule.qos_ipdst_offset,
3110 				ipdst_key,
3111 				size);
3112 			memcpy((char *)(size_t)curr->qos_rule.mask_iova +
3113 				curr->ipaddr_rule.qos_ipdst_offset,
3114 				ipdst_mask,
3115 				size);
3116 		}
3117 
3118 		if (extend >= 0)
3119 			curr->qos_real_key_size += extend;
3120 
3121 		curr->qos_rule.key_size = FIXED_ENTRY_SIZE;
3122 
3123 		dpaa2_flow_qos_entry_log("Start update", curr, qos_index, stdout);
3124 
3125 		if (priv->num_rx_tc > 1) {
3126 			ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
3127 					priv->token, &curr->qos_rule,
3128 					curr->tc_id, qos_index,
3129 					0, 0);
3130 			if (ret) {
3131 				DPAA2_PMD_ERR("Qos entry update failed.");
3132 				return -1;
3133 			}
3134 		}
3135 
3136 		if (!dpaa2_fs_action_supported(curr->action)) {
3137 			curr = LIST_NEXT(curr, next);
3138 			continue;
3139 		}
3140 
3141 		dpaa2_flow_fs_entry_log("Before update", curr, stdout);
3142 		extend = -1;
3143 
3144 		ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW,
3145 				priv->token, curr->tc_id, &curr->fs_rule);
3146 		if (ret) {
3147 			DPAA2_PMD_ERR("FS entry remove failed.");
3148 			return -1;
3149 		}
3150 
3151 		if (curr->ipaddr_rule.fs_ipsrc_offset >= 0 &&
3152 			tc_id == curr->tc_id) {
3153 			RTE_ASSERT(fs_ipsrc_offset >=
3154 				curr->ipaddr_rule.fs_ipsrc_offset);
3155 			extend1 = fs_ipsrc_offset -
3156 				curr->ipaddr_rule.fs_ipsrc_offset;
3157 			if (extend >= 0)
3158 				RTE_ASSERT(extend == extend1);
3159 			else
3160 				extend = extend1;
3161 
3162 			memcpy(ipsrc_key,
3163 				(char *)(size_t)curr->fs_rule.key_iova +
3164 				curr->ipaddr_rule.fs_ipsrc_offset,
3165 				size);
3166 			memset((char *)(size_t)curr->fs_rule.key_iova +
3167 				curr->ipaddr_rule.fs_ipsrc_offset,
3168 				0, size);
3169 
3170 			memcpy(ipsrc_mask,
3171 				(char *)(size_t)curr->fs_rule.mask_iova +
3172 				curr->ipaddr_rule.fs_ipsrc_offset,
3173 				size);
3174 			memset((char *)(size_t)curr->fs_rule.mask_iova +
3175 				curr->ipaddr_rule.fs_ipsrc_offset,
3176 				0, size);
3177 
3178 			curr->ipaddr_rule.fs_ipsrc_offset = fs_ipsrc_offset;
3179 		}
3180 
3181 		if (curr->ipaddr_rule.fs_ipdst_offset >= 0 &&
3182 			tc_id == curr->tc_id) {
3183 			RTE_ASSERT(fs_ipdst_offset >=
3184 				curr->ipaddr_rule.fs_ipdst_offset);
3185 			extend1 = fs_ipdst_offset -
3186 				curr->ipaddr_rule.fs_ipdst_offset;
3187 			if (extend >= 0)
3188 				RTE_ASSERT(extend == extend1);
3189 			else
3190 				extend = extend1;
3191 
3192 			memcpy(ipdst_key,
3193 				(char *)(size_t)curr->fs_rule.key_iova +
3194 				curr->ipaddr_rule.fs_ipdst_offset,
3195 				size);
3196 			memset((char *)(size_t)curr->fs_rule.key_iova +
3197 				curr->ipaddr_rule.fs_ipdst_offset,
3198 				0, size);
3199 
3200 			memcpy(ipdst_mask,
3201 				(char *)(size_t)curr->fs_rule.mask_iova +
3202 				curr->ipaddr_rule.fs_ipdst_offset,
3203 				size);
3204 			memset((char *)(size_t)curr->fs_rule.mask_iova +
3205 				curr->ipaddr_rule.fs_ipdst_offset,
3206 				0, size);
3207 
3208 			curr->ipaddr_rule.fs_ipdst_offset = fs_ipdst_offset;
3209 		}
3210 
3211 		if (curr->ipaddr_rule.fs_ipsrc_offset >= 0) {
3212 			memcpy((char *)(size_t)curr->fs_rule.key_iova +
3213 				curr->ipaddr_rule.fs_ipsrc_offset,
3214 				ipsrc_key,
3215 				size);
3216 			memcpy((char *)(size_t)curr->fs_rule.mask_iova +
3217 				curr->ipaddr_rule.fs_ipsrc_offset,
3218 				ipsrc_mask,
3219 				size);
3220 		}
3221 		if (curr->ipaddr_rule.fs_ipdst_offset >= 0) {
3222 			memcpy((char *)(size_t)curr->fs_rule.key_iova +
3223 				curr->ipaddr_rule.fs_ipdst_offset,
3224 				ipdst_key,
3225 				size);
3226 			memcpy((char *)(size_t)curr->fs_rule.mask_iova +
3227 				curr->ipaddr_rule.fs_ipdst_offset,
3228 				ipdst_mask,
3229 				size);
3230 		}
3231 
3232 		if (extend >= 0)
3233 			curr->fs_real_key_size += extend;
3234 		curr->fs_rule.key_size = FIXED_ENTRY_SIZE;
3235 
3236 		dpaa2_flow_fs_entry_log("Start update", curr, stdout);
3237 
3238 		ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW,
3239 				priv->token, curr->tc_id, curr->tc_index,
3240 				&curr->fs_rule, &curr->action_cfg);
3241 		if (ret) {
3242 			DPAA2_PMD_ERR("FS entry update failed.");
3243 			return -1;
3244 		}
3245 
3246 		curr = LIST_NEXT(curr, next);
3247 	}
3248 
3249 	return 0;
3250 }
3251 
3252 static inline int
3253 dpaa2_flow_verify_attr(
3254 	struct dpaa2_dev_priv *priv,
3255 	const struct rte_flow_attr *attr)
3256 {
3257 	struct rte_flow *curr = LIST_FIRST(&priv->flows);
3258 
3259 	while (curr) {
3260 		if (curr->tc_id == attr->group &&
3261 			curr->tc_index == attr->priority) {
3262 			DPAA2_PMD_ERR(
3263 				"Flow with group %d and priority %d already exists.",
3264 				attr->group, attr->priority);
3265 
3266 			return -1;
3267 		}
3268 		curr = LIST_NEXT(curr, next);
3269 	}
3270 
3271 	return 0;
3272 }
3273 
3274 static inline struct rte_eth_dev *
3275 dpaa2_flow_redirect_dev(struct dpaa2_dev_priv *priv,
3276 	const struct rte_flow_action *action)
3277 {
3278 	const struct rte_flow_action_port_id *port_id;
3279 	int idx = -1;
3280 	struct rte_eth_dev *dest_dev;
3281 
3282 	if (action->type == RTE_FLOW_ACTION_TYPE_PORT_ID) {
3283 		port_id = (const struct rte_flow_action_port_id *)
3284 					action->conf;
3285 		if (!port_id->original)
3286 			idx = port_id->id;
3287 	} else if (action->type == RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT) {
3288 		const struct rte_flow_action_ethdev *ethdev;
3289 
3290 		ethdev = (const struct rte_flow_action_ethdev *)action->conf;
3291 		idx = ethdev->port_id;
3292 	} else {
3293 		return NULL;
3294 	}
3295 
3296 	if (idx >= 0) {
3297 		if (!rte_eth_dev_is_valid_port(idx))
3298 			return NULL;
3299 		if (!rte_pmd_dpaa2_dev_is_dpaa2(idx))
3300 			return NULL;
3301 		dest_dev = &rte_eth_devices[idx];
3302 	} else {
3303 		dest_dev = priv->eth_dev;
3304 	}
3305 
3306 	return dest_dev;
3307 }
3308 
3309 static inline int
3310 dpaa2_flow_verify_action(
3311 	struct dpaa2_dev_priv *priv,
3312 	const struct rte_flow_attr *attr,
3313 	const struct rte_flow_action actions[])
3314 {
3315 	int end_of_list = 0, i, j = 0;
3316 	const struct rte_flow_action_queue *dest_queue;
3317 	const struct rte_flow_action_rss *rss_conf;
3318 	struct dpaa2_queue *rxq;
3319 
3320 	while (!end_of_list) {
3321 		switch (actions[j].type) {
3322 		case RTE_FLOW_ACTION_TYPE_QUEUE:
3323 			dest_queue = (const struct rte_flow_action_queue *)
3324 					(actions[j].conf);
3325 			rxq = priv->rx_vq[dest_queue->index];
3326 			if (attr->group != rxq->tc_index) {
3327 				DPAA2_PMD_ERR(
3328 					"RXQ[%d] does not belong to the group %d",
3329 					dest_queue->index, attr->group);
3330 
3331 				return -1;
3332 			}
3333 			break;
3334 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
3335 		case RTE_FLOW_ACTION_TYPE_PORT_ID:
3336 			if (!dpaa2_flow_redirect_dev(priv, &actions[j])) {
3337 				DPAA2_PMD_ERR("Invalid port id of action");
3338 				return -ENOTSUP;
3339 			}
3340 			break;
3341 		case RTE_FLOW_ACTION_TYPE_RSS:
3342 			rss_conf = (const struct rte_flow_action_rss *)
3343 					(actions[j].conf);
3344 			if (rss_conf->queue_num > priv->dist_queues) {
3345 				DPAA2_PMD_ERR(
3346 					"RSS number exceeds the distribution size");
3347 				return -ENOTSUP;
3348 			}
3349 			for (i = 0; i < (int)rss_conf->queue_num; i++) {
3350 				if (rss_conf->queue[i] >= priv->nb_rx_queues) {
3351 					DPAA2_PMD_ERR(
3352 						"RSS queue index exceeds the number of RXQs");
3353 					return -ENOTSUP;
3354 				}
3355 				rxq = priv->rx_vq[rss_conf->queue[i]];
3356 				if (rxq->tc_index != attr->group) {
3357 					DPAA2_PMD_ERR(
3358 						"Queue/Group combination are not supported");
3359 					return -ENOTSUP;
3360 				}
3361 			}
3362 
3363 			break;
3364 		case RTE_FLOW_ACTION_TYPE_END:
3365 			end_of_list = 1;
3366 			break;
3367 		default:
3368 			DPAA2_PMD_ERR("Invalid action type");
3369 			return -ENOTSUP;
3370 		}
3371 		j++;
3372 	}
3373 
3374 	return 0;
3375 }
3376 
/* Program one rte_flow rule into hardware.
 *
 * Walks the pattern list to build the extract/key configuration, then walks
 * the action list to program the QoS (TC selection) and FS (flow steering)
 * tables, or the RSS distribution, through the DPNI MC interface.
 *
 * @param flow    Rule object whose qos/fs rules and offsets get filled in.
 * @param dev     Ethernet device owning the tables.
 * @param attr    Flow attributes (group = TC id, priority = TC index).
 * @param pattern Match items, terminated by RTE_FLOW_ITEM_TYPE_END.
 * @param actions Actions, terminated by RTE_FLOW_ACTION_TYPE_END.
 * @param error   rte_flow error reporting (passed through to item parsers).
 *
 * @return 0 on success, negative errno style value otherwise.
 */
static int
dpaa2_generic_flow_set(struct rte_flow *flow,
		       struct rte_eth_dev *dev,
		       const struct rte_flow_attr *attr,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[],
		       struct rte_flow_error *error)
{
	const struct rte_flow_action_queue *dest_queue;
	const struct rte_flow_action_rss *rss_conf;
	int is_keycfg_configured = 0, end_of_list = 0;
	int ret = 0, i = 0, j = 0;
	struct dpni_rx_dist_cfg tc_cfg;
	struct dpni_qos_tbl_cfg qos_cfg;
	struct dpni_fs_action_cfg action;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dest_q;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	size_t param;
	struct rte_flow *curr = LIST_FIRST(&priv->flows);
	uint16_t qos_index;
	struct rte_eth_dev *dest_dev;
	struct dpaa2_dev_priv *dest_priv;

	/* Reject duplicate (group, priority) and invalid actions up front. */
	ret = dpaa2_flow_verify_attr(priv, attr);
	if (ret)
		return ret;

	ret = dpaa2_flow_verify_action(priv, attr, actions);
	if (ret)
		return ret;

	/* Parse pattern list to get the matching parameters */
	while (!end_of_list) {
		switch (pattern[i].type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			ret = dpaa2_configure_flow_eth(flow,
					dev, attr, &pattern[i], actions, error,
					&is_keycfg_configured);
			if (ret) {
				DPAA2_PMD_ERR("ETH flow configuration failed!");
				return ret;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			ret = dpaa2_configure_flow_vlan(flow,
					dev, attr, &pattern[i], actions, error,
					&is_keycfg_configured);
			if (ret) {
				DPAA2_PMD_ERR("vLan flow configuration failed!");
				return ret;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ret = dpaa2_configure_flow_generic_ip(flow,
					dev, attr, &pattern[i], actions, error,
					&is_keycfg_configured);
			if (ret) {
				DPAA2_PMD_ERR("IP flow configuration failed!");
				return ret;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP:
			ret = dpaa2_configure_flow_icmp(flow,
					dev, attr, &pattern[i], actions, error,
					&is_keycfg_configured);
			if (ret) {
				DPAA2_PMD_ERR("ICMP flow configuration failed!");
				return ret;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = dpaa2_configure_flow_udp(flow,
					dev, attr, &pattern[i], actions, error,
					&is_keycfg_configured);
			if (ret) {
				DPAA2_PMD_ERR("UDP flow configuration failed!");
				return ret;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			ret = dpaa2_configure_flow_tcp(flow,
					dev, attr, &pattern[i], actions, error,
					&is_keycfg_configured);
			if (ret) {
				DPAA2_PMD_ERR("TCP flow configuration failed!");
				return ret;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_SCTP:
			ret = dpaa2_configure_flow_sctp(flow,
					dev, attr, &pattern[i], actions, error,
					&is_keycfg_configured);
			if (ret) {
				DPAA2_PMD_ERR("SCTP flow configuration failed!");
				return ret;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			ret = dpaa2_configure_flow_gre(flow,
					dev, attr, &pattern[i], actions, error,
					&is_keycfg_configured);
			if (ret) {
				DPAA2_PMD_ERR("GRE flow configuration failed!");
				return ret;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_RAW:
			ret = dpaa2_configure_flow_raw(flow,
						       dev, attr, &pattern[i],
						       actions, error,
						       &is_keycfg_configured);
			if (ret) {
				DPAA2_PMD_ERR("RAW flow configuration failed!");
				return ret;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_END:
			end_of_list = 1;
			break; /*End of List*/
		default:
			DPAA2_PMD_ERR("Invalid action type");
			ret = -ENOTSUP;
			break;
		}
		i++;
	}

	/* Let's parse action on matching traffic */
	end_of_list = 0;
	while (!end_of_list) {
		switch (actions[j].type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE:
		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
		case RTE_FLOW_ACTION_TYPE_PORT_ID:
			memset(&action, 0, sizeof(struct dpni_fs_action_cfg));
			flow->action = actions[j].type;

			if (actions[j].type == RTE_FLOW_ACTION_TYPE_QUEUE) {
				/* Steer to a local RX queue. */
				dest_queue = (const struct rte_flow_action_queue *)
								(actions[j].conf);
				dest_q = priv->rx_vq[dest_queue->index];
				action.flow_id = dest_q->flow_id;
			} else {
				/* Redirect to another DPNI's TX queue 0. */
				dest_dev = dpaa2_flow_redirect_dev(priv,
								   &actions[j]);
				if (!dest_dev) {
					DPAA2_PMD_ERR("Invalid destination device to redirect!");
					return -1;
				}

				dest_priv = dest_dev->data->dev_private;
				dest_q = dest_priv->tx_vq[0];
				action.options =
						DPNI_FS_OPT_REDIRECT_TO_DPNI_TX;
				action.redirect_obj_token = dest_priv->token;
				action.flow_id = dest_q->flow_id;
			}

			/* Configure FS table first*/
			if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
				dpaa2_flow_fs_table_extracts_log(priv,
							flow->tc_id, stdout);
				if (dpkg_prepare_key_cfg(
				&priv->extract.tc_key_extract[flow->tc_id].dpkg,
				(uint8_t *)(size_t)priv->extract
				.tc_extract_param[flow->tc_id]) < 0) {
					DPAA2_PMD_ERR(
					"Unable to prepare extract parameters");
					return -1;
				}

				/* Hash distribution is disabled on this TC
				 * before FS distribution is (re)enabled.
				 */
				memset(&tc_cfg, 0,
					sizeof(struct dpni_rx_dist_cfg));
				tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc;
				tc_cfg.key_cfg_iova =
					(uint64_t)priv->extract.tc_extract_param[flow->tc_id];
				tc_cfg.tc = flow->tc_id;
				tc_cfg.enable = false;
				ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW,
						priv->token, &tc_cfg);
				if (ret < 0) {
					DPAA2_PMD_ERR(
						"TC hash cannot be disabled.(%d)",
						ret);
					return -1;
				}
				tc_cfg.enable = true;
				tc_cfg.fs_miss_flow_id = dpaa2_flow_miss_flow_id;
				ret = dpni_set_rx_fs_dist(dpni, CMD_PRI_LOW,
							 priv->token, &tc_cfg);
				if (ret < 0) {
					DPAA2_PMD_ERR(
						"TC distribution cannot be configured.(%d)",
						ret);
					return -1;
				}
			}

			/* Configure QoS table then.*/
			if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
				dpaa2_flow_qos_table_extracts_log(priv, stdout);
				if (dpkg_prepare_key_cfg(
					&priv->extract.qos_key_extract.dpkg,
					(uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
					DPAA2_PMD_ERR(
						"Unable to prepare extract parameters");
					return -1;
				}

				memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
				qos_cfg.discard_on_miss = false;
				qos_cfg.default_tc = 0;
				qos_cfg.keep_entries = true;
				qos_cfg.key_cfg_iova =
					(size_t)priv->extract.qos_extract_param;
				/* QoS table is effective for multiple TCs. */
				if (priv->num_rx_tc > 1) {
					ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
						priv->token, &qos_cfg);
					if (ret < 0) {
						DPAA2_PMD_ERR(
						"RSS QoS table can not be configured(%d)",
							ret);
						return -1;
					}
				}
			}

			/* The IP src/dst fields are kept at the tail of the
			 * key, so the effective key size ends right after the
			 * larger of the two offsets.
			 */
			flow->qos_real_key_size = priv->extract
				.qos_key_extract.key_info.key_total_size;
			if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR) {
				if (flow->ipaddr_rule.qos_ipdst_offset >=
					flow->ipaddr_rule.qos_ipsrc_offset) {
					flow->qos_real_key_size =
						flow->ipaddr_rule.qos_ipdst_offset +
						NH_FLD_IPV4_ADDR_SIZE;
				} else {
					flow->qos_real_key_size =
						flow->ipaddr_rule.qos_ipsrc_offset +
						NH_FLD_IPV4_ADDR_SIZE;
				}
			} else if (flow->ipaddr_rule.ipaddr_type ==
				FLOW_IPV6_ADDR) {
				if (flow->ipaddr_rule.qos_ipdst_offset >=
					flow->ipaddr_rule.qos_ipsrc_offset) {
					flow->qos_real_key_size =
						flow->ipaddr_rule.qos_ipdst_offset +
						NH_FLD_IPV6_ADDR_SIZE;
				} else {
					flow->qos_real_key_size =
						flow->ipaddr_rule.qos_ipsrc_offset +
						NH_FLD_IPV6_ADDR_SIZE;
				}
			}

			/* QoS entry added is only effective for multiple TCs.*/
			if (priv->num_rx_tc > 1) {
				/* Each TC gets fs_entries QoS slots. */
				qos_index = flow->tc_id * priv->fs_entries +
					flow->tc_index;
				if (qos_index >= priv->qos_entries) {
					DPAA2_PMD_ERR("QoS table with %d entries full",
						priv->qos_entries);
					return -1;
				}
				flow->qos_rule.key_size = FIXED_ENTRY_SIZE;

				dpaa2_flow_qos_entry_log("Start add", flow,
							qos_index, stdout);

				ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
						priv->token, &flow->qos_rule,
						flow->tc_id, qos_index,
						0, 0);
				if (ret < 0) {
					DPAA2_PMD_ERR(
						"Error in adding entry to QoS table(%d)", ret);
					return ret;
				}
			}

			if (flow->tc_index >= priv->fs_entries) {
				DPAA2_PMD_ERR("FS table with %d entries full",
					priv->fs_entries);
				return -1;
			}

			/* Same tail-of-key sizing for the FS rule. */
			flow->fs_real_key_size =
				priv->extract.tc_key_extract[flow->tc_id]
				.key_info.key_total_size;

			if (flow->ipaddr_rule.ipaddr_type ==
				FLOW_IPV4_ADDR) {
				if (flow->ipaddr_rule.fs_ipdst_offset >=
					flow->ipaddr_rule.fs_ipsrc_offset) {
					flow->fs_real_key_size =
						flow->ipaddr_rule.fs_ipdst_offset +
						NH_FLD_IPV4_ADDR_SIZE;
				} else {
					flow->fs_real_key_size =
						flow->ipaddr_rule.fs_ipsrc_offset +
						NH_FLD_IPV4_ADDR_SIZE;
				}
			} else if (flow->ipaddr_rule.ipaddr_type ==
				FLOW_IPV6_ADDR) {
				if (flow->ipaddr_rule.fs_ipdst_offset >=
					flow->ipaddr_rule.fs_ipsrc_offset) {
					flow->fs_real_key_size =
						flow->ipaddr_rule.fs_ipdst_offset +
						NH_FLD_IPV6_ADDR_SIZE;
				} else {
					flow->fs_real_key_size =
						flow->ipaddr_rule.fs_ipsrc_offset +
						NH_FLD_IPV6_ADDR_SIZE;
				}
			}

			flow->fs_rule.key_size = FIXED_ENTRY_SIZE;

			dpaa2_flow_fs_entry_log("Start add", flow, stdout);

			ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token,
						flow->tc_id, flow->tc_index,
						&flow->fs_rule, &action);
			if (ret < 0) {
				DPAA2_PMD_ERR(
				"Error in adding entry to FS table(%d)", ret);
				return ret;
			}
			/* Remember the action so destroy/update can replay it. */
			memcpy(&flow->action_cfg, &action,
				sizeof(struct dpni_fs_action_cfg));
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			rss_conf = (const struct rte_flow_action_rss *)(actions[j].conf);

			flow->action = RTE_FLOW_ACTION_TYPE_RSS;
			ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types,
					&priv->extract.tc_key_extract[flow->tc_id].dpkg);
			if (ret < 0) {
				DPAA2_PMD_ERR(
				"unable to set flow distribution.please check queue config");
				return ret;
			}

			/* Allocate DMA'ble memory to write the rules */
			param = (size_t)rte_malloc(NULL, 256, 64);
			if (!param) {
				DPAA2_PMD_ERR("Memory allocation failure");
				return -1;
			}

			if (dpkg_prepare_key_cfg(
				&priv->extract.tc_key_extract[flow->tc_id].dpkg,
				(uint8_t *)param) < 0) {
				DPAA2_PMD_ERR(
				"Unable to prepare extract parameters");
				rte_free((void *)param);
				return -1;
			}

			memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));
			tc_cfg.dist_size = rss_conf->queue_num;
			tc_cfg.key_cfg_iova = (size_t)param;
			tc_cfg.enable = true;
			tc_cfg.tc = flow->tc_id;
			ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW,
						 priv->token, &tc_cfg);
			if (ret < 0) {
				DPAA2_PMD_ERR(
					"RSS TC table cannot be configured: %d",
					ret);
				rte_free((void *)param);
				return -1;
			}

			/* key config was consumed by MC; scratch buffer can go. */
			rte_free((void *)param);
			if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
				if (dpkg_prepare_key_cfg(
					&priv->extract.qos_key_extract.dpkg,
					(uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
					DPAA2_PMD_ERR(
					"Unable to prepare extract parameters");
					return -1;
				}
				memset(&qos_cfg, 0,
					sizeof(struct dpni_qos_tbl_cfg));
				qos_cfg.discard_on_miss = true;
				qos_cfg.keep_entries = true;
				qos_cfg.key_cfg_iova =
					(size_t)priv->extract.qos_extract_param;
				ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
							 priv->token, &qos_cfg);
				if (ret < 0) {
					DPAA2_PMD_ERR(
					"RSS QoS dist can't be configured-%d",
					ret);
					return -1;
				}
			}

			/* Add Rule into QoS table */
			qos_index = flow->tc_id * priv->fs_entries +
				flow->tc_index;
			if (qos_index >= priv->qos_entries) {
				DPAA2_PMD_ERR("QoS table with %d entries full",
					priv->qos_entries);
				return -1;
			}

			flow->qos_real_key_size =
			  priv->extract.qos_key_extract.key_info.key_total_size;
			flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
			ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token,
						&flow->qos_rule, flow->tc_id,
						qos_index, 0, 0);
			if (ret < 0) {
				DPAA2_PMD_ERR(
				"Error in entry addition in QoS table(%d)",
				ret);
				return ret;
			}
			break;
		case RTE_FLOW_ACTION_TYPE_END:
			end_of_list = 1;
			break;
		default:
			DPAA2_PMD_ERR("Invalid action type");
			ret = -ENOTSUP;
			break;
		}
		j++;
	}

	if (!ret) {
		/* Table layout changed: existing entries must be re-aligned
		 * to the new key offsets before this rule becomes visible.
		 */
		if (is_keycfg_configured &
			(DPAA2_QOS_TABLE_RECONFIGURE |
			DPAA2_FS_TABLE_RECONFIGURE)) {
			ret = dpaa2_flow_entry_update(priv, flow->tc_id);
			if (ret) {
				DPAA2_PMD_ERR("Flow entry update failed.");

				return -1;
			}
		}
		/* New rules are inserted. */
		if (!curr) {
			LIST_INSERT_HEAD(&priv->flows, flow, next);
		} else {
			/* Append at the tail to keep insertion order. */
			while (LIST_NEXT(curr, next))
				curr = LIST_NEXT(curr, next);
			LIST_INSERT_AFTER(curr, flow, next);
		}
	}
	return ret;
}
3833 
3834 static inline int
3835 dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr,
3836 		      const struct rte_flow_attr *attr)
3837 {
3838 	int ret = 0;
3839 
3840 	if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) {
3841 		DPAA2_PMD_ERR("Priority group is out of range");
3842 		ret = -ENOTSUP;
3843 	}
3844 	if (unlikely(attr->priority >= dpni_attr->fs_entries)) {
3845 		DPAA2_PMD_ERR("Priority within the group is out of range");
3846 		ret = -ENOTSUP;
3847 	}
3848 	if (unlikely(attr->egress)) {
3849 		DPAA2_PMD_ERR(
3850 			"Flow configuration is not supported on egress side");
3851 		ret = -ENOTSUP;
3852 	}
3853 	if (unlikely(!attr->ingress)) {
3854 		DPAA2_PMD_ERR("Ingress flag must be configured");
3855 		ret = -EINVAL;
3856 	}
3857 	return ret;
3858 }
3859 
3860 static inline int
3861 dpaa2_dev_verify_patterns(const struct rte_flow_item pattern[])
3862 {
3863 	unsigned int i, j, is_found = 0;
3864 	int ret = 0;
3865 
3866 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3867 		for (i = 0; i < RTE_DIM(dpaa2_supported_pattern_type); i++) {
3868 			if (dpaa2_supported_pattern_type[i]
3869 					== pattern[j].type) {
3870 				is_found = 1;
3871 				break;
3872 			}
3873 		}
3874 		if (!is_found) {
3875 			ret = -ENOTSUP;
3876 			break;
3877 		}
3878 	}
3879 	/* Lets verify other combinations of given pattern rules */
3880 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3881 		if (!pattern[j].spec) {
3882 			ret = -EINVAL;
3883 			break;
3884 		}
3885 	}
3886 
3887 	return ret;
3888 }
3889 
3890 static inline int
3891 dpaa2_dev_verify_actions(const struct rte_flow_action actions[])
3892 {
3893 	unsigned int i, j, is_found = 0;
3894 	int ret = 0;
3895 
3896 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3897 		for (i = 0; i < RTE_DIM(dpaa2_supported_action_type); i++) {
3898 			if (dpaa2_supported_action_type[i] == actions[j].type) {
3899 				is_found = 1;
3900 				break;
3901 			}
3902 		}
3903 		if (!is_found) {
3904 			ret = -ENOTSUP;
3905 			break;
3906 		}
3907 	}
3908 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3909 		if (actions[j].type != RTE_FLOW_ACTION_TYPE_DROP &&
3910 				!actions[j].conf)
3911 			ret = -EINVAL;
3912 	}
3913 	return ret;
3914 }
3915 
3916 static
3917 int dpaa2_flow_validate(struct rte_eth_dev *dev,
3918 			const struct rte_flow_attr *flow_attr,
3919 			const struct rte_flow_item pattern[],
3920 			const struct rte_flow_action actions[],
3921 			struct rte_flow_error *error)
3922 {
3923 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3924 	struct dpni_attr dpni_attr;
3925 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3926 	uint16_t token = priv->token;
3927 	int ret = 0;
3928 
3929 	memset(&dpni_attr, 0, sizeof(struct dpni_attr));
3930 	ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr);
3931 	if (ret < 0) {
3932 		DPAA2_PMD_ERR(
3933 			"Failure to get dpni@%p attribute, err code  %d",
3934 			dpni, ret);
3935 		rte_flow_error_set(error, EPERM,
3936 			   RTE_FLOW_ERROR_TYPE_ATTR,
3937 			   flow_attr, "invalid");
3938 		return ret;
3939 	}
3940 
3941 	/* Verify input attributes */
3942 	ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr);
3943 	if (ret < 0) {
3944 		DPAA2_PMD_ERR(
3945 			"Invalid attributes are given");
3946 		rte_flow_error_set(error, EPERM,
3947 			   RTE_FLOW_ERROR_TYPE_ATTR,
3948 			   flow_attr, "invalid");
3949 		goto not_valid_params;
3950 	}
3951 	/* Verify input pattern list */
3952 	ret = dpaa2_dev_verify_patterns(pattern);
3953 	if (ret < 0) {
3954 		DPAA2_PMD_ERR(
3955 			"Invalid pattern list is given");
3956 		rte_flow_error_set(error, EPERM,
3957 			   RTE_FLOW_ERROR_TYPE_ITEM,
3958 			   pattern, "invalid");
3959 		goto not_valid_params;
3960 	}
3961 	/* Verify input action list */
3962 	ret = dpaa2_dev_verify_actions(actions);
3963 	if (ret < 0) {
3964 		DPAA2_PMD_ERR(
3965 			"Invalid action list is given");
3966 		rte_flow_error_set(error, EPERM,
3967 			   RTE_FLOW_ERROR_TYPE_ACTION,
3968 			   actions, "invalid");
3969 		goto not_valid_params;
3970 	}
3971 not_valid_params:
3972 	return ret;
3973 }
3974 
3975 static
3976 struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev,
3977 				   const struct rte_flow_attr *attr,
3978 				   const struct rte_flow_item pattern[],
3979 				   const struct rte_flow_action actions[],
3980 				   struct rte_flow_error *error)
3981 {
3982 	struct rte_flow *flow = NULL;
3983 	size_t key_iova = 0, mask_iova = 0;
3984 	int ret;
3985 
3986 	dpaa2_flow_control_log =
3987 		getenv("DPAA2_FLOW_CONTROL_LOG");
3988 
3989 	if (getenv("DPAA2_FLOW_CONTROL_MISS_FLOW")) {
3990 		struct dpaa2_dev_priv *priv = dev->data->dev_private;
3991 
3992 		dpaa2_flow_miss_flow_id =
3993 			atoi(getenv("DPAA2_FLOW_CONTROL_MISS_FLOW"));
3994 		if (dpaa2_flow_miss_flow_id >= priv->dist_queues) {
3995 			DPAA2_PMD_ERR(
3996 				"The missed flow ID %d exceeds the max flow ID %d",
3997 				dpaa2_flow_miss_flow_id,
3998 				priv->dist_queues - 1);
3999 			return NULL;
4000 		}
4001 	}
4002 
4003 	flow = rte_zmalloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE);
4004 	if (!flow) {
4005 		DPAA2_PMD_ERR("Failure to allocate memory for flow");
4006 		goto mem_failure;
4007 	}
4008 	/* Allocate DMA'ble memory to write the rules */
4009 	key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
4010 	if (!key_iova) {
4011 		DPAA2_PMD_ERR(
4012 			"Memory allocation failure for rule configuration");
4013 		goto mem_failure;
4014 	}
4015 	mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
4016 	if (!mask_iova) {
4017 		DPAA2_PMD_ERR(
4018 			"Memory allocation failure for rule configuration");
4019 		goto mem_failure;
4020 	}
4021 
4022 	flow->qos_rule.key_iova = key_iova;
4023 	flow->qos_rule.mask_iova = mask_iova;
4024 
4025 	/* Allocate DMA'ble memory to write the rules */
4026 	key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
4027 	if (!key_iova) {
4028 		DPAA2_PMD_ERR(
4029 			"Memory allocation failure for rule configuration");
4030 		goto mem_failure;
4031 	}
4032 	mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
4033 	if (!mask_iova) {
4034 		DPAA2_PMD_ERR(
4035 			"Memory allocation failure for rule configuration");
4036 		goto mem_failure;
4037 	}
4038 
4039 	flow->fs_rule.key_iova = key_iova;
4040 	flow->fs_rule.mask_iova = mask_iova;
4041 
4042 	flow->ipaddr_rule.ipaddr_type = FLOW_NONE_IPADDR;
4043 	flow->ipaddr_rule.qos_ipsrc_offset =
4044 		IP_ADDRESS_OFFSET_INVALID;
4045 	flow->ipaddr_rule.qos_ipdst_offset =
4046 		IP_ADDRESS_OFFSET_INVALID;
4047 	flow->ipaddr_rule.fs_ipsrc_offset =
4048 		IP_ADDRESS_OFFSET_INVALID;
4049 	flow->ipaddr_rule.fs_ipdst_offset =
4050 		IP_ADDRESS_OFFSET_INVALID;
4051 
4052 	ret = dpaa2_generic_flow_set(flow, dev, attr, pattern,
4053 			actions, error);
4054 	if (ret < 0) {
4055 		if (error && error->type > RTE_FLOW_ERROR_TYPE_ACTION)
4056 			rte_flow_error_set(error, EPERM,
4057 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4058 					attr, "unknown");
4059 		DPAA2_PMD_ERR("Failure to create flow, return code (%d)", ret);
4060 		goto creation_error;
4061 	}
4062 
4063 	return flow;
4064 mem_failure:
4065 	rte_flow_error_set(error, EPERM,
4066 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4067 			   NULL, "memory alloc");
4068 creation_error:
4069 	rte_free((void *)flow);
4070 	rte_free((void *)key_iova);
4071 	rte_free((void *)mask_iova);
4072 
4073 	return NULL;
4074 }
4075 
4076 static
4077 int dpaa2_flow_destroy(struct rte_eth_dev *dev,
4078 		       struct rte_flow *flow,
4079 		       struct rte_flow_error *error)
4080 {
4081 	int ret = 0;
4082 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4083 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
4084 
4085 	switch (flow->action) {
4086 	case RTE_FLOW_ACTION_TYPE_QUEUE:
4087 	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
4088 	case RTE_FLOW_ACTION_TYPE_PORT_ID:
4089 		if (priv->num_rx_tc > 1) {
4090 			/* Remove entry from QoS table first */
4091 			ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
4092 					&flow->qos_rule);
4093 			if (ret < 0) {
4094 				DPAA2_PMD_ERR(
4095 					"Error in removing entry from QoS table(%d)", ret);
4096 				goto error;
4097 			}
4098 		}
4099 
4100 		/* Then remove entry from FS table */
4101 		ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token,
4102 					   flow->tc_id, &flow->fs_rule);
4103 		if (ret < 0) {
4104 			DPAA2_PMD_ERR(
4105 				"Error in removing entry from FS table(%d)", ret);
4106 			goto error;
4107 		}
4108 		break;
4109 	case RTE_FLOW_ACTION_TYPE_RSS:
4110 		if (priv->num_rx_tc > 1) {
4111 			ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
4112 					&flow->qos_rule);
4113 			if (ret < 0) {
4114 				DPAA2_PMD_ERR(
4115 					"Error in entry addition in QoS table(%d)", ret);
4116 				goto error;
4117 			}
4118 		}
4119 		break;
4120 	default:
4121 		DPAA2_PMD_ERR(
4122 		"Action type (%d) is not supported", flow->action);
4123 		ret = -ENOTSUP;
4124 		break;
4125 	}
4126 
4127 	LIST_REMOVE(flow, next);
4128 	rte_free((void *)(size_t)flow->qos_rule.key_iova);
4129 	rte_free((void *)(size_t)flow->qos_rule.mask_iova);
4130 	rte_free((void *)(size_t)flow->fs_rule.key_iova);
4131 	rte_free((void *)(size_t)flow->fs_rule.mask_iova);
4132 	/* Now free the flow */
4133 	rte_free(flow);
4134 
4135 error:
4136 	if (ret)
4137 		rte_flow_error_set(error, EPERM,
4138 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4139 				   NULL, "unknown");
4140 	return ret;
4141 }
4142 
4143 /**
4144  * Destroy user-configured flow rules.
4145  *
 * This function skips internal flow rules.
4147  *
4148  * @see rte_flow_flush()
4149  * @see rte_flow_ops
4150  */
4151 static int
4152 dpaa2_flow_flush(struct rte_eth_dev *dev,
4153 		struct rte_flow_error *error)
4154 {
4155 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4156 	struct rte_flow *flow = LIST_FIRST(&priv->flows);
4157 
4158 	while (flow) {
4159 		struct rte_flow *next = LIST_NEXT(flow, next);
4160 
4161 		dpaa2_flow_destroy(dev, flow, error);
4162 		flow = next;
4163 	}
4164 	return 0;
4165 }
4166 
/* rte_flow .query callback: per-flow statistics are not implemented by
 * this PMD; the callback is a stub that always reports success.
 */
static int
dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused,
		struct rte_flow *flow __rte_unused,
		const struct rte_flow_action *actions __rte_unused,
		void *data __rte_unused,
		struct rte_flow_error *error __rte_unused)
{
	return 0;
}
4176 
4177 /**
4178  * Clean up all flow rules.
4179  *
4180  * Unlike dpaa2_flow_flush(), this function takes care of all remaining flow
4181  * rules regardless of whether they are internal or user-configured.
4182  *
 * @param dev
 *   Pointer to the ethernet device structure.
4185  */
4186 void
4187 dpaa2_flow_clean(struct rte_eth_dev *dev)
4188 {
4189 	struct rte_flow *flow;
4190 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4191 
4192 	while ((flow = LIST_FIRST(&priv->flows)))
4193 		dpaa2_flow_destroy(dev, flow, NULL);
4194 }
4195 
/* Flow operations table handed to the generic rte_flow layer. */
const struct rte_flow_ops dpaa2_flow_ops = {
	.create	= dpaa2_flow_create,
	.validate = dpaa2_flow_validate,
	.destroy = dpaa2_flow_destroy,
	.flush	= dpaa2_flow_flush,
	.query	= dpaa2_flow_query,
};
4203