xref: /dpdk/drivers/net/dpaa2/dpaa2_flow.c (revision f5ed2ea0ce8303592d16a3aa3fa60a7c0c4c2d5f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2020 NXP
3  */
4 
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 
13 #include <rte_ethdev.h>
14 #include <rte_log.h>
15 #include <rte_malloc.h>
16 #include <rte_flow_driver.h>
17 #include <rte_tailq.h>
18 
19 #include <fsl_dpni.h>
20 #include <fsl_dpkg.h>
21 
22 #include <dpaa2_ethdev.h>
23 #include <dpaa2_pmd_logs.h>
24 
25 /* Workaround to discriminate UDP/TCP/SCTP flows
26  * by the next-protocol field of the L3 header,
27  * because MC/WRIOP cannot identify the L4 protocol
28  * from the L4 ports alone.
29  */
30 int mc_l4_port_identification;
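/*
 * Illustrative consequence (see dpaa2_configure_flow_udp() and the other
 * L4 handlers): while mc_l4_port_identification is zero, a rule such as
 *   flow create 0 ingress pattern ipv4 / udp / end actions queue index 1 / end
 * additionally installs an IP next-protocol match (IPPROTO_UDP) rather than
 * relying on the L4 ports alone.
 */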
31 
32 static char *dpaa2_flow_control_log;
33 
34 #define FIXED_ENTRY_SIZE 54
35 
36 enum flow_rule_ipaddr_type {
37 	FLOW_NONE_IPADDR,
38 	FLOW_IPV4_ADDR,
39 	FLOW_IPV6_ADDR
40 };
41 
42 struct flow_rule_ipaddr {
43 	enum flow_rule_ipaddr_type ipaddr_type;
44 	int qos_ipsrc_offset;
45 	int qos_ipdst_offset;
46 	int fs_ipsrc_offset;
47 	int fs_ipdst_offset;
48 };
49 
50 struct rte_flow {
51 	LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
52 	struct dpni_rule_cfg qos_rule;
53 	struct dpni_rule_cfg fs_rule;
54 	uint8_t qos_real_key_size;
55 	uint8_t fs_real_key_size;
56 	uint8_t tc_id; /**< Traffic Class ID. */
57 	uint8_t tc_index; /**< Index within this Traffic Class. */
58 	enum rte_flow_action_type action;
59 	uint16_t flow_id;
60 	/* Special for IP address to specify the offset
61 	 * in key/mask.
62 	 */
63 	struct flow_rule_ipaddr ipaddr_rule;
64 	struct dpni_fs_action_cfg action_cfg;
65 };
66 
67 static const
68 enum rte_flow_item_type dpaa2_supported_pattern_type[] = {
69 	RTE_FLOW_ITEM_TYPE_END,
70 	RTE_FLOW_ITEM_TYPE_ETH,
71 	RTE_FLOW_ITEM_TYPE_VLAN,
72 	RTE_FLOW_ITEM_TYPE_IPV4,
73 	RTE_FLOW_ITEM_TYPE_IPV6,
74 	RTE_FLOW_ITEM_TYPE_ICMP,
75 	RTE_FLOW_ITEM_TYPE_UDP,
76 	RTE_FLOW_ITEM_TYPE_TCP,
77 	RTE_FLOW_ITEM_TYPE_SCTP,
78 	RTE_FLOW_ITEM_TYPE_GRE,
79 };
80 
81 static const
82 enum rte_flow_action_type dpaa2_supported_action_type[] = {
83 	RTE_FLOW_ACTION_TYPE_END,
84 	RTE_FLOW_ACTION_TYPE_QUEUE,
85 	RTE_FLOW_ACTION_TYPE_RSS
86 };
87 
88 /* Max of enum rte_flow_item_type + 1, used as a generic item type for both IPv4 and IPv6 */
89 #define DPAA2_FLOW_ITEM_TYPE_GENERIC_IP (RTE_FLOW_ITEM_TYPE_META + 1)
90 
91 enum rte_filter_type dpaa2_filter_type = RTE_ETH_FILTER_NONE;
92 
93 #ifndef __cplusplus
94 static const struct rte_flow_item_eth dpaa2_flow_item_eth_mask = {
95 	.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
96 	.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
97 	.type = RTE_BE16(0xffff),
98 };
99 
100 static const struct rte_flow_item_vlan dpaa2_flow_item_vlan_mask = {
101 	.tci = RTE_BE16(0xffff),
102 };
103 
104 static const struct rte_flow_item_ipv4 dpaa2_flow_item_ipv4_mask = {
105 	.hdr.src_addr = RTE_BE32(0xffffffff),
106 	.hdr.dst_addr = RTE_BE32(0xffffffff),
107 	.hdr.next_proto_id = 0xff,
108 };
109 
110 static const struct rte_flow_item_ipv6 dpaa2_flow_item_ipv6_mask = {
111 	.hdr = {
112 		.src_addr =
113 			"\xff\xff\xff\xff\xff\xff\xff\xff"
114 			"\xff\xff\xff\xff\xff\xff\xff\xff",
115 		.dst_addr =
116 			"\xff\xff\xff\xff\xff\xff\xff\xff"
117 			"\xff\xff\xff\xff\xff\xff\xff\xff",
118 		.proto = 0xff
119 	},
120 };
121 
122 static const struct rte_flow_item_icmp dpaa2_flow_item_icmp_mask = {
123 	.hdr.icmp_type = 0xff,
124 	.hdr.icmp_code = 0xff,
125 };
126 
127 static const struct rte_flow_item_udp dpaa2_flow_item_udp_mask = {
128 	.hdr = {
129 		.src_port = RTE_BE16(0xffff),
130 		.dst_port = RTE_BE16(0xffff),
131 	},
132 };
133 
134 static const struct rte_flow_item_tcp dpaa2_flow_item_tcp_mask = {
135 	.hdr = {
136 		.src_port = RTE_BE16(0xffff),
137 		.dst_port = RTE_BE16(0xffff),
138 	},
139 };
140 
141 static const struct rte_flow_item_sctp dpaa2_flow_item_sctp_mask = {
142 	.hdr = {
143 		.src_port = RTE_BE16(0xffff),
144 		.dst_port = RTE_BE16(0xffff),
145 	},
146 };
147 
148 static const struct rte_flow_item_gre dpaa2_flow_item_gre_mask = {
149 	.protocol = RTE_BE16(0xffff),
150 };
151 
152 #endif
153 
154 static inline void dpaa2_prot_field_string(
155 	enum net_prot prot, uint32_t field,
156 	char *string)
157 {
158 	if (!dpaa2_flow_control_log)
159 		return;
160 
161 	if (prot == NET_PROT_ETH) {
162 		strcpy(string, "eth");
163 		if (field == NH_FLD_ETH_DA)
164 			strcat(string, ".dst");
165 		else if (field == NH_FLD_ETH_SA)
166 			strcat(string, ".src");
167 		else if (field == NH_FLD_ETH_TYPE)
168 			strcat(string, ".type");
169 		else
170 			strcat(string, ".unknown field");
171 	} else if (prot == NET_PROT_VLAN) {
172 		strcpy(string, "vlan");
173 		if (field == NH_FLD_VLAN_TCI)
174 			strcat(string, ".tci");
175 		else
176 			strcat(string, ".unknown field");
177 	} else if (prot == NET_PROT_IP) {
178 		strcpy(string, "ip");
179 		if (field == NH_FLD_IP_SRC)
180 			strcat(string, ".src");
181 		else if (field == NH_FLD_IP_DST)
182 			strcat(string, ".dst");
183 		else if (field == NH_FLD_IP_PROTO)
184 			strcat(string, ".proto");
185 		else
186 			strcat(string, ".unknown field");
187 	} else if (prot == NET_PROT_TCP) {
188 		strcpy(string, "tcp");
189 		if (field == NH_FLD_TCP_PORT_SRC)
190 			strcat(string, ".src");
191 		else if (field == NH_FLD_TCP_PORT_DST)
192 			strcat(string, ".dst");
193 		else
194 			strcat(string, ".unknown field");
195 	} else if (prot == NET_PROT_UDP) {
196 		strcpy(string, "udp");
197 		if (field == NH_FLD_UDP_PORT_SRC)
198 			strcat(string, ".src");
199 		else if (field == NH_FLD_UDP_PORT_DST)
200 			strcat(string, ".dst");
201 		else
202 			strcat(string, ".unknown field");
203 	} else if (prot == NET_PROT_ICMP) {
204 		strcpy(string, "icmp");
205 		if (field == NH_FLD_ICMP_TYPE)
206 			strcat(string, ".type");
207 		else if (field == NH_FLD_ICMP_CODE)
208 			strcat(string, ".code");
209 		else
210 			strcat(string, ".unknown field");
211 	} else if (prot == NET_PROT_SCTP) {
212 		strcpy(string, "sctp");
213 		if (field == NH_FLD_SCTP_PORT_SRC)
214 			strcat(string, ".src");
215 		else if (field == NH_FLD_SCTP_PORT_DST)
216 			strcat(string, ".dst");
217 		else
218 			strcat(string, ".unknown field");
219 	} else if (prot == NET_PROT_GRE) {
220 		strcpy(string, "gre");
221 		if (field == NH_FLD_GRE_TYPE)
222 			strcat(string, ".type");
223 		else
224 			strcat(string, ".unknown field");
225 	} else {
226 		strcpy(string, "unknown protocol");
227 	}
228 }
229 
230 static inline void dpaa2_flow_qos_table_extracts_log(
231 	const struct dpaa2_dev_priv *priv)
232 {
233 	int idx;
234 	char string[32];
235 
236 	if (!dpaa2_flow_control_log)
237 		return;
238 
239 	printf("Setup QoS table: number of extracts: %d\r\n",
240 			priv->extract.qos_key_extract.dpkg.num_extracts);
241 	for (idx = 0; idx < priv->extract.qos_key_extract.dpkg.num_extracts;
242 		idx++) {
243 		dpaa2_prot_field_string(priv->extract.qos_key_extract.dpkg
244 			.extracts[idx].extract.from_hdr.prot,
245 			priv->extract.qos_key_extract.dpkg.extracts[idx]
246 			.extract.from_hdr.field,
247 			string);
248 		printf("%s", string);
249 		if ((idx + 1) < priv->extract.qos_key_extract.dpkg.num_extracts)
250 			printf(" / ");
251 	}
252 	printf("\r\n");
253 }
254 
255 static inline void dpaa2_flow_fs_table_extracts_log(
256 	const struct dpaa2_dev_priv *priv, int tc_id)
257 {
258 	int idx;
259 	char string[32];
260 
261 	if (!dpaa2_flow_control_log)
262 		return;
263 
264 	printf("Setup FS table: number of extracts of TC[%d]: %d\r\n",
265 			tc_id, priv->extract.tc_key_extract[tc_id]
266 			.dpkg.num_extracts);
267 	for (idx = 0; idx < priv->extract.tc_key_extract[tc_id]
268 		.dpkg.num_extracts; idx++) {
269 		dpaa2_prot_field_string(priv->extract.tc_key_extract[tc_id]
270 			.dpkg.extracts[idx].extract.from_hdr.prot,
271 			priv->extract.tc_key_extract[tc_id].dpkg.extracts[idx]
272 			.extract.from_hdr.field,
273 			string);
274 		printf("%s", string);
275 		if ((idx + 1) < priv->extract.tc_key_extract[tc_id]
276 			.dpkg.num_extracts)
277 			printf(" / ");
278 	}
279 	printf("\r\n");
280 }
281 
282 static inline void dpaa2_flow_qos_entry_log(
283 	const char *log_info, const struct rte_flow *flow, int qos_index)
284 {
285 	int idx;
286 	uint8_t *key, *mask;
287 
288 	if (!dpaa2_flow_control_log)
289 		return;
290 
291 	printf("\r\n%s QoS entry[%d] for TC[%d], extracts size is %d\r\n",
292 		log_info, qos_index, flow->tc_id, flow->qos_real_key_size);
293 
294 	key = (uint8_t *)(size_t)flow->qos_rule.key_iova;
295 	mask = (uint8_t *)(size_t)flow->qos_rule.mask_iova;
296 
297 	printf("key:\r\n");
298 	for (idx = 0; idx < flow->qos_real_key_size; idx++)
299 		printf("%02x ", key[idx]);
300 
301 	printf("\r\nmask:\r\n");
302 	for (idx = 0; idx < flow->qos_real_key_size; idx++)
303 		printf("%02x ", mask[idx]);
304 
305 	printf("\r\n%s QoS ipsrc: %d, ipdst: %d\r\n", log_info,
306 		flow->ipaddr_rule.qos_ipsrc_offset,
307 		flow->ipaddr_rule.qos_ipdst_offset);
308 }
309 
310 static inline void dpaa2_flow_fs_entry_log(
311 	const char *log_info, const struct rte_flow *flow)
312 {
313 	int idx;
314 	uint8_t *key, *mask;
315 
316 	if (!dpaa2_flow_control_log)
317 		return;
318 
319 	printf("\r\n%s FS/TC entry[%d] of TC[%d], extracts size is %d\r\n",
320 		log_info, flow->tc_index, flow->tc_id, flow->fs_real_key_size);
321 
322 	key = (uint8_t *)(size_t)flow->fs_rule.key_iova;
323 	mask = (uint8_t *)(size_t)flow->fs_rule.mask_iova;
324 
325 	printf("key:\r\n");
326 	for (idx = 0; idx < flow->fs_real_key_size; idx++)
327 		printf("%02x ", key[idx]);
328 
329 	printf("\r\nmask:\r\n");
330 	for (idx = 0; idx < flow->fs_real_key_size; idx++)
331 		printf("%02x ", mask[idx]);
332 
333 	printf("\r\n%s FS ipsrc: %d, ipdst: %d\r\n", log_info,
334 		flow->ipaddr_rule.fs_ipsrc_offset,
335 		flow->ipaddr_rule.fs_ipdst_offset);
336 }
337 
338 static inline void dpaa2_flow_extract_key_set(
339 	struct dpaa2_key_info *key_info, int index, uint8_t size)
340 {
341 	key_info->key_size[index] = size;
342 	if (index > 0) {
343 		key_info->key_offset[index] =
344 			key_info->key_offset[index - 1] +
345 			key_info->key_size[index - 1];
346 	} else {
347 		key_info->key_offset[index] = 0;
348 	}
349 	key_info->key_total_size += size;
350 }
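/*
 * Worked example (illustrative): calling dpaa2_flow_extract_key_set()
 * three times with sizes 6, 6 and 2 on a zeroed key_info yields
 *   key_offset[] = { 0, 6, 12 }, key_size[] = { 6, 6, 2 },
 *   key_total_size = 14.
 */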
351 
352 static int dpaa2_flow_extract_add(
353 	struct dpaa2_key_extract *key_extract,
354 	enum net_prot prot,
355 	uint32_t field, uint8_t field_size)
356 {
357 	int index, ip_src = -1, ip_dst = -1;
358 	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
359 	struct dpaa2_key_info *key_info = &key_extract->key_info;
360 
361 	if (dpkg->num_extracts >=
362 		DPKG_MAX_NUM_OF_EXTRACTS) {
363 		DPAA2_PMD_WARN("Number of extracts exceeds the maximum");
364 		return -1;
365 	}
366 	/* Before reordering, the IP SRC and IP DST extracts (if present)
367 	 * are already the last extract(s).
368 	 */
369 	for (index = 0; index < dpkg->num_extracts; index++) {
370 		if (dpkg->extracts[index].extract.from_hdr.prot ==
371 			NET_PROT_IP) {
372 			if (dpkg->extracts[index].extract.from_hdr.field ==
373 				NH_FLD_IP_SRC) {
374 				ip_src = index;
375 			}
376 			if (dpkg->extracts[index].extract.from_hdr.field ==
377 				NH_FLD_IP_DST) {
378 				ip_dst = index;
379 			}
380 		}
381 	}
382 
383 	if (ip_src >= 0)
384 		RTE_ASSERT((ip_src + 2) >= dpkg->num_extracts);
385 
386 	if (ip_dst >= 0)
387 		RTE_ASSERT((ip_dst + 2) >= dpkg->num_extracts);
388 
389 	if (prot == NET_PROT_IP &&
390 		(field == NH_FLD_IP_SRC ||
391 		field == NH_FLD_IP_DST)) {
392 		index = dpkg->num_extracts;
393 	} else {
394 		if (ip_src >= 0 && ip_dst >= 0)
395 			index = dpkg->num_extracts - 2;
396 		else if (ip_src >= 0 || ip_dst >= 0)
397 			index = dpkg->num_extracts - 1;
398 		else
399 			index = dpkg->num_extracts;
400 	}
401 
402 	dpkg->extracts[index].type = DPKG_EXTRACT_FROM_HDR;
403 	dpkg->extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
404 	dpkg->extracts[index].extract.from_hdr.prot = prot;
405 	dpkg->extracts[index].extract.from_hdr.field = field;
406 	if (prot == NET_PROT_IP &&
407 		(field == NH_FLD_IP_SRC ||
408 		field == NH_FLD_IP_DST)) {
409 		dpaa2_flow_extract_key_set(key_info, index, 0);
410 	} else {
411 		dpaa2_flow_extract_key_set(key_info, index, field_size);
412 	}
413 
414 	if (prot == NET_PROT_IP) {
415 		if (field == NH_FLD_IP_SRC) {
416 			if (key_info->ipv4_dst_offset >= 0) {
417 				key_info->ipv4_src_offset =
418 					key_info->ipv4_dst_offset +
419 					NH_FLD_IPV4_ADDR_SIZE;
420 			} else {
421 				key_info->ipv4_src_offset =
422 					key_info->key_offset[index - 1] +
423 						key_info->key_size[index - 1];
424 			}
425 			if (key_info->ipv6_dst_offset >= 0) {
426 				key_info->ipv6_src_offset =
427 					key_info->ipv6_dst_offset +
428 					NH_FLD_IPV6_ADDR_SIZE;
429 			} else {
430 				key_info->ipv6_src_offset =
431 					key_info->key_offset[index - 1] +
432 						key_info->key_size[index - 1];
433 			}
434 		} else if (field == NH_FLD_IP_DST) {
435 			if (key_info->ipv4_src_offset >= 0) {
436 				key_info->ipv4_dst_offset =
437 					key_info->ipv4_src_offset +
438 					NH_FLD_IPV4_ADDR_SIZE;
439 			} else {
440 				key_info->ipv4_dst_offset =
441 					key_info->key_offset[index - 1] +
442 						key_info->key_size[index - 1];
443 			}
444 			if (key_info->ipv6_src_offset >= 0) {
445 				key_info->ipv6_dst_offset =
446 					key_info->ipv6_src_offset +
447 					NH_FLD_IPV6_ADDR_SIZE;
448 			} else {
449 				key_info->ipv6_dst_offset =
450 					key_info->key_offset[index - 1] +
451 						key_info->key_size[index - 1];
452 			}
453 		}
454 	}
455 
456 	if (index == dpkg->num_extracts) {
457 		dpkg->num_extracts++;
458 		return 0;
459 	}
460 
461 	if (ip_src >= 0) {
462 		ip_src++;
463 		dpkg->extracts[ip_src].type =
464 			DPKG_EXTRACT_FROM_HDR;
465 		dpkg->extracts[ip_src].extract.from_hdr.type =
466 			DPKG_FULL_FIELD;
467 		dpkg->extracts[ip_src].extract.from_hdr.prot =
468 			NET_PROT_IP;
469 		dpkg->extracts[ip_src].extract.from_hdr.field =
470 			NH_FLD_IP_SRC;
471 		dpaa2_flow_extract_key_set(key_info, ip_src, 0);
472 		key_info->ipv4_src_offset += field_size;
473 		key_info->ipv6_src_offset += field_size;
474 	}
475 	if (ip_dst >= 0) {
476 		ip_dst++;
477 		dpkg->extracts[ip_dst].type =
478 			DPKG_EXTRACT_FROM_HDR;
479 		dpkg->extracts[ip_dst].extract.from_hdr.type =
480 			DPKG_FULL_FIELD;
481 		dpkg->extracts[ip_dst].extract.from_hdr.prot =
482 			NET_PROT_IP;
483 		dpkg->extracts[ip_dst].extract.from_hdr.field =
484 			NH_FLD_IP_DST;
485 		dpaa2_flow_extract_key_set(key_info, ip_dst, 0);
486 		key_info->ipv4_dst_offset += field_size;
487 		key_info->ipv6_dst_offset += field_size;
488 	}
489 
490 	dpkg->num_extracts++;
491 
492 	return 0;
493 }
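/*
 * Worked example (illustrative): with extracts [ETH_TYPE, IP_SRC, IP_DST]
 * already present (num_extracts == 3), adding VLAN_TCI picks index
 * num_extracts - 2 == 1, writes VLAN_TCI there and then re-creates IP_SRC
 * and IP_DST one slot later each, giving [ETH_TYPE, VLAN_TCI, IP_SRC,
 * IP_DST] so the variable-size IP address extracts stay at the tail.
 */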
494 
495 /* Protocol discrimination.
496  * Discriminate IPv4/IPv6/VLAN by the Ethernet type.
497  * Discriminate UDP/TCP/ICMP by the next-protocol field of IP.
498  */
499 static inline int
500 dpaa2_flow_proto_discrimination_extract(
501 	struct dpaa2_key_extract *key_extract,
502 	enum rte_flow_item_type type)
503 {
504 	if (type == RTE_FLOW_ITEM_TYPE_ETH) {
505 		return dpaa2_flow_extract_add(
506 				key_extract, NET_PROT_ETH,
507 				NH_FLD_ETH_TYPE,
508 				sizeof(rte_be16_t));
509 	} else if (type == (enum rte_flow_item_type)
510 		DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
511 		return dpaa2_flow_extract_add(
512 				key_extract, NET_PROT_IP,
513 				NH_FLD_IP_PROTO,
514 				NH_FLD_IP_PROTO_SIZE);
515 	}
516 
517 	return -1;
518 }
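/*
 * Illustrative values used with these discrimination extracts by the flow
 * handlers below:
 *   Eth type: 0x8100 (VLAN), 0x0800 (IPv4), 0x86DD (IPv6)
 *   IP next protocol: IPPROTO_ICMP (1), IPPROTO_TCP (6), IPPROTO_UDP (17),
 *   IPPROTO_SCTP (132)
 */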
519 
520 static inline int dpaa2_flow_extract_search(
521 	struct dpkg_profile_cfg *dpkg,
522 	enum net_prot prot, uint32_t field)
523 {
524 	int i;
525 
526 	for (i = 0; i < dpkg->num_extracts; i++) {
527 		if (dpkg->extracts[i].extract.from_hdr.prot == prot &&
528 			dpkg->extracts[i].extract.from_hdr.field == field) {
529 			return i;
530 		}
531 	}
532 
533 	return -1;
534 }
535 
536 static inline int dpaa2_flow_extract_key_offset(
537 	struct dpaa2_key_extract *key_extract,
538 	enum net_prot prot, uint32_t field)
539 {
540 	int i;
541 	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
542 	struct dpaa2_key_info *key_info = &key_extract->key_info;
543 
544 	if (prot == NET_PROT_IPV4 ||
545 		prot == NET_PROT_IPV6)
546 		i = dpaa2_flow_extract_search(dpkg, NET_PROT_IP, field);
547 	else
548 		i = dpaa2_flow_extract_search(dpkg, prot, field);
549 
550 	if (i >= 0) {
551 		if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_SRC)
552 			return key_info->ipv4_src_offset;
553 		else if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_DST)
554 			return key_info->ipv4_dst_offset;
555 		else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_SRC)
556 			return key_info->ipv6_src_offset;
557 		else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_DST)
558 			return key_info->ipv6_dst_offset;
559 		else
560 			return key_info->key_offset[i];
561 	} else {
562 		return -1;
563 	}
564 }
565 
566 struct proto_discrimination {
567 	enum rte_flow_item_type type;
568 	union {
569 		rte_be16_t eth_type;
570 		uint8_t ip_proto;
571 	};
572 };
573 
574 static int
575 dpaa2_flow_proto_discrimination_rule(
576 	struct dpaa2_dev_priv *priv, struct rte_flow *flow,
577 	struct proto_discrimination proto, int group)
578 {
579 	enum net_prot prot;
580 	uint32_t field;
581 	int offset;
582 	size_t key_iova;
583 	size_t mask_iova;
584 	rte_be16_t eth_type;
585 	uint8_t ip_proto;
586 
587 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
588 		prot = NET_PROT_ETH;
589 		field = NH_FLD_ETH_TYPE;
590 	} else if (proto.type == DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
591 		prot = NET_PROT_IP;
592 		field = NH_FLD_IP_PROTO;
593 	} else {
594 		DPAA2_PMD_ERR(
595 			"Only Eth and IP are supported to discriminate the next proto.");
596 		return -1;
597 	}
598 
599 	offset = dpaa2_flow_extract_key_offset(&priv->extract.qos_key_extract,
600 			prot, field);
601 	if (offset < 0) {
602 		DPAA2_PMD_ERR("QoS prot %d field %d extract failed",
603 				prot, field);
604 		return -1;
605 	}
606 	key_iova = flow->qos_rule.key_iova + offset;
607 	mask_iova = flow->qos_rule.mask_iova + offset;
608 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
609 		eth_type = proto.eth_type;
610 		memcpy((void *)key_iova, (const void *)(&eth_type),
611 			sizeof(rte_be16_t));
612 		eth_type = 0xffff;
613 		memcpy((void *)mask_iova, (const void *)(&eth_type),
614 			sizeof(rte_be16_t));
615 	} else {
616 		ip_proto = proto.ip_proto;
617 		memcpy((void *)key_iova, (const void *)(&ip_proto),
618 			sizeof(uint8_t));
619 		ip_proto = 0xff;
620 		memcpy((void *)mask_iova, (const void *)(&ip_proto),
621 			sizeof(uint8_t));
622 	}
623 
624 	offset = dpaa2_flow_extract_key_offset(
625 			&priv->extract.tc_key_extract[group],
626 			prot, field);
627 	if (offset < 0) {
628 		DPAA2_PMD_ERR("FS prot %d field %d extract failed",
629 				prot, field);
630 		return -1;
631 	}
632 	key_iova = flow->fs_rule.key_iova + offset;
633 	mask_iova = flow->fs_rule.mask_iova + offset;
634 
635 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
636 		eth_type = proto.eth_type;
637 		memcpy((void *)key_iova, (const void *)(&eth_type),
638 			sizeof(rte_be16_t));
639 		eth_type = 0xffff;
640 		memcpy((void *)mask_iova, (const void *)(&eth_type),
641 			sizeof(rte_be16_t));
642 	} else {
643 		ip_proto = proto.ip_proto;
644 		memcpy((void *)key_iova, (const void *)(&ip_proto),
645 			sizeof(uint8_t));
646 		ip_proto = 0xff;
647 		memcpy((void *)mask_iova, (const void *)(&ip_proto),
648 			sizeof(uint8_t));
649 	}
650 
651 	return 0;
652 }
653 
654 static inline int
655 dpaa2_flow_rule_data_set(
656 	struct dpaa2_key_extract *key_extract,
657 	struct dpni_rule_cfg *rule,
658 	enum net_prot prot, uint32_t field,
659 	const void *key, const void *mask, int size)
660 {
661 	int offset = dpaa2_flow_extract_key_offset(key_extract,
662 				prot, field);
663 
664 	if (offset < 0) {
665 		DPAA2_PMD_ERR("prot %d, field %d extract failed",
666 			prot, field);
667 		return -1;
668 	}
669 
670 	memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
671 	memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
672 
673 	return 0;
674 }
675 
676 static inline int
677 _dpaa2_flow_rule_move_ipaddr_tail(
678 	struct dpaa2_key_extract *key_extract,
679 	struct dpni_rule_cfg *rule, int src_offset,
680 	uint32_t field, bool ipv4)
681 {
682 	size_t key_src;
683 	size_t mask_src;
684 	size_t key_dst;
685 	size_t mask_dst;
686 	int dst_offset, len;
687 	enum net_prot prot;
688 	char tmp[NH_FLD_IPV6_ADDR_SIZE];
689 
690 	if (field != NH_FLD_IP_SRC &&
691 		field != NH_FLD_IP_DST) {
692 		DPAA2_PMD_ERR("Field of IP addr reorder must be IP SRC/DST");
693 		return -1;
694 	}
695 	if (ipv4)
696 		prot = NET_PROT_IPV4;
697 	else
698 		prot = NET_PROT_IPV6;
699 	dst_offset = dpaa2_flow_extract_key_offset(key_extract,
700 				prot, field);
701 	if (dst_offset < 0) {
702 		DPAA2_PMD_ERR("Field %d reorder extract failed", field);
703 		return -1;
704 	}
705 	key_src = rule->key_iova + src_offset;
706 	mask_src = rule->mask_iova + src_offset;
707 	key_dst = rule->key_iova + dst_offset;
708 	mask_dst = rule->mask_iova + dst_offset;
709 	if (ipv4)
710 		len = sizeof(rte_be32_t);
711 	else
712 		len = NH_FLD_IPV6_ADDR_SIZE;
713 
714 	memcpy(tmp, (char *)key_src, len);
715 	memset((char *)key_src, 0, len);
716 	memcpy((char *)key_dst, tmp, len);
717 
718 	memcpy(tmp, (char *)mask_src, len);
719 	memset((char *)mask_src, 0, len);
720 	memcpy((char *)mask_dst, tmp, len);
721 
722 	return 0;
723 }
724 
725 static inline int
726 dpaa2_flow_rule_move_ipaddr_tail(
727 	struct rte_flow *flow, struct dpaa2_dev_priv *priv,
728 	int fs_group)
729 {
730 	int ret;
731 	enum net_prot prot;
732 
733 	if (flow->ipaddr_rule.ipaddr_type == FLOW_NONE_IPADDR)
734 		return 0;
735 
736 	if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR)
737 		prot = NET_PROT_IPV4;
738 	else
739 		prot = NET_PROT_IPV6;
740 
741 	if (flow->ipaddr_rule.qos_ipsrc_offset >= 0) {
742 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
743 				&priv->extract.qos_key_extract,
744 				&flow->qos_rule,
745 				flow->ipaddr_rule.qos_ipsrc_offset,
746 				NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
747 		if (ret) {
748 			DPAA2_PMD_ERR("QoS src address reorder failed");
749 			return -1;
750 		}
751 		flow->ipaddr_rule.qos_ipsrc_offset =
752 			dpaa2_flow_extract_key_offset(
753 				&priv->extract.qos_key_extract,
754 				prot, NH_FLD_IP_SRC);
755 	}
756 
757 	if (flow->ipaddr_rule.qos_ipdst_offset >= 0) {
758 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
759 				&priv->extract.qos_key_extract,
760 				&flow->qos_rule,
761 				flow->ipaddr_rule.qos_ipdst_offset,
762 				NH_FLD_IP_DST, prot == NET_PROT_IPV4);
763 		if (ret) {
764 			DPAA2_PMD_ERR("QoS dst address reorder failed");
765 			return -1;
766 		}
767 		flow->ipaddr_rule.qos_ipdst_offset =
768 			dpaa2_flow_extract_key_offset(
769 				&priv->extract.qos_key_extract,
770 				prot, NH_FLD_IP_DST);
771 	}
772 
773 	if (flow->ipaddr_rule.fs_ipsrc_offset >= 0) {
774 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
775 				&priv->extract.tc_key_extract[fs_group],
776 				&flow->fs_rule,
777 				flow->ipaddr_rule.fs_ipsrc_offset,
778 				NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
779 		if (ret) {
780 			DPAA2_PMD_ERR("FS src address reorder failed");
781 			return -1;
782 		}
783 		flow->ipaddr_rule.fs_ipsrc_offset =
784 			dpaa2_flow_extract_key_offset(
785 				&priv->extract.tc_key_extract[fs_group],
786 				prot, NH_FLD_IP_SRC);
787 	}
788 	if (flow->ipaddr_rule.fs_ipdst_offset >= 0) {
789 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
790 				&priv->extract.tc_key_extract[fs_group],
791 				&flow->fs_rule,
792 				flow->ipaddr_rule.fs_ipdst_offset,
793 				NH_FLD_IP_DST, prot == NET_PROT_IPV4);
794 		if (ret) {
795 			DPAA2_PMD_ERR("FS dst address reorder failed");
796 			return -1;
797 		}
798 		flow->ipaddr_rule.fs_ipdst_offset =
799 			dpaa2_flow_extract_key_offset(
800 				&priv->extract.tc_key_extract[fs_group],
801 				prot, NH_FLD_IP_DST);
802 	}
803 
804 	return 0;
805 }
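/*
 * Worked example (illustrative): if an IPv4 source address was written
 * into a rule before another extract (e.g. ETH_SA) was added, the address
 * bytes still sit at their old offset in the key/mask buffers while the
 * reordered extracts expect them at the tail.  The helpers above copy the
 * 4 (IPv4) or 16 (IPv6) address bytes to the recomputed tail offset, clear
 * the old location and update the qos/fs ipsrc/ipdst offsets accordingly.
 */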
806 
807 static int
808 dpaa2_flow_extract_support(
809 	const uint8_t *mask_src,
810 	enum rte_flow_item_type type)
811 {
812 	char mask[64];
813 	int i, size = 0;
814 	const char *mask_support = NULL;
815 
816 	switch (type) {
817 	case RTE_FLOW_ITEM_TYPE_ETH:
818 		mask_support = (const char *)&dpaa2_flow_item_eth_mask;
819 		size = sizeof(struct rte_flow_item_eth);
820 		break;
821 	case RTE_FLOW_ITEM_TYPE_VLAN:
822 		mask_support = (const char *)&dpaa2_flow_item_vlan_mask;
823 		size = sizeof(struct rte_flow_item_vlan);
824 		break;
825 	case RTE_FLOW_ITEM_TYPE_IPV4:
826 		mask_support = (const char *)&dpaa2_flow_item_ipv4_mask;
827 		size = sizeof(struct rte_flow_item_ipv4);
828 		break;
829 	case RTE_FLOW_ITEM_TYPE_IPV6:
830 		mask_support = (const char *)&dpaa2_flow_item_ipv6_mask;
831 		size = sizeof(struct rte_flow_item_ipv6);
832 		break;
833 	case RTE_FLOW_ITEM_TYPE_ICMP:
834 		mask_support = (const char *)&dpaa2_flow_item_icmp_mask;
835 		size = sizeof(struct rte_flow_item_icmp);
836 		break;
837 	case RTE_FLOW_ITEM_TYPE_UDP:
838 		mask_support = (const char *)&dpaa2_flow_item_udp_mask;
839 		size = sizeof(struct rte_flow_item_udp);
840 		break;
841 	case RTE_FLOW_ITEM_TYPE_TCP:
842 		mask_support = (const char *)&dpaa2_flow_item_tcp_mask;
843 		size = sizeof(struct rte_flow_item_tcp);
844 		break;
845 	case RTE_FLOW_ITEM_TYPE_SCTP:
846 		mask_support = (const char *)&dpaa2_flow_item_sctp_mask;
847 		size = sizeof(struct rte_flow_item_sctp);
848 		break;
849 	case RTE_FLOW_ITEM_TYPE_GRE:
850 		mask_support = (const char *)&dpaa2_flow_item_gre_mask;
851 		size = sizeof(struct rte_flow_item_gre);
852 		break;
853 	default:
854 		return -1;
855 	}
856 
857 	memcpy(mask, mask_support, size);
858 
859 	for (i = 0; i < size; i++)
860 		mask[i] = (mask[i] | mask_src[i]);
861 
862 	if (memcmp(mask, mask_support, size))
863 		return -1;
864 
865 	return 0;
866 }
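/*
 * Worked example (illustrative): for RTE_FLOW_ITEM_TYPE_IPV4 the supported
 * mask covers hdr.src_addr, hdr.dst_addr and hdr.next_proto_id only.  A
 * pattern mask that also sets, e.g., hdr.type_of_service ORs extra bits
 * into 'mask'; the memcmp() against dpaa2_flow_item_ipv4_mask then differs
 * and the function returns -1, i.e. the field cannot be extracted.
 */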
867 
868 static int
869 dpaa2_configure_flow_eth(struct rte_flow *flow,
870 			 struct rte_eth_dev *dev,
871 			 const struct rte_flow_attr *attr,
872 			 const struct rte_flow_item *pattern,
873 			 const struct rte_flow_action actions[] __rte_unused,
874 			 struct rte_flow_error *error __rte_unused,
875 			 int *device_configured)
876 {
877 	int index, ret;
878 	int local_cfg = 0;
879 	uint32_t group;
880 	const struct rte_flow_item_eth *spec, *mask;
881 
882 	/* TODO: The upper bound of the range parameter is currently not implemented */
883 	const struct rte_flow_item_eth *last __rte_unused;
884 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
885 	const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0};
886 
887 	group = attr->group;
888 
889 	/* Parse pattern list to get the matching parameters */
890 	spec    = (const struct rte_flow_item_eth *)pattern->spec;
891 	last    = (const struct rte_flow_item_eth *)pattern->last;
892 	mask    = (const struct rte_flow_item_eth *)
893 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_eth_mask);
894 	if (!spec) {
895 		/* No field of the Eth header is specified;
896 		 * only the Eth protocol itself matters.
897 		 */
898 		DPAA2_PMD_WARN("No pattern spec for Eth flow, skipping");
899 		return 0;
900 	}
901 
902 	/* Get traffic class index and flow id to be configured */
903 	flow->tc_id = group;
904 	flow->tc_index = attr->priority;
905 
906 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
907 		RTE_FLOW_ITEM_TYPE_ETH)) {
908 		DPAA2_PMD_WARN("Extract field(s) of Ethernet not supported.");
909 
910 		return -1;
911 	}
912 
913 	if (memcmp((const char *)&mask->src, zero_cmp, RTE_ETHER_ADDR_LEN)) {
914 		index = dpaa2_flow_extract_search(
915 				&priv->extract.qos_key_extract.dpkg,
916 				NET_PROT_ETH, NH_FLD_ETH_SA);
917 		if (index < 0) {
918 			ret = dpaa2_flow_extract_add(
919 					&priv->extract.qos_key_extract,
920 					NET_PROT_ETH, NH_FLD_ETH_SA,
921 					RTE_ETHER_ADDR_LEN);
922 			if (ret) {
923 				DPAA2_PMD_ERR("QoS Extract add ETH_SA failed.");
924 
925 				return -1;
926 			}
927 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
928 		}
929 		index = dpaa2_flow_extract_search(
930 				&priv->extract.tc_key_extract[group].dpkg,
931 				NET_PROT_ETH, NH_FLD_ETH_SA);
932 		if (index < 0) {
933 			ret = dpaa2_flow_extract_add(
934 					&priv->extract.tc_key_extract[group],
935 					NET_PROT_ETH, NH_FLD_ETH_SA,
936 					RTE_ETHER_ADDR_LEN);
937 			if (ret) {
938 				DPAA2_PMD_ERR("FS Extract add ETH_SA failed.");
939 				return -1;
940 			}
941 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
942 		}
943 
944 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
945 		if (ret) {
946 			DPAA2_PMD_ERR(
947 				"Move ipaddr before ETH_SA rule set failed");
948 			return -1;
949 		}
950 
951 		ret = dpaa2_flow_rule_data_set(
952 				&priv->extract.qos_key_extract,
953 				&flow->qos_rule,
954 				NET_PROT_ETH,
955 				NH_FLD_ETH_SA,
956 				&spec->src.addr_bytes,
957 				&mask->src.addr_bytes,
958 				sizeof(struct rte_ether_addr));
959 		if (ret) {
960 			DPAA2_PMD_ERR("QoS NH_FLD_ETH_SA rule data set failed");
961 			return -1;
962 		}
963 
964 		ret = dpaa2_flow_rule_data_set(
965 				&priv->extract.tc_key_extract[group],
966 				&flow->fs_rule,
967 				NET_PROT_ETH,
968 				NH_FLD_ETH_SA,
969 				&spec->src.addr_bytes,
970 				&mask->src.addr_bytes,
971 				sizeof(struct rte_ether_addr));
972 		if (ret) {
973 			DPAA2_PMD_ERR("FS NH_FLD_ETH_SA rule data set failed");
974 			return -1;
975 		}
976 	}
977 
978 	if (memcmp((const char *)&mask->dst, zero_cmp, RTE_ETHER_ADDR_LEN)) {
979 		index = dpaa2_flow_extract_search(
980 				&priv->extract.qos_key_extract.dpkg,
981 				NET_PROT_ETH, NH_FLD_ETH_DA);
982 		if (index < 0) {
983 			ret = dpaa2_flow_extract_add(
984 					&priv->extract.qos_key_extract,
985 					NET_PROT_ETH, NH_FLD_ETH_DA,
986 					RTE_ETHER_ADDR_LEN);
987 			if (ret) {
988 				DPAA2_PMD_ERR("QoS Extract add ETH_DA failed.");
989 
990 				return -1;
991 			}
992 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
993 		}
994 
995 		index = dpaa2_flow_extract_search(
996 				&priv->extract.tc_key_extract[group].dpkg,
997 				NET_PROT_ETH, NH_FLD_ETH_DA);
998 		if (index < 0) {
999 			ret = dpaa2_flow_extract_add(
1000 					&priv->extract.tc_key_extract[group],
1001 					NET_PROT_ETH, NH_FLD_ETH_DA,
1002 					RTE_ETHER_ADDR_LEN);
1003 			if (ret) {
1004 				DPAA2_PMD_ERR("FS Extract add ETH_DA failed.");
1005 
1006 				return -1;
1007 			}
1008 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1009 		}
1010 
1011 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1012 		if (ret) {
1013 			DPAA2_PMD_ERR(
1014 				"Move ipaddr before ETH DA rule set failed");
1015 			return -1;
1016 		}
1017 
1018 		ret = dpaa2_flow_rule_data_set(
1019 				&priv->extract.qos_key_extract,
1020 				&flow->qos_rule,
1021 				NET_PROT_ETH,
1022 				NH_FLD_ETH_DA,
1023 				&spec->dst.addr_bytes,
1024 				&mask->dst.addr_bytes,
1025 				sizeof(struct rte_ether_addr));
1026 		if (ret) {
1027 			DPAA2_PMD_ERR("QoS NH_FLD_ETH_DA rule data set failed");
1028 			return -1;
1029 		}
1030 
1031 		ret = dpaa2_flow_rule_data_set(
1032 				&priv->extract.tc_key_extract[group],
1033 				&flow->fs_rule,
1034 				NET_PROT_ETH,
1035 				NH_FLD_ETH_DA,
1036 				&spec->dst.addr_bytes,
1037 				&mask->dst.addr_bytes,
1038 				sizeof(struct rte_ether_addr));
1039 		if (ret) {
1040 			DPAA2_PMD_ERR("FS NH_FLD_ETH_DA rule data set failed");
1041 			return -1;
1042 		}
1043 	}
1044 
1045 	if (memcmp((const char *)&mask->type, zero_cmp, sizeof(rte_be16_t))) {
1046 		index = dpaa2_flow_extract_search(
1047 				&priv->extract.qos_key_extract.dpkg,
1048 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1049 		if (index < 0) {
1050 			ret = dpaa2_flow_extract_add(
1051 					&priv->extract.qos_key_extract,
1052 					NET_PROT_ETH, NH_FLD_ETH_TYPE,
1053 					RTE_ETHER_TYPE_LEN);
1054 			if (ret) {
1055 				DPAA2_PMD_ERR("QoS Extract add ETH_TYPE failed.");
1056 
1057 				return -1;
1058 			}
1059 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1060 		}
1061 		index = dpaa2_flow_extract_search(
1062 				&priv->extract.tc_key_extract[group].dpkg,
1063 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1064 		if (index < 0) {
1065 			ret = dpaa2_flow_extract_add(
1066 					&priv->extract.tc_key_extract[group],
1067 					NET_PROT_ETH, NH_FLD_ETH_TYPE,
1068 					RTE_ETHER_TYPE_LEN);
1069 			if (ret) {
1070 				DPAA2_PMD_ERR("FS Extract add ETH_TYPE failed.");
1071 
1072 				return -1;
1073 			}
1074 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1075 		}
1076 
1077 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1078 		if (ret) {
1079 			DPAA2_PMD_ERR(
1080 				"Move ipaddr before ETH TYPE rule set failed");
1081 			return -1;
1082 		}
1083 
1084 		ret = dpaa2_flow_rule_data_set(
1085 				&priv->extract.qos_key_extract,
1086 				&flow->qos_rule,
1087 				NET_PROT_ETH,
1088 				NH_FLD_ETH_TYPE,
1089 				&spec->type,
1090 				&mask->type,
1091 				sizeof(rte_be16_t));
1092 		if (ret) {
1093 			DPAA2_PMD_ERR("QoS NH_FLD_ETH_TYPE rule data set failed");
1094 			return -1;
1095 		}
1096 
1097 		ret = dpaa2_flow_rule_data_set(
1098 				&priv->extract.tc_key_extract[group],
1099 				&flow->fs_rule,
1100 				NET_PROT_ETH,
1101 				NH_FLD_ETH_TYPE,
1102 				&spec->type,
1103 				&mask->type,
1104 				sizeof(rte_be16_t));
1105 		if (ret) {
1106 			DPAA2_PMD_ERR("FS NH_FLD_ETH_TYPE rule data set failed");
1107 			return -1;
1108 		}
1109 	}
1110 
1111 	(*device_configured) |= local_cfg;
1112 
1113 	return 0;
1114 }
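/*
 * Example (testpmd syntax, illustrative):
 *   flow create 0 ingress pattern eth src is 00:11:22:33:44:55 / end
 *        actions queue index 1 / end
 * Only NH_FLD_ETH_SA is added to the QoS/FS extracts in that case; the dst
 * and type handling above is skipped because their masks remain zero.
 */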
1115 
1116 static int
1117 dpaa2_configure_flow_vlan(struct rte_flow *flow,
1118 			  struct rte_eth_dev *dev,
1119 			  const struct rte_flow_attr *attr,
1120 			  const struct rte_flow_item *pattern,
1121 			  const struct rte_flow_action actions[] __rte_unused,
1122 			  struct rte_flow_error *error __rte_unused,
1123 			  int *device_configured)
1124 {
1125 	int index, ret;
1126 	int local_cfg = 0;
1127 	uint32_t group;
1128 	const struct rte_flow_item_vlan *spec, *mask;
1129 
1130 	const struct rte_flow_item_vlan *last __rte_unused;
1131 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1132 
1133 	group = attr->group;
1134 
1135 	/* Parse pattern list to get the matching parameters */
1136 	spec    = (const struct rte_flow_item_vlan *)pattern->spec;
1137 	last    = (const struct rte_flow_item_vlan *)pattern->last;
1138 	mask    = (const struct rte_flow_item_vlan *)
1139 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_vlan_mask);
1140 
1141 	/* Get traffic class index and flow id to be configured */
1142 	flow->tc_id = group;
1143 	flow->tc_index = attr->priority;
1144 
1145 	if (!spec) {
1146 		/* No field of the VLAN header is specified;
1147 		 * only the VLAN protocol itself matters.
1148 		 */
1149 		/* The Eth type is actually used for VLAN classification.
1150 		 */
1151 		struct proto_discrimination proto;
1152 
1153 		index = dpaa2_flow_extract_search(
1154 				&priv->extract.qos_key_extract.dpkg,
1155 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1156 		if (index < 0) {
1157 			ret = dpaa2_flow_proto_discrimination_extract(
1158 						&priv->extract.qos_key_extract,
1159 						RTE_FLOW_ITEM_TYPE_ETH);
1160 			if (ret) {
1161 				DPAA2_PMD_ERR(
1162 				"QoS Ext ETH_TYPE to discriminate VLAN failed");
1163 
1164 				return -1;
1165 			}
1166 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1167 		}
1168 
1169 		index = dpaa2_flow_extract_search(
1170 				&priv->extract.tc_key_extract[group].dpkg,
1171 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1172 		if (index < 0) {
1173 			ret = dpaa2_flow_proto_discrimination_extract(
1174 					&priv->extract.tc_key_extract[group],
1175 					RTE_FLOW_ITEM_TYPE_ETH);
1176 			if (ret) {
1177 				DPAA2_PMD_ERR(
1178 				"FS Ext ETH_TYPE to discriminate VLAN failed.");
1179 
1180 				return -1;
1181 			}
1182 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1183 		}
1184 
1185 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1186 		if (ret) {
1187 			DPAA2_PMD_ERR(
1188 			"Move ipaddr before VLAN discrimination set failed");
1189 			return -1;
1190 		}
1191 
1192 		proto.type = RTE_FLOW_ITEM_TYPE_ETH;
1193 		proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
1194 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1195 							proto, group);
1196 		if (ret) {
1197 			DPAA2_PMD_ERR("VLAN discrimination rule set failed");
1198 			return -1;
1199 		}
1200 
1201 		(*device_configured) |= local_cfg;
1202 
1203 		return 0;
1204 	}
1205 
1206 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
1207 		RTE_FLOW_ITEM_TYPE_VLAN)) {
1208 		DPAA2_PMD_WARN("Extract field(s) of VLAN not supported.");
1209 
1210 		return -1;
1211 	}
1212 
1213 	if (!mask->tci)
1214 		return 0;
1215 
1216 	index = dpaa2_flow_extract_search(
1217 				&priv->extract.qos_key_extract.dpkg,
1218 				NET_PROT_VLAN, NH_FLD_VLAN_TCI);
1219 	if (index < 0) {
1220 		ret = dpaa2_flow_extract_add(
1221 						&priv->extract.qos_key_extract,
1222 						NET_PROT_VLAN,
1223 						NH_FLD_VLAN_TCI,
1224 						sizeof(rte_be16_t));
1225 		if (ret) {
1226 			DPAA2_PMD_ERR("QoS Extract add VLAN_TCI failed.");
1227 
1228 			return -1;
1229 		}
1230 		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1231 	}
1232 
1233 	index = dpaa2_flow_extract_search(
1234 			&priv->extract.tc_key_extract[group].dpkg,
1235 			NET_PROT_VLAN, NH_FLD_VLAN_TCI);
1236 	if (index < 0) {
1237 		ret = dpaa2_flow_extract_add(
1238 				&priv->extract.tc_key_extract[group],
1239 				NET_PROT_VLAN,
1240 				NH_FLD_VLAN_TCI,
1241 				sizeof(rte_be16_t));
1242 		if (ret) {
1243 			DPAA2_PMD_ERR("FS Extract add VLAN_TCI failed.");
1244 
1245 			return -1;
1246 		}
1247 		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1248 	}
1249 
1250 	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1251 	if (ret) {
1252 		DPAA2_PMD_ERR(
1253 			"Move ipaddr before VLAN TCI rule set failed");
1254 		return -1;
1255 	}
1256 
1257 	ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
1258 				&flow->qos_rule,
1259 				NET_PROT_VLAN,
1260 				NH_FLD_VLAN_TCI,
1261 				&spec->tci,
1262 				&mask->tci,
1263 				sizeof(rte_be16_t));
1264 	if (ret) {
1265 		DPAA2_PMD_ERR("QoS NH_FLD_VLAN_TCI rule data set failed");
1266 		return -1;
1267 	}
1268 
1269 	ret = dpaa2_flow_rule_data_set(
1270 			&priv->extract.tc_key_extract[group],
1271 			&flow->fs_rule,
1272 			NET_PROT_VLAN,
1273 			NH_FLD_VLAN_TCI,
1274 			&spec->tci,
1275 			&mask->tci,
1276 			sizeof(rte_be16_t));
1277 	if (ret) {
1278 		DPAA2_PMD_ERR("FS NH_FLD_VLAN_TCI rule data set failed");
1279 		return -1;
1280 	}
1281 
1282 	(*device_configured) |= local_cfg;
1283 
1284 	return 0;
1285 }
1286 
1287 static int
1288 dpaa2_configure_flow_generic_ip(
1289 	struct rte_flow *flow,
1290 	struct rte_eth_dev *dev,
1291 	const struct rte_flow_attr *attr,
1292 	const struct rte_flow_item *pattern,
1293 	const struct rte_flow_action actions[] __rte_unused,
1294 	struct rte_flow_error *error __rte_unused,
1295 	int *device_configured)
1296 {
1297 	int index, ret;
1298 	int local_cfg = 0;
1299 	uint32_t group;
1300 	const struct rte_flow_item_ipv4 *spec_ipv4 = 0,
1301 		*mask_ipv4 = 0;
1302 	const struct rte_flow_item_ipv6 *spec_ipv6 = 0,
1303 		*mask_ipv6 = 0;
1304 	const void *key, *mask;
1305 	enum net_prot prot;
1306 
1307 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1308 	const char zero_cmp[NH_FLD_IPV6_ADDR_SIZE] = {0};
1309 	int size;
1310 
1311 	group = attr->group;
1312 
1313 	/* Parse pattern list to get the matching parameters */
1314 	if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1315 		spec_ipv4 = (const struct rte_flow_item_ipv4 *)pattern->spec;
1316 		mask_ipv4 = (const struct rte_flow_item_ipv4 *)
1317 			(pattern->mask ? pattern->mask :
1318 					&dpaa2_flow_item_ipv4_mask);
1319 	} else {
1320 		spec_ipv6 = (const struct rte_flow_item_ipv6 *)pattern->spec;
1321 		mask_ipv6 = (const struct rte_flow_item_ipv6 *)
1322 			(pattern->mask ? pattern->mask :
1323 					&dpaa2_flow_item_ipv6_mask);
1324 	}
1325 
1326 	/* Get traffic class index and flow id to be configured */
1327 	flow->tc_id = group;
1328 	flow->tc_index = attr->priority;
1329 
1330 	if (!spec_ipv4 && !spec_ipv6) {
1331 		/* No field of the IP header is specified;
1332 		 * only the IP protocol itself matters.
1333 		 * Example: flow create 0 ingress pattern ipv6 /
1334 		 */
1335 		/* The Eth type is actually used for IP identification.
1336 		 */
1337 		/* TODO: The current design only supports Eth + IP;
1338 		 *  Eth + VLAN + IP still needs to be added.
1339 		 */
1340 		struct proto_discrimination proto;
1341 
1342 		index = dpaa2_flow_extract_search(
1343 				&priv->extract.qos_key_extract.dpkg,
1344 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1345 		if (index < 0) {
1346 			ret = dpaa2_flow_proto_discrimination_extract(
1347 					&priv->extract.qos_key_extract,
1348 					RTE_FLOW_ITEM_TYPE_ETH);
1349 			if (ret) {
1350 				DPAA2_PMD_ERR(
1351 				"QoS Ext ETH_TYPE to discriminate IP failed.");
1352 
1353 				return -1;
1354 			}
1355 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1356 		}
1357 
1358 		index = dpaa2_flow_extract_search(
1359 				&priv->extract.tc_key_extract[group].dpkg,
1360 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1361 		if (index < 0) {
1362 			ret = dpaa2_flow_proto_discrimination_extract(
1363 					&priv->extract.tc_key_extract[group],
1364 					RTE_FLOW_ITEM_TYPE_ETH);
1365 			if (ret) {
1366 				DPAA2_PMD_ERR(
1367 				"FS Ext ETH_TYPE to discriminate IP failed");
1368 
1369 				return -1;
1370 			}
1371 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1372 		}
1373 
1374 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1375 		if (ret) {
1376 			DPAA2_PMD_ERR(
1377 			"Move ipaddr before IP discrimination set failed");
1378 			return -1;
1379 		}
1380 
1381 		proto.type = RTE_FLOW_ITEM_TYPE_ETH;
1382 		if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4)
1383 			proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
1384 		else
1385 			proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
1386 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1387 							proto, group);
1388 		if (ret) {
1389 			DPAA2_PMD_ERR("IP discrimination rule set failed");
1390 			return -1;
1391 		}
1392 
1393 		(*device_configured) |= local_cfg;
1394 
1395 		return 0;
1396 	}
1397 
1398 	if (mask_ipv4) {
1399 		if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv4,
1400 			RTE_FLOW_ITEM_TYPE_IPV4)) {
1401 			DPAA2_PMD_WARN("Extract field(s) of IPv4 not supported.");
1402 
1403 			return -1;
1404 		}
1405 	}
1406 
1407 	if (mask_ipv6) {
1408 		if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv6,
1409 			RTE_FLOW_ITEM_TYPE_IPV6)) {
1410 			DPAA2_PMD_WARN("Extract field(s) of IPv6 not supported.");
1411 
1412 			return -1;
1413 		}
1414 	}
1415 
1416 	if (mask_ipv4 && (mask_ipv4->hdr.src_addr ||
1417 		mask_ipv4->hdr.dst_addr)) {
1418 		flow->ipaddr_rule.ipaddr_type = FLOW_IPV4_ADDR;
1419 	} else if (mask_ipv6 &&
1420 		(memcmp((const char *)mask_ipv6->hdr.src_addr,
1421 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE) ||
1422 		memcmp((const char *)mask_ipv6->hdr.dst_addr,
1423 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1424 		flow->ipaddr_rule.ipaddr_type = FLOW_IPV6_ADDR;
1425 	}
1426 
1427 	if ((mask_ipv4 && mask_ipv4->hdr.src_addr) ||
1428 		(mask_ipv6 &&
1429 			memcmp((const char *)mask_ipv6->hdr.src_addr,
1430 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1431 		index = dpaa2_flow_extract_search(
1432 				&priv->extract.qos_key_extract.dpkg,
1433 				NET_PROT_IP, NH_FLD_IP_SRC);
1434 		if (index < 0) {
1435 			ret = dpaa2_flow_extract_add(
1436 						&priv->extract.qos_key_extract,
1437 						NET_PROT_IP,
1438 						NH_FLD_IP_SRC,
1439 						0);
1440 			if (ret) {
1441 				DPAA2_PMD_ERR("QoS Extract add IP_SRC failed.");
1442 
1443 				return -1;
1444 			}
1445 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1446 		}
1447 
1448 		index = dpaa2_flow_extract_search(
1449 				&priv->extract.tc_key_extract[group].dpkg,
1450 				NET_PROT_IP, NH_FLD_IP_SRC);
1451 		if (index < 0) {
1452 			ret = dpaa2_flow_extract_add(
1453 					&priv->extract.tc_key_extract[group],
1454 					NET_PROT_IP,
1455 					NH_FLD_IP_SRC,
1456 					0);
1457 			if (ret) {
1458 				DPAA2_PMD_ERR("FS Extract add IP_SRC failed.");
1459 
1460 				return -1;
1461 			}
1462 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1463 		}
1464 
1465 		if (spec_ipv4)
1466 			key = &spec_ipv4->hdr.src_addr;
1467 		else
1468 			key = &spec_ipv6->hdr.src_addr[0];
1469 		if (mask_ipv4) {
1470 			mask = &mask_ipv4->hdr.src_addr;
1471 			size = NH_FLD_IPV4_ADDR_SIZE;
1472 			prot = NET_PROT_IPV4;
1473 		} else {
1474 			mask = &mask_ipv6->hdr.src_addr[0];
1475 			size = NH_FLD_IPV6_ADDR_SIZE;
1476 			prot = NET_PROT_IPV6;
1477 		}
1478 
1479 		ret = dpaa2_flow_rule_data_set(
1480 				&priv->extract.qos_key_extract,
1481 				&flow->qos_rule,
1482 				prot, NH_FLD_IP_SRC,
1483 				key, mask, size);
1484 		if (ret) {
1485 			DPAA2_PMD_ERR("QoS NH_FLD_IP_SRC rule data set failed");
1486 			return -1;
1487 		}
1488 
1489 		ret = dpaa2_flow_rule_data_set(
1490 				&priv->extract.tc_key_extract[group],
1491 				&flow->fs_rule,
1492 				prot, NH_FLD_IP_SRC,
1493 				key, mask, size);
1494 		if (ret) {
1495 			DPAA2_PMD_ERR("FS NH_FLD_IP_SRC rule data set failed");
1496 			return -1;
1497 		}
1498 
1499 		flow->ipaddr_rule.qos_ipsrc_offset =
1500 			dpaa2_flow_extract_key_offset(
1501 				&priv->extract.qos_key_extract,
1502 				prot, NH_FLD_IP_SRC);
1503 		flow->ipaddr_rule.fs_ipsrc_offset =
1504 			dpaa2_flow_extract_key_offset(
1505 				&priv->extract.tc_key_extract[group],
1506 				prot, NH_FLD_IP_SRC);
1507 	}
1508 
1509 	if ((mask_ipv4 && mask_ipv4->hdr.dst_addr) ||
1510 		(mask_ipv6 &&
1511 			memcmp((const char *)mask_ipv6->hdr.dst_addr,
1512 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1513 		index = dpaa2_flow_extract_search(
1514 				&priv->extract.qos_key_extract.dpkg,
1515 				NET_PROT_IP, NH_FLD_IP_DST);
1516 		if (index < 0) {
1517 			if (mask_ipv4)
1518 				size = NH_FLD_IPV4_ADDR_SIZE;
1519 			else
1520 				size = NH_FLD_IPV6_ADDR_SIZE;
1521 			ret = dpaa2_flow_extract_add(
1522 						&priv->extract.qos_key_extract,
1523 						NET_PROT_IP,
1524 						NH_FLD_IP_DST,
1525 						size);
1526 			if (ret) {
1527 				DPAA2_PMD_ERR("QoS Extract add IP_DST failed.");
1528 
1529 				return -1;
1530 			}
1531 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1532 		}
1533 
1534 		index = dpaa2_flow_extract_search(
1535 				&priv->extract.tc_key_extract[group].dpkg,
1536 				NET_PROT_IP, NH_FLD_IP_DST);
1537 		if (index < 0) {
1538 			if (mask_ipv4)
1539 				size = NH_FLD_IPV4_ADDR_SIZE;
1540 			else
1541 				size = NH_FLD_IPV6_ADDR_SIZE;
1542 			ret = dpaa2_flow_extract_add(
1543 					&priv->extract.tc_key_extract[group],
1544 					NET_PROT_IP,
1545 					NH_FLD_IP_DST,
1546 					size);
1547 			if (ret) {
1548 				DPAA2_PMD_ERR("FS Extract add IP_DST failed.");
1549 
1550 				return -1;
1551 			}
1552 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1553 		}
1554 
1555 		if (spec_ipv4)
1556 			key = &spec_ipv4->hdr.dst_addr;
1557 		else
1558 			key = spec_ipv6->hdr.dst_addr;
1559 		if (mask_ipv4) {
1560 			mask = &mask_ipv4->hdr.dst_addr;
1561 			size = NH_FLD_IPV4_ADDR_SIZE;
1562 			prot = NET_PROT_IPV4;
1563 		} else {
1564 			mask = &mask_ipv6->hdr.dst_addr[0];
1565 			size = NH_FLD_IPV6_ADDR_SIZE;
1566 			prot = NET_PROT_IPV6;
1567 		}
1568 
1569 		ret = dpaa2_flow_rule_data_set(
1570 				&priv->extract.qos_key_extract,
1571 				&flow->qos_rule,
1572 				prot, NH_FLD_IP_DST,
1573 				key, mask, size);
1574 		if (ret) {
1575 			DPAA2_PMD_ERR("QoS NH_FLD_IP_DST rule data set failed");
1576 			return -1;
1577 		}
1578 
1579 		ret = dpaa2_flow_rule_data_set(
1580 				&priv->extract.tc_key_extract[group],
1581 				&flow->fs_rule,
1582 				prot, NH_FLD_IP_DST,
1583 				key, mask, size);
1584 		if (ret) {
1585 			DPAA2_PMD_ERR("FS NH_FLD_IP_DST rule data set failed");
1586 			return -1;
1587 		}
1588 		flow->ipaddr_rule.qos_ipdst_offset =
1589 			dpaa2_flow_extract_key_offset(
1590 				&priv->extract.qos_key_extract,
1591 				prot, NH_FLD_IP_DST);
1592 		flow->ipaddr_rule.fs_ipdst_offset =
1593 			dpaa2_flow_extract_key_offset(
1594 				&priv->extract.tc_key_extract[group],
1595 				prot, NH_FLD_IP_DST);
1596 	}
1597 
1598 	if ((mask_ipv4 && mask_ipv4->hdr.next_proto_id) ||
1599 		(mask_ipv6 && mask_ipv6->hdr.proto)) {
1600 		index = dpaa2_flow_extract_search(
1601 				&priv->extract.qos_key_extract.dpkg,
1602 				NET_PROT_IP, NH_FLD_IP_PROTO);
1603 		if (index < 0) {
1604 			ret = dpaa2_flow_extract_add(
1605 				&priv->extract.qos_key_extract,
1606 				NET_PROT_IP,
1607 				NH_FLD_IP_PROTO,
1608 				NH_FLD_IP_PROTO_SIZE);
1609 			if (ret) {
1610 				DPAA2_PMD_ERR("QoS Extract add IP_PROTO failed.");
1611 
1612 				return -1;
1613 			}
1614 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1615 		}
1616 
1617 		index = dpaa2_flow_extract_search(
1618 				&priv->extract.tc_key_extract[group].dpkg,
1619 				NET_PROT_IP, NH_FLD_IP_PROTO);
1620 		if (index < 0) {
1621 			ret = dpaa2_flow_extract_add(
1622 					&priv->extract.tc_key_extract[group],
1623 					NET_PROT_IP,
1624 					NH_FLD_IP_PROTO,
1625 					NH_FLD_IP_PROTO_SIZE);
1626 			if (ret) {
1627 				DPAA2_PMD_ERR("FS Extract add IP_PROTO failed.");
1628 
1629 				return -1;
1630 			}
1631 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1632 		}
1633 
1634 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1635 		if (ret) {
1636 			DPAA2_PMD_ERR(
1637 				"Move ipaddr before NH_FLD_IP_PROTO rule set failed");
1638 			return -1;
1639 		}
1640 
1641 		if (spec_ipv4)
1642 			key = &spec_ipv4->hdr.next_proto_id;
1643 		else
1644 			key = &spec_ipv6->hdr.proto;
1645 		if (mask_ipv4)
1646 			mask = &mask_ipv4->hdr.next_proto_id;
1647 		else
1648 			mask = &mask_ipv6->hdr.proto;
1649 
1650 		ret = dpaa2_flow_rule_data_set(
1651 				&priv->extract.qos_key_extract,
1652 				&flow->qos_rule,
1653 				NET_PROT_IP,
1654 				NH_FLD_IP_PROTO,
1655 				key, mask, NH_FLD_IP_PROTO_SIZE);
1656 		if (ret) {
1657 			DPAA2_PMD_ERR("QoS NH_FLD_IP_PROTO rule data set failed");
1658 			return -1;
1659 		}
1660 
1661 		ret = dpaa2_flow_rule_data_set(
1662 				&priv->extract.tc_key_extract[group],
1663 				&flow->fs_rule,
1664 				NET_PROT_IP,
1665 				NH_FLD_IP_PROTO,
1666 				key, mask, NH_FLD_IP_PROTO_SIZE);
1667 		if (ret) {
1668 			DPAA2_PMD_ERR("FS NH_FLD_IP_PROTO rule data set failed");
1669 			return -1;
1670 		}
1671 	}
1672 
1673 	(*device_configured) |= local_cfg;
1674 
1675 	return 0;
1676 }
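/*
 * Examples (testpmd syntax, illustrative):
 *   flow create 0 ingress pattern ipv4 src is 192.168.1.1 / end
 *        actions queue index 2 / end
 *   flow create 0 ingress pattern ipv6 / end actions queue index 3 / end
 * The first rule matches NH_FLD_IP_SRC as an IPv4 address; the second has
 * no spec, so only the Eth-type discrimination (0x86DD for IPv6) is set.
 */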
1677 
1678 static int
1679 dpaa2_configure_flow_icmp(struct rte_flow *flow,
1680 			  struct rte_eth_dev *dev,
1681 			  const struct rte_flow_attr *attr,
1682 			  const struct rte_flow_item *pattern,
1683 			  const struct rte_flow_action actions[] __rte_unused,
1684 			  struct rte_flow_error *error __rte_unused,
1685 			  int *device_configured)
1686 {
1687 	int index, ret;
1688 	int local_cfg = 0;
1689 	uint32_t group;
1690 	const struct rte_flow_item_icmp *spec, *mask;
1691 
1692 	const struct rte_flow_item_icmp *last __rte_unused;
1693 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1694 
1695 	group = attr->group;
1696 
1697 	/* Parse pattern list to get the matching parameters */
1698 	spec    = (const struct rte_flow_item_icmp *)pattern->spec;
1699 	last    = (const struct rte_flow_item_icmp *)pattern->last;
1700 	mask    = (const struct rte_flow_item_icmp *)
1701 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_icmp_mask);
1702 
1703 	/* Get traffic class index and flow id to be configured */
1704 	flow->tc_id = group;
1705 	flow->tc_index = attr->priority;
1706 
1707 	if (!spec) {
1708 		/* No field of the ICMP header is specified;
1709 		 * only the ICMP protocol itself matters.
1710 		 * Example: flow create 0 ingress pattern icmp /
1711 		 */
1712 		/* The next protocol of the generic IP header is actually
1713 		 * used for ICMP identification.
1714 		 */
1715 		struct proto_discrimination proto;
1716 
1717 		index = dpaa2_flow_extract_search(
1718 				&priv->extract.qos_key_extract.dpkg,
1719 				NET_PROT_IP, NH_FLD_IP_PROTO);
1720 		if (index < 0) {
1721 			ret = dpaa2_flow_proto_discrimination_extract(
1722 					&priv->extract.qos_key_extract,
1723 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1724 			if (ret) {
1725 				DPAA2_PMD_ERR(
1726 					"QoS Extract IP protocol to discriminate ICMP failed.");
1727 
1728 				return -1;
1729 			}
1730 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1731 		}
1732 
1733 		index = dpaa2_flow_extract_search(
1734 				&priv->extract.tc_key_extract[group].dpkg,
1735 				NET_PROT_IP, NH_FLD_IP_PROTO);
1736 		if (index < 0) {
1737 			ret = dpaa2_flow_proto_discrimination_extract(
1738 					&priv->extract.tc_key_extract[group],
1739 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1740 			if (ret) {
1741 				DPAA2_PMD_ERR(
1742 					"FS Extract IP protocol to discriminate ICMP failed.");
1743 
1744 				return -1;
1745 			}
1746 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1747 		}
1748 
1749 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1750 		if (ret) {
1751 			DPAA2_PMD_ERR(
1752 				"Move IP addr before ICMP discrimination set failed");
1753 			return -1;
1754 		}
1755 
1756 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
1757 		proto.ip_proto = IPPROTO_ICMP;
1758 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1759 							proto, group);
1760 		if (ret) {
1761 			DPAA2_PMD_ERR("ICMP discrimination rule set failed");
1762 			return -1;
1763 		}
1764 
1765 		(*device_configured) |= local_cfg;
1766 
1767 		return 0;
1768 	}
1769 
1770 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
1771 		RTE_FLOW_ITEM_TYPE_ICMP)) {
1772 		DPAA2_PMD_WARN("Extract field(s) of ICMP not supported.");
1773 
1774 		return -1;
1775 	}
1776 
1777 	if (mask->hdr.icmp_type) {
1778 		index = dpaa2_flow_extract_search(
1779 				&priv->extract.qos_key_extract.dpkg,
1780 				NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
1781 		if (index < 0) {
1782 			ret = dpaa2_flow_extract_add(
1783 					&priv->extract.qos_key_extract,
1784 					NET_PROT_ICMP,
1785 					NH_FLD_ICMP_TYPE,
1786 					NH_FLD_ICMP_TYPE_SIZE);
1787 			if (ret) {
1788 				DPAA2_PMD_ERR("QoS Extract add ICMP_TYPE failed.");
1789 
1790 				return -1;
1791 			}
1792 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1793 		}
1794 
1795 		index = dpaa2_flow_extract_search(
1796 				&priv->extract.tc_key_extract[group].dpkg,
1797 				NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
1798 		if (index < 0) {
1799 			ret = dpaa2_flow_extract_add(
1800 					&priv->extract.tc_key_extract[group],
1801 					NET_PROT_ICMP,
1802 					NH_FLD_ICMP_TYPE,
1803 					NH_FLD_ICMP_TYPE_SIZE);
1804 			if (ret) {
1805 				DPAA2_PMD_ERR("FS Extract add ICMP_TYPE failed.");
1806 
1807 				return -1;
1808 			}
1809 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1810 		}
1811 
1812 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1813 		if (ret) {
1814 			DPAA2_PMD_ERR(
1815 				"Move ipaddr before ICMP TYPE set failed");
1816 			return -1;
1817 		}
1818 
1819 		ret = dpaa2_flow_rule_data_set(
1820 				&priv->extract.qos_key_extract,
1821 				&flow->qos_rule,
1822 				NET_PROT_ICMP,
1823 				NH_FLD_ICMP_TYPE,
1824 				&spec->hdr.icmp_type,
1825 				&mask->hdr.icmp_type,
1826 				NH_FLD_ICMP_TYPE_SIZE);
1827 		if (ret) {
1828 			DPAA2_PMD_ERR("QoS NH_FLD_ICMP_TYPE rule data set failed");
1829 			return -1;
1830 		}
1831 
1832 		ret = dpaa2_flow_rule_data_set(
1833 				&priv->extract.tc_key_extract[group],
1834 				&flow->fs_rule,
1835 				NET_PROT_ICMP,
1836 				NH_FLD_ICMP_TYPE,
1837 				&spec->hdr.icmp_type,
1838 				&mask->hdr.icmp_type,
1839 				NH_FLD_ICMP_TYPE_SIZE);
1840 		if (ret) {
1841 			DPAA2_PMD_ERR("FS NH_FLD_ICMP_TYPE rule data set failed");
1842 			return -1;
1843 		}
1844 	}
1845 
1846 	if (mask->hdr.icmp_code) {
1847 		index = dpaa2_flow_extract_search(
1848 				&priv->extract.qos_key_extract.dpkg,
1849 				NET_PROT_ICMP, NH_FLD_ICMP_CODE);
1850 		if (index < 0) {
1851 			ret = dpaa2_flow_extract_add(
1852 					&priv->extract.qos_key_extract,
1853 					NET_PROT_ICMP,
1854 					NH_FLD_ICMP_CODE,
1855 					NH_FLD_ICMP_CODE_SIZE);
1856 			if (ret) {
1857 				DPAA2_PMD_ERR("QoS Extract add ICMP_CODE failed.");
1858 
1859 				return -1;
1860 			}
1861 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1862 		}
1863 
1864 		index = dpaa2_flow_extract_search(
1865 				&priv->extract.tc_key_extract[group].dpkg,
1866 				NET_PROT_ICMP, NH_FLD_ICMP_CODE);
1867 		if (index < 0) {
1868 			ret = dpaa2_flow_extract_add(
1869 					&priv->extract.tc_key_extract[group],
1870 					NET_PROT_ICMP,
1871 					NH_FLD_ICMP_CODE,
1872 					NH_FLD_ICMP_CODE_SIZE);
1873 			if (ret) {
1874 				DPAA2_PMD_ERR("FS Extract add ICMP_CODE failed.");
1875 
1876 				return -1;
1877 			}
1878 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1879 		}
1880 
1881 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1882 		if (ret) {
1883 			DPAA2_PMD_ERR(
1884 				"Move ipaddr before ICMP CODE set failed");
1885 			return -1;
1886 		}
1887 
1888 		ret = dpaa2_flow_rule_data_set(
1889 				&priv->extract.qos_key_extract,
1890 				&flow->qos_rule,
1891 				NET_PROT_ICMP,
1892 				NH_FLD_ICMP_CODE,
1893 				&spec->hdr.icmp_code,
1894 				&mask->hdr.icmp_code,
1895 				NH_FLD_ICMP_CODE_SIZE);
1896 		if (ret) {
1897 			DPAA2_PMD_ERR("QoS NH_FLD_ICMP_CODE rule data set failed");
1898 			return -1;
1899 		}
1900 
1901 		ret = dpaa2_flow_rule_data_set(
1902 				&priv->extract.tc_key_extract[group],
1903 				&flow->fs_rule,
1904 				NET_PROT_ICMP,
1905 				NH_FLD_ICMP_CODE,
1906 				&spec->hdr.icmp_code,
1907 				&mask->hdr.icmp_code,
1908 				NH_FLD_ICMP_CODE_SIZE);
1909 		if (ret) {
1910 			DPAA2_PMD_ERR("FS NH_FLD_ICMP_CODE rule data set failed");
1911 			return -1;
1912 		}
1913 	}
1914 
1915 	(*device_configured) |= local_cfg;
1916 
1917 	return 0;
1918 }
1919 
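/* Build the QoS and FS(TC) table entries for a UDP pattern item.
 * If no spec is given, or if MC/WRIOP cannot identify L4 by ports,
 * an IP next-protocol (IPPROTO_UDP) discrimination rule is installed
 * first; masked source/destination ports, when specified, are then
 * added to both the QoS key and the FS key of the traffic class
 * selected by attr->group.
 */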
1920 static int
1921 dpaa2_configure_flow_udp(struct rte_flow *flow,
1922 			 struct rte_eth_dev *dev,
1923 			  const struct rte_flow_attr *attr,
1924 			  const struct rte_flow_item *pattern,
1925 			  const struct rte_flow_action actions[] __rte_unused,
1926 			  struct rte_flow_error *error __rte_unused,
1927 			  int *device_configured)
1928 {
1929 	int index, ret;
1930 	int local_cfg = 0;
1931 	uint32_t group;
1932 	const struct rte_flow_item_udp *spec, *mask;
1933 
1934 	const struct rte_flow_item_udp *last __rte_unused;
1935 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1936 
1937 	group = attr->group;
1938 
1939 	/* Parse pattern list to get the matching parameters */
1940 	spec    = (const struct rte_flow_item_udp *)pattern->spec;
1941 	last    = (const struct rte_flow_item_udp *)pattern->last;
1942 	mask    = (const struct rte_flow_item_udp *)
1943 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_udp_mask);
1944 
1945 	/* Get traffic class index and flow id to be configured */
1946 	flow->tc_id = group;
1947 	flow->tc_index = attr->priority;
1948 
1949 	if (!spec || !mc_l4_port_identification) {
1950 		struct proto_discrimination proto;
1951 
1952 		index = dpaa2_flow_extract_search(
1953 				&priv->extract.qos_key_extract.dpkg,
1954 				NET_PROT_IP, NH_FLD_IP_PROTO);
1955 		if (index < 0) {
1956 			ret = dpaa2_flow_proto_discrimination_extract(
1957 					&priv->extract.qos_key_extract,
1958 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1959 			if (ret) {
1960 				DPAA2_PMD_ERR(
1961 					"QoS Extract IP protocol to discriminate UDP failed.");
1962 
1963 				return -1;
1964 			}
1965 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1966 		}
1967 
1968 		index = dpaa2_flow_extract_search(
1969 				&priv->extract.tc_key_extract[group].dpkg,
1970 				NET_PROT_IP, NH_FLD_IP_PROTO);
1971 		if (index < 0) {
1972 			ret = dpaa2_flow_proto_discrimination_extract(
1973 				&priv->extract.tc_key_extract[group],
1974 				DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1975 			if (ret) {
1976 				DPAA2_PMD_ERR(
1977 					"FS Extract IP protocol to discriminate UDP failed.");
1978 
1979 				return -1;
1980 			}
1981 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1982 		}
1983 
1984 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1985 		if (ret) {
1986 			DPAA2_PMD_ERR(
1987 				"Move IP addr before UDP discrimination set failed");
1988 			return -1;
1989 		}
1990 
1991 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
1992 		proto.ip_proto = IPPROTO_UDP;
1993 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1994 							proto, group);
1995 		if (ret) {
1996 			DPAA2_PMD_ERR("UDP discrimination rule set failed");
1997 			return -1;
1998 		}
1999 
2000 		(*device_configured) |= local_cfg;
2001 
2002 		if (!spec)
2003 			return 0;
2004 	}
2005 
2006 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2007 		RTE_FLOW_ITEM_TYPE_UDP)) {
2008 		DPAA2_PMD_WARN("Extract field(s) of UDP not supported.");
2009 
2010 		return -1;
2011 	}
2012 
2013 	if (mask->hdr.src_port) {
2014 		index = dpaa2_flow_extract_search(
2015 				&priv->extract.qos_key_extract.dpkg,
2016 				NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
2017 		if (index < 0) {
2018 			ret = dpaa2_flow_extract_add(
2019 					&priv->extract.qos_key_extract,
2020 				NET_PROT_UDP,
2021 				NH_FLD_UDP_PORT_SRC,
2022 				NH_FLD_UDP_PORT_SIZE);
2023 			if (ret) {
2024 				DPAA2_PMD_ERR("QoS Extract add UDP_SRC failed.");
2025 
2026 				return -1;
2027 			}
2028 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2029 		}
2030 
2031 		index = dpaa2_flow_extract_search(
2032 				&priv->extract.tc_key_extract[group].dpkg,
2033 				NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
2034 		if (index < 0) {
2035 			ret = dpaa2_flow_extract_add(
2036 					&priv->extract.tc_key_extract[group],
2037 					NET_PROT_UDP,
2038 					NH_FLD_UDP_PORT_SRC,
2039 					NH_FLD_UDP_PORT_SIZE);
2040 			if (ret) {
2041 				DPAA2_PMD_ERR("FS Extract add UDP_SRC failed.");
2042 
2043 				return -1;
2044 			}
2045 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2046 		}
2047 
2048 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2049 		if (ret) {
2050 			DPAA2_PMD_ERR(
2051 				"Move ipaddr before UDP_PORT_SRC set failed");
2052 			return -1;
2053 		}
2054 
2055 		ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
2056 				&flow->qos_rule,
2057 				NET_PROT_UDP,
2058 				NH_FLD_UDP_PORT_SRC,
2059 				&spec->hdr.src_port,
2060 				&mask->hdr.src_port,
2061 				NH_FLD_UDP_PORT_SIZE);
2062 		if (ret) {
2063 			DPAA2_PMD_ERR(
2064 				"QoS NH_FLD_UDP_PORT_SRC rule data set failed");
2065 			return -1;
2066 		}
2067 
2068 		ret = dpaa2_flow_rule_data_set(
2069 				&priv->extract.tc_key_extract[group],
2070 				&flow->fs_rule,
2071 				NET_PROT_UDP,
2072 				NH_FLD_UDP_PORT_SRC,
2073 				&spec->hdr.src_port,
2074 				&mask->hdr.src_port,
2075 				NH_FLD_UDP_PORT_SIZE);
2076 		if (ret) {
2077 			DPAA2_PMD_ERR(
2078 				"FS NH_FLD_UDP_PORT_SRC rule data set failed");
2079 			return -1;
2080 		}
2081 	}
2082 
2083 	if (mask->hdr.dst_port) {
2084 		index = dpaa2_flow_extract_search(
2085 				&priv->extract.qos_key_extract.dpkg,
2086 				NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
2087 		if (index < 0) {
2088 			ret = dpaa2_flow_extract_add(
2089 					&priv->extract.qos_key_extract,
2090 					NET_PROT_UDP,
2091 					NH_FLD_UDP_PORT_DST,
2092 					NH_FLD_UDP_PORT_SIZE);
2093 			if (ret) {
2094 				DPAA2_PMD_ERR("QoS Extract add UDP_DST failed.");
2095 
2096 				return -1;
2097 			}
2098 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2099 		}
2100 
2101 		index = dpaa2_flow_extract_search(
2102 				&priv->extract.tc_key_extract[group].dpkg,
2103 				NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
2104 		if (index < 0) {
2105 			ret = dpaa2_flow_extract_add(
2106 					&priv->extract.tc_key_extract[group],
2107 					NET_PROT_UDP,
2108 					NH_FLD_UDP_PORT_DST,
2109 					NH_FLD_UDP_PORT_SIZE);
2110 			if (ret) {
2111 				DPAA2_PMD_ERR("FS Extract add UDP_DST failed.");
2112 
2113 				return -1;
2114 			}
2115 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2116 		}
2117 
2118 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2119 		if (ret) {
2120 			DPAA2_PMD_ERR(
2121 				"Move ipaddr before UDP_PORT_DST set failed");
2122 			return -1;
2123 		}
2124 
2125 		ret = dpaa2_flow_rule_data_set(
2126 				&priv->extract.qos_key_extract,
2127 				&flow->qos_rule,
2128 				NET_PROT_UDP,
2129 				NH_FLD_UDP_PORT_DST,
2130 				&spec->hdr.dst_port,
2131 				&mask->hdr.dst_port,
2132 				NH_FLD_UDP_PORT_SIZE);
2133 		if (ret) {
2134 			DPAA2_PMD_ERR(
2135 				"QoS NH_FLD_UDP_PORT_DST rule data set failed");
2136 			return -1;
2137 		}
2138 
2139 		ret = dpaa2_flow_rule_data_set(
2140 				&priv->extract.tc_key_extract[group],
2141 				&flow->fs_rule,
2142 				NET_PROT_UDP,
2143 				NH_FLD_UDP_PORT_DST,
2144 				&spec->hdr.dst_port,
2145 				&mask->hdr.dst_port,
2146 				NH_FLD_UDP_PORT_SIZE);
2147 		if (ret) {
2148 			DPAA2_PMD_ERR(
2149 				"FS NH_FLD_UDP_PORT_DST rule data set failed");
2150 			return -1;
2151 		}
2152 	}
2153 
2154 	(*device_configured) |= local_cfg;
2155 
2156 	return 0;
2157 }
2158 
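/* Build the QoS and FS(TC) table entries for a TCP pattern item.
 * If no spec is given, or if MC/WRIOP cannot identify L4 by ports,
 * an IP next-protocol (IPPROTO_TCP) discrimination rule is installed
 * first; masked source/destination ports, when specified, are then
 * added to both the QoS key and the FS key of the traffic class
 * selected by attr->group.
 */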
2159 static int
2160 dpaa2_configure_flow_tcp(struct rte_flow *flow,
2161 			 struct rte_eth_dev *dev,
2162 			 const struct rte_flow_attr *attr,
2163 			 const struct rte_flow_item *pattern,
2164 			 const struct rte_flow_action actions[] __rte_unused,
2165 			 struct rte_flow_error *error __rte_unused,
2166 			 int *device_configured)
2167 {
2168 	int index, ret;
2169 	int local_cfg = 0;
2170 	uint32_t group;
2171 	const struct rte_flow_item_tcp *spec, *mask;
2172 
2173 	const struct rte_flow_item_tcp *last __rte_unused;
2174 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2175 
2176 	group = attr->group;
2177 
2178 	/* Parse pattern list to get the matching parameters */
2179 	spec    = (const struct rte_flow_item_tcp *)pattern->spec;
2180 	last    = (const struct rte_flow_item_tcp *)pattern->last;
2181 	mask    = (const struct rte_flow_item_tcp *)
2182 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_tcp_mask);
2183 
2184 	/* Get traffic class index and flow id to be configured */
2185 	flow->tc_id = group;
2186 	flow->tc_index = attr->priority;
2187 
2188 	if (!spec || !mc_l4_port_identification) {
2189 		struct proto_discrimination proto;
2190 
2191 		index = dpaa2_flow_extract_search(
2192 				&priv->extract.qos_key_extract.dpkg,
2193 				NET_PROT_IP, NH_FLD_IP_PROTO);
2194 		if (index < 0) {
2195 			ret = dpaa2_flow_proto_discrimination_extract(
2196 					&priv->extract.qos_key_extract,
2197 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2198 			if (ret) {
2199 				DPAA2_PMD_ERR(
2200 					"QoS Extract IP protocol to discriminate TCP failed.");
2201 
2202 				return -1;
2203 			}
2204 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2205 		}
2206 
2207 		index = dpaa2_flow_extract_search(
2208 				&priv->extract.tc_key_extract[group].dpkg,
2209 				NET_PROT_IP, NH_FLD_IP_PROTO);
2210 		if (index < 0) {
2211 			ret = dpaa2_flow_proto_discrimination_extract(
2212 				&priv->extract.tc_key_extract[group],
2213 				DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2214 			if (ret) {
2215 				DPAA2_PMD_ERR(
2216 					"FS Extract IP protocol to discriminate TCP failed.");
2217 
2218 				return -1;
2219 			}
2220 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2221 		}
2222 
2223 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2224 		if (ret) {
2225 			DPAA2_PMD_ERR(
2226 				"Move IP addr before TCP discrimination set failed");
2227 			return -1;
2228 		}
2229 
2230 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2231 		proto.ip_proto = IPPROTO_TCP;
2232 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2233 							proto, group);
2234 		if (ret) {
2235 			DPAA2_PMD_ERR("TCP discrimination rule set failed");
2236 			return -1;
2237 		}
2238 
2239 		(*device_configured) |= local_cfg;
2240 
2241 		if (!spec)
2242 			return 0;
2243 	}
2244 
2245 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2246 		RTE_FLOW_ITEM_TYPE_TCP)) {
2247 		DPAA2_PMD_WARN("Extract field(s) of TCP not supported.");
2248 
2249 		return -1;
2250 	}
2251 
2252 	if (mask->hdr.src_port) {
2253 		index = dpaa2_flow_extract_search(
2254 				&priv->extract.qos_key_extract.dpkg,
2255 				NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
2256 		if (index < 0) {
2257 			ret = dpaa2_flow_extract_add(
2258 					&priv->extract.qos_key_extract,
2259 					NET_PROT_TCP,
2260 					NH_FLD_TCP_PORT_SRC,
2261 					NH_FLD_TCP_PORT_SIZE);
2262 			if (ret) {
2263 				DPAA2_PMD_ERR("QoS Extract add TCP_SRC failed.");
2264 
2265 				return -1;
2266 			}
2267 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2268 		}
2269 
2270 		index = dpaa2_flow_extract_search(
2271 				&priv->extract.tc_key_extract[group].dpkg,
2272 				NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
2273 		if (index < 0) {
2274 			ret = dpaa2_flow_extract_add(
2275 					&priv->extract.tc_key_extract[group],
2276 					NET_PROT_TCP,
2277 					NH_FLD_TCP_PORT_SRC,
2278 					NH_FLD_TCP_PORT_SIZE);
2279 			if (ret) {
2280 				DPAA2_PMD_ERR("FS Extract add TCP_SRC failed.");
2281 
2282 				return -1;
2283 			}
2284 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2285 		}
2286 
2287 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2288 		if (ret) {
2289 			DPAA2_PMD_ERR(
2290 				"Move ipaddr before TCP_PORT_SRC set failed");
2291 			return -1;
2292 		}
2293 
2294 		ret = dpaa2_flow_rule_data_set(
2295 				&priv->extract.qos_key_extract,
2296 				&flow->qos_rule,
2297 				NET_PROT_TCP,
2298 				NH_FLD_TCP_PORT_SRC,
2299 				&spec->hdr.src_port,
2300 				&mask->hdr.src_port,
2301 				NH_FLD_TCP_PORT_SIZE);
2302 		if (ret) {
2303 			DPAA2_PMD_ERR(
2304 				"QoS NH_FLD_TCP_PORT_SRC rule data set failed");
2305 			return -1;
2306 		}
2307 
2308 		ret = dpaa2_flow_rule_data_set(
2309 				&priv->extract.tc_key_extract[group],
2310 				&flow->fs_rule,
2311 				NET_PROT_TCP,
2312 				NH_FLD_TCP_PORT_SRC,
2313 				&spec->hdr.src_port,
2314 				&mask->hdr.src_port,
2315 				NH_FLD_TCP_PORT_SIZE);
2316 		if (ret) {
2317 			DPAA2_PMD_ERR(
2318 				"FS NH_FLD_TCP_PORT_SRC rule data set failed");
2319 			return -1;
2320 		}
2321 	}
2322 
2323 	if (mask->hdr.dst_port) {
2324 		index = dpaa2_flow_extract_search(
2325 				&priv->extract.qos_key_extract.dpkg,
2326 				NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
2327 		if (index < 0) {
2328 			ret = dpaa2_flow_extract_add(
2329 					&priv->extract.qos_key_extract,
2330 					NET_PROT_TCP,
2331 					NH_FLD_TCP_PORT_DST,
2332 					NH_FLD_TCP_PORT_SIZE);
2333 			if (ret) {
2334 				DPAA2_PMD_ERR("QoS Extract add TCP_DST failed.");
2335 
2336 				return -1;
2337 			}
2338 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2339 		}
2340 
2341 		index = dpaa2_flow_extract_search(
2342 				&priv->extract.tc_key_extract[group].dpkg,
2343 				NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
2344 		if (index < 0) {
2345 			ret = dpaa2_flow_extract_add(
2346 					&priv->extract.tc_key_extract[group],
2347 					NET_PROT_TCP,
2348 					NH_FLD_TCP_PORT_DST,
2349 					NH_FLD_TCP_PORT_SIZE);
2350 			if (ret) {
2351 				DPAA2_PMD_ERR("FS Extract add TCP_DST failed.");
2352 
2353 				return -1;
2354 			}
2355 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2356 		}
2357 
2358 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2359 		if (ret) {
2360 			DPAA2_PMD_ERR(
2361 				"Move ipaddr before TCP_PORT_DST set failed");
2362 			return -1;
2363 		}
2364 
2365 		ret = dpaa2_flow_rule_data_set(
2366 				&priv->extract.qos_key_extract,
2367 				&flow->qos_rule,
2368 				NET_PROT_TCP,
2369 				NH_FLD_TCP_PORT_DST,
2370 				&spec->hdr.dst_port,
2371 				&mask->hdr.dst_port,
2372 				NH_FLD_TCP_PORT_SIZE);
2373 		if (ret) {
2374 			DPAA2_PMD_ERR(
2375 				"QoS NH_FLD_TCP_PORT_DST rule data set failed");
2376 			return -1;
2377 		}
2378 
2379 		ret = dpaa2_flow_rule_data_set(
2380 				&priv->extract.tc_key_extract[group],
2381 				&flow->fs_rule,
2382 				NET_PROT_TCP,
2383 				NH_FLD_TCP_PORT_DST,
2384 				&spec->hdr.dst_port,
2385 				&mask->hdr.dst_port,
2386 				NH_FLD_TCP_PORT_SIZE);
2387 		if (ret) {
2388 			DPAA2_PMD_ERR(
2389 				"FS NH_FLD_TCP_PORT_DST rule data set failed");
2390 			return -1;
2391 		}
2392 	}
2393 
2394 	(*device_configured) |= local_cfg;
2395 
2396 	return 0;
2397 }
2398 
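/* Build the QoS and FS(TC) table entries for an SCTP pattern item.
 * If no spec is given, or if MC/WRIOP cannot identify L4 by ports,
 * an IP next-protocol (IPPROTO_SCTP) discrimination rule is installed
 * first; masked source/destination ports, when specified, are then
 * added to both the QoS key and the FS key of the traffic class
 * selected by attr->group.
 */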
2399 static int
2400 dpaa2_configure_flow_sctp(struct rte_flow *flow,
2401 			  struct rte_eth_dev *dev,
2402 			  const struct rte_flow_attr *attr,
2403 			  const struct rte_flow_item *pattern,
2404 			  const struct rte_flow_action actions[] __rte_unused,
2405 			  struct rte_flow_error *error __rte_unused,
2406 			  int *device_configured)
2407 {
2408 	int index, ret;
2409 	int local_cfg = 0;
2410 	uint32_t group;
2411 	const struct rte_flow_item_sctp *spec, *mask;
2412 
2413 	const struct rte_flow_item_sctp *last __rte_unused;
2414 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2415 
2416 	group = attr->group;
2417 
2418 	/* Parse pattern list to get the matching parameters */
2419 	spec    = (const struct rte_flow_item_sctp *)pattern->spec;
2420 	last    = (const struct rte_flow_item_sctp *)pattern->last;
2421 	mask    = (const struct rte_flow_item_sctp *)
2422 			(pattern->mask ? pattern->mask :
2423 				&dpaa2_flow_item_sctp_mask);
2424 
2425 	/* Get traffic class index and flow id to be configured */
2426 	flow->tc_id = group;
2427 	flow->tc_index = attr->priority;
2428 
2429 	if (!spec || !mc_l4_port_identification) {
2430 		struct proto_discrimination proto;
2431 
2432 		index = dpaa2_flow_extract_search(
2433 				&priv->extract.qos_key_extract.dpkg,
2434 				NET_PROT_IP, NH_FLD_IP_PROTO);
2435 		if (index < 0) {
2436 			ret = dpaa2_flow_proto_discrimination_extract(
2437 					&priv->extract.qos_key_extract,
2438 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2439 			if (ret) {
2440 				DPAA2_PMD_ERR(
2441 					"QoS Extract IP protocol to discriminate SCTP failed.");
2442 
2443 				return -1;
2444 			}
2445 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2446 		}
2447 
2448 		index = dpaa2_flow_extract_search(
2449 				&priv->extract.tc_key_extract[group].dpkg,
2450 				NET_PROT_IP, NH_FLD_IP_PROTO);
2451 		if (index < 0) {
2452 			ret = dpaa2_flow_proto_discrimination_extract(
2453 					&priv->extract.tc_key_extract[group],
2454 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2455 			if (ret) {
2456 				DPAA2_PMD_ERR(
2457 					"FS Extract IP protocol to discriminate SCTP failed.");
2458 
2459 				return -1;
2460 			}
2461 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2462 		}
2463 
2464 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2465 		if (ret) {
2466 			DPAA2_PMD_ERR(
2467 				"Move ipaddr before SCTP discrimination set failed");
2468 			return -1;
2469 		}
2470 
2471 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2472 		proto.ip_proto = IPPROTO_SCTP;
2473 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2474 							proto, group);
2475 		if (ret) {
2476 			DPAA2_PMD_ERR("SCTP discrimination rule set failed");
2477 			return -1;
2478 		}
2479 
2480 		(*device_configured) |= local_cfg;
2481 
2482 		if (!spec)
2483 			return 0;
2484 	}
2485 
2486 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2487 		RTE_FLOW_ITEM_TYPE_SCTP)) {
2488 		DPAA2_PMD_WARN("Extract field(s) of SCTP not supported.");
2489 
2490 		return -1;
2491 	}
2492 
2493 	if (mask->hdr.src_port) {
2494 		index = dpaa2_flow_extract_search(
2495 				&priv->extract.qos_key_extract.dpkg,
2496 				NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
2497 		if (index < 0) {
2498 			ret = dpaa2_flow_extract_add(
2499 					&priv->extract.qos_key_extract,
2500 					NET_PROT_SCTP,
2501 					NH_FLD_SCTP_PORT_SRC,
2502 					NH_FLD_SCTP_PORT_SIZE);
2503 			if (ret) {
2504 				DPAA2_PMD_ERR("QoS Extract add SCTP_SRC failed.");
2505 
2506 				return -1;
2507 			}
2508 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2509 		}
2510 
2511 		index = dpaa2_flow_extract_search(
2512 				&priv->extract.tc_key_extract[group].dpkg,
2513 				NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
2514 		if (index < 0) {
2515 			ret = dpaa2_flow_extract_add(
2516 					&priv->extract.tc_key_extract[group],
2517 					NET_PROT_SCTP,
2518 					NH_FLD_SCTP_PORT_SRC,
2519 					NH_FLD_SCTP_PORT_SIZE);
2520 			if (ret) {
2521 				DPAA2_PMD_ERR("FS Extract add SCTP_SRC failed.");
2522 
2523 				return -1;
2524 			}
2525 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2526 		}
2527 
2528 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2529 		if (ret) {
2530 			DPAA2_PMD_ERR(
2531 				"Move ipaddr before SCTP_PORT_SRC set failed");
2532 			return -1;
2533 		}
2534 
2535 		ret = dpaa2_flow_rule_data_set(
2536 				&priv->extract.qos_key_extract,
2537 				&flow->qos_rule,
2538 				NET_PROT_SCTP,
2539 				NH_FLD_SCTP_PORT_SRC,
2540 				&spec->hdr.src_port,
2541 				&mask->hdr.src_port,
2542 				NH_FLD_SCTP_PORT_SIZE);
2543 		if (ret) {
2544 			DPAA2_PMD_ERR(
2545 				"QoS NH_FLD_SCTP_PORT_SRC rule data set failed");
2546 			return -1;
2547 		}
2548 
2549 		ret = dpaa2_flow_rule_data_set(
2550 				&priv->extract.tc_key_extract[group],
2551 				&flow->fs_rule,
2552 				NET_PROT_SCTP,
2553 				NH_FLD_SCTP_PORT_SRC,
2554 				&spec->hdr.src_port,
2555 				&mask->hdr.src_port,
2556 				NH_FLD_SCTP_PORT_SIZE);
2557 		if (ret) {
2558 			DPAA2_PMD_ERR(
2559 				"FS NH_FLD_SCTP_PORT_SRC rule data set failed");
2560 			return -1;
2561 		}
2562 	}
2563 
2564 	if (mask->hdr.dst_port) {
2565 		index = dpaa2_flow_extract_search(
2566 				&priv->extract.qos_key_extract.dpkg,
2567 				NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
2568 		if (index < 0) {
2569 			ret = dpaa2_flow_extract_add(
2570 					&priv->extract.qos_key_extract,
2571 					NET_PROT_SCTP,
2572 					NH_FLD_SCTP_PORT_DST,
2573 					NH_FLD_SCTP_PORT_SIZE);
2574 			if (ret) {
2575 				DPAA2_PMD_ERR("QoS Extract add SCTP_DST failed.");
2576 
2577 				return -1;
2578 			}
2579 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2580 		}
2581 
2582 		index = dpaa2_flow_extract_search(
2583 				&priv->extract.tc_key_extract[group].dpkg,
2584 				NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
2585 		if (index < 0) {
2586 			ret = dpaa2_flow_extract_add(
2587 					&priv->extract.tc_key_extract[group],
2588 					NET_PROT_SCTP,
2589 					NH_FLD_SCTP_PORT_DST,
2590 					NH_FLD_SCTP_PORT_SIZE);
2591 			if (ret) {
2592 				DPAA2_PMD_ERR("FS Extract add SCTP_DST failed.");
2593 
2594 				return -1;
2595 			}
2596 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2597 		}
2598 
2599 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2600 		if (ret) {
2601 			DPAA2_PMD_ERR(
2602 				"Move ipaddr before SCTP_PORT_DST set failed");
2603 			return -1;
2604 		}
2605 
2606 		ret = dpaa2_flow_rule_data_set(
2607 				&priv->extract.qos_key_extract,
2608 				&flow->qos_rule,
2609 				NET_PROT_SCTP,
2610 				NH_FLD_SCTP_PORT_DST,
2611 				&spec->hdr.dst_port,
2612 				&mask->hdr.dst_port,
2613 				NH_FLD_SCTP_PORT_SIZE);
2614 		if (ret) {
2615 			DPAA2_PMD_ERR(
2616 				"QoS NH_FLD_SCTP_PORT_DST rule data set failed");
2617 			return -1;
2618 		}
2619 
2620 		ret = dpaa2_flow_rule_data_set(
2621 				&priv->extract.tc_key_extract[group],
2622 				&flow->fs_rule,
2623 				NET_PROT_SCTP,
2624 				NH_FLD_SCTP_PORT_DST,
2625 				&spec->hdr.dst_port,
2626 				&mask->hdr.dst_port,
2627 				NH_FLD_SCTP_PORT_SIZE);
2628 		if (ret) {
2629 			DPAA2_PMD_ERR(
2630 				"FS NH_FLD_SCTP_PORT_DST rule data set failed");
2631 			return -1;
2632 		}
2633 	}
2634 
2635 	(*device_configured) |= local_cfg;
2636 
2637 	return 0;
2638 }
2639 
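/* Build the QoS and FS(TC) table entries for a GRE pattern item.
 * Without a spec only an IP next-protocol (IPPROTO_GRE) discrimination
 * rule is installed; with a spec the (masked) GRE protocol type field
 * is matched in both the QoS key and the FS key of the selected
 * traffic class.
 */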
2640 static int
2641 dpaa2_configure_flow_gre(struct rte_flow *flow,
2642 			 struct rte_eth_dev *dev,
2643 			 const struct rte_flow_attr *attr,
2644 			 const struct rte_flow_item *pattern,
2645 			 const struct rte_flow_action actions[] __rte_unused,
2646 			 struct rte_flow_error *error __rte_unused,
2647 			 int *device_configured)
2648 {
2649 	int index, ret;
2650 	int local_cfg = 0;
2651 	uint32_t group;
2652 	const struct rte_flow_item_gre *spec, *mask;
2653 
2654 	const struct rte_flow_item_gre *last __rte_unused;
2655 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2656 
2657 	group = attr->group;
2658 
2659 	/* Parse pattern list to get the matching parameters */
2660 	spec    = (const struct rte_flow_item_gre *)pattern->spec;
2661 	last    = (const struct rte_flow_item_gre *)pattern->last;
2662 	mask    = (const struct rte_flow_item_gre *)
2663 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_gre_mask);
2664 
2665 	/* Get traffic class index and flow id to be configured */
2666 	flow->tc_id = group;
2667 	flow->tc_index = attr->priority;
2668 
2669 	if (!spec) {
2670 		struct proto_discrimination proto;
2671 
2672 		index = dpaa2_flow_extract_search(
2673 				&priv->extract.qos_key_extract.dpkg,
2674 				NET_PROT_IP, NH_FLD_IP_PROTO);
2675 		if (index < 0) {
2676 			ret = dpaa2_flow_proto_discrimination_extract(
2677 					&priv->extract.qos_key_extract,
2678 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2679 			if (ret) {
2680 				DPAA2_PMD_ERR(
2681 					"QoS Extract IP protocol to discriminate GRE failed.");
2682 
2683 				return -1;
2684 			}
2685 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2686 		}
2687 
2688 		index = dpaa2_flow_extract_search(
2689 				&priv->extract.tc_key_extract[group].dpkg,
2690 				NET_PROT_IP, NH_FLD_IP_PROTO);
2691 		if (index < 0) {
2692 			ret = dpaa2_flow_proto_discrimination_extract(
2693 					&priv->extract.tc_key_extract[group],
2694 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2695 			if (ret) {
2696 				DPAA2_PMD_ERR(
2697 					"FS Extract IP protocol to discriminate GRE failed.");
2698 
2699 				return -1;
2700 			}
2701 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2702 		}
2703 
2704 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2705 		if (ret) {
2706 			DPAA2_PMD_ERR(
2707 				"Move IP addr before GRE discrimination set failed");
2708 			return -1;
2709 		}
2710 
2711 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2712 		proto.ip_proto = IPPROTO_GRE;
2713 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2714 							proto, group);
2715 		if (ret) {
2716 			DPAA2_PMD_ERR("GRE discrimination rule set failed");
2717 			return -1;
2718 		}
2719 
2720 		(*device_configured) |= local_cfg;
2721 
2722 		return 0;
2723 	}
2724 
2725 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2726 		RTE_FLOW_ITEM_TYPE_GRE)) {
2727 		DPAA2_PMD_WARN("Extract field(s) of GRE not supported.");
2728 
2729 		return -1;
2730 	}
2731 
2732 	if (!mask->protocol)
2733 		return 0;
2734 
2735 	index = dpaa2_flow_extract_search(
2736 			&priv->extract.qos_key_extract.dpkg,
2737 			NET_PROT_GRE, NH_FLD_GRE_TYPE);
2738 	if (index < 0) {
2739 		ret = dpaa2_flow_extract_add(
2740 				&priv->extract.qos_key_extract,
2741 				NET_PROT_GRE,
2742 				NH_FLD_GRE_TYPE,
2743 				sizeof(rte_be16_t));
2744 		if (ret) {
2745 			DPAA2_PMD_ERR("QoS Extract add GRE_TYPE failed.");
2746 
2747 			return -1;
2748 		}
2749 		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2750 	}
2751 
2752 	index = dpaa2_flow_extract_search(
2753 			&priv->extract.tc_key_extract[group].dpkg,
2754 			NET_PROT_GRE, NH_FLD_GRE_TYPE);
2755 	if (index < 0) {
2756 		ret = dpaa2_flow_extract_add(
2757 				&priv->extract.tc_key_extract[group],
2758 				NET_PROT_GRE,
2759 				NH_FLD_GRE_TYPE,
2760 				sizeof(rte_be16_t));
2761 		if (ret) {
2762 			DPAA2_PMD_ERR("FS Extract add GRE_TYPE failed.");
2763 
2764 			return -1;
2765 		}
2766 		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2767 	}
2768 
2769 	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2770 	if (ret) {
2771 		DPAA2_PMD_ERR(
2772 			"Move ipaddr before GRE_TYPE set failed");
2773 		return -1;
2774 	}
2775 
2776 	ret = dpaa2_flow_rule_data_set(
2777 				&priv->extract.qos_key_extract,
2778 				&flow->qos_rule,
2779 				NET_PROT_GRE,
2780 				NH_FLD_GRE_TYPE,
2781 				&spec->protocol,
2782 				&mask->protocol,
2783 				sizeof(rte_be16_t));
2784 	if (ret) {
2785 		DPAA2_PMD_ERR(
2786 			"QoS NH_FLD_GRE_TYPE rule data set failed");
2787 		return -1;
2788 	}
2789 
2790 	ret = dpaa2_flow_rule_data_set(
2791 			&priv->extract.tc_key_extract[group],
2792 			&flow->fs_rule,
2793 			NET_PROT_GRE,
2794 			NH_FLD_GRE_TYPE,
2795 			&spec->protocol,
2796 			&mask->protocol,
2797 			sizeof(rte_be16_t));
2798 	if (ret) {
2799 		DPAA2_PMD_ERR(
2800 			"FS NH_FLD_GRE_TYPE rule data set failed");
2801 		return -1;
2802 	}
2803 
2804 	(*device_configured) |= local_cfg;
2805 
2806 	return 0;
2807 }
2808 
2809 /* Existing QoS/FS entries that match on IP address(es)
2810  * must be updated whenever new extract(s) are inserted
2811  * ahead of the IP address extract(s), since the address
2812  * offsets within the key shift accordingly.
2813  */
2814 static int
2815 dpaa2_flow_entry_update(
2816 	struct dpaa2_dev_priv *priv, uint8_t tc_id)
2817 {
2818 	struct rte_flow *curr = LIST_FIRST(&priv->flows);
2819 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
2820 	int ret;
2821 	int qos_ipsrc_offset = -1, qos_ipdst_offset = -1;
2822 	int fs_ipsrc_offset = -1, fs_ipdst_offset = -1;
2823 	struct dpaa2_key_extract *qos_key_extract =
2824 		&priv->extract.qos_key_extract;
2825 	struct dpaa2_key_extract *tc_key_extract =
2826 		&priv->extract.tc_key_extract[tc_id];
2827 	char ipsrc_key[NH_FLD_IPV6_ADDR_SIZE];
2828 	char ipdst_key[NH_FLD_IPV6_ADDR_SIZE];
2829 	char ipsrc_mask[NH_FLD_IPV6_ADDR_SIZE];
2830 	char ipdst_mask[NH_FLD_IPV6_ADDR_SIZE];
2831 	int extend = -1, extend1, size = -1;
2832 	uint16_t qos_index;
2833 
2834 	while (curr) {
2835 		if (curr->ipaddr_rule.ipaddr_type ==
2836 			FLOW_NONE_IPADDR) {
2837 			curr = LIST_NEXT(curr, next);
2838 			continue;
2839 		}
2840 
2841 		if (curr->ipaddr_rule.ipaddr_type ==
2842 			FLOW_IPV4_ADDR) {
2843 			qos_ipsrc_offset =
2844 				qos_key_extract->key_info.ipv4_src_offset;
2845 			qos_ipdst_offset =
2846 				qos_key_extract->key_info.ipv4_dst_offset;
2847 			fs_ipsrc_offset =
2848 				tc_key_extract->key_info.ipv4_src_offset;
2849 			fs_ipdst_offset =
2850 				tc_key_extract->key_info.ipv4_dst_offset;
2851 			size = NH_FLD_IPV4_ADDR_SIZE;
2852 		} else {
2853 			qos_ipsrc_offset =
2854 				qos_key_extract->key_info.ipv6_src_offset;
2855 			qos_ipdst_offset =
2856 				qos_key_extract->key_info.ipv6_dst_offset;
2857 			fs_ipsrc_offset =
2858 				tc_key_extract->key_info.ipv6_src_offset;
2859 			fs_ipdst_offset =
2860 				tc_key_extract->key_info.ipv6_dst_offset;
2861 			size = NH_FLD_IPV6_ADDR_SIZE;
2862 		}
2863 
2864 		qos_index = curr->tc_id * priv->fs_entries +
2865 			curr->tc_index;
2866 
2867 		dpaa2_flow_qos_entry_log("Before update", curr, qos_index);
2868 
2869 		ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW,
2870 				priv->token, &curr->qos_rule);
2871 		if (ret) {
2872 			DPAA2_PMD_ERR("Qos entry remove failed.");
2873 			return -1;
2874 		}
2875 
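		/* 'extend' records how far the IP address extract has been
		 * pushed toward the key tail; the shifted bytes are moved to
		 * their new offsets below and the real key size grows by the
		 * same amount.
		 */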
2876 		extend = -1;
2877 
2878 		if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
2879 			RTE_ASSERT(qos_ipsrc_offset >=
2880 				curr->ipaddr_rule.qos_ipsrc_offset);
2881 			extend1 = qos_ipsrc_offset -
2882 				curr->ipaddr_rule.qos_ipsrc_offset;
2883 			if (extend >= 0)
2884 				RTE_ASSERT(extend == extend1);
2885 			else
2886 				extend = extend1;
2887 
2888 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
2889 				(size == NH_FLD_IPV6_ADDR_SIZE));
2890 
2891 			memcpy(ipsrc_key,
2892 				(char *)(size_t)curr->qos_rule.key_iova +
2893 				curr->ipaddr_rule.qos_ipsrc_offset,
2894 				size);
2895 			memset((char *)(size_t)curr->qos_rule.key_iova +
2896 				curr->ipaddr_rule.qos_ipsrc_offset,
2897 				0, size);
2898 
2899 			memcpy(ipsrc_mask,
2900 				(char *)(size_t)curr->qos_rule.mask_iova +
2901 				curr->ipaddr_rule.qos_ipsrc_offset,
2902 				size);
2903 			memset((char *)(size_t)curr->qos_rule.mask_iova +
2904 				curr->ipaddr_rule.qos_ipsrc_offset,
2905 				0, size);
2906 
2907 			curr->ipaddr_rule.qos_ipsrc_offset = qos_ipsrc_offset;
2908 		}
2909 
2910 		if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
2911 			RTE_ASSERT(qos_ipdst_offset >=
2912 				curr->ipaddr_rule.qos_ipdst_offset);
2913 			extend1 = qos_ipdst_offset -
2914 				curr->ipaddr_rule.qos_ipdst_offset;
2915 			if (extend >= 0)
2916 				RTE_ASSERT(extend == extend1);
2917 			else
2918 				extend = extend1;
2919 
2920 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
2921 				(size == NH_FLD_IPV6_ADDR_SIZE));
2922 
2923 			memcpy(ipdst_key,
2924 				(char *)(size_t)curr->qos_rule.key_iova +
2925 				curr->ipaddr_rule.qos_ipdst_offset,
2926 				size);
2927 			memset((char *)(size_t)curr->qos_rule.key_iova +
2928 				curr->ipaddr_rule.qos_ipdst_offset,
2929 				0, size);
2930 
2931 			memcpy(ipdst_mask,
2932 				(char *)(size_t)curr->qos_rule.mask_iova +
2933 				curr->ipaddr_rule.qos_ipdst_offset,
2934 				size);
2935 			memset((char *)(size_t)curr->qos_rule.mask_iova +
2936 				curr->ipaddr_rule.qos_ipdst_offset,
2937 				0, size);
2938 
2939 			curr->ipaddr_rule.qos_ipdst_offset = qos_ipdst_offset;
2940 		}
2941 
2942 		if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
2943 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
2944 				(size == NH_FLD_IPV6_ADDR_SIZE));
2945 			memcpy((char *)(size_t)curr->qos_rule.key_iova +
2946 				curr->ipaddr_rule.qos_ipsrc_offset,
2947 				ipsrc_key,
2948 				size);
2949 			memcpy((char *)(size_t)curr->qos_rule.mask_iova +
2950 				curr->ipaddr_rule.qos_ipsrc_offset,
2951 				ipsrc_mask,
2952 				size);
2953 		}
2954 		if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
2955 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
2956 				(size == NH_FLD_IPV6_ADDR_SIZE));
2957 			memcpy((char *)(size_t)curr->qos_rule.key_iova +
2958 				curr->ipaddr_rule.qos_ipdst_offset,
2959 				ipdst_key,
2960 				size);
2961 			memcpy((char *)(size_t)curr->qos_rule.mask_iova +
2962 				curr->ipaddr_rule.qos_ipdst_offset,
2963 				ipdst_mask,
2964 				size);
2965 		}
2966 
2967 		if (extend >= 0)
2968 			curr->qos_real_key_size += extend;
2969 
2970 		curr->qos_rule.key_size = FIXED_ENTRY_SIZE;
2971 
2972 		dpaa2_flow_qos_entry_log("Start update", curr, qos_index);
2973 
2974 		ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
2975 				priv->token, &curr->qos_rule,
2976 				curr->tc_id, qos_index,
2977 				0, 0);
2978 		if (ret) {
2979 			DPAA2_PMD_ERR("Qos entry update failed.");
2980 			return -1;
2981 		}
2982 
2983 		if (curr->action != RTE_FLOW_ACTION_TYPE_QUEUE) {
2984 			curr = LIST_NEXT(curr, next);
2985 			continue;
2986 		}
2987 
2988 		dpaa2_flow_fs_entry_log("Before update", curr);
2989 		extend = -1;
2990 
2991 		ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW,
2992 				priv->token, curr->tc_id, &curr->fs_rule);
2993 		if (ret) {
2994 			DPAA2_PMD_ERR("FS entry remove failed.");
2995 			return -1;
2996 		}
2997 
2998 		if (curr->ipaddr_rule.fs_ipsrc_offset >= 0 &&
2999 			tc_id == curr->tc_id) {
3000 			RTE_ASSERT(fs_ipsrc_offset >=
3001 				curr->ipaddr_rule.fs_ipsrc_offset);
3002 			extend1 = fs_ipsrc_offset -
3003 				curr->ipaddr_rule.fs_ipsrc_offset;
3004 			if (extend >= 0)
3005 				RTE_ASSERT(extend == extend1);
3006 			else
3007 				extend = extend1;
3008 
3009 			memcpy(ipsrc_key,
3010 				(char *)(size_t)curr->fs_rule.key_iova +
3011 				curr->ipaddr_rule.fs_ipsrc_offset,
3012 				size);
3013 			memset((char *)(size_t)curr->fs_rule.key_iova +
3014 				curr->ipaddr_rule.fs_ipsrc_offset,
3015 				0, size);
3016 
3017 			memcpy(ipsrc_mask,
3018 				(char *)(size_t)curr->fs_rule.mask_iova +
3019 				curr->ipaddr_rule.fs_ipsrc_offset,
3020 				size);
3021 			memset((char *)(size_t)curr->fs_rule.mask_iova +
3022 				curr->ipaddr_rule.fs_ipsrc_offset,
3023 				0, size);
3024 
3025 			curr->ipaddr_rule.fs_ipsrc_offset = fs_ipsrc_offset;
3026 		}
3027 
3028 		if (curr->ipaddr_rule.fs_ipdst_offset >= 0 &&
3029 			tc_id == curr->tc_id) {
3030 			RTE_ASSERT(fs_ipdst_offset >=
3031 				curr->ipaddr_rule.fs_ipdst_offset);
3032 			extend1 = fs_ipdst_offset -
3033 				curr->ipaddr_rule.fs_ipdst_offset;
3034 			if (extend >= 0)
3035 				RTE_ASSERT(extend == extend1);
3036 			else
3037 				extend = extend1;
3038 
3039 			memcpy(ipdst_key,
3040 				(char *)(size_t)curr->fs_rule.key_iova +
3041 				curr->ipaddr_rule.fs_ipdst_offset,
3042 				size);
3043 			memset((char *)(size_t)curr->fs_rule.key_iova +
3044 				curr->ipaddr_rule.fs_ipdst_offset,
3045 				0, size);
3046 
3047 			memcpy(ipdst_mask,
3048 				(char *)(size_t)curr->fs_rule.mask_iova +
3049 				curr->ipaddr_rule.fs_ipdst_offset,
3050 				size);
3051 			memset((char *)(size_t)curr->fs_rule.mask_iova +
3052 				curr->ipaddr_rule.fs_ipdst_offset,
3053 				0, size);
3054 
3055 			curr->ipaddr_rule.fs_ipdst_offset = fs_ipdst_offset;
3056 		}
3057 
3058 		if (curr->ipaddr_rule.fs_ipsrc_offset >= 0) {
3059 			memcpy((char *)(size_t)curr->fs_rule.key_iova +
3060 				curr->ipaddr_rule.fs_ipsrc_offset,
3061 				ipsrc_key,
3062 				size);
3063 			memcpy((char *)(size_t)curr->fs_rule.mask_iova +
3064 				curr->ipaddr_rule.fs_ipsrc_offset,
3065 				ipsrc_mask,
3066 				size);
3067 		}
3068 		if (curr->ipaddr_rule.fs_ipdst_offset >= 0) {
3069 			memcpy((char *)(size_t)curr->fs_rule.key_iova +
3070 				curr->ipaddr_rule.fs_ipdst_offset,
3071 				ipdst_key,
3072 				size);
3073 			memcpy((char *)(size_t)curr->fs_rule.mask_iova +
3074 				curr->ipaddr_rule.fs_ipdst_offset,
3075 				ipdst_mask,
3076 				size);
3077 		}
3078 
3079 		if (extend >= 0)
3080 			curr->fs_real_key_size += extend;
3081 		curr->fs_rule.key_size = FIXED_ENTRY_SIZE;
3082 
3083 		dpaa2_flow_fs_entry_log("Start update", curr);
3084 
3085 		ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW,
3086 				priv->token, curr->tc_id, curr->tc_index,
3087 				&curr->fs_rule, &curr->action_cfg);
3088 		if (ret) {
3089 			DPAA2_PMD_ERR("FS entry update failed.");
3090 			return -1;
3091 		}
3092 
3093 		curr = LIST_NEXT(curr, next);
3094 	}
3095 
3096 	return 0;
3097 }
3098 
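/* Reject a new flow whose (group, priority) pair is already used by an
 * installed rule, since that pair maps to a single QoS/FS entry slot.
 */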
3099 static inline int
3100 dpaa2_flow_verify_attr(
3101 	struct dpaa2_dev_priv *priv,
3102 	const struct rte_flow_attr *attr)
3103 {
3104 	struct rte_flow *curr = LIST_FIRST(&priv->flows);
3105 
3106 	while (curr) {
3107 		if (curr->tc_id == attr->group &&
3108 			curr->tc_index == attr->priority) {
3109 			DPAA2_PMD_ERR(
3110 				"Flow with group %d and priority %d already exists.",
3111 				attr->group, attr->priority);
3112 
3113 			return -1;
3114 		}
3115 		curr = LIST_NEXT(curr, next);
3116 	}
3117 
3118 	return 0;
3119 }
3120 
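/* Translate a generic rte_flow into hardware configuration: walk the
 * pattern list to build the QoS/FS key extracts and rule data, then
 * apply the action. QUEUE installs one QoS entry plus one FS entry in
 * the selected traffic class; RSS programs a hash distribution over the
 * requested queues and installs a QoS entry. If the key layout changed,
 * existing entries are updated before the flow is linked into the list.
 */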
3121 static int
3122 dpaa2_generic_flow_set(struct rte_flow *flow,
3123 		       struct rte_eth_dev *dev,
3124 		       const struct rte_flow_attr *attr,
3125 		       const struct rte_flow_item pattern[],
3126 		       const struct rte_flow_action actions[],
3127 		       struct rte_flow_error *error)
3128 {
3129 	const struct rte_flow_action_queue *dest_queue;
3130 	const struct rte_flow_action_rss *rss_conf;
3131 	int is_keycfg_configured = 0, end_of_list = 0;
3132 	int ret = 0, i = 0, j = 0;
3133 	struct dpni_rx_tc_dist_cfg tc_cfg;
3134 	struct dpni_qos_tbl_cfg qos_cfg;
3135 	struct dpni_fs_action_cfg action;
3136 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3137 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3138 	size_t param;
3139 	struct rte_flow *curr = LIST_FIRST(&priv->flows);
3140 	uint16_t qos_index;
3141 
3142 	ret = dpaa2_flow_verify_attr(priv, attr);
3143 	if (ret)
3144 		return ret;
3145 
3146 	/* Parse pattern list to get the matching parameters */
3147 	while (!end_of_list) {
3148 		switch (pattern[i].type) {
3149 		case RTE_FLOW_ITEM_TYPE_ETH:
3150 			ret = dpaa2_configure_flow_eth(flow,
3151 					dev, attr, &pattern[i], actions, error,
3152 					&is_keycfg_configured);
3153 			if (ret) {
3154 				DPAA2_PMD_ERR("ETH flow configuration failed!");
3155 				return ret;
3156 			}
3157 			break;
3158 		case RTE_FLOW_ITEM_TYPE_VLAN:
3159 			ret = dpaa2_configure_flow_vlan(flow,
3160 					dev, attr, &pattern[i], actions, error,
3161 					&is_keycfg_configured);
3162 			if (ret) {
3163 				DPAA2_PMD_ERR("VLAN flow configuration failed!");
3164 				return ret;
3165 			}
3166 			break;
3167 		case RTE_FLOW_ITEM_TYPE_IPV4:
3168 		case RTE_FLOW_ITEM_TYPE_IPV6:
3169 			ret = dpaa2_configure_flow_generic_ip(flow,
3170 					dev, attr, &pattern[i], actions, error,
3171 					&is_keycfg_configured);
3172 			if (ret) {
3173 				DPAA2_PMD_ERR("IP flow configuration failed!");
3174 				return ret;
3175 			}
3176 			break;
3177 		case RTE_FLOW_ITEM_TYPE_ICMP:
3178 			ret = dpaa2_configure_flow_icmp(flow,
3179 					dev, attr, &pattern[i], actions, error,
3180 					&is_keycfg_configured);
3181 			if (ret) {
3182 				DPAA2_PMD_ERR("ICMP flow configuration failed!");
3183 				return ret;
3184 			}
3185 			break;
3186 		case RTE_FLOW_ITEM_TYPE_UDP:
3187 			ret = dpaa2_configure_flow_udp(flow,
3188 					dev, attr, &pattern[i], actions, error,
3189 					&is_keycfg_configured);
3190 			if (ret) {
3191 				DPAA2_PMD_ERR("UDP flow configuration failed!");
3192 				return ret;
3193 			}
3194 			break;
3195 		case RTE_FLOW_ITEM_TYPE_TCP:
3196 			ret = dpaa2_configure_flow_tcp(flow,
3197 					dev, attr, &pattern[i], actions, error,
3198 					&is_keycfg_configured);
3199 			if (ret) {
3200 				DPAA2_PMD_ERR("TCP flow configuration failed!");
3201 				return ret;
3202 			}
3203 			break;
3204 		case RTE_FLOW_ITEM_TYPE_SCTP:
3205 			ret = dpaa2_configure_flow_sctp(flow,
3206 					dev, attr, &pattern[i], actions, error,
3207 					&is_keycfg_configured);
3208 			if (ret) {
3209 				DPAA2_PMD_ERR("SCTP flow configuration failed!");
3210 				return ret;
3211 			}
3212 			break;
3213 		case RTE_FLOW_ITEM_TYPE_GRE:
3214 			ret = dpaa2_configure_flow_gre(flow,
3215 					dev, attr, &pattern[i], actions, error,
3216 					&is_keycfg_configured);
3217 			if (ret) {
3218 				DPAA2_PMD_ERR("GRE flow configuration failed!");
3219 				return ret;
3220 			}
3221 			break;
3222 		case RTE_FLOW_ITEM_TYPE_END:
3223 			end_of_list = 1;
3224 			break; /*End of List*/
3225 		default:
3226 			DPAA2_PMD_ERR("Invalid pattern item type");
3227 			ret = -ENOTSUP;
3228 			break;
3229 		}
3230 		i++;
3231 	}
3232 
3233 	/* Let's parse action on matching traffic */
3234 	end_of_list = 0;
3235 	while (!end_of_list) {
3236 		switch (actions[j].type) {
3237 		case RTE_FLOW_ACTION_TYPE_QUEUE:
3238 			dest_queue =
3239 				(const struct rte_flow_action_queue *)(actions[j].conf);
3240 			flow->flow_id = dest_queue->index;
3241 			flow->action = RTE_FLOW_ACTION_TYPE_QUEUE;
3242 			memset(&action, 0, sizeof(struct dpni_fs_action_cfg));
3243 			action.flow_id = flow->flow_id;
3244 			if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
3245 				dpaa2_flow_qos_table_extracts_log(priv);
3246 				if (dpkg_prepare_key_cfg(
3247 					&priv->extract.qos_key_extract.dpkg,
3248 					(uint8_t *)(size_t)priv->extract.qos_extract_param)
3249 					< 0) {
3250 					DPAA2_PMD_ERR(
3251 					"Unable to prepare extract parameters");
3252 					return -1;
3253 				}
3254 
3255 				memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
3256 				qos_cfg.discard_on_miss = true;
3257 				qos_cfg.keep_entries = true;
3258 				qos_cfg.key_cfg_iova =
3259 					(size_t)priv->extract.qos_extract_param;
3260 				ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
3261 						priv->token, &qos_cfg);
3262 				if (ret < 0) {
3263 					DPAA2_PMD_ERR(
3264 					"Distribution cannot be configured (%d)"
3265 					, ret);
3266 					return -1;
3267 				}
3268 			}
3269 			if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
3270 				dpaa2_flow_fs_table_extracts_log(priv, flow->tc_id);
3271 				if (dpkg_prepare_key_cfg(
3272 				&priv->extract.tc_key_extract[flow->tc_id].dpkg,
3273 				(uint8_t *)(size_t)priv->extract
3274 				.tc_extract_param[flow->tc_id]) < 0) {
3275 					DPAA2_PMD_ERR(
3276 					"Unable to prepare extract parameters");
3277 					return -1;
3278 				}
3279 
3280 				memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
3281 				tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc;
3282 				tc_cfg.dist_mode = DPNI_DIST_MODE_FS;
3283 				tc_cfg.key_cfg_iova =
3284 					(uint64_t)priv->extract.tc_extract_param[flow->tc_id];
3285 				tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;
3286 				tc_cfg.fs_cfg.keep_entries = true;
3287 				ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
3288 							 priv->token,
3289 							 flow->tc_id, &tc_cfg);
3290 				if (ret < 0) {
3291 					DPAA2_PMD_ERR(
3292 					"Distribution cannot be configured (%d)"
3293 					, ret);
3294 					return -1;
3295 				}
3296 			}
3297 			/* Configure QoS table first */
3298 
3299 			action.flow_id = action.flow_id % priv->num_rx_tc;
3300 
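			/* QoS entries are grouped per traffic class: each TC
			 * owns a block of priv->fs_entries slots indexed by
			 * the flow priority (tc_index).
			 */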
3301 			qos_index = flow->tc_id * priv->fs_entries +
3302 				flow->tc_index;
3303 
3304 			if (qos_index >= priv->qos_entries) {
3305 				DPAA2_PMD_ERR("QoS table with %d entries full",
3306 					priv->qos_entries);
3307 				return -1;
3308 			}
3309 			flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
3310 			if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR) {
3311 				if (flow->ipaddr_rule.qos_ipdst_offset >=
3312 					flow->ipaddr_rule.qos_ipsrc_offset) {
3313 					flow->qos_real_key_size =
3314 						flow->ipaddr_rule.qos_ipdst_offset +
3315 						NH_FLD_IPV4_ADDR_SIZE;
3316 				} else {
3317 					flow->qos_real_key_size =
3318 						flow->ipaddr_rule.qos_ipsrc_offset +
3319 						NH_FLD_IPV4_ADDR_SIZE;
3320 				}
3321 			} else if (flow->ipaddr_rule.ipaddr_type ==
3322 				FLOW_IPV6_ADDR) {
3323 				if (flow->ipaddr_rule.qos_ipdst_offset >=
3324 					flow->ipaddr_rule.qos_ipsrc_offset) {
3325 					flow->qos_real_key_size =
3326 						flow->ipaddr_rule.qos_ipdst_offset +
3327 						NH_FLD_IPV6_ADDR_SIZE;
3328 				} else {
3329 					flow->qos_real_key_size =
3330 						flow->ipaddr_rule.qos_ipsrc_offset +
3331 						NH_FLD_IPV6_ADDR_SIZE;
3332 				}
3333 			}
3334 
3335 			flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
3336 
3337 			dpaa2_flow_qos_entry_log("Start add", flow, qos_index);
3338 
3339 			ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
3340 						priv->token, &flow->qos_rule,
3341 						flow->tc_id, qos_index,
3342 						0, 0);
3343 			if (ret < 0) {
3344 				DPAA2_PMD_ERR(
3345 				"Error in adding entry to QoS table(%d)", ret);
3346 				return ret;
3347 			}
3348 
3349 			/* Then Configure FS table */
3350 			if (flow->tc_index >= priv->fs_entries) {
3351 				DPAA2_PMD_ERR("FS table with %d entries full",
3352 					priv->fs_entries);
3353 				return -1;
3354 			}
3355 
3356 			flow->fs_real_key_size =
3357 				priv->extract.tc_key_extract[flow->tc_id]
3358 				.key_info.key_total_size;
3359 
3360 			if (flow->ipaddr_rule.ipaddr_type ==
3361 				FLOW_IPV4_ADDR) {
3362 				if (flow->ipaddr_rule.fs_ipdst_offset >=
3363 					flow->ipaddr_rule.fs_ipsrc_offset) {
3364 					flow->fs_real_key_size =
3365 						flow->ipaddr_rule.fs_ipdst_offset +
3366 						NH_FLD_IPV4_ADDR_SIZE;
3367 				} else {
3368 					flow->fs_real_key_size =
3369 						flow->ipaddr_rule.fs_ipsrc_offset +
3370 						NH_FLD_IPV4_ADDR_SIZE;
3371 				}
3372 			} else if (flow->ipaddr_rule.ipaddr_type ==
3373 				FLOW_IPV6_ADDR) {
3374 				if (flow->ipaddr_rule.fs_ipdst_offset >=
3375 					flow->ipaddr_rule.fs_ipsrc_offset) {
3376 					flow->fs_real_key_size =
3377 						flow->ipaddr_rule.fs_ipdst_offset +
3378 						NH_FLD_IPV6_ADDR_SIZE;
3379 				} else {
3380 					flow->fs_real_key_size =
3381 						flow->ipaddr_rule.fs_ipsrc_offset +
3382 						NH_FLD_IPV6_ADDR_SIZE;
3383 				}
3384 			}
3385 
3386 			flow->fs_rule.key_size = FIXED_ENTRY_SIZE;
3387 
3388 			dpaa2_flow_fs_entry_log("Start add", flow);
3389 
3390 			ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token,
3391 						flow->tc_id, flow->tc_index,
3392 						&flow->fs_rule, &action);
3393 			if (ret < 0) {
3394 				DPAA2_PMD_ERR(
3395 				"Error in adding entry to FS table(%d)", ret);
3396 				return ret;
3397 			}
3398 			memcpy(&flow->action_cfg, &action,
3399 				sizeof(struct dpni_fs_action_cfg));
3400 			break;
3401 		case RTE_FLOW_ACTION_TYPE_RSS:
3402 			rss_conf = (const struct rte_flow_action_rss *)(actions[j].conf);
3403 			for (i = 0; i < (int)rss_conf->queue_num; i++) {
3404 				if (rss_conf->queue[i] <
3405 					(attr->group * priv->dist_queues) ||
3406 					rss_conf->queue[i] >=
3407 					((attr->group + 1) * priv->dist_queues)) {
3408 					DPAA2_PMD_ERR(
3409 					"Queue/Group combination is not supported\n");
3410 					return -ENOTSUP;
3411 				}
3412 			}
3413 
3414 			flow->action = RTE_FLOW_ACTION_TYPE_RSS;
3415 			ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types,
3416 					&priv->extract.tc_key_extract[flow->tc_id].dpkg);
3417 			if (ret < 0) {
3418 				DPAA2_PMD_ERR(
3419 				"Unable to set flow distribution. Please check queue config\n");
3420 				return ret;
3421 			}
3422 
3423 			/* Allocate DMA'ble memory to write the rules */
3424 			param = (size_t)rte_malloc(NULL, 256, 64);
3425 			if (!param) {
3426 				DPAA2_PMD_ERR("Memory allocation failure\n");
3427 				return -1;
3428 			}
3429 
3430 			if (dpkg_prepare_key_cfg(
3431 				&priv->extract.tc_key_extract[flow->tc_id].dpkg,
3432 				(uint8_t *)param) < 0) {
3433 				DPAA2_PMD_ERR(
3434 				"Unable to prepare extract parameters");
3435 				rte_free((void *)param);
3436 				return -1;
3437 			}
3438 
3439 			memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
3440 			tc_cfg.dist_size = rss_conf->queue_num;
3441 			tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
3442 			tc_cfg.key_cfg_iova = (size_t)param;
3443 			tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;
3444 
3445 			ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
3446 						 priv->token, flow->tc_id,
3447 						 &tc_cfg);
3448 			if (ret < 0) {
3449 				DPAA2_PMD_ERR(
3450 				"Distribution cannot be configured: %d\n", ret);
3451 				rte_free((void *)param);
3452 				return -1;
3453 			}
3454 
3455 			rte_free((void *)param);
3456 			if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
3457 				if (dpkg_prepare_key_cfg(
3458 					&priv->extract.qos_key_extract.dpkg,
3459 					(uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
3460 					DPAA2_PMD_ERR(
3461 					"Unable to prepare extract parameters");
3462 					return -1;
3463 				}
3464 				memset(&qos_cfg, 0,
3465 					sizeof(struct dpni_qos_tbl_cfg));
3466 				qos_cfg.discard_on_miss = true;
3467 				qos_cfg.keep_entries = true;
3468 				qos_cfg.key_cfg_iova =
3469 					(size_t)priv->extract.qos_extract_param;
3470 				ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
3471 							 priv->token, &qos_cfg);
3472 				if (ret < 0) {
3473 					DPAA2_PMD_ERR(
3474 					"Distribution can't be configured %d\n",
3475 					ret);
3476 					return -1;
3477 				}
3478 			}
3479 
3480 			/* Add Rule into QoS table */
3481 			qos_index = flow->tc_id * priv->fs_entries +
3482 				flow->tc_index;
3483 			if (qos_index >= priv->qos_entries) {
3484 				DPAA2_PMD_ERR("QoS table with %d entries full",
3485 					priv->qos_entries);
3486 				return -1;
3487 			}
3488 
3489 			flow->qos_real_key_size =
3490 			  priv->extract.qos_key_extract.key_info.key_total_size;
3491 			flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
3492 			ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token,
3493 						&flow->qos_rule, flow->tc_id,
3494 						qos_index, 0, 0);
3495 			if (ret < 0) {
3496 				DPAA2_PMD_ERR(
3497 				"Error in adding entry to QoS table(%d)",
3498 				ret);
3499 				return ret;
3500 			}
3501 			break;
3502 		case RTE_FLOW_ACTION_TYPE_END:
3503 			end_of_list = 1;
3504 			break;
3505 		default:
3506 			DPAA2_PMD_ERR("Invalid action type");
3507 			ret = -ENOTSUP;
3508 			break;
3509 		}
3510 		j++;
3511 	}
3512 
3513 	if (!ret) {
3514 		if (is_keycfg_configured &
3515 			(DPAA2_QOS_TABLE_RECONFIGURE |
3516 			DPAA2_FS_TABLE_RECONFIGURE)) {
3517 			ret = dpaa2_flow_entry_update(priv, flow->tc_id);
3518 			if (ret) {
3519 				DPAA2_PMD_ERR("Flow entry update failed.");
3520 
3521 				return -1;
3522 			}
3523 		}
3524 		/* Insert the new rule at the tail of the flow list. */
3525 		if (!curr) {
3526 			LIST_INSERT_HEAD(&priv->flows, flow, next);
3527 		} else {
3528 			while (LIST_NEXT(curr, next))
3529 				curr = LIST_NEXT(curr, next);
3530 			LIST_INSERT_AFTER(curr, flow, next);
3531 		}
3532 	}
3533 	return ret;
3534 }
3535 
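/* Validate flow attributes against the DPNI capabilities: the group must
 * fit within the Rx traffic classes, the priority within the FS entries,
 * and only ingress flows are accepted.
 */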
3536 static inline int
3537 dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr,
3538 		      const struct rte_flow_attr *attr)
3539 {
3540 	int ret = 0;
3541 
3542 	if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) {
3543 		DPAA2_PMD_ERR("Priority group is out of range\n");
3544 		ret = -ENOTSUP;
3545 	}
3546 	if (unlikely(attr->priority >= dpni_attr->fs_entries)) {
3547 		DPAA2_PMD_ERR("Priority within the group is out of range\n");
3548 		ret = -ENOTSUP;
3549 	}
3550 	if (unlikely(attr->egress)) {
3551 		DPAA2_PMD_ERR(
3552 			"Flow configuration is not supported on egress side\n");
3553 		ret = -ENOTSUP;
3554 	}
3555 	if (unlikely(!attr->ingress)) {
3556 		DPAA2_PMD_ERR("Ingress flag must be configured\n");
3557 		ret = -EINVAL;
3558 	}
3559 	return ret;
3560 }
3561 
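/* Check that every pattern item type is supported by this PMD and that
 * each item carries a spec.
 */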
3562 static inline int
3563 dpaa2_dev_verify_patterns(const struct rte_flow_item pattern[])
3564 {
3565 	unsigned int i, j, is_found = 0;
3566 	int ret = 0;
3567 
3568 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3569 		for (i = 0; i < RTE_DIM(dpaa2_supported_pattern_type); i++) {
3570 			if (dpaa2_supported_pattern_type[i]
3571 					== pattern[j].type) {
3572 				is_found = 1;
3573 				break;
3574 			}
3575 		}
3576 		if (!is_found) {
3577 			ret = -ENOTSUP;
3578 			break;
3579 		}
3580 	}
3581 	/* Verify that every pattern item in the list provides a spec */
3582 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3583 		if (!pattern[j].spec) {
3584 			ret = -EINVAL;
3585 			break;
3586 		}
3587 	}
3588 
3589 	return ret;
3590 }
3591 
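/* Check that every action type is supported by this PMD and that each
 * action other than DROP provides a configuration.
 */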
3592 static inline int
3593 dpaa2_dev_verify_actions(const struct rte_flow_action actions[])
3594 {
3595 	unsigned int i, j, is_found = 0;
3596 	int ret = 0;
3597 
3598 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3599 		for (i = 0; i < RTE_DIM(dpaa2_supported_action_type); i++) {
3600 			if (dpaa2_supported_action_type[i] == actions[j].type) {
3601 				is_found = 1;
3602 				break;
3603 			}
3604 		}
3605 		if (!is_found) {
3606 			ret = -ENOTSUP;
3607 			break;
3608 		}
3609 	}
3610 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3611 		if (actions[j].type != RTE_FLOW_ACTION_TYPE_DROP &&
3612 				!actions[j].conf)
3613 			ret = -EINVAL;
3614 	}
3615 	return ret;
3616 }
3617 
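/* rte_flow validate callback: fetch the DPNI attributes and verify the
 * flow attributes, pattern list and action list against them.
 */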
3618 static
3619 int dpaa2_flow_validate(struct rte_eth_dev *dev,
3620 			const struct rte_flow_attr *flow_attr,
3621 			const struct rte_flow_item pattern[],
3622 			const struct rte_flow_action actions[],
3623 			struct rte_flow_error *error)
3624 {
3625 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3626 	struct dpni_attr dpni_attr;
3627 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3628 	uint16_t token = priv->token;
3629 	int ret = 0;
3630 
3631 	memset(&dpni_attr, 0, sizeof(struct dpni_attr));
3632 	ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr);
3633 	if (ret < 0) {
3634 		DPAA2_PMD_ERR(
3635 			"Failure to get dpni@%p attribute, err code %d\n",
3636 			dpni, ret);
3637 		rte_flow_error_set(error, EPERM,
3638 			   RTE_FLOW_ERROR_TYPE_ATTR,
3639 			   flow_attr, "invalid");
3640 		return ret;
3641 	}
3642 
3643 	/* Verify input attributes */
3644 	ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr);
3645 	if (ret < 0) {
3646 		DPAA2_PMD_ERR(
3647 			"Invalid attributes are given\n");
3648 		rte_flow_error_set(error, EPERM,
3649 			   RTE_FLOW_ERROR_TYPE_ATTR,
3650 			   flow_attr, "invalid");
3651 		goto not_valid_params;
3652 	}
3653 	/* Verify input pattern list */
3654 	ret = dpaa2_dev_verify_patterns(pattern);
3655 	if (ret < 0) {
3656 		DPAA2_PMD_ERR(
3657 			"Invalid pattern list is given\n");
3658 		rte_flow_error_set(error, EPERM,
3659 			   RTE_FLOW_ERROR_TYPE_ITEM,
3660 			   pattern, "invalid");
3661 		goto not_valid_params;
3662 	}
3663 	/* Verify input action list */
3664 	ret = dpaa2_dev_verify_actions(actions);
3665 	if (ret < 0) {
3666 		DPAA2_PMD_ERR(
3667 			"Invalid action list is given\n");
3668 		rte_flow_error_set(error, EPERM,
3669 			   RTE_FLOW_ERROR_TYPE_ACTION,
3670 			   actions, "invalid");
3671 		goto not_valid_params;
3672 	}
3673 not_valid_params:
3674 	return ret;
3675 }
3676 
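/* rte_flow create callback: allocate the flow and the DMA-able key/mask
 * buffers for its QoS and FS rules, then program the hardware via
 * dpaa2_generic_flow_set().
 */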
3677 static
3678 struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev,
3679 				   const struct rte_flow_attr *attr,
3680 				   const struct rte_flow_item pattern[],
3681 				   const struct rte_flow_action actions[],
3682 				   struct rte_flow_error *error)
3683 {
3684 	struct rte_flow *flow = NULL;
3685 	size_t key_iova = 0, mask_iova = 0;
3686 	int ret;
3687 
3688 	dpaa2_flow_control_log =
3689 		getenv("DPAA2_FLOW_CONTROL_LOG");
3690 
3691 	flow = rte_zmalloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE);
3692 	if (!flow) {
3693 		DPAA2_PMD_ERR("Failure to allocate memory for flow");
3694 		goto mem_failure;
3695 	}
3696 	/* Allocate DMA'ble memory to write the rules */
3697 	key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3698 	if (!key_iova) {
3699 		DPAA2_PMD_ERR(
3700 			"Memory allocation failure for rule configuration\n");
3701 		goto mem_failure;
3702 	}
3703 	mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3704 	if (!mask_iova) {
3705 		DPAA2_PMD_ERR(
3706 			"Memory allocation failure for rule configuration\n");
3707 		goto mem_failure;
3708 	}
3709 
3710 	flow->qos_rule.key_iova = key_iova;
3711 	flow->qos_rule.mask_iova = mask_iova;
3712 
3713 	/* Allocate DMA'ble memory to write the rules */
3714 	key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3715 	if (!key_iova) {
3716 		DPAA2_PMD_ERR(
3717 			"Memory allocation failure for rule configuration\n");
3718 		goto mem_failure;
3719 	}
3720 	mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3721 	if (!mask_iova) {
3722 		DPAA2_PMD_ERR(
3723 			"Memory allocation failure for rule configuration\n");
3724 		goto mem_failure;
3725 	}
3726 
3727 	flow->fs_rule.key_iova = key_iova;
3728 	flow->fs_rule.mask_iova = mask_iova;
3729 
3730 	flow->ipaddr_rule.ipaddr_type = FLOW_NONE_IPADDR;
3731 	flow->ipaddr_rule.qos_ipsrc_offset =
3732 		IP_ADDRESS_OFFSET_INVALID;
3733 	flow->ipaddr_rule.qos_ipdst_offset =
3734 		IP_ADDRESS_OFFSET_INVALID;
3735 	flow->ipaddr_rule.fs_ipsrc_offset =
3736 		IP_ADDRESS_OFFSET_INVALID;
3737 	flow->ipaddr_rule.fs_ipdst_offset =
3738 		IP_ADDRESS_OFFSET_INVALID;
3739 
3740 	switch (dpaa2_filter_type) {
3741 	case RTE_ETH_FILTER_GENERIC:
3742 		ret = dpaa2_generic_flow_set(flow, dev, attr, pattern,
3743 					     actions, error);
3744 		if (ret < 0) {
3745 			if (error->type > RTE_FLOW_ERROR_TYPE_ACTION)
3746 				rte_flow_error_set(error, EPERM,
3747 						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3748 						attr, "unknown");
3749 			DPAA2_PMD_ERR(
3750 			"Failure to create flow, return code (%d)", ret);
3751 			goto creation_error;
3752 		}
3753 		break;
3754 	default:
3755 		DPAA2_PMD_ERR("Filter type (%d) not supported",
3756 			dpaa2_filter_type);
3757 		break;
3758 	}
3759 
3760 	return flow;
3761 mem_failure:
3762 	rte_flow_error_set(error, EPERM,
3763 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3764 			   NULL, "memory alloc");
3765 creation_error:
3766 	rte_free((void *)flow);
3767 	rte_free((void *)key_iova);
3768 	rte_free((void *)mask_iova);
3769 
3770 	return NULL;
3771 }
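
/* Illustrative usage sketch, not part of the driver: creating a rule through
 * rte_flow_create(), which reaches dpaa2_flow_create() above once the generic
 * filter type is selected. The helper name is hypothetical, and the
 * attr/pattern/actions arrays would be built as in the validation sketch
 * earlier.
 */
static __rte_unused struct rte_flow *
dpaa2_flow_create_sketch(uint16_t port_id,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[])
{
	struct rte_flow_error err;
	struct rte_flow *handle;

	memset(&err, 0, sizeof(err));
	handle = rte_flow_create(port_id, attr, pattern, actions, &err);
	if (!handle)
		DPAA2_PMD_ERR("Flow creation failed: %s",
			      err.message ? err.message : "(no message)");
	return handle;
}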
3772 
3773 static
3774 int dpaa2_flow_destroy(struct rte_eth_dev *dev,
3775 		       struct rte_flow *flow,
3776 		       struct rte_flow_error *error)
3777 {
3778 	int ret = 0;
3779 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3780 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3781 
3782 	switch (flow->action) {
3783 	case RTE_FLOW_ACTION_TYPE_QUEUE:
3784 		/* Remove entry from QoS table first */
3785 		ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
3786 					   &flow->qos_rule);
3787 		if (ret < 0) {
3788 			DPAA2_PMD_ERR(
3789 				"Error in removing entry from QoS table(%d)", ret);
3790 			goto error;
3791 		}
3792 
3793 		/* Then remove entry from FS table */
3794 		ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token,
3795 					   flow->tc_id, &flow->fs_rule);
3796 		if (ret < 0) {
3797 			DPAA2_PMD_ERR(
3798 				"Error in removing entry from FS table(%d)", ret);
3799 			goto error;
3800 		}
3801 		break;
3802 	case RTE_FLOW_ACTION_TYPE_RSS:
3803 		ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
3804 					   &flow->qos_rule);
3805 		if (ret < 0) {
3806 			DPAA2_PMD_ERR(
3807 			"Error in removing entry from QoS table(%d)", ret);
3808 			goto error;
3809 		}
3810 		break;
3811 	default:
3812 		DPAA2_PMD_ERR(
3813 		"Action type (%d) is not supported", flow->action);
3814 		ret = -ENOTSUP;
3815 		break;
3816 	}
3817 
3818 	LIST_REMOVE(flow, next);
3819 	rte_free((void *)(size_t)flow->qos_rule.key_iova);
3820 	rte_free((void *)(size_t)flow->qos_rule.mask_iova);
3821 	rte_free((void *)(size_t)flow->fs_rule.key_iova);
3822 	rte_free((void *)(size_t)flow->fs_rule.mask_iova);
3823 	/* Now free the flow */
3824 	rte_free(flow);
3825 
3826 error:
3827 	if (ret)
3828 		rte_flow_error_set(error, EPERM,
3829 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3830 				   NULL, "unknown");
3831 	return ret;
3832 }
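
/* Illustrative usage sketch, not part of the driver: tearing down a rule
 * previously returned by rte_flow_create(). rte_flow_destroy() dispatches to
 * dpaa2_flow_destroy() above, which removes the QoS entry (and the FS entry
 * for QUEUE actions) before freeing the rule memory. The helper name is
 * hypothetical.
 */
static __rte_unused int
dpaa2_flow_destroy_sketch(uint16_t port_id, struct rte_flow *handle)
{
	struct rte_flow_error err;
	int ret;

	ret = rte_flow_destroy(port_id, handle, &err);
	if (ret)
		DPAA2_PMD_ERR("Flow destruction failed (%d)", ret);
	return ret;
}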
3833 
3834 /**
3835  * Destroy user-configured flow rules.
3836  *
3837  * This function skips internal flow rules.
3838  *
3839  * @see rte_flow_flush()
3840  * @see rte_flow_ops
3841  */
3842 static int
3843 dpaa2_flow_flush(struct rte_eth_dev *dev,
3844 		struct rte_flow_error *error)
3845 {
3846 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3847 	struct rte_flow *flow = LIST_FIRST(&priv->flows);
3848 
3849 	while (flow) {
3850 		struct rte_flow *next = LIST_NEXT(flow, next);
3851 
3852 		dpaa2_flow_destroy(dev, flow, error);
3853 		flow = next;
3854 	}
3855 	return 0;
3856 }
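
/* Illustrative usage sketch, not part of the driver: dropping every
 * user-configured rule on a port in one call. rte_flow_flush() reaches
 * dpaa2_flow_flush() above, which walks the port's flow list and destroys
 * each entry. The helper name is hypothetical.
 */
static __rte_unused int
dpaa2_flow_flush_sketch(uint16_t port_id)
{
	struct rte_flow_error err;

	return rte_flow_flush(port_id, &err);
}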
3857 
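/* Flow query (e.g. reading counters for a rule) is not implemented for this
 * PMD; the callback below is a stub that accepts the request and returns
 * success without filling any data.
 */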
3858 static int
3859 dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused,
3860 		struct rte_flow *flow __rte_unused,
3861 		const struct rte_flow_action *actions __rte_unused,
3862 		void *data __rte_unused,
3863 		struct rte_flow_error *error __rte_unused)
3864 {
3865 	return 0;
3866 }
3867 
3868 /**
3869  * Clean up all flow rules.
3870  *
3871  * Unlike dpaa2_flow_flush(), this function takes care of all remaining flow
3872  * rules regardless of whether they are internal or user-configured.
3873  *
3874  * @param dev
3875  *   Pointer to the Ethernet device structure.
3876  */
3877 void
3878 dpaa2_flow_clean(struct rte_eth_dev *dev)
3879 {
3880 	struct rte_flow *flow;
3881 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3882 
3883 	while ((flow = LIST_FIRST(&priv->flows)))
3884 		dpaa2_flow_destroy(dev, flow, NULL);
3885 }
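
/* Illustrative sketch, not part of the driver: dpaa2_flow_clean() is intended
 * for the driver's own teardown path, where internal rules must be released
 * as well. A hypothetical close handler would simply pass the device along.
 */
static __rte_unused void
dpaa2_flow_teardown_sketch(struct rte_eth_dev *dev)
{
	/* Frees every remaining rule, internal ones included */
	dpaa2_flow_clean(dev);
}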
3886 
3887 const struct rte_flow_ops dpaa2_flow_ops = {
3888 	.create	= dpaa2_flow_create,
3889 	.validate = dpaa2_flow_validate,
3890 	.destroy = dpaa2_flow_destroy,
3891 	.flush	= dpaa2_flow_flush,
3892 	.query	= dpaa2_flow_query,
3893 };
3894