xref: /dpdk/drivers/net/dpaa2/dpaa2_flow.c (revision 665b49c51639a10c553433bc2bcd85c7331c631e)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2021 NXP
3  */
4 
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 
13 #include <rte_ethdev.h>
14 #include <rte_log.h>
15 #include <rte_malloc.h>
16 #include <rte_flow_driver.h>
17 #include <rte_tailq.h>
18 
19 #include <fsl_dpni.h>
20 #include <fsl_dpkg.h>
21 
22 #include <dpaa2_ethdev.h>
23 #include <dpaa2_pmd_logs.h>
24 
/* Workaround to discriminate the UDP/TCP/SCTP
 * with next protocol of l3.
 * MC/WRIOP are not able to identify
 * the l4 protocol with l4 ports.
 */
int mc_l4_port_identification;

/* Non-NULL enables verbose flow-programming logs (key/mask dumps).
 * NOTE(review): assigned outside this chunk — presumably from a devargs
 * option; confirm against the probe path.
 */
static char *dpaa2_flow_control_log;
/* Destination flow ID for traffic that matches no FS entry;
 * the default is to drop such traffic.
 */
static uint16_t dpaa2_flow_miss_flow_id =
	DPNI_FS_MISS_DROP;

/* Fixed entry size: the maximal key size supported by the DPNI. */
#define FIXED_ENTRY_SIZE DPNI_MAX_KEY_SIZE
37 
/* IP address family carried by a flow rule's IP src/dst extracts. */
enum flow_rule_ipaddr_type {
	FLOW_NONE_IPADDR,	/* Rule contains no IP address field. */
	FLOW_IPV4_ADDR,
	FLOW_IPV6_ADDR
};
43 
/* Byte offsets of the IP source/destination fields inside the QoS and
 * FS rule key/mask buffers; negative when the field is not present.
 * IP address extracts are kept at the tail of the key, so these offsets
 * are refreshed whenever other extracts are appended (see the
 * "move_ipaddr_tail" helpers below).
 */
struct flow_rule_ipaddr {
	enum flow_rule_ipaddr_type ipaddr_type;
	int qos_ipsrc_offset;
	int qos_ipdst_offset;
	int fs_ipsrc_offset;
	int fs_ipdst_offset;
};
51 
/* Driver-private flow handle returned to the application. */
struct rte_flow {
	LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
	struct dpni_rule_cfg qos_rule; /* QoS table key/mask buffers. */
	struct dpni_rule_cfg fs_rule;  /* FS (per-TC) table key/mask buffers. */
	uint8_t qos_real_key_size; /* Actual QoS key length in bytes. */
	uint8_t fs_real_key_size;  /* Actual FS key length in bytes. */
	uint8_t tc_id; /** Traffic Class ID. */
	uint8_t tc_index; /** index within this Traffic Class. */
	/* Action type recorded at flow creation (set outside this view). */
	enum rte_flow_action_type action;
	/* Special for IP address to specify the offset
	 * in key/mask.
	 */
	struct flow_rule_ipaddr ipaddr_rule;
	struct dpni_fs_action_cfg action_cfg; /* MC-side action config. */
};
67 
/* rte_flow pattern item types this driver can parse. */
static const
enum rte_flow_item_type dpaa2_supported_pattern_type[] = {
	RTE_FLOW_ITEM_TYPE_END,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_GRE,
};

/* rte_flow action types this driver accepts. */
static const
enum rte_flow_action_type dpaa2_supported_action_type[] = {
	RTE_FLOW_ACTION_TYPE_END,
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_PORT_ID,
	RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
	RTE_FLOW_ACTION_TYPE_RSS
};

/* Subset of the above that is programmed as FS (flow steering)
 * entries rather than RSS distributions.
 */
static const
enum rte_flow_action_type dpaa2_supported_fs_action_type[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_PORT_ID,
	RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
};
97 
/* Max of enum rte_flow_item_type + 1, for both IPv4 and IPv6*/
#define DPAA2_FLOW_ITEM_TYPE_GENERIC_IP (RTE_FLOW_ITEM_TYPE_META + 1)

#ifndef __cplusplus
/* Default item masks. They serve two purposes: used verbatim when a
 * pattern item carries no mask, and used by dpaa2_flow_extract_support()
 * to bound which header fields the driver can extract.
 */
static const struct rte_flow_item_eth dpaa2_flow_item_eth_mask = {
	.hdr.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	.hdr.src_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	.hdr.ether_type = RTE_BE16(0xffff),
};

static const struct rte_flow_item_vlan dpaa2_flow_item_vlan_mask = {
	.hdr.vlan_tci = RTE_BE16(0xffff),
};

static const struct rte_flow_item_ipv4 dpaa2_flow_item_ipv4_mask = {
	.hdr.src_addr = RTE_BE32(0xffffffff),
	.hdr.dst_addr = RTE_BE32(0xffffffff),
	.hdr.next_proto_id = 0xff,
};

static const struct rte_flow_item_ipv6 dpaa2_flow_item_ipv6_mask = {
	.hdr = {
		.src_addr =
			"\xff\xff\xff\xff\xff\xff\xff\xff"
			"\xff\xff\xff\xff\xff\xff\xff\xff",
		.dst_addr =
			"\xff\xff\xff\xff\xff\xff\xff\xff"
			"\xff\xff\xff\xff\xff\xff\xff\xff",
		.proto = 0xff
	},
};

static const struct rte_flow_item_icmp dpaa2_flow_item_icmp_mask = {
	.hdr.icmp_type = 0xff,
	.hdr.icmp_code = 0xff,
};

static const struct rte_flow_item_udp dpaa2_flow_item_udp_mask = {
	.hdr = {
		.src_port = RTE_BE16(0xffff),
		.dst_port = RTE_BE16(0xffff),
	},
};

static const struct rte_flow_item_tcp dpaa2_flow_item_tcp_mask = {
	.hdr = {
		.src_port = RTE_BE16(0xffff),
		.dst_port = RTE_BE16(0xffff),
	},
};

static const struct rte_flow_item_sctp dpaa2_flow_item_sctp_mask = {
	.hdr = {
		.src_port = RTE_BE16(0xffff),
		.dst_port = RTE_BE16(0xffff),
	},
};

static const struct rte_flow_item_gre dpaa2_flow_item_gre_mask = {
	.protocol = RTE_BE16(0xffff),
};

#endif
161 
162 static inline void dpaa2_prot_field_string(
163 	enum net_prot prot, uint32_t field,
164 	char *string)
165 {
166 	if (!dpaa2_flow_control_log)
167 		return;
168 
169 	if (prot == NET_PROT_ETH) {
170 		strcpy(string, "eth");
171 		if (field == NH_FLD_ETH_DA)
172 			strcat(string, ".dst");
173 		else if (field == NH_FLD_ETH_SA)
174 			strcat(string, ".src");
175 		else if (field == NH_FLD_ETH_TYPE)
176 			strcat(string, ".type");
177 		else
178 			strcat(string, ".unknown field");
179 	} else if (prot == NET_PROT_VLAN) {
180 		strcpy(string, "vlan");
181 		if (field == NH_FLD_VLAN_TCI)
182 			strcat(string, ".tci");
183 		else
184 			strcat(string, ".unknown field");
185 	} else if (prot == NET_PROT_IP) {
186 		strcpy(string, "ip");
187 		if (field == NH_FLD_IP_SRC)
188 			strcat(string, ".src");
189 		else if (field == NH_FLD_IP_DST)
190 			strcat(string, ".dst");
191 		else if (field == NH_FLD_IP_PROTO)
192 			strcat(string, ".proto");
193 		else
194 			strcat(string, ".unknown field");
195 	} else if (prot == NET_PROT_TCP) {
196 		strcpy(string, "tcp");
197 		if (field == NH_FLD_TCP_PORT_SRC)
198 			strcat(string, ".src");
199 		else if (field == NH_FLD_TCP_PORT_DST)
200 			strcat(string, ".dst");
201 		else
202 			strcat(string, ".unknown field");
203 	} else if (prot == NET_PROT_UDP) {
204 		strcpy(string, "udp");
205 		if (field == NH_FLD_UDP_PORT_SRC)
206 			strcat(string, ".src");
207 		else if (field == NH_FLD_UDP_PORT_DST)
208 			strcat(string, ".dst");
209 		else
210 			strcat(string, ".unknown field");
211 	} else if (prot == NET_PROT_ICMP) {
212 		strcpy(string, "icmp");
213 		if (field == NH_FLD_ICMP_TYPE)
214 			strcat(string, ".type");
215 		else if (field == NH_FLD_ICMP_CODE)
216 			strcat(string, ".code");
217 		else
218 			strcat(string, ".unknown field");
219 	} else if (prot == NET_PROT_SCTP) {
220 		strcpy(string, "sctp");
221 		if (field == NH_FLD_SCTP_PORT_SRC)
222 			strcat(string, ".src");
223 		else if (field == NH_FLD_SCTP_PORT_DST)
224 			strcat(string, ".dst");
225 		else
226 			strcat(string, ".unknown field");
227 	} else if (prot == NET_PROT_GRE) {
228 		strcpy(string, "gre");
229 		if (field == NH_FLD_GRE_TYPE)
230 			strcat(string, ".type");
231 		else
232 			strcat(string, ".unknown field");
233 	} else {
234 		strcpy(string, "unknown protocol");
235 	}
236 }
237 
238 static inline void dpaa2_flow_qos_table_extracts_log(
239 	const struct dpaa2_dev_priv *priv)
240 {
241 	int idx;
242 	char string[32];
243 
244 	if (!dpaa2_flow_control_log)
245 		return;
246 
247 	printf("Setup QoS table: number of extracts: %d\r\n",
248 			priv->extract.qos_key_extract.dpkg.num_extracts);
249 	for (idx = 0; idx < priv->extract.qos_key_extract.dpkg.num_extracts;
250 		idx++) {
251 		dpaa2_prot_field_string(priv->extract.qos_key_extract.dpkg
252 			.extracts[idx].extract.from_hdr.prot,
253 			priv->extract.qos_key_extract.dpkg.extracts[idx]
254 			.extract.from_hdr.field,
255 			string);
256 		printf("%s", string);
257 		if ((idx + 1) < priv->extract.qos_key_extract.dpkg.num_extracts)
258 			printf(" / ");
259 	}
260 	printf("\r\n");
261 }
262 
263 static inline void dpaa2_flow_fs_table_extracts_log(
264 	const struct dpaa2_dev_priv *priv, int tc_id)
265 {
266 	int idx;
267 	char string[32];
268 
269 	if (!dpaa2_flow_control_log)
270 		return;
271 
272 	printf("Setup FS table: number of extracts of TC[%d]: %d\r\n",
273 			tc_id, priv->extract.tc_key_extract[tc_id]
274 			.dpkg.num_extracts);
275 	for (idx = 0; idx < priv->extract.tc_key_extract[tc_id]
276 		.dpkg.num_extracts; idx++) {
277 		dpaa2_prot_field_string(priv->extract.tc_key_extract[tc_id]
278 			.dpkg.extracts[idx].extract.from_hdr.prot,
279 			priv->extract.tc_key_extract[tc_id].dpkg.extracts[idx]
280 			.extract.from_hdr.field,
281 			string);
282 		printf("%s", string);
283 		if ((idx + 1) < priv->extract.tc_key_extract[tc_id]
284 			.dpkg.num_extracts)
285 			printf(" / ");
286 	}
287 	printf("\r\n");
288 }
289 
290 static inline void dpaa2_flow_qos_entry_log(
291 	const char *log_info, const struct rte_flow *flow, int qos_index)
292 {
293 	int idx;
294 	uint8_t *key, *mask;
295 
296 	if (!dpaa2_flow_control_log)
297 		return;
298 
299 	printf("\r\n%s QoS entry[%d] for TC[%d], extracts size is %d\r\n",
300 		log_info, qos_index, flow->tc_id, flow->qos_real_key_size);
301 
302 	key = (uint8_t *)(size_t)flow->qos_rule.key_iova;
303 	mask = (uint8_t *)(size_t)flow->qos_rule.mask_iova;
304 
305 	printf("key:\r\n");
306 	for (idx = 0; idx < flow->qos_real_key_size; idx++)
307 		printf("%02x ", key[idx]);
308 
309 	printf("\r\nmask:\r\n");
310 	for (idx = 0; idx < flow->qos_real_key_size; idx++)
311 		printf("%02x ", mask[idx]);
312 
313 	printf("\r\n%s QoS ipsrc: %d, ipdst: %d\r\n", log_info,
314 		flow->ipaddr_rule.qos_ipsrc_offset,
315 		flow->ipaddr_rule.qos_ipdst_offset);
316 }
317 
318 static inline void dpaa2_flow_fs_entry_log(
319 	const char *log_info, const struct rte_flow *flow)
320 {
321 	int idx;
322 	uint8_t *key, *mask;
323 
324 	if (!dpaa2_flow_control_log)
325 		return;
326 
327 	printf("\r\n%s FS/TC entry[%d] of TC[%d], extracts size is %d\r\n",
328 		log_info, flow->tc_index, flow->tc_id, flow->fs_real_key_size);
329 
330 	key = (uint8_t *)(size_t)flow->fs_rule.key_iova;
331 	mask = (uint8_t *)(size_t)flow->fs_rule.mask_iova;
332 
333 	printf("key:\r\n");
334 	for (idx = 0; idx < flow->fs_real_key_size; idx++)
335 		printf("%02x ", key[idx]);
336 
337 	printf("\r\nmask:\r\n");
338 	for (idx = 0; idx < flow->fs_real_key_size; idx++)
339 		printf("%02x ", mask[idx]);
340 
341 	printf("\r\n%s FS ipsrc: %d, ipdst: %d\r\n", log_info,
342 		flow->ipaddr_rule.fs_ipsrc_offset,
343 		flow->ipaddr_rule.fs_ipdst_offset);
344 }
345 
346 static inline void dpaa2_flow_extract_key_set(
347 	struct dpaa2_key_info *key_info, int index, uint8_t size)
348 {
349 	key_info->key_size[index] = size;
350 	if (index > 0) {
351 		key_info->key_offset[index] =
352 			key_info->key_offset[index - 1] +
353 			key_info->key_size[index - 1];
354 	} else {
355 		key_info->key_offset[index] = 0;
356 	}
357 	key_info->key_total_size += size;
358 }
359 
/* Append a from-header extract (prot/field) to @key_extract.
 *
 * Invariant maintained here: IP SRC/DST extracts always occupy the last
 * slot(s) of the profile, because their key size depends on the IP
 * version. A non-IP-address extract is therefore inserted *before* any
 * existing IP extract(s), which are shifted one slot toward the tail
 * and have their cached ipv4/ipv6 offsets bumped by the new field size.
 *
 * Returns 0 on success, -1 when the extract array is full.
 */
static int dpaa2_flow_extract_add(
	struct dpaa2_key_extract *key_extract,
	enum net_prot prot,
	uint32_t field, uint8_t field_size)
{
	int index, ip_src = -1, ip_dst = -1;
	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
	struct dpaa2_key_info *key_info = &key_extract->key_info;

	if (dpkg->num_extracts >=
		DPKG_MAX_NUM_OF_EXTRACTS) {
		DPAA2_PMD_WARN("Number of extracts overflows");
		return -1;
	}
	/* Before reorder, the IP SRC and IP DST are already last
	 * extract(s).
	 */
	for (index = 0; index < dpkg->num_extracts; index++) {
		if (dpkg->extracts[index].extract.from_hdr.prot ==
			NET_PROT_IP) {
			if (dpkg->extracts[index].extract.from_hdr.field ==
				NH_FLD_IP_SRC) {
				ip_src = index;
			}
			if (dpkg->extracts[index].extract.from_hdr.field ==
				NH_FLD_IP_DST) {
				ip_dst = index;
			}
		}
	}

	/* Sanity: IP extracts, when present, must already sit in the
	 * last two slots.
	 */
	if (ip_src >= 0)
		RTE_ASSERT((ip_src + 2) >= dpkg->num_extracts);

	if (ip_dst >= 0)
		RTE_ASSERT((ip_dst + 2) >= dpkg->num_extracts);

	/* New IP address extracts go to the tail; anything else is
	 * inserted just before the existing IP extract(s).
	 */
	if (prot == NET_PROT_IP &&
		(field == NH_FLD_IP_SRC ||
		field == NH_FLD_IP_DST)) {
		index = dpkg->num_extracts;
	} else {
		if (ip_src >= 0 && ip_dst >= 0)
			index = dpkg->num_extracts - 2;
		else if (ip_src >= 0 || ip_dst >= 0)
			index = dpkg->num_extracts - 1;
		else
			index = dpkg->num_extracts;
	}

	dpkg->extracts[index].type = DPKG_EXTRACT_FROM_HDR;
	dpkg->extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
	dpkg->extracts[index].extract.from_hdr.prot = prot;
	dpkg->extracts[index].extract.from_hdr.field = field;
	/* IP addresses contribute size 0 to the generic key accounting;
	 * their real size is tracked via the ipv4/ipv6 offsets below.
	 */
	if (prot == NET_PROT_IP &&
		(field == NH_FLD_IP_SRC ||
		field == NH_FLD_IP_DST)) {
		dpaa2_flow_extract_key_set(key_info, index, 0);
	} else {
		dpaa2_flow_extract_key_set(key_info, index, field_size);
	}

	if (prot == NET_PROT_IP) {
		if (field == NH_FLD_IP_SRC) {
			/* SRC sits right after DST when DST exists,
			 * otherwise right after the previous extract.
			 * NOTE(review): reads key_offset[index - 1] —
			 * assumes index > 0 when no DST offset is cached;
			 * confirm the first-extract-is-IP case.
			 */
			if (key_info->ipv4_dst_offset >= 0) {
				key_info->ipv4_src_offset =
					key_info->ipv4_dst_offset +
					NH_FLD_IPV4_ADDR_SIZE;
			} else {
				key_info->ipv4_src_offset =
					key_info->key_offset[index - 1] +
						key_info->key_size[index - 1];
			}
			if (key_info->ipv6_dst_offset >= 0) {
				key_info->ipv6_src_offset =
					key_info->ipv6_dst_offset +
					NH_FLD_IPV6_ADDR_SIZE;
			} else {
				key_info->ipv6_src_offset =
					key_info->key_offset[index - 1] +
						key_info->key_size[index - 1];
			}
		} else if (field == NH_FLD_IP_DST) {
			/* Mirror case: DST after SRC when SRC exists. */
			if (key_info->ipv4_src_offset >= 0) {
				key_info->ipv4_dst_offset =
					key_info->ipv4_src_offset +
					NH_FLD_IPV4_ADDR_SIZE;
			} else {
				key_info->ipv4_dst_offset =
					key_info->key_offset[index - 1] +
						key_info->key_size[index - 1];
			}
			if (key_info->ipv6_src_offset >= 0) {
				key_info->ipv6_dst_offset =
					key_info->ipv6_src_offset +
					NH_FLD_IPV6_ADDR_SIZE;
			} else {
				key_info->ipv6_dst_offset =
					key_info->key_offset[index - 1] +
						key_info->key_size[index - 1];
			}
		}
	}

	/* Appended at the tail: no reorder needed. */
	if (index == dpkg->num_extracts) {
		dpkg->num_extracts++;
		return 0;
	}

	/* The new extract overwrote an IP slot: re-create the displaced
	 * IP extract(s) one slot further down and shift their offsets by
	 * the size of the newly inserted field.
	 */
	if (ip_src >= 0) {
		ip_src++;
		dpkg->extracts[ip_src].type =
			DPKG_EXTRACT_FROM_HDR;
		dpkg->extracts[ip_src].extract.from_hdr.type =
			DPKG_FULL_FIELD;
		dpkg->extracts[ip_src].extract.from_hdr.prot =
			NET_PROT_IP;
		dpkg->extracts[ip_src].extract.from_hdr.field =
			NH_FLD_IP_SRC;
		dpaa2_flow_extract_key_set(key_info, ip_src, 0);
		key_info->ipv4_src_offset += field_size;
		key_info->ipv6_src_offset += field_size;
	}
	if (ip_dst >= 0) {
		ip_dst++;
		dpkg->extracts[ip_dst].type =
			DPKG_EXTRACT_FROM_HDR;
		dpkg->extracts[ip_dst].extract.from_hdr.type =
			DPKG_FULL_FIELD;
		dpkg->extracts[ip_dst].extract.from_hdr.prot =
			NET_PROT_IP;
		dpkg->extracts[ip_dst].extract.from_hdr.field =
			NH_FLD_IP_DST;
		dpaa2_flow_extract_key_set(key_info, ip_dst, 0);
		key_info->ipv4_dst_offset += field_size;
		key_info->ipv6_dst_offset += field_size;
	}

	dpkg->num_extracts++;

	return 0;
}
502 
503 static int dpaa2_flow_extract_add_raw(struct dpaa2_key_extract *key_extract,
504 				      int size)
505 {
506 	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
507 	struct dpaa2_key_info *key_info = &key_extract->key_info;
508 	int last_extract_size, index;
509 
510 	if (dpkg->num_extracts != 0 && dpkg->extracts[0].type !=
511 	    DPKG_EXTRACT_FROM_DATA) {
512 		DPAA2_PMD_WARN("RAW extract cannot be combined with others");
513 		return -1;
514 	}
515 
516 	last_extract_size = (size % DPAA2_FLOW_MAX_KEY_SIZE);
517 	dpkg->num_extracts = (size / DPAA2_FLOW_MAX_KEY_SIZE);
518 	if (last_extract_size)
519 		dpkg->num_extracts++;
520 	else
521 		last_extract_size = DPAA2_FLOW_MAX_KEY_SIZE;
522 
523 	for (index = 0; index < dpkg->num_extracts; index++) {
524 		dpkg->extracts[index].type = DPKG_EXTRACT_FROM_DATA;
525 		if (index == dpkg->num_extracts - 1)
526 			dpkg->extracts[index].extract.from_data.size =
527 				last_extract_size;
528 		else
529 			dpkg->extracts[index].extract.from_data.size =
530 				DPAA2_FLOW_MAX_KEY_SIZE;
531 		dpkg->extracts[index].extract.from_data.offset =
532 			DPAA2_FLOW_MAX_KEY_SIZE * index;
533 	}
534 
535 	key_info->key_total_size = size;
536 	return 0;
537 }
538 
539 /* Protocol discrimination.
540  * Discriminate IPv4/IPv6/vLan by Eth type.
541  * Discriminate UDP/TCP/ICMP by next proto of IP.
542  */
543 static inline int
544 dpaa2_flow_proto_discrimination_extract(
545 	struct dpaa2_key_extract *key_extract,
546 	enum rte_flow_item_type type)
547 {
548 	if (type == RTE_FLOW_ITEM_TYPE_ETH) {
549 		return dpaa2_flow_extract_add(
550 				key_extract, NET_PROT_ETH,
551 				NH_FLD_ETH_TYPE,
552 				sizeof(rte_be16_t));
553 	} else if (type == (enum rte_flow_item_type)
554 		DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
555 		return dpaa2_flow_extract_add(
556 				key_extract, NET_PROT_IP,
557 				NH_FLD_IP_PROTO,
558 				NH_FLD_IP_PROTO_SIZE);
559 	}
560 
561 	return -1;
562 }
563 
564 static inline int dpaa2_flow_extract_search(
565 	struct dpkg_profile_cfg *dpkg,
566 	enum net_prot prot, uint32_t field)
567 {
568 	int i;
569 
570 	for (i = 0; i < dpkg->num_extracts; i++) {
571 		if (dpkg->extracts[i].extract.from_hdr.prot == prot &&
572 			dpkg->extracts[i].extract.from_hdr.field == field) {
573 			return i;
574 		}
575 	}
576 
577 	return -1;
578 }
579 
580 static inline int dpaa2_flow_extract_key_offset(
581 	struct dpaa2_key_extract *key_extract,
582 	enum net_prot prot, uint32_t field)
583 {
584 	int i;
585 	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
586 	struct dpaa2_key_info *key_info = &key_extract->key_info;
587 
588 	if (prot == NET_PROT_IPV4 ||
589 		prot == NET_PROT_IPV6)
590 		i = dpaa2_flow_extract_search(dpkg, NET_PROT_IP, field);
591 	else
592 		i = dpaa2_flow_extract_search(dpkg, prot, field);
593 
594 	if (i >= 0) {
595 		if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_SRC)
596 			return key_info->ipv4_src_offset;
597 		else if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_DST)
598 			return key_info->ipv4_dst_offset;
599 		else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_SRC)
600 			return key_info->ipv6_src_offset;
601 		else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_DST)
602 			return key_info->ipv6_dst_offset;
603 		else
604 			return key_info->key_offset[i];
605 	} else {
606 		return -1;
607 	}
608 }
609 
/* Next-protocol discriminator. For an ETH item the following protocol
 * is identified by the ether type; for the generic IP item by the IP
 * next-protocol (L4) number.
 */
struct proto_discrimination {
	enum rte_flow_item_type type;
	union {
		rte_be16_t eth_type;
		uint8_t ip_proto;
	};
};
617 
618 static int
619 dpaa2_flow_proto_discrimination_rule(
620 	struct dpaa2_dev_priv *priv, struct rte_flow *flow,
621 	struct proto_discrimination proto, int group)
622 {
623 	enum net_prot prot;
624 	uint32_t field;
625 	int offset;
626 	size_t key_iova;
627 	size_t mask_iova;
628 	rte_be16_t eth_type;
629 	uint8_t ip_proto;
630 
631 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
632 		prot = NET_PROT_ETH;
633 		field = NH_FLD_ETH_TYPE;
634 	} else if (proto.type == DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
635 		prot = NET_PROT_IP;
636 		field = NH_FLD_IP_PROTO;
637 	} else {
638 		DPAA2_PMD_ERR(
639 			"Only Eth and IP support to discriminate next proto.");
640 		return -1;
641 	}
642 
643 	offset = dpaa2_flow_extract_key_offset(&priv->extract.qos_key_extract,
644 			prot, field);
645 	if (offset < 0) {
646 		DPAA2_PMD_ERR("QoS prot %d field %d extract failed",
647 				prot, field);
648 		return -1;
649 	}
650 	key_iova = flow->qos_rule.key_iova + offset;
651 	mask_iova = flow->qos_rule.mask_iova + offset;
652 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
653 		eth_type = proto.eth_type;
654 		memcpy((void *)key_iova, (const void *)(&eth_type),
655 			sizeof(rte_be16_t));
656 		eth_type = 0xffff;
657 		memcpy((void *)mask_iova, (const void *)(&eth_type),
658 			sizeof(rte_be16_t));
659 	} else {
660 		ip_proto = proto.ip_proto;
661 		memcpy((void *)key_iova, (const void *)(&ip_proto),
662 			sizeof(uint8_t));
663 		ip_proto = 0xff;
664 		memcpy((void *)mask_iova, (const void *)(&ip_proto),
665 			sizeof(uint8_t));
666 	}
667 
668 	offset = dpaa2_flow_extract_key_offset(
669 			&priv->extract.tc_key_extract[group],
670 			prot, field);
671 	if (offset < 0) {
672 		DPAA2_PMD_ERR("FS prot %d field %d extract failed",
673 				prot, field);
674 		return -1;
675 	}
676 	key_iova = flow->fs_rule.key_iova + offset;
677 	mask_iova = flow->fs_rule.mask_iova + offset;
678 
679 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
680 		eth_type = proto.eth_type;
681 		memcpy((void *)key_iova, (const void *)(&eth_type),
682 			sizeof(rte_be16_t));
683 		eth_type = 0xffff;
684 		memcpy((void *)mask_iova, (const void *)(&eth_type),
685 			sizeof(rte_be16_t));
686 	} else {
687 		ip_proto = proto.ip_proto;
688 		memcpy((void *)key_iova, (const void *)(&ip_proto),
689 			sizeof(uint8_t));
690 		ip_proto = 0xff;
691 		memcpy((void *)mask_iova, (const void *)(&ip_proto),
692 			sizeof(uint8_t));
693 	}
694 
695 	return 0;
696 }
697 
698 static inline int
699 dpaa2_flow_rule_data_set(
700 	struct dpaa2_key_extract *key_extract,
701 	struct dpni_rule_cfg *rule,
702 	enum net_prot prot, uint32_t field,
703 	const void *key, const void *mask, int size)
704 {
705 	int offset = dpaa2_flow_extract_key_offset(key_extract,
706 				prot, field);
707 
708 	if (offset < 0) {
709 		DPAA2_PMD_ERR("prot %d, field %d extract failed",
710 			prot, field);
711 		return -1;
712 	}
713 
714 	memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
715 	memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
716 
717 	return 0;
718 }
719 
720 static inline int
721 dpaa2_flow_rule_data_set_raw(struct dpni_rule_cfg *rule,
722 			     const void *key, const void *mask, int size)
723 {
724 	int offset = 0;
725 
726 	memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
727 	memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
728 
729 	return 0;
730 }
731 
732 static inline int
733 _dpaa2_flow_rule_move_ipaddr_tail(
734 	struct dpaa2_key_extract *key_extract,
735 	struct dpni_rule_cfg *rule, int src_offset,
736 	uint32_t field, bool ipv4)
737 {
738 	size_t key_src;
739 	size_t mask_src;
740 	size_t key_dst;
741 	size_t mask_dst;
742 	int dst_offset, len;
743 	enum net_prot prot;
744 	char tmp[NH_FLD_IPV6_ADDR_SIZE];
745 
746 	if (field != NH_FLD_IP_SRC &&
747 		field != NH_FLD_IP_DST) {
748 		DPAA2_PMD_ERR("Field of IP addr reorder must be IP SRC/DST");
749 		return -1;
750 	}
751 	if (ipv4)
752 		prot = NET_PROT_IPV4;
753 	else
754 		prot = NET_PROT_IPV6;
755 	dst_offset = dpaa2_flow_extract_key_offset(key_extract,
756 				prot, field);
757 	if (dst_offset < 0) {
758 		DPAA2_PMD_ERR("Field %d reorder extract failed", field);
759 		return -1;
760 	}
761 	key_src = rule->key_iova + src_offset;
762 	mask_src = rule->mask_iova + src_offset;
763 	key_dst = rule->key_iova + dst_offset;
764 	mask_dst = rule->mask_iova + dst_offset;
765 	if (ipv4)
766 		len = sizeof(rte_be32_t);
767 	else
768 		len = NH_FLD_IPV6_ADDR_SIZE;
769 
770 	memcpy(tmp, (char *)key_src, len);
771 	memset((char *)key_src, 0, len);
772 	memcpy((char *)key_dst, tmp, len);
773 
774 	memcpy(tmp, (char *)mask_src, len);
775 	memset((char *)mask_src, 0, len);
776 	memcpy((char *)mask_dst, tmp, len);
777 
778 	return 0;
779 }
780 
781 static inline int
782 dpaa2_flow_rule_move_ipaddr_tail(
783 	struct rte_flow *flow, struct dpaa2_dev_priv *priv,
784 	int fs_group)
785 {
786 	int ret;
787 	enum net_prot prot;
788 
789 	if (flow->ipaddr_rule.ipaddr_type == FLOW_NONE_IPADDR)
790 		return 0;
791 
792 	if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR)
793 		prot = NET_PROT_IPV4;
794 	else
795 		prot = NET_PROT_IPV6;
796 
797 	if (flow->ipaddr_rule.qos_ipsrc_offset >= 0) {
798 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
799 				&priv->extract.qos_key_extract,
800 				&flow->qos_rule,
801 				flow->ipaddr_rule.qos_ipsrc_offset,
802 				NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
803 		if (ret) {
804 			DPAA2_PMD_ERR("QoS src address reorder failed");
805 			return -1;
806 		}
807 		flow->ipaddr_rule.qos_ipsrc_offset =
808 			dpaa2_flow_extract_key_offset(
809 				&priv->extract.qos_key_extract,
810 				prot, NH_FLD_IP_SRC);
811 	}
812 
813 	if (flow->ipaddr_rule.qos_ipdst_offset >= 0) {
814 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
815 				&priv->extract.qos_key_extract,
816 				&flow->qos_rule,
817 				flow->ipaddr_rule.qos_ipdst_offset,
818 				NH_FLD_IP_DST, prot == NET_PROT_IPV4);
819 		if (ret) {
820 			DPAA2_PMD_ERR("QoS dst address reorder failed");
821 			return -1;
822 		}
823 		flow->ipaddr_rule.qos_ipdst_offset =
824 			dpaa2_flow_extract_key_offset(
825 				&priv->extract.qos_key_extract,
826 				prot, NH_FLD_IP_DST);
827 	}
828 
829 	if (flow->ipaddr_rule.fs_ipsrc_offset >= 0) {
830 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
831 				&priv->extract.tc_key_extract[fs_group],
832 				&flow->fs_rule,
833 				flow->ipaddr_rule.fs_ipsrc_offset,
834 				NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
835 		if (ret) {
836 			DPAA2_PMD_ERR("FS src address reorder failed");
837 			return -1;
838 		}
839 		flow->ipaddr_rule.fs_ipsrc_offset =
840 			dpaa2_flow_extract_key_offset(
841 				&priv->extract.tc_key_extract[fs_group],
842 				prot, NH_FLD_IP_SRC);
843 	}
844 	if (flow->ipaddr_rule.fs_ipdst_offset >= 0) {
845 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
846 				&priv->extract.tc_key_extract[fs_group],
847 				&flow->fs_rule,
848 				flow->ipaddr_rule.fs_ipdst_offset,
849 				NH_FLD_IP_DST, prot == NET_PROT_IPV4);
850 		if (ret) {
851 			DPAA2_PMD_ERR("FS dst address reorder failed");
852 			return -1;
853 		}
854 		flow->ipaddr_rule.fs_ipdst_offset =
855 			dpaa2_flow_extract_key_offset(
856 				&priv->extract.tc_key_extract[fs_group],
857 				prot, NH_FLD_IP_DST);
858 	}
859 
860 	return 0;
861 }
862 
863 static int
864 dpaa2_flow_extract_support(
865 	const uint8_t *mask_src,
866 	enum rte_flow_item_type type)
867 {
868 	char mask[64];
869 	int i, size = 0;
870 	const char *mask_support = 0;
871 
872 	switch (type) {
873 	case RTE_FLOW_ITEM_TYPE_ETH:
874 		mask_support = (const char *)&dpaa2_flow_item_eth_mask;
875 		size = sizeof(struct rte_flow_item_eth);
876 		break;
877 	case RTE_FLOW_ITEM_TYPE_VLAN:
878 		mask_support = (const char *)&dpaa2_flow_item_vlan_mask;
879 		size = sizeof(struct rte_flow_item_vlan);
880 		break;
881 	case RTE_FLOW_ITEM_TYPE_IPV4:
882 		mask_support = (const char *)&dpaa2_flow_item_ipv4_mask;
883 		size = sizeof(struct rte_flow_item_ipv4);
884 		break;
885 	case RTE_FLOW_ITEM_TYPE_IPV6:
886 		mask_support = (const char *)&dpaa2_flow_item_ipv6_mask;
887 		size = sizeof(struct rte_flow_item_ipv6);
888 		break;
889 	case RTE_FLOW_ITEM_TYPE_ICMP:
890 		mask_support = (const char *)&dpaa2_flow_item_icmp_mask;
891 		size = sizeof(struct rte_flow_item_icmp);
892 		break;
893 	case RTE_FLOW_ITEM_TYPE_UDP:
894 		mask_support = (const char *)&dpaa2_flow_item_udp_mask;
895 		size = sizeof(struct rte_flow_item_udp);
896 		break;
897 	case RTE_FLOW_ITEM_TYPE_TCP:
898 		mask_support = (const char *)&dpaa2_flow_item_tcp_mask;
899 		size = sizeof(struct rte_flow_item_tcp);
900 		break;
901 	case RTE_FLOW_ITEM_TYPE_SCTP:
902 		mask_support = (const char *)&dpaa2_flow_item_sctp_mask;
903 		size = sizeof(struct rte_flow_item_sctp);
904 		break;
905 	case RTE_FLOW_ITEM_TYPE_GRE:
906 		mask_support = (const char *)&dpaa2_flow_item_gre_mask;
907 		size = sizeof(struct rte_flow_item_gre);
908 		break;
909 	default:
910 		return -1;
911 	}
912 
913 	memcpy(mask, mask_support, size);
914 
915 	for (i = 0; i < size; i++)
916 		mask[i] = (mask[i] | mask_src[i]);
917 
918 	if (memcmp(mask, mask_support, size))
919 		return -1;
920 
921 	return 0;
922 }
923 
924 static int
925 dpaa2_configure_flow_eth(struct rte_flow *flow,
926 			 struct rte_eth_dev *dev,
927 			 const struct rte_flow_attr *attr,
928 			 const struct rte_flow_item *pattern,
929 			 const struct rte_flow_action actions[] __rte_unused,
930 			 struct rte_flow_error *error __rte_unused,
931 			 int *device_configured)
932 {
933 	int index, ret;
934 	int local_cfg = 0;
935 	uint32_t group;
936 	const struct rte_flow_item_eth *spec, *mask;
937 
938 	/* TODO: Currently upper bound of range parameter is not implemented */
939 	const struct rte_flow_item_eth *last __rte_unused;
940 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
941 	const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0};
942 
943 	group = attr->group;
944 
945 	/* Parse pattern list to get the matching parameters */
946 	spec    = (const struct rte_flow_item_eth *)pattern->spec;
947 	last    = (const struct rte_flow_item_eth *)pattern->last;
948 	mask    = (const struct rte_flow_item_eth *)
949 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_eth_mask);
950 	if (!spec) {
951 		/* Don't care any field of eth header,
952 		 * only care eth protocol.
953 		 */
954 		DPAA2_PMD_WARN("No pattern spec for Eth flow, just skip");
955 		return 0;
956 	}
957 
958 	/* Get traffic class index and flow id to be configured */
959 	flow->tc_id = group;
960 	flow->tc_index = attr->priority;
961 
962 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
963 		RTE_FLOW_ITEM_TYPE_ETH)) {
964 		DPAA2_PMD_WARN("Extract field(s) of ethernet not support.");
965 
966 		return -1;
967 	}
968 
969 	if (memcmp((const char *)&mask->hdr.src_addr, zero_cmp, RTE_ETHER_ADDR_LEN)) {
970 		index = dpaa2_flow_extract_search(
971 				&priv->extract.qos_key_extract.dpkg,
972 				NET_PROT_ETH, NH_FLD_ETH_SA);
973 		if (index < 0) {
974 			ret = dpaa2_flow_extract_add(
975 					&priv->extract.qos_key_extract,
976 					NET_PROT_ETH, NH_FLD_ETH_SA,
977 					RTE_ETHER_ADDR_LEN);
978 			if (ret) {
979 				DPAA2_PMD_ERR("QoS Extract add ETH_SA failed.");
980 
981 				return -1;
982 			}
983 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
984 		}
985 		index = dpaa2_flow_extract_search(
986 				&priv->extract.tc_key_extract[group].dpkg,
987 				NET_PROT_ETH, NH_FLD_ETH_SA);
988 		if (index < 0) {
989 			ret = dpaa2_flow_extract_add(
990 					&priv->extract.tc_key_extract[group],
991 					NET_PROT_ETH, NH_FLD_ETH_SA,
992 					RTE_ETHER_ADDR_LEN);
993 			if (ret) {
994 				DPAA2_PMD_ERR("FS Extract add ETH_SA failed.");
995 				return -1;
996 			}
997 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
998 		}
999 
1000 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1001 		if (ret) {
1002 			DPAA2_PMD_ERR(
1003 				"Move ipaddr before ETH_SA rule set failed");
1004 			return -1;
1005 		}
1006 
1007 		ret = dpaa2_flow_rule_data_set(
1008 				&priv->extract.qos_key_extract,
1009 				&flow->qos_rule,
1010 				NET_PROT_ETH,
1011 				NH_FLD_ETH_SA,
1012 				&spec->hdr.src_addr.addr_bytes,
1013 				&mask->hdr.src_addr.addr_bytes,
1014 				sizeof(struct rte_ether_addr));
1015 		if (ret) {
1016 			DPAA2_PMD_ERR("QoS NH_FLD_ETH_SA rule data set failed");
1017 			return -1;
1018 		}
1019 
1020 		ret = dpaa2_flow_rule_data_set(
1021 				&priv->extract.tc_key_extract[group],
1022 				&flow->fs_rule,
1023 				NET_PROT_ETH,
1024 				NH_FLD_ETH_SA,
1025 				&spec->hdr.src_addr.addr_bytes,
1026 				&mask->hdr.src_addr.addr_bytes,
1027 				sizeof(struct rte_ether_addr));
1028 		if (ret) {
1029 			DPAA2_PMD_ERR("FS NH_FLD_ETH_SA rule data set failed");
1030 			return -1;
1031 		}
1032 	}
1033 
1034 	if (memcmp((const char *)&mask->hdr.dst_addr, zero_cmp, RTE_ETHER_ADDR_LEN)) {
1035 		index = dpaa2_flow_extract_search(
1036 				&priv->extract.qos_key_extract.dpkg,
1037 				NET_PROT_ETH, NH_FLD_ETH_DA);
1038 		if (index < 0) {
1039 			ret = dpaa2_flow_extract_add(
1040 					&priv->extract.qos_key_extract,
1041 					NET_PROT_ETH, NH_FLD_ETH_DA,
1042 					RTE_ETHER_ADDR_LEN);
1043 			if (ret) {
1044 				DPAA2_PMD_ERR("QoS Extract add ETH_DA failed.");
1045 
1046 				return -1;
1047 			}
1048 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1049 		}
1050 
1051 		index = dpaa2_flow_extract_search(
1052 				&priv->extract.tc_key_extract[group].dpkg,
1053 				NET_PROT_ETH, NH_FLD_ETH_DA);
1054 		if (index < 0) {
1055 			ret = dpaa2_flow_extract_add(
1056 					&priv->extract.tc_key_extract[group],
1057 					NET_PROT_ETH, NH_FLD_ETH_DA,
1058 					RTE_ETHER_ADDR_LEN);
1059 			if (ret) {
1060 				DPAA2_PMD_ERR("FS Extract add ETH_DA failed.");
1061 
1062 				return -1;
1063 			}
1064 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1065 		}
1066 
1067 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1068 		if (ret) {
1069 			DPAA2_PMD_ERR(
1070 				"Move ipaddr before ETH DA rule set failed");
1071 			return -1;
1072 		}
1073 
1074 		ret = dpaa2_flow_rule_data_set(
1075 				&priv->extract.qos_key_extract,
1076 				&flow->qos_rule,
1077 				NET_PROT_ETH,
1078 				NH_FLD_ETH_DA,
1079 				&spec->hdr.dst_addr.addr_bytes,
1080 				&mask->hdr.dst_addr.addr_bytes,
1081 				sizeof(struct rte_ether_addr));
1082 		if (ret) {
1083 			DPAA2_PMD_ERR("QoS NH_FLD_ETH_DA rule data set failed");
1084 			return -1;
1085 		}
1086 
1087 		ret = dpaa2_flow_rule_data_set(
1088 				&priv->extract.tc_key_extract[group],
1089 				&flow->fs_rule,
1090 				NET_PROT_ETH,
1091 				NH_FLD_ETH_DA,
1092 				&spec->hdr.dst_addr.addr_bytes,
1093 				&mask->hdr.dst_addr.addr_bytes,
1094 				sizeof(struct rte_ether_addr));
1095 		if (ret) {
1096 			DPAA2_PMD_ERR("FS NH_FLD_ETH_DA rule data set failed");
1097 			return -1;
1098 		}
1099 	}
1100 
1101 	if (memcmp((const char *)&mask->hdr.ether_type, zero_cmp, sizeof(rte_be16_t))) {
1102 		index = dpaa2_flow_extract_search(
1103 				&priv->extract.qos_key_extract.dpkg,
1104 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1105 		if (index < 0) {
1106 			ret = dpaa2_flow_extract_add(
1107 					&priv->extract.qos_key_extract,
1108 					NET_PROT_ETH, NH_FLD_ETH_TYPE,
1109 					RTE_ETHER_TYPE_LEN);
1110 			if (ret) {
1111 				DPAA2_PMD_ERR("QoS Extract add ETH_TYPE failed.");
1112 
1113 				return -1;
1114 			}
1115 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1116 		}
1117 		index = dpaa2_flow_extract_search(
1118 				&priv->extract.tc_key_extract[group].dpkg,
1119 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1120 		if (index < 0) {
1121 			ret = dpaa2_flow_extract_add(
1122 					&priv->extract.tc_key_extract[group],
1123 					NET_PROT_ETH, NH_FLD_ETH_TYPE,
1124 					RTE_ETHER_TYPE_LEN);
1125 			if (ret) {
1126 				DPAA2_PMD_ERR("FS Extract add ETH_TYPE failed.");
1127 
1128 				return -1;
1129 			}
1130 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1131 		}
1132 
1133 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1134 		if (ret) {
1135 			DPAA2_PMD_ERR(
1136 				"Move ipaddr before ETH TYPE rule set failed");
1137 				return -1;
1138 		}
1139 
1140 		ret = dpaa2_flow_rule_data_set(
1141 				&priv->extract.qos_key_extract,
1142 				&flow->qos_rule,
1143 				NET_PROT_ETH,
1144 				NH_FLD_ETH_TYPE,
1145 				&spec->hdr.ether_type,
1146 				&mask->hdr.ether_type,
1147 				sizeof(rte_be16_t));
1148 		if (ret) {
1149 			DPAA2_PMD_ERR("QoS NH_FLD_ETH_TYPE rule data set failed");
1150 			return -1;
1151 		}
1152 
1153 		ret = dpaa2_flow_rule_data_set(
1154 				&priv->extract.tc_key_extract[group],
1155 				&flow->fs_rule,
1156 				NET_PROT_ETH,
1157 				NH_FLD_ETH_TYPE,
1158 				&spec->hdr.ether_type,
1159 				&mask->hdr.ether_type,
1160 				sizeof(rte_be16_t));
1161 		if (ret) {
1162 			DPAA2_PMD_ERR("FS NH_FLD_ETH_TYPE rule data set failed");
1163 			return -1;
1164 		}
1165 	}
1166 
1167 	(*device_configured) |= local_cfg;
1168 
1169 	return 0;
1170 }
1171 
1172 static int
1173 dpaa2_configure_flow_vlan(struct rte_flow *flow,
1174 			  struct rte_eth_dev *dev,
1175 			  const struct rte_flow_attr *attr,
1176 			  const struct rte_flow_item *pattern,
1177 			  const struct rte_flow_action actions[] __rte_unused,
1178 			  struct rte_flow_error *error __rte_unused,
1179 			  int *device_configured)
1180 {
1181 	int index, ret;
1182 	int local_cfg = 0;
1183 	uint32_t group;
1184 	const struct rte_flow_item_vlan *spec, *mask;
1185 
1186 	const struct rte_flow_item_vlan *last __rte_unused;
1187 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1188 
1189 	group = attr->group;
1190 
1191 	/* Parse pattern list to get the matching parameters */
1192 	spec    = (const struct rte_flow_item_vlan *)pattern->spec;
1193 	last    = (const struct rte_flow_item_vlan *)pattern->last;
1194 	mask    = (const struct rte_flow_item_vlan *)
1195 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_vlan_mask);
1196 
1197 	/* Get traffic class index and flow id to be configured */
1198 	flow->tc_id = group;
1199 	flow->tc_index = attr->priority;
1200 
1201 	if (!spec) {
1202 		/* Don't care any field of vlan header,
1203 		 * only care vlan protocol.
1204 		 */
1205 		/* Eth type is actually used for vLan classification.
1206 		 */
1207 		struct proto_discrimination proto;
1208 
1209 		index = dpaa2_flow_extract_search(
1210 				&priv->extract.qos_key_extract.dpkg,
1211 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1212 		if (index < 0) {
1213 			ret = dpaa2_flow_proto_discrimination_extract(
1214 						&priv->extract.qos_key_extract,
1215 						RTE_FLOW_ITEM_TYPE_ETH);
1216 			if (ret) {
1217 				DPAA2_PMD_ERR(
1218 				"QoS Ext ETH_TYPE to discriminate vLan failed");
1219 
1220 				return -1;
1221 			}
1222 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1223 		}
1224 
1225 		index = dpaa2_flow_extract_search(
1226 				&priv->extract.tc_key_extract[group].dpkg,
1227 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1228 		if (index < 0) {
1229 			ret = dpaa2_flow_proto_discrimination_extract(
1230 					&priv->extract.tc_key_extract[group],
1231 					RTE_FLOW_ITEM_TYPE_ETH);
1232 			if (ret) {
1233 				DPAA2_PMD_ERR(
1234 				"FS Ext ETH_TYPE to discriminate vLan failed.");
1235 
1236 				return -1;
1237 			}
1238 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1239 		}
1240 
1241 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1242 		if (ret) {
1243 			DPAA2_PMD_ERR(
1244 			"Move ipaddr before vLan discrimination set failed");
1245 			return -1;
1246 		}
1247 
1248 		proto.type = RTE_FLOW_ITEM_TYPE_ETH;
1249 		proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
1250 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1251 							proto, group);
1252 		if (ret) {
1253 			DPAA2_PMD_ERR("vLan discrimination rule set failed");
1254 			return -1;
1255 		}
1256 
1257 		(*device_configured) |= local_cfg;
1258 
1259 		return 0;
1260 	}
1261 
1262 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
1263 		RTE_FLOW_ITEM_TYPE_VLAN)) {
1264 		DPAA2_PMD_WARN("Extract field(s) of vlan not support.");
1265 
1266 		return -1;
1267 	}
1268 
1269 	if (!mask->hdr.vlan_tci)
1270 		return 0;
1271 
1272 	index = dpaa2_flow_extract_search(
1273 				&priv->extract.qos_key_extract.dpkg,
1274 				NET_PROT_VLAN, NH_FLD_VLAN_TCI);
1275 	if (index < 0) {
1276 		ret = dpaa2_flow_extract_add(
1277 						&priv->extract.qos_key_extract,
1278 						NET_PROT_VLAN,
1279 						NH_FLD_VLAN_TCI,
1280 						sizeof(rte_be16_t));
1281 		if (ret) {
1282 			DPAA2_PMD_ERR("QoS Extract add VLAN_TCI failed.");
1283 
1284 			return -1;
1285 		}
1286 		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1287 	}
1288 
1289 	index = dpaa2_flow_extract_search(
1290 			&priv->extract.tc_key_extract[group].dpkg,
1291 			NET_PROT_VLAN, NH_FLD_VLAN_TCI);
1292 	if (index < 0) {
1293 		ret = dpaa2_flow_extract_add(
1294 				&priv->extract.tc_key_extract[group],
1295 				NET_PROT_VLAN,
1296 				NH_FLD_VLAN_TCI,
1297 				sizeof(rte_be16_t));
1298 		if (ret) {
1299 			DPAA2_PMD_ERR("FS Extract add VLAN_TCI failed.");
1300 
1301 			return -1;
1302 		}
1303 		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1304 	}
1305 
1306 	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1307 	if (ret) {
1308 		DPAA2_PMD_ERR(
1309 			"Move ipaddr before VLAN TCI rule set failed");
1310 		return -1;
1311 	}
1312 
1313 	ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
1314 				&flow->qos_rule,
1315 				NET_PROT_VLAN,
1316 				NH_FLD_VLAN_TCI,
1317 				&spec->hdr.vlan_tci,
1318 				&mask->hdr.vlan_tci,
1319 				sizeof(rte_be16_t));
1320 	if (ret) {
1321 		DPAA2_PMD_ERR("QoS NH_FLD_VLAN_TCI rule data set failed");
1322 		return -1;
1323 	}
1324 
1325 	ret = dpaa2_flow_rule_data_set(
1326 			&priv->extract.tc_key_extract[group],
1327 			&flow->fs_rule,
1328 			NET_PROT_VLAN,
1329 			NH_FLD_VLAN_TCI,
1330 			&spec->hdr.vlan_tci,
1331 			&mask->hdr.vlan_tci,
1332 			sizeof(rte_be16_t));
1333 	if (ret) {
1334 		DPAA2_PMD_ERR("FS NH_FLD_VLAN_TCI rule data set failed");
1335 		return -1;
1336 	}
1337 
1338 	(*device_configured) |= local_cfg;
1339 
1340 	return 0;
1341 }
1342 
1343 static int
1344 dpaa2_configure_flow_ip_discrimation(
1345 	struct dpaa2_dev_priv *priv, struct rte_flow *flow,
1346 	const struct rte_flow_item *pattern,
1347 	int *local_cfg,	int *device_configured,
1348 	uint32_t group)
1349 {
1350 	int index, ret;
1351 	struct proto_discrimination proto;
1352 
1353 	index = dpaa2_flow_extract_search(
1354 			&priv->extract.qos_key_extract.dpkg,
1355 			NET_PROT_ETH, NH_FLD_ETH_TYPE);
1356 	if (index < 0) {
1357 		ret = dpaa2_flow_proto_discrimination_extract(
1358 				&priv->extract.qos_key_extract,
1359 				RTE_FLOW_ITEM_TYPE_ETH);
1360 		if (ret) {
1361 			DPAA2_PMD_ERR(
1362 			"QoS Extract ETH_TYPE to discriminate IP failed.");
1363 			return -1;
1364 		}
1365 		(*local_cfg) |= DPAA2_QOS_TABLE_RECONFIGURE;
1366 	}
1367 
1368 	index = dpaa2_flow_extract_search(
1369 			&priv->extract.tc_key_extract[group].dpkg,
1370 			NET_PROT_ETH, NH_FLD_ETH_TYPE);
1371 	if (index < 0) {
1372 		ret = dpaa2_flow_proto_discrimination_extract(
1373 				&priv->extract.tc_key_extract[group],
1374 				RTE_FLOW_ITEM_TYPE_ETH);
1375 		if (ret) {
1376 			DPAA2_PMD_ERR(
1377 			"FS Extract ETH_TYPE to discriminate IP failed.");
1378 			return -1;
1379 		}
1380 		(*local_cfg) |= DPAA2_FS_TABLE_RECONFIGURE;
1381 	}
1382 
1383 	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1384 	if (ret) {
1385 		DPAA2_PMD_ERR(
1386 			"Move ipaddr before IP discrimination set failed");
1387 		return -1;
1388 	}
1389 
1390 	proto.type = RTE_FLOW_ITEM_TYPE_ETH;
1391 	if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4)
1392 		proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
1393 	else
1394 		proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
1395 	ret = dpaa2_flow_proto_discrimination_rule(priv, flow, proto, group);
1396 	if (ret) {
1397 		DPAA2_PMD_ERR("IP discrimination rule set failed");
1398 		return -1;
1399 	}
1400 
1401 	(*device_configured) |= (*local_cfg);
1402 
1403 	return 0;
1404 }
1405 
1406 
1407 static int
1408 dpaa2_configure_flow_generic_ip(
1409 	struct rte_flow *flow,
1410 	struct rte_eth_dev *dev,
1411 	const struct rte_flow_attr *attr,
1412 	const struct rte_flow_item *pattern,
1413 	const struct rte_flow_action actions[] __rte_unused,
1414 	struct rte_flow_error *error __rte_unused,
1415 	int *device_configured)
1416 {
1417 	int index, ret;
1418 	int local_cfg = 0;
1419 	uint32_t group;
1420 	const struct rte_flow_item_ipv4 *spec_ipv4 = 0,
1421 		*mask_ipv4 = 0;
1422 	const struct rte_flow_item_ipv6 *spec_ipv6 = 0,
1423 		*mask_ipv6 = 0;
1424 	const void *key, *mask;
1425 	enum net_prot prot;
1426 
1427 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1428 	const char zero_cmp[NH_FLD_IPV6_ADDR_SIZE] = {0};
1429 	int size;
1430 
1431 	group = attr->group;
1432 
1433 	/* Parse pattern list to get the matching parameters */
1434 	if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1435 		spec_ipv4 = (const struct rte_flow_item_ipv4 *)pattern->spec;
1436 		mask_ipv4 = (const struct rte_flow_item_ipv4 *)
1437 			(pattern->mask ? pattern->mask :
1438 					&dpaa2_flow_item_ipv4_mask);
1439 	} else {
1440 		spec_ipv6 = (const struct rte_flow_item_ipv6 *)pattern->spec;
1441 		mask_ipv6 = (const struct rte_flow_item_ipv6 *)
1442 			(pattern->mask ? pattern->mask :
1443 					&dpaa2_flow_item_ipv6_mask);
1444 	}
1445 
1446 	/* Get traffic class index and flow id to be configured */
1447 	flow->tc_id = group;
1448 	flow->tc_index = attr->priority;
1449 
1450 	ret = dpaa2_configure_flow_ip_discrimation(priv,
1451 			flow, pattern, &local_cfg,
1452 			device_configured, group);
1453 	if (ret) {
1454 		DPAA2_PMD_ERR("IP discrimination failed!");
1455 		return -1;
1456 	}
1457 
1458 	if (!spec_ipv4 && !spec_ipv6)
1459 		return 0;
1460 
1461 	if (mask_ipv4) {
1462 		if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv4,
1463 			RTE_FLOW_ITEM_TYPE_IPV4)) {
1464 			DPAA2_PMD_WARN("Extract field(s) of IPv4 not support.");
1465 
1466 			return -1;
1467 		}
1468 	}
1469 
1470 	if (mask_ipv6) {
1471 		if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv6,
1472 			RTE_FLOW_ITEM_TYPE_IPV6)) {
1473 			DPAA2_PMD_WARN("Extract field(s) of IPv6 not support.");
1474 
1475 			return -1;
1476 		}
1477 	}
1478 
1479 	if (mask_ipv4 && (mask_ipv4->hdr.src_addr ||
1480 		mask_ipv4->hdr.dst_addr)) {
1481 		flow->ipaddr_rule.ipaddr_type = FLOW_IPV4_ADDR;
1482 	} else if (mask_ipv6 &&
1483 		(memcmp((const char *)mask_ipv6->hdr.src_addr,
1484 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE) ||
1485 		memcmp((const char *)mask_ipv6->hdr.dst_addr,
1486 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1487 		flow->ipaddr_rule.ipaddr_type = FLOW_IPV6_ADDR;
1488 	}
1489 
1490 	if ((mask_ipv4 && mask_ipv4->hdr.src_addr) ||
1491 		(mask_ipv6 &&
1492 			memcmp((const char *)mask_ipv6->hdr.src_addr,
1493 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1494 		index = dpaa2_flow_extract_search(
1495 				&priv->extract.qos_key_extract.dpkg,
1496 				NET_PROT_IP, NH_FLD_IP_SRC);
1497 		if (index < 0) {
1498 			ret = dpaa2_flow_extract_add(
1499 					&priv->extract.qos_key_extract,
1500 					NET_PROT_IP,
1501 					NH_FLD_IP_SRC,
1502 					0);
1503 			if (ret) {
1504 				DPAA2_PMD_ERR("QoS Extract add IP_SRC failed.");
1505 
1506 				return -1;
1507 			}
1508 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1509 		}
1510 
1511 		index = dpaa2_flow_extract_search(
1512 				&priv->extract.tc_key_extract[group].dpkg,
1513 				NET_PROT_IP, NH_FLD_IP_SRC);
1514 		if (index < 0) {
1515 			ret = dpaa2_flow_extract_add(
1516 					&priv->extract.tc_key_extract[group],
1517 					NET_PROT_IP,
1518 					NH_FLD_IP_SRC,
1519 					0);
1520 			if (ret) {
1521 				DPAA2_PMD_ERR("FS Extract add IP_SRC failed.");
1522 
1523 				return -1;
1524 			}
1525 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1526 		}
1527 
1528 		if (spec_ipv4)
1529 			key = &spec_ipv4->hdr.src_addr;
1530 		else
1531 			key = &spec_ipv6->hdr.src_addr[0];
1532 		if (mask_ipv4) {
1533 			mask = &mask_ipv4->hdr.src_addr;
1534 			size = NH_FLD_IPV4_ADDR_SIZE;
1535 			prot = NET_PROT_IPV4;
1536 		} else {
1537 			mask = &mask_ipv6->hdr.src_addr[0];
1538 			size = NH_FLD_IPV6_ADDR_SIZE;
1539 			prot = NET_PROT_IPV6;
1540 		}
1541 
1542 		ret = dpaa2_flow_rule_data_set(
1543 				&priv->extract.qos_key_extract,
1544 				&flow->qos_rule,
1545 				prot, NH_FLD_IP_SRC,
1546 				key,	mask, size);
1547 		if (ret) {
1548 			DPAA2_PMD_ERR("QoS NH_FLD_IP_SRC rule data set failed");
1549 			return -1;
1550 		}
1551 
1552 		ret = dpaa2_flow_rule_data_set(
1553 				&priv->extract.tc_key_extract[group],
1554 				&flow->fs_rule,
1555 				prot, NH_FLD_IP_SRC,
1556 				key,	mask, size);
1557 		if (ret) {
1558 			DPAA2_PMD_ERR("FS NH_FLD_IP_SRC rule data set failed");
1559 			return -1;
1560 		}
1561 
1562 		flow->ipaddr_rule.qos_ipsrc_offset =
1563 			dpaa2_flow_extract_key_offset(
1564 				&priv->extract.qos_key_extract,
1565 				prot, NH_FLD_IP_SRC);
1566 		flow->ipaddr_rule.fs_ipsrc_offset =
1567 			dpaa2_flow_extract_key_offset(
1568 				&priv->extract.tc_key_extract[group],
1569 				prot, NH_FLD_IP_SRC);
1570 	}
1571 
1572 	if ((mask_ipv4 && mask_ipv4->hdr.dst_addr) ||
1573 		(mask_ipv6 &&
1574 			memcmp((const char *)mask_ipv6->hdr.dst_addr,
1575 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1576 		index = dpaa2_flow_extract_search(
1577 				&priv->extract.qos_key_extract.dpkg,
1578 				NET_PROT_IP, NH_FLD_IP_DST);
1579 		if (index < 0) {
1580 			if (mask_ipv4)
1581 				size = NH_FLD_IPV4_ADDR_SIZE;
1582 			else
1583 				size = NH_FLD_IPV6_ADDR_SIZE;
1584 			ret = dpaa2_flow_extract_add(
1585 					&priv->extract.qos_key_extract,
1586 					NET_PROT_IP,
1587 					NH_FLD_IP_DST,
1588 					size);
1589 			if (ret) {
1590 				DPAA2_PMD_ERR("QoS Extract add IP_DST failed.");
1591 
1592 				return -1;
1593 			}
1594 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1595 		}
1596 
1597 		index = dpaa2_flow_extract_search(
1598 				&priv->extract.tc_key_extract[group].dpkg,
1599 				NET_PROT_IP, NH_FLD_IP_DST);
1600 		if (index < 0) {
1601 			if (mask_ipv4)
1602 				size = NH_FLD_IPV4_ADDR_SIZE;
1603 			else
1604 				size = NH_FLD_IPV6_ADDR_SIZE;
1605 			ret = dpaa2_flow_extract_add(
1606 					&priv->extract.tc_key_extract[group],
1607 					NET_PROT_IP,
1608 					NH_FLD_IP_DST,
1609 					size);
1610 			if (ret) {
1611 				DPAA2_PMD_ERR("FS Extract add IP_DST failed.");
1612 
1613 				return -1;
1614 			}
1615 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1616 		}
1617 
1618 		if (spec_ipv4)
1619 			key = &spec_ipv4->hdr.dst_addr;
1620 		else
1621 			key = spec_ipv6->hdr.dst_addr;
1622 		if (mask_ipv4) {
1623 			mask = &mask_ipv4->hdr.dst_addr;
1624 			size = NH_FLD_IPV4_ADDR_SIZE;
1625 			prot = NET_PROT_IPV4;
1626 		} else {
1627 			mask = &mask_ipv6->hdr.dst_addr[0];
1628 			size = NH_FLD_IPV6_ADDR_SIZE;
1629 			prot = NET_PROT_IPV6;
1630 		}
1631 
1632 		ret = dpaa2_flow_rule_data_set(
1633 				&priv->extract.qos_key_extract,
1634 				&flow->qos_rule,
1635 				prot, NH_FLD_IP_DST,
1636 				key,	mask, size);
1637 		if (ret) {
1638 			DPAA2_PMD_ERR("QoS NH_FLD_IP_DST rule data set failed");
1639 			return -1;
1640 		}
1641 
1642 		ret = dpaa2_flow_rule_data_set(
1643 				&priv->extract.tc_key_extract[group],
1644 				&flow->fs_rule,
1645 				prot, NH_FLD_IP_DST,
1646 				key,	mask, size);
1647 		if (ret) {
1648 			DPAA2_PMD_ERR("FS NH_FLD_IP_DST rule data set failed");
1649 			return -1;
1650 		}
1651 		flow->ipaddr_rule.qos_ipdst_offset =
1652 			dpaa2_flow_extract_key_offset(
1653 				&priv->extract.qos_key_extract,
1654 				prot, NH_FLD_IP_DST);
1655 		flow->ipaddr_rule.fs_ipdst_offset =
1656 			dpaa2_flow_extract_key_offset(
1657 				&priv->extract.tc_key_extract[group],
1658 				prot, NH_FLD_IP_DST);
1659 	}
1660 
1661 	if ((mask_ipv4 && mask_ipv4->hdr.next_proto_id) ||
1662 		(mask_ipv6 && mask_ipv6->hdr.proto)) {
1663 		index = dpaa2_flow_extract_search(
1664 				&priv->extract.qos_key_extract.dpkg,
1665 				NET_PROT_IP, NH_FLD_IP_PROTO);
1666 		if (index < 0) {
1667 			ret = dpaa2_flow_extract_add(
1668 				&priv->extract.qos_key_extract,
1669 				NET_PROT_IP,
1670 				NH_FLD_IP_PROTO,
1671 				NH_FLD_IP_PROTO_SIZE);
1672 			if (ret) {
1673 				DPAA2_PMD_ERR("QoS Extract add IP_DST failed.");
1674 
1675 				return -1;
1676 			}
1677 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1678 		}
1679 
1680 		index = dpaa2_flow_extract_search(
1681 				&priv->extract.tc_key_extract[group].dpkg,
1682 				NET_PROT_IP, NH_FLD_IP_PROTO);
1683 		if (index < 0) {
1684 			ret = dpaa2_flow_extract_add(
1685 					&priv->extract.tc_key_extract[group],
1686 					NET_PROT_IP,
1687 					NH_FLD_IP_PROTO,
1688 					NH_FLD_IP_PROTO_SIZE);
1689 			if (ret) {
1690 				DPAA2_PMD_ERR("FS Extract add IP_DST failed.");
1691 
1692 				return -1;
1693 			}
1694 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1695 		}
1696 
1697 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1698 		if (ret) {
1699 			DPAA2_PMD_ERR(
1700 				"Move ipaddr after NH_FLD_IP_PROTO rule set failed");
1701 			return -1;
1702 		}
1703 
1704 		if (spec_ipv4)
1705 			key = &spec_ipv4->hdr.next_proto_id;
1706 		else
1707 			key = &spec_ipv6->hdr.proto;
1708 		if (mask_ipv4)
1709 			mask = &mask_ipv4->hdr.next_proto_id;
1710 		else
1711 			mask = &mask_ipv6->hdr.proto;
1712 
1713 		ret = dpaa2_flow_rule_data_set(
1714 				&priv->extract.qos_key_extract,
1715 				&flow->qos_rule,
1716 				NET_PROT_IP,
1717 				NH_FLD_IP_PROTO,
1718 				key,	mask, NH_FLD_IP_PROTO_SIZE);
1719 		if (ret) {
1720 			DPAA2_PMD_ERR("QoS NH_FLD_IP_PROTO rule data set failed");
1721 			return -1;
1722 		}
1723 
1724 		ret = dpaa2_flow_rule_data_set(
1725 				&priv->extract.tc_key_extract[group],
1726 				&flow->fs_rule,
1727 				NET_PROT_IP,
1728 				NH_FLD_IP_PROTO,
1729 				key,	mask, NH_FLD_IP_PROTO_SIZE);
1730 		if (ret) {
1731 			DPAA2_PMD_ERR("FS NH_FLD_IP_PROTO rule data set failed");
1732 			return -1;
1733 		}
1734 	}
1735 
1736 	(*device_configured) |= local_cfg;
1737 
1738 	return 0;
1739 }
1740 
1741 static int
1742 dpaa2_configure_flow_icmp(struct rte_flow *flow,
1743 			  struct rte_eth_dev *dev,
1744 			  const struct rte_flow_attr *attr,
1745 			  const struct rte_flow_item *pattern,
1746 			  const struct rte_flow_action actions[] __rte_unused,
1747 			  struct rte_flow_error *error __rte_unused,
1748 			  int *device_configured)
1749 {
1750 	int index, ret;
1751 	int local_cfg = 0;
1752 	uint32_t group;
1753 	const struct rte_flow_item_icmp *spec, *mask;
1754 
1755 	const struct rte_flow_item_icmp *last __rte_unused;
1756 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1757 
1758 	group = attr->group;
1759 
1760 	/* Parse pattern list to get the matching parameters */
1761 	spec    = (const struct rte_flow_item_icmp *)pattern->spec;
1762 	last    = (const struct rte_flow_item_icmp *)pattern->last;
1763 	mask    = (const struct rte_flow_item_icmp *)
1764 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_icmp_mask);
1765 
1766 	/* Get traffic class index and flow id to be configured */
1767 	flow->tc_id = group;
1768 	flow->tc_index = attr->priority;
1769 
1770 	if (!spec) {
1771 		/* Don't care any field of ICMP header,
1772 		 * only care ICMP protocol.
1773 		 * Example: flow create 0 ingress pattern icmp /
1774 		 */
1775 		/* Next proto of Generical IP is actually used
1776 		 * for ICMP identification.
1777 		 */
1778 		struct proto_discrimination proto;
1779 
1780 		index = dpaa2_flow_extract_search(
1781 				&priv->extract.qos_key_extract.dpkg,
1782 				NET_PROT_IP, NH_FLD_IP_PROTO);
1783 		if (index < 0) {
1784 			ret = dpaa2_flow_proto_discrimination_extract(
1785 					&priv->extract.qos_key_extract,
1786 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1787 			if (ret) {
1788 				DPAA2_PMD_ERR(
1789 					"QoS Extract IP protocol to discriminate ICMP failed.");
1790 
1791 				return -1;
1792 			}
1793 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1794 		}
1795 
1796 		index = dpaa2_flow_extract_search(
1797 				&priv->extract.tc_key_extract[group].dpkg,
1798 				NET_PROT_IP, NH_FLD_IP_PROTO);
1799 		if (index < 0) {
1800 			ret = dpaa2_flow_proto_discrimination_extract(
1801 					&priv->extract.tc_key_extract[group],
1802 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1803 			if (ret) {
1804 				DPAA2_PMD_ERR(
1805 					"FS Extract IP protocol to discriminate ICMP failed.");
1806 
1807 				return -1;
1808 			}
1809 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1810 		}
1811 
1812 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1813 		if (ret) {
1814 			DPAA2_PMD_ERR(
1815 				"Move IP addr before ICMP discrimination set failed");
1816 			return -1;
1817 		}
1818 
1819 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
1820 		proto.ip_proto = IPPROTO_ICMP;
1821 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1822 							proto, group);
1823 		if (ret) {
1824 			DPAA2_PMD_ERR("ICMP discrimination rule set failed");
1825 			return -1;
1826 		}
1827 
1828 		(*device_configured) |= local_cfg;
1829 
1830 		return 0;
1831 	}
1832 
1833 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
1834 		RTE_FLOW_ITEM_TYPE_ICMP)) {
1835 		DPAA2_PMD_WARN("Extract field(s) of ICMP not support.");
1836 
1837 		return -1;
1838 	}
1839 
1840 	if (mask->hdr.icmp_type) {
1841 		index = dpaa2_flow_extract_search(
1842 				&priv->extract.qos_key_extract.dpkg,
1843 				NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
1844 		if (index < 0) {
1845 			ret = dpaa2_flow_extract_add(
1846 					&priv->extract.qos_key_extract,
1847 					NET_PROT_ICMP,
1848 					NH_FLD_ICMP_TYPE,
1849 					NH_FLD_ICMP_TYPE_SIZE);
1850 			if (ret) {
1851 				DPAA2_PMD_ERR("QoS Extract add ICMP_TYPE failed.");
1852 
1853 				return -1;
1854 			}
1855 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1856 		}
1857 
1858 		index = dpaa2_flow_extract_search(
1859 				&priv->extract.tc_key_extract[group].dpkg,
1860 				NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
1861 		if (index < 0) {
1862 			ret = dpaa2_flow_extract_add(
1863 					&priv->extract.tc_key_extract[group],
1864 					NET_PROT_ICMP,
1865 					NH_FLD_ICMP_TYPE,
1866 					NH_FLD_ICMP_TYPE_SIZE);
1867 			if (ret) {
1868 				DPAA2_PMD_ERR("FS Extract add ICMP_TYPE failed.");
1869 
1870 				return -1;
1871 			}
1872 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1873 		}
1874 
1875 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1876 		if (ret) {
1877 			DPAA2_PMD_ERR(
1878 				"Move ipaddr before ICMP TYPE set failed");
1879 			return -1;
1880 		}
1881 
1882 		ret = dpaa2_flow_rule_data_set(
1883 				&priv->extract.qos_key_extract,
1884 				&flow->qos_rule,
1885 				NET_PROT_ICMP,
1886 				NH_FLD_ICMP_TYPE,
1887 				&spec->hdr.icmp_type,
1888 				&mask->hdr.icmp_type,
1889 				NH_FLD_ICMP_TYPE_SIZE);
1890 		if (ret) {
1891 			DPAA2_PMD_ERR("QoS NH_FLD_ICMP_TYPE rule data set failed");
1892 			return -1;
1893 		}
1894 
1895 		ret = dpaa2_flow_rule_data_set(
1896 				&priv->extract.tc_key_extract[group],
1897 				&flow->fs_rule,
1898 				NET_PROT_ICMP,
1899 				NH_FLD_ICMP_TYPE,
1900 				&spec->hdr.icmp_type,
1901 				&mask->hdr.icmp_type,
1902 				NH_FLD_ICMP_TYPE_SIZE);
1903 		if (ret) {
1904 			DPAA2_PMD_ERR("FS NH_FLD_ICMP_TYPE rule data set failed");
1905 			return -1;
1906 		}
1907 	}
1908 
1909 	if (mask->hdr.icmp_code) {
1910 		index = dpaa2_flow_extract_search(
1911 				&priv->extract.qos_key_extract.dpkg,
1912 				NET_PROT_ICMP, NH_FLD_ICMP_CODE);
1913 		if (index < 0) {
1914 			ret = dpaa2_flow_extract_add(
1915 					&priv->extract.qos_key_extract,
1916 					NET_PROT_ICMP,
1917 					NH_FLD_ICMP_CODE,
1918 					NH_FLD_ICMP_CODE_SIZE);
1919 			if (ret) {
1920 				DPAA2_PMD_ERR("QoS Extract add ICMP_CODE failed.");
1921 
1922 				return -1;
1923 			}
1924 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1925 		}
1926 
1927 		index = dpaa2_flow_extract_search(
1928 				&priv->extract.tc_key_extract[group].dpkg,
1929 				NET_PROT_ICMP, NH_FLD_ICMP_CODE);
1930 		if (index < 0) {
1931 			ret = dpaa2_flow_extract_add(
1932 					&priv->extract.tc_key_extract[group],
1933 					NET_PROT_ICMP,
1934 					NH_FLD_ICMP_CODE,
1935 					NH_FLD_ICMP_CODE_SIZE);
1936 			if (ret) {
1937 				DPAA2_PMD_ERR("FS Extract add ICMP_CODE failed.");
1938 
1939 				return -1;
1940 			}
1941 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1942 		}
1943 
1944 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1945 		if (ret) {
1946 			DPAA2_PMD_ERR(
1947 				"Move ipaddr after ICMP CODE set failed");
1948 			return -1;
1949 		}
1950 
1951 		ret = dpaa2_flow_rule_data_set(
1952 				&priv->extract.qos_key_extract,
1953 				&flow->qos_rule,
1954 				NET_PROT_ICMP,
1955 				NH_FLD_ICMP_CODE,
1956 				&spec->hdr.icmp_code,
1957 				&mask->hdr.icmp_code,
1958 				NH_FLD_ICMP_CODE_SIZE);
1959 		if (ret) {
1960 			DPAA2_PMD_ERR("QoS NH_FLD_ICMP_CODE rule data set failed");
1961 			return -1;
1962 		}
1963 
1964 		ret = dpaa2_flow_rule_data_set(
1965 				&priv->extract.tc_key_extract[group],
1966 				&flow->fs_rule,
1967 				NET_PROT_ICMP,
1968 				NH_FLD_ICMP_CODE,
1969 				&spec->hdr.icmp_code,
1970 				&mask->hdr.icmp_code,
1971 				NH_FLD_ICMP_CODE_SIZE);
1972 		if (ret) {
1973 			DPAA2_PMD_ERR("FS NH_FLD_ICMP_CODE rule data set failed");
1974 			return -1;
1975 		}
1976 	}
1977 
1978 	(*device_configured) |= local_cfg;
1979 
1980 	return 0;
1981 }
1982 
1983 static int
1984 dpaa2_configure_flow_udp(struct rte_flow *flow,
1985 			 struct rte_eth_dev *dev,
1986 			  const struct rte_flow_attr *attr,
1987 			  const struct rte_flow_item *pattern,
1988 			  const struct rte_flow_action actions[] __rte_unused,
1989 			  struct rte_flow_error *error __rte_unused,
1990 			  int *device_configured)
1991 {
1992 	int index, ret;
1993 	int local_cfg = 0;
1994 	uint32_t group;
1995 	const struct rte_flow_item_udp *spec, *mask;
1996 
1997 	const struct rte_flow_item_udp *last __rte_unused;
1998 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1999 
2000 	group = attr->group;
2001 
2002 	/* Parse pattern list to get the matching parameters */
2003 	spec    = (const struct rte_flow_item_udp *)pattern->spec;
2004 	last    = (const struct rte_flow_item_udp *)pattern->last;
2005 	mask    = (const struct rte_flow_item_udp *)
2006 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_udp_mask);
2007 
2008 	/* Get traffic class index and flow id to be configured */
2009 	flow->tc_id = group;
2010 	flow->tc_index = attr->priority;
2011 
2012 	if (!spec || !mc_l4_port_identification) {
2013 		struct proto_discrimination proto;
2014 
2015 		index = dpaa2_flow_extract_search(
2016 				&priv->extract.qos_key_extract.dpkg,
2017 				NET_PROT_IP, NH_FLD_IP_PROTO);
2018 		if (index < 0) {
2019 			ret = dpaa2_flow_proto_discrimination_extract(
2020 					&priv->extract.qos_key_extract,
2021 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2022 			if (ret) {
2023 				DPAA2_PMD_ERR(
2024 					"QoS Extract IP protocol to discriminate UDP failed.");
2025 
2026 				return -1;
2027 			}
2028 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2029 		}
2030 
2031 		index = dpaa2_flow_extract_search(
2032 				&priv->extract.tc_key_extract[group].dpkg,
2033 				NET_PROT_IP, NH_FLD_IP_PROTO);
2034 		if (index < 0) {
2035 			ret = dpaa2_flow_proto_discrimination_extract(
2036 				&priv->extract.tc_key_extract[group],
2037 				DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2038 			if (ret) {
2039 				DPAA2_PMD_ERR(
2040 					"FS Extract IP protocol to discriminate UDP failed.");
2041 
2042 				return -1;
2043 			}
2044 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2045 		}
2046 
2047 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2048 		if (ret) {
2049 			DPAA2_PMD_ERR(
2050 				"Move IP addr before UDP discrimination set failed");
2051 			return -1;
2052 		}
2053 
2054 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2055 		proto.ip_proto = IPPROTO_UDP;
2056 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2057 							proto, group);
2058 		if (ret) {
2059 			DPAA2_PMD_ERR("UDP discrimination rule set failed");
2060 			return -1;
2061 		}
2062 
2063 		(*device_configured) |= local_cfg;
2064 
2065 		if (!spec)
2066 			return 0;
2067 	}
2068 
2069 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2070 		RTE_FLOW_ITEM_TYPE_UDP)) {
2071 		DPAA2_PMD_WARN("Extract field(s) of UDP not support.");
2072 
2073 		return -1;
2074 	}
2075 
2076 	if (mask->hdr.src_port) {
2077 		index = dpaa2_flow_extract_search(
2078 				&priv->extract.qos_key_extract.dpkg,
2079 				NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
2080 		if (index < 0) {
2081 			ret = dpaa2_flow_extract_add(
2082 					&priv->extract.qos_key_extract,
2083 				NET_PROT_UDP,
2084 				NH_FLD_UDP_PORT_SRC,
2085 				NH_FLD_UDP_PORT_SIZE);
2086 			if (ret) {
2087 				DPAA2_PMD_ERR("QoS Extract add UDP_SRC failed.");
2088 
2089 				return -1;
2090 			}
2091 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2092 		}
2093 
2094 		index = dpaa2_flow_extract_search(
2095 				&priv->extract.tc_key_extract[group].dpkg,
2096 				NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
2097 		if (index < 0) {
2098 			ret = dpaa2_flow_extract_add(
2099 					&priv->extract.tc_key_extract[group],
2100 					NET_PROT_UDP,
2101 					NH_FLD_UDP_PORT_SRC,
2102 					NH_FLD_UDP_PORT_SIZE);
2103 			if (ret) {
2104 				DPAA2_PMD_ERR("FS Extract add UDP_SRC failed.");
2105 
2106 				return -1;
2107 			}
2108 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2109 		}
2110 
2111 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2112 		if (ret) {
2113 			DPAA2_PMD_ERR(
2114 				"Move ipaddr before UDP_PORT_SRC set failed");
2115 			return -1;
2116 		}
2117 
2118 		ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
2119 				&flow->qos_rule,
2120 				NET_PROT_UDP,
2121 				NH_FLD_UDP_PORT_SRC,
2122 				&spec->hdr.src_port,
2123 				&mask->hdr.src_port,
2124 				NH_FLD_UDP_PORT_SIZE);
2125 		if (ret) {
2126 			DPAA2_PMD_ERR(
2127 				"QoS NH_FLD_UDP_PORT_SRC rule data set failed");
2128 			return -1;
2129 		}
2130 
2131 		ret = dpaa2_flow_rule_data_set(
2132 				&priv->extract.tc_key_extract[group],
2133 				&flow->fs_rule,
2134 				NET_PROT_UDP,
2135 				NH_FLD_UDP_PORT_SRC,
2136 				&spec->hdr.src_port,
2137 				&mask->hdr.src_port,
2138 				NH_FLD_UDP_PORT_SIZE);
2139 		if (ret) {
2140 			DPAA2_PMD_ERR(
2141 				"FS NH_FLD_UDP_PORT_SRC rule data set failed");
2142 			return -1;
2143 		}
2144 	}
2145 
2146 	if (mask->hdr.dst_port) {
2147 		index = dpaa2_flow_extract_search(
2148 				&priv->extract.qos_key_extract.dpkg,
2149 				NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
2150 		if (index < 0) {
2151 			ret = dpaa2_flow_extract_add(
2152 					&priv->extract.qos_key_extract,
2153 					NET_PROT_UDP,
2154 					NH_FLD_UDP_PORT_DST,
2155 					NH_FLD_UDP_PORT_SIZE);
2156 			if (ret) {
2157 				DPAA2_PMD_ERR("QoS Extract add UDP_DST failed.");
2158 
2159 				return -1;
2160 			}
2161 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2162 		}
2163 
2164 		index = dpaa2_flow_extract_search(
2165 				&priv->extract.tc_key_extract[group].dpkg,
2166 				NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
2167 		if (index < 0) {
2168 			ret = dpaa2_flow_extract_add(
2169 					&priv->extract.tc_key_extract[group],
2170 					NET_PROT_UDP,
2171 					NH_FLD_UDP_PORT_DST,
2172 					NH_FLD_UDP_PORT_SIZE);
2173 			if (ret) {
2174 				DPAA2_PMD_ERR("FS Extract add UDP_DST failed.");
2175 
2176 				return -1;
2177 			}
2178 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2179 		}
2180 
2181 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2182 		if (ret) {
2183 			DPAA2_PMD_ERR(
2184 				"Move ipaddr before UDP_PORT_DST set failed");
2185 			return -1;
2186 		}
2187 
2188 		ret = dpaa2_flow_rule_data_set(
2189 				&priv->extract.qos_key_extract,
2190 				&flow->qos_rule,
2191 				NET_PROT_UDP,
2192 				NH_FLD_UDP_PORT_DST,
2193 				&spec->hdr.dst_port,
2194 				&mask->hdr.dst_port,
2195 				NH_FLD_UDP_PORT_SIZE);
2196 		if (ret) {
2197 			DPAA2_PMD_ERR(
2198 				"QoS NH_FLD_UDP_PORT_DST rule data set failed");
2199 			return -1;
2200 		}
2201 
2202 		ret = dpaa2_flow_rule_data_set(
2203 				&priv->extract.tc_key_extract[group],
2204 				&flow->fs_rule,
2205 				NET_PROT_UDP,
2206 				NH_FLD_UDP_PORT_DST,
2207 				&spec->hdr.dst_port,
2208 				&mask->hdr.dst_port,
2209 				NH_FLD_UDP_PORT_SIZE);
2210 		if (ret) {
2211 			DPAA2_PMD_ERR(
2212 				"FS NH_FLD_UDP_PORT_DST rule data set failed");
2213 			return -1;
2214 		}
2215 	}
2216 
2217 	(*device_configured) |= local_cfg;
2218 
2219 	return 0;
2220 }
2221 
/* Configure QoS and FS extracts/rules for a TCP pattern item.
 *
 * If no spec is given, or if MC/WRIOP cannot identify the L4 protocol
 * from the port fields (!mc_l4_port_identification, see the note at the
 * top of this file), the flow is discriminated by the IP next-protocol
 * field (IPPROTO_TCP) instead. Otherwise the masked TCP source and/or
 * destination ports are added to both the QoS key and the per-TC FS key
 * and written into the flow's qos_rule/fs_rule.
 *
 * Returns 0 on success, -1 on any failure. Sets the
 * DPAA2_QOS/FS_TABLE_RECONFIGURE bits in *device_configured when new
 * extracts were appended and the tables must be rebuilt.
 */
static int
dpaa2_configure_flow_tcp(struct rte_flow *flow,
			 struct rte_eth_dev *dev,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_item *pattern,
			 const struct rte_flow_action actions[] __rte_unused,
			 struct rte_flow_error *error __rte_unused,
			 int *device_configured)
{
	int index, ret;
	/* Accumulates DPAA2_QOS/FS_TABLE_RECONFIGURE flags. */
	int local_cfg = 0;
	uint32_t group;
	const struct rte_flow_item_tcp *spec, *mask;

	const struct rte_flow_item_tcp *last __rte_unused;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	group = attr->group;

	/* Parse pattern list to get the matching parameters */
	spec    = (const struct rte_flow_item_tcp *)pattern->spec;
	last    = (const struct rte_flow_item_tcp *)pattern->last;
	mask    = (const struct rte_flow_item_tcp *)
		(pattern->mask ? pattern->mask : &dpaa2_flow_item_tcp_mask);

	/* Get traffic class index and flow id to be configured */
	flow->tc_id = group;
	flow->tc_index = attr->priority;

	if (!spec || !mc_l4_port_identification) {
		/* Discriminate TCP via the IP next-protocol field: make
		 * sure both the QoS and the FS (per-TC) keys extract it.
		 */
		struct proto_discrimination proto;

		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_IP, NH_FLD_IP_PROTO);
		if (index < 0) {
			/* Not extracted yet: append it to the QoS key. */
			ret = dpaa2_flow_proto_discrimination_extract(
					&priv->extract.qos_key_extract,
					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
			if (ret) {
				DPAA2_PMD_ERR(
					"QoS Extract IP protocol to discriminate TCP failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_IP, NH_FLD_IP_PROTO);
		if (index < 0) {
			/* Not extracted yet: append it to this TC's FS key. */
			ret = dpaa2_flow_proto_discrimination_extract(
				&priv->extract.tc_key_extract[group],
				DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
			if (ret) {
				DPAA2_PMD_ERR(
					"FS Extract IP protocol to discriminate TCP failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		/* IP address extract(s) are kept at the tail of the key;
		 * re-shift them after new extracts were inserted.
		 */
		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move IP addr before TCP discrimination set failed");
			return -1;
		}

		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
		proto.ip_proto = IPPROTO_TCP;
		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
							proto, group);
		if (ret) {
			DPAA2_PMD_ERR("TCP discrimination rule set failed");
			return -1;
		}

		(*device_configured) |= local_cfg;

		/* Without a spec there are no port fields to match. */
		if (!spec)
			return 0;
	}

	/* Reject masks that select TCP fields this driver cannot extract. */
	if (dpaa2_flow_extract_support((const uint8_t *)mask,
		RTE_FLOW_ITEM_TYPE_TCP)) {
		DPAA2_PMD_WARN("Extract field(s) of TCP not support.");

		return -1;
	}

	/* Match on the TCP source port. */
	if (mask->hdr.src_port) {
		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.qos_key_extract,
					NET_PROT_TCP,
					NH_FLD_TCP_PORT_SRC,
					NH_FLD_TCP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("QoS Extract add TCP_SRC failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.tc_key_extract[group],
					NET_PROT_TCP,
					NH_FLD_TCP_PORT_SRC,
					NH_FLD_TCP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("FS Extract add TCP_SRC failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move ipaddr before TCP_PORT_SRC set failed");
			return -1;
		}

		/* Write the spec/mask bytes into the QoS rule key. */
		ret = dpaa2_flow_rule_data_set(
				&priv->extract.qos_key_extract,
				&flow->qos_rule,
				NET_PROT_TCP,
				NH_FLD_TCP_PORT_SRC,
				&spec->hdr.src_port,
				&mask->hdr.src_port,
				NH_FLD_TCP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"QoS NH_FLD_TCP_PORT_SRC rule data set failed");
			return -1;
		}

		/* And into the FS rule key of this traffic class. */
		ret = dpaa2_flow_rule_data_set(
				&priv->extract.tc_key_extract[group],
				&flow->fs_rule,
				NET_PROT_TCP,
				NH_FLD_TCP_PORT_SRC,
				&spec->hdr.src_port,
				&mask->hdr.src_port,
				NH_FLD_TCP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"FS NH_FLD_TCP_PORT_SRC rule data set failed");
			return -1;
		}
	}

	/* Match on the TCP destination port; mirrors the source-port path. */
	if (mask->hdr.dst_port) {
		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.qos_key_extract,
					NET_PROT_TCP,
					NH_FLD_TCP_PORT_DST,
					NH_FLD_TCP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("QoS Extract add TCP_DST failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.tc_key_extract[group],
					NET_PROT_TCP,
					NH_FLD_TCP_PORT_DST,
					NH_FLD_TCP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("FS Extract add TCP_DST failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move ipaddr before TCP_PORT_DST set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.qos_key_extract,
				&flow->qos_rule,
				NET_PROT_TCP,
				NH_FLD_TCP_PORT_DST,
				&spec->hdr.dst_port,
				&mask->hdr.dst_port,
				NH_FLD_TCP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"QoS NH_FLD_TCP_PORT_DST rule data set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.tc_key_extract[group],
				&flow->fs_rule,
				NET_PROT_TCP,
				NH_FLD_TCP_PORT_DST,
				&spec->hdr.dst_port,
				&mask->hdr.dst_port,
				NH_FLD_TCP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"FS NH_FLD_TCP_PORT_DST rule data set failed");
			return -1;
		}
	}

	(*device_configured) |= local_cfg;

	return 0;
}
2461 
/* Configure QoS and FS extracts/rules for an SCTP pattern item.
 *
 * If no spec is given, or if MC/WRIOP cannot identify the L4 protocol
 * from the port fields (!mc_l4_port_identification, see the note at the
 * top of this file), the flow is discriminated by the IP next-protocol
 * field (IPPROTO_SCTP) instead. Otherwise the masked SCTP source and/or
 * destination ports are added to both the QoS key and the per-TC FS key
 * and written into the flow's qos_rule/fs_rule.
 *
 * Returns 0 on success, -1 on any failure. Sets the
 * DPAA2_QOS/FS_TABLE_RECONFIGURE bits in *device_configured when new
 * extracts were appended and the tables must be rebuilt.
 */
static int
dpaa2_configure_flow_sctp(struct rte_flow *flow,
			  struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item *pattern,
			  const struct rte_flow_action actions[] __rte_unused,
			  struct rte_flow_error *error __rte_unused,
			  int *device_configured)
{
	int index, ret;
	/* Accumulates DPAA2_QOS/FS_TABLE_RECONFIGURE flags. */
	int local_cfg = 0;
	uint32_t group;
	const struct rte_flow_item_sctp *spec, *mask;

	const struct rte_flow_item_sctp *last __rte_unused;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	group = attr->group;

	/* Parse pattern list to get the matching parameters */
	spec    = (const struct rte_flow_item_sctp *)pattern->spec;
	last    = (const struct rte_flow_item_sctp *)pattern->last;
	mask    = (const struct rte_flow_item_sctp *)
			(pattern->mask ? pattern->mask :
				&dpaa2_flow_item_sctp_mask);

	/* Get traffic class index and flow id to be configured */
	flow->tc_id = group;
	flow->tc_index = attr->priority;

	if (!spec || !mc_l4_port_identification) {
		/* Discriminate SCTP via the IP next-protocol field: make
		 * sure both the QoS and the FS (per-TC) keys extract it.
		 */
		struct proto_discrimination proto;

		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_IP, NH_FLD_IP_PROTO);
		if (index < 0) {
			/* Not extracted yet: append it to the QoS key. */
			ret = dpaa2_flow_proto_discrimination_extract(
					&priv->extract.qos_key_extract,
					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
			if (ret) {
				DPAA2_PMD_ERR(
					"QoS Extract IP protocol to discriminate SCTP failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_IP, NH_FLD_IP_PROTO);
		if (index < 0) {
			/* Not extracted yet: append it to this TC's FS key. */
			ret = dpaa2_flow_proto_discrimination_extract(
					&priv->extract.tc_key_extract[group],
					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
			if (ret) {
				DPAA2_PMD_ERR(
					"FS Extract IP protocol to discriminate SCTP failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		/* IP address extract(s) are kept at the tail of the key;
		 * re-shift them after new extracts were inserted.
		 */
		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move ipaddr before SCTP discrimination set failed");
			return -1;
		}

		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
		proto.ip_proto = IPPROTO_SCTP;
		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
							proto, group);
		if (ret) {
			DPAA2_PMD_ERR("SCTP discrimination rule set failed");
			return -1;
		}

		(*device_configured) |= local_cfg;

		/* Without a spec there are no port fields to match. */
		if (!spec)
			return 0;
	}

	/* Reject masks that select SCTP fields this driver cannot extract. */
	if (dpaa2_flow_extract_support((const uint8_t *)mask,
		RTE_FLOW_ITEM_TYPE_SCTP)) {
		DPAA2_PMD_WARN("Extract field(s) of SCTP not support.");

		return -1;
	}

	/* Match on the SCTP source port. */
	if (mask->hdr.src_port) {
		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.qos_key_extract,
					NET_PROT_SCTP,
					NH_FLD_SCTP_PORT_SRC,
					NH_FLD_SCTP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("QoS Extract add SCTP_SRC failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.tc_key_extract[group],
					NET_PROT_SCTP,
					NH_FLD_SCTP_PORT_SRC,
					NH_FLD_SCTP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("FS Extract add SCTP_SRC failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move ipaddr before SCTP_PORT_SRC set failed");
			return -1;
		}

		/* Write the spec/mask bytes into the QoS rule key. */
		ret = dpaa2_flow_rule_data_set(
				&priv->extract.qos_key_extract,
				&flow->qos_rule,
				NET_PROT_SCTP,
				NH_FLD_SCTP_PORT_SRC,
				&spec->hdr.src_port,
				&mask->hdr.src_port,
				NH_FLD_SCTP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"QoS NH_FLD_SCTP_PORT_SRC rule data set failed");
			return -1;
		}

		/* And into the FS rule key of this traffic class. */
		ret = dpaa2_flow_rule_data_set(
				&priv->extract.tc_key_extract[group],
				&flow->fs_rule,
				NET_PROT_SCTP,
				NH_FLD_SCTP_PORT_SRC,
				&spec->hdr.src_port,
				&mask->hdr.src_port,
				NH_FLD_SCTP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"FS NH_FLD_SCTP_PORT_SRC rule data set failed");
			return -1;
		}
	}

	/* Match on the SCTP destination port; mirrors the source-port path. */
	if (mask->hdr.dst_port) {
		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.qos_key_extract,
					NET_PROT_SCTP,
					NH_FLD_SCTP_PORT_DST,
					NH_FLD_SCTP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("QoS Extract add SCTP_DST failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.tc_key_extract[group],
					NET_PROT_SCTP,
					NH_FLD_SCTP_PORT_DST,
					NH_FLD_SCTP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("FS Extract add SCTP_DST failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move ipaddr before SCTP_PORT_DST set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.qos_key_extract,
				&flow->qos_rule,
				NET_PROT_SCTP,
				NH_FLD_SCTP_PORT_DST,
				&spec->hdr.dst_port,
				&mask->hdr.dst_port,
				NH_FLD_SCTP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"QoS NH_FLD_SCTP_PORT_DST rule data set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.tc_key_extract[group],
				&flow->fs_rule,
				NET_PROT_SCTP,
				NH_FLD_SCTP_PORT_DST,
				&spec->hdr.dst_port,
				&mask->hdr.dst_port,
				NH_FLD_SCTP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"FS NH_FLD_SCTP_PORT_DST rule data set failed");
			return -1;
		}
	}

	(*device_configured) |= local_cfg;

	return 0;
}
2702 
2703 static int
2704 dpaa2_configure_flow_gre(struct rte_flow *flow,
2705 			 struct rte_eth_dev *dev,
2706 			 const struct rte_flow_attr *attr,
2707 			 const struct rte_flow_item *pattern,
2708 			 const struct rte_flow_action actions[] __rte_unused,
2709 			 struct rte_flow_error *error __rte_unused,
2710 			 int *device_configured)
2711 {
2712 	int index, ret;
2713 	int local_cfg = 0;
2714 	uint32_t group;
2715 	const struct rte_flow_item_gre *spec, *mask;
2716 
2717 	const struct rte_flow_item_gre *last __rte_unused;
2718 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2719 
2720 	group = attr->group;
2721 
2722 	/* Parse pattern list to get the matching parameters */
2723 	spec    = (const struct rte_flow_item_gre *)pattern->spec;
2724 	last    = (const struct rte_flow_item_gre *)pattern->last;
2725 	mask    = (const struct rte_flow_item_gre *)
2726 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_gre_mask);
2727 
2728 	/* Get traffic class index and flow id to be configured */
2729 	flow->tc_id = group;
2730 	flow->tc_index = attr->priority;
2731 
2732 	if (!spec) {
2733 		struct proto_discrimination proto;
2734 
2735 		index = dpaa2_flow_extract_search(
2736 				&priv->extract.qos_key_extract.dpkg,
2737 				NET_PROT_IP, NH_FLD_IP_PROTO);
2738 		if (index < 0) {
2739 			ret = dpaa2_flow_proto_discrimination_extract(
2740 					&priv->extract.qos_key_extract,
2741 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2742 			if (ret) {
2743 				DPAA2_PMD_ERR(
2744 					"QoS Extract IP protocol to discriminate GRE failed.");
2745 
2746 				return -1;
2747 			}
2748 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2749 		}
2750 
2751 		index = dpaa2_flow_extract_search(
2752 				&priv->extract.tc_key_extract[group].dpkg,
2753 				NET_PROT_IP, NH_FLD_IP_PROTO);
2754 		if (index < 0) {
2755 			ret = dpaa2_flow_proto_discrimination_extract(
2756 					&priv->extract.tc_key_extract[group],
2757 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2758 			if (ret) {
2759 				DPAA2_PMD_ERR(
2760 					"FS Extract IP protocol to discriminate GRE failed.");
2761 
2762 				return -1;
2763 			}
2764 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2765 		}
2766 
2767 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2768 		if (ret) {
2769 			DPAA2_PMD_ERR(
2770 				"Move IP addr before GRE discrimination set failed");
2771 			return -1;
2772 		}
2773 
2774 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2775 		proto.ip_proto = IPPROTO_GRE;
2776 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2777 							proto, group);
2778 		if (ret) {
2779 			DPAA2_PMD_ERR("GRE discrimination rule set failed");
2780 			return -1;
2781 		}
2782 
2783 		(*device_configured) |= local_cfg;
2784 
2785 		return 0;
2786 	}
2787 
2788 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2789 		RTE_FLOW_ITEM_TYPE_GRE)) {
2790 		DPAA2_PMD_WARN("Extract field(s) of GRE not support.");
2791 
2792 		return -1;
2793 	}
2794 
2795 	if (!mask->protocol)
2796 		return 0;
2797 
2798 	index = dpaa2_flow_extract_search(
2799 			&priv->extract.qos_key_extract.dpkg,
2800 			NET_PROT_GRE, NH_FLD_GRE_TYPE);
2801 	if (index < 0) {
2802 		ret = dpaa2_flow_extract_add(
2803 				&priv->extract.qos_key_extract,
2804 				NET_PROT_GRE,
2805 				NH_FLD_GRE_TYPE,
2806 				sizeof(rte_be16_t));
2807 		if (ret) {
2808 			DPAA2_PMD_ERR("QoS Extract add GRE_TYPE failed.");
2809 
2810 			return -1;
2811 		}
2812 		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2813 	}
2814 
2815 	index = dpaa2_flow_extract_search(
2816 			&priv->extract.tc_key_extract[group].dpkg,
2817 			NET_PROT_GRE, NH_FLD_GRE_TYPE);
2818 	if (index < 0) {
2819 		ret = dpaa2_flow_extract_add(
2820 				&priv->extract.tc_key_extract[group],
2821 				NET_PROT_GRE,
2822 				NH_FLD_GRE_TYPE,
2823 				sizeof(rte_be16_t));
2824 		if (ret) {
2825 			DPAA2_PMD_ERR("FS Extract add GRE_TYPE failed.");
2826 
2827 			return -1;
2828 		}
2829 		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2830 	}
2831 
2832 	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2833 	if (ret) {
2834 		DPAA2_PMD_ERR(
2835 			"Move ipaddr before GRE_TYPE set failed");
2836 		return -1;
2837 	}
2838 
2839 	ret = dpaa2_flow_rule_data_set(
2840 				&priv->extract.qos_key_extract,
2841 				&flow->qos_rule,
2842 				NET_PROT_GRE,
2843 				NH_FLD_GRE_TYPE,
2844 				&spec->protocol,
2845 				&mask->protocol,
2846 				sizeof(rte_be16_t));
2847 	if (ret) {
2848 		DPAA2_PMD_ERR(
2849 			"QoS NH_FLD_GRE_TYPE rule data set failed");
2850 		return -1;
2851 	}
2852 
2853 	ret = dpaa2_flow_rule_data_set(
2854 			&priv->extract.tc_key_extract[group],
2855 			&flow->fs_rule,
2856 			NET_PROT_GRE,
2857 			NH_FLD_GRE_TYPE,
2858 			&spec->protocol,
2859 			&mask->protocol,
2860 			sizeof(rte_be16_t));
2861 	if (ret) {
2862 		DPAA2_PMD_ERR(
2863 			"FS NH_FLD_GRE_TYPE rule data set failed");
2864 		return -1;
2865 	}
2866 
2867 	(*device_configured) |= local_cfg;
2868 
2869 	return 0;
2870 }
2871 
2872 static int
2873 dpaa2_configure_flow_raw(struct rte_flow *flow,
2874 			 struct rte_eth_dev *dev,
2875 			 const struct rte_flow_attr *attr,
2876 			 const struct rte_flow_item *pattern,
2877 			 const struct rte_flow_action actions[] __rte_unused,
2878 			 struct rte_flow_error *error __rte_unused,
2879 			 int *device_configured)
2880 {
2881 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2882 	const struct rte_flow_item_raw *spec = pattern->spec;
2883 	const struct rte_flow_item_raw *mask = pattern->mask;
2884 	int prev_key_size =
2885 		priv->extract.qos_key_extract.key_info.key_total_size;
2886 	int local_cfg = 0, ret;
2887 	uint32_t group;
2888 
2889 	/* Need both spec and mask */
2890 	if (!spec || !mask) {
2891 		DPAA2_PMD_ERR("spec or mask not present.");
2892 		return -EINVAL;
2893 	}
2894 	/* Only supports non-relative with offset 0 */
2895 	if (spec->relative || spec->offset != 0 ||
2896 	    spec->search || spec->limit) {
2897 		DPAA2_PMD_ERR("relative and non zero offset not supported.");
2898 		return -EINVAL;
2899 	}
2900 	/* Spec len and mask len should be same */
2901 	if (spec->length != mask->length) {
2902 		DPAA2_PMD_ERR("Spec len and mask len mismatch.");
2903 		return -EINVAL;
2904 	}
2905 
2906 	/* Get traffic class index and flow id to be configured */
2907 	group = attr->group;
2908 	flow->tc_id = group;
2909 	flow->tc_index = attr->priority;
2910 
2911 	if (prev_key_size <= spec->length) {
2912 		ret = dpaa2_flow_extract_add_raw(&priv->extract.qos_key_extract,
2913 						 spec->length);
2914 		if (ret) {
2915 			DPAA2_PMD_ERR("QoS Extract RAW add failed.");
2916 			return -1;
2917 		}
2918 		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2919 
2920 		ret = dpaa2_flow_extract_add_raw(
2921 					&priv->extract.tc_key_extract[group],
2922 					spec->length);
2923 		if (ret) {
2924 			DPAA2_PMD_ERR("FS Extract RAW add failed.");
2925 			return -1;
2926 		}
2927 		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2928 	}
2929 
2930 	ret = dpaa2_flow_rule_data_set_raw(&flow->qos_rule, spec->pattern,
2931 					   mask->pattern, spec->length);
2932 	if (ret) {
2933 		DPAA2_PMD_ERR("QoS RAW rule data set failed");
2934 		return -1;
2935 	}
2936 
2937 	ret = dpaa2_flow_rule_data_set_raw(&flow->fs_rule, spec->pattern,
2938 					   mask->pattern, spec->length);
2939 	if (ret) {
2940 		DPAA2_PMD_ERR("FS RAW rule data set failed");
2941 		return -1;
2942 	}
2943 
2944 	(*device_configured) |= local_cfg;
2945 
2946 	return 0;
2947 }
2948 
2949 static inline int
2950 dpaa2_fs_action_supported(enum rte_flow_action_type action)
2951 {
2952 	int i;
2953 
2954 	for (i = 0; i < (int)(sizeof(dpaa2_supported_fs_action_type) /
2955 					sizeof(enum rte_flow_action_type)); i++) {
2956 		if (action == dpaa2_supported_fs_action_type[i])
2957 			return 1;
2958 	}
2959 
2960 	return 0;
2961 }
2962 /* The existing QoS/FS entry with IP address(es)
2963  * needs update after
2964  * new extract(s) are inserted before IP
2965  * address(es) extract(s).
2966  */
2967 static int
2968 dpaa2_flow_entry_update(
2969 	struct dpaa2_dev_priv *priv, uint8_t tc_id)
2970 {
2971 	struct rte_flow *curr = LIST_FIRST(&priv->flows);
2972 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
2973 	int ret;
2974 	int qos_ipsrc_offset = -1, qos_ipdst_offset = -1;
2975 	int fs_ipsrc_offset = -1, fs_ipdst_offset = -1;
2976 	struct dpaa2_key_extract *qos_key_extract =
2977 		&priv->extract.qos_key_extract;
2978 	struct dpaa2_key_extract *tc_key_extract =
2979 		&priv->extract.tc_key_extract[tc_id];
2980 	char ipsrc_key[NH_FLD_IPV6_ADDR_SIZE];
2981 	char ipdst_key[NH_FLD_IPV6_ADDR_SIZE];
2982 	char ipsrc_mask[NH_FLD_IPV6_ADDR_SIZE];
2983 	char ipdst_mask[NH_FLD_IPV6_ADDR_SIZE];
2984 	int extend = -1, extend1, size = -1;
2985 	uint16_t qos_index;
2986 
2987 	while (curr) {
2988 		if (curr->ipaddr_rule.ipaddr_type ==
2989 			FLOW_NONE_IPADDR) {
2990 			curr = LIST_NEXT(curr, next);
2991 			continue;
2992 		}
2993 
2994 		if (curr->ipaddr_rule.ipaddr_type ==
2995 			FLOW_IPV4_ADDR) {
2996 			qos_ipsrc_offset =
2997 				qos_key_extract->key_info.ipv4_src_offset;
2998 			qos_ipdst_offset =
2999 				qos_key_extract->key_info.ipv4_dst_offset;
3000 			fs_ipsrc_offset =
3001 				tc_key_extract->key_info.ipv4_src_offset;
3002 			fs_ipdst_offset =
3003 				tc_key_extract->key_info.ipv4_dst_offset;
3004 			size = NH_FLD_IPV4_ADDR_SIZE;
3005 		} else {
3006 			qos_ipsrc_offset =
3007 				qos_key_extract->key_info.ipv6_src_offset;
3008 			qos_ipdst_offset =
3009 				qos_key_extract->key_info.ipv6_dst_offset;
3010 			fs_ipsrc_offset =
3011 				tc_key_extract->key_info.ipv6_src_offset;
3012 			fs_ipdst_offset =
3013 				tc_key_extract->key_info.ipv6_dst_offset;
3014 			size = NH_FLD_IPV6_ADDR_SIZE;
3015 		}
3016 
3017 		qos_index = curr->tc_id * priv->fs_entries +
3018 			curr->tc_index;
3019 
3020 		dpaa2_flow_qos_entry_log("Before update", curr, qos_index);
3021 
3022 		if (priv->num_rx_tc > 1) {
3023 			ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW,
3024 					priv->token, &curr->qos_rule);
3025 			if (ret) {
3026 				DPAA2_PMD_ERR("Qos entry remove failed.");
3027 				return -1;
3028 			}
3029 		}
3030 
3031 		extend = -1;
3032 
3033 		if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
3034 			RTE_ASSERT(qos_ipsrc_offset >=
3035 				curr->ipaddr_rule.qos_ipsrc_offset);
3036 			extend1 = qos_ipsrc_offset -
3037 				curr->ipaddr_rule.qos_ipsrc_offset;
3038 			if (extend >= 0)
3039 				RTE_ASSERT(extend == extend1);
3040 			else
3041 				extend = extend1;
3042 
3043 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3044 				(size == NH_FLD_IPV6_ADDR_SIZE));
3045 
3046 			memcpy(ipsrc_key,
3047 				(char *)(size_t)curr->qos_rule.key_iova +
3048 				curr->ipaddr_rule.qos_ipsrc_offset,
3049 				size);
3050 			memset((char *)(size_t)curr->qos_rule.key_iova +
3051 				curr->ipaddr_rule.qos_ipsrc_offset,
3052 				0, size);
3053 
3054 			memcpy(ipsrc_mask,
3055 				(char *)(size_t)curr->qos_rule.mask_iova +
3056 				curr->ipaddr_rule.qos_ipsrc_offset,
3057 				size);
3058 			memset((char *)(size_t)curr->qos_rule.mask_iova +
3059 				curr->ipaddr_rule.qos_ipsrc_offset,
3060 				0, size);
3061 
3062 			curr->ipaddr_rule.qos_ipsrc_offset = qos_ipsrc_offset;
3063 		}
3064 
3065 		if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
3066 			RTE_ASSERT(qos_ipdst_offset >=
3067 				curr->ipaddr_rule.qos_ipdst_offset);
3068 			extend1 = qos_ipdst_offset -
3069 				curr->ipaddr_rule.qos_ipdst_offset;
3070 			if (extend >= 0)
3071 				RTE_ASSERT(extend == extend1);
3072 			else
3073 				extend = extend1;
3074 
3075 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3076 				(size == NH_FLD_IPV6_ADDR_SIZE));
3077 
3078 			memcpy(ipdst_key,
3079 				(char *)(size_t)curr->qos_rule.key_iova +
3080 				curr->ipaddr_rule.qos_ipdst_offset,
3081 				size);
3082 			memset((char *)(size_t)curr->qos_rule.key_iova +
3083 				curr->ipaddr_rule.qos_ipdst_offset,
3084 				0, size);
3085 
3086 			memcpy(ipdst_mask,
3087 				(char *)(size_t)curr->qos_rule.mask_iova +
3088 				curr->ipaddr_rule.qos_ipdst_offset,
3089 				size);
3090 			memset((char *)(size_t)curr->qos_rule.mask_iova +
3091 				curr->ipaddr_rule.qos_ipdst_offset,
3092 				0, size);
3093 
3094 			curr->ipaddr_rule.qos_ipdst_offset = qos_ipdst_offset;
3095 		}
3096 
3097 		if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
3098 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3099 				(size == NH_FLD_IPV6_ADDR_SIZE));
3100 			memcpy((char *)(size_t)curr->qos_rule.key_iova +
3101 				curr->ipaddr_rule.qos_ipsrc_offset,
3102 				ipsrc_key,
3103 				size);
3104 			memcpy((char *)(size_t)curr->qos_rule.mask_iova +
3105 				curr->ipaddr_rule.qos_ipsrc_offset,
3106 				ipsrc_mask,
3107 				size);
3108 		}
3109 		if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
3110 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3111 				(size == NH_FLD_IPV6_ADDR_SIZE));
3112 			memcpy((char *)(size_t)curr->qos_rule.key_iova +
3113 				curr->ipaddr_rule.qos_ipdst_offset,
3114 				ipdst_key,
3115 				size);
3116 			memcpy((char *)(size_t)curr->qos_rule.mask_iova +
3117 				curr->ipaddr_rule.qos_ipdst_offset,
3118 				ipdst_mask,
3119 				size);
3120 		}
3121 
3122 		if (extend >= 0)
3123 			curr->qos_real_key_size += extend;
3124 
3125 		curr->qos_rule.key_size = FIXED_ENTRY_SIZE;
3126 
3127 		dpaa2_flow_qos_entry_log("Start update", curr, qos_index);
3128 
3129 		if (priv->num_rx_tc > 1) {
3130 			ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
3131 					priv->token, &curr->qos_rule,
3132 					curr->tc_id, qos_index,
3133 					0, 0);
3134 			if (ret) {
3135 				DPAA2_PMD_ERR("Qos entry update failed.");
3136 				return -1;
3137 			}
3138 		}
3139 
3140 		if (!dpaa2_fs_action_supported(curr->action)) {
3141 			curr = LIST_NEXT(curr, next);
3142 			continue;
3143 		}
3144 
3145 		dpaa2_flow_fs_entry_log("Before update", curr);
3146 		extend = -1;
3147 
3148 		ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW,
3149 				priv->token, curr->tc_id, &curr->fs_rule);
3150 		if (ret) {
3151 			DPAA2_PMD_ERR("FS entry remove failed.");
3152 			return -1;
3153 		}
3154 
3155 		if (curr->ipaddr_rule.fs_ipsrc_offset >= 0 &&
3156 			tc_id == curr->tc_id) {
3157 			RTE_ASSERT(fs_ipsrc_offset >=
3158 				curr->ipaddr_rule.fs_ipsrc_offset);
3159 			extend1 = fs_ipsrc_offset -
3160 				curr->ipaddr_rule.fs_ipsrc_offset;
3161 			if (extend >= 0)
3162 				RTE_ASSERT(extend == extend1);
3163 			else
3164 				extend = extend1;
3165 
3166 			memcpy(ipsrc_key,
3167 				(char *)(size_t)curr->fs_rule.key_iova +
3168 				curr->ipaddr_rule.fs_ipsrc_offset,
3169 				size);
3170 			memset((char *)(size_t)curr->fs_rule.key_iova +
3171 				curr->ipaddr_rule.fs_ipsrc_offset,
3172 				0, size);
3173 
3174 			memcpy(ipsrc_mask,
3175 				(char *)(size_t)curr->fs_rule.mask_iova +
3176 				curr->ipaddr_rule.fs_ipsrc_offset,
3177 				size);
3178 			memset((char *)(size_t)curr->fs_rule.mask_iova +
3179 				curr->ipaddr_rule.fs_ipsrc_offset,
3180 				0, size);
3181 
3182 			curr->ipaddr_rule.fs_ipsrc_offset = fs_ipsrc_offset;
3183 		}
3184 
3185 		if (curr->ipaddr_rule.fs_ipdst_offset >= 0 &&
3186 			tc_id == curr->tc_id) {
3187 			RTE_ASSERT(fs_ipdst_offset >=
3188 				curr->ipaddr_rule.fs_ipdst_offset);
3189 			extend1 = fs_ipdst_offset -
3190 				curr->ipaddr_rule.fs_ipdst_offset;
3191 			if (extend >= 0)
3192 				RTE_ASSERT(extend == extend1);
3193 			else
3194 				extend = extend1;
3195 
3196 			memcpy(ipdst_key,
3197 				(char *)(size_t)curr->fs_rule.key_iova +
3198 				curr->ipaddr_rule.fs_ipdst_offset,
3199 				size);
3200 			memset((char *)(size_t)curr->fs_rule.key_iova +
3201 				curr->ipaddr_rule.fs_ipdst_offset,
3202 				0, size);
3203 
3204 			memcpy(ipdst_mask,
3205 				(char *)(size_t)curr->fs_rule.mask_iova +
3206 				curr->ipaddr_rule.fs_ipdst_offset,
3207 				size);
3208 			memset((char *)(size_t)curr->fs_rule.mask_iova +
3209 				curr->ipaddr_rule.fs_ipdst_offset,
3210 				0, size);
3211 
3212 			curr->ipaddr_rule.fs_ipdst_offset = fs_ipdst_offset;
3213 		}
3214 
3215 		if (curr->ipaddr_rule.fs_ipsrc_offset >= 0) {
3216 			memcpy((char *)(size_t)curr->fs_rule.key_iova +
3217 				curr->ipaddr_rule.fs_ipsrc_offset,
3218 				ipsrc_key,
3219 				size);
3220 			memcpy((char *)(size_t)curr->fs_rule.mask_iova +
3221 				curr->ipaddr_rule.fs_ipsrc_offset,
3222 				ipsrc_mask,
3223 				size);
3224 		}
3225 		if (curr->ipaddr_rule.fs_ipdst_offset >= 0) {
3226 			memcpy((char *)(size_t)curr->fs_rule.key_iova +
3227 				curr->ipaddr_rule.fs_ipdst_offset,
3228 				ipdst_key,
3229 				size);
3230 			memcpy((char *)(size_t)curr->fs_rule.mask_iova +
3231 				curr->ipaddr_rule.fs_ipdst_offset,
3232 				ipdst_mask,
3233 				size);
3234 		}
3235 
3236 		if (extend >= 0)
3237 			curr->fs_real_key_size += extend;
3238 		curr->fs_rule.key_size = FIXED_ENTRY_SIZE;
3239 
3240 		dpaa2_flow_fs_entry_log("Start update", curr);
3241 
3242 		ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW,
3243 				priv->token, curr->tc_id, curr->tc_index,
3244 				&curr->fs_rule, &curr->action_cfg);
3245 		if (ret) {
3246 			DPAA2_PMD_ERR("FS entry update failed.");
3247 			return -1;
3248 		}
3249 
3250 		curr = LIST_NEXT(curr, next);
3251 	}
3252 
3253 	return 0;
3254 }
3255 
3256 static inline int
3257 dpaa2_flow_verify_attr(
3258 	struct dpaa2_dev_priv *priv,
3259 	const struct rte_flow_attr *attr)
3260 {
3261 	struct rte_flow *curr = LIST_FIRST(&priv->flows);
3262 
3263 	while (curr) {
3264 		if (curr->tc_id == attr->group &&
3265 			curr->tc_index == attr->priority) {
3266 			DPAA2_PMD_ERR(
3267 				"Flow with group %d and priority %d already exists.",
3268 				attr->group, attr->priority);
3269 
3270 			return -1;
3271 		}
3272 		curr = LIST_NEXT(curr, next);
3273 	}
3274 
3275 	return 0;
3276 }
3277 
3278 static inline struct rte_eth_dev *
3279 dpaa2_flow_redirect_dev(struct dpaa2_dev_priv *priv,
3280 	const struct rte_flow_action *action)
3281 {
3282 	const struct rte_flow_action_port_id *port_id;
3283 	int idx = -1;
3284 	struct rte_eth_dev *dest_dev;
3285 
3286 	if (action->type == RTE_FLOW_ACTION_TYPE_PORT_ID) {
3287 		port_id = (const struct rte_flow_action_port_id *)
3288 					action->conf;
3289 		if (!port_id->original)
3290 			idx = port_id->id;
3291 	} else if (action->type == RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT) {
3292 		const struct rte_flow_action_ethdev *ethdev;
3293 
3294 		ethdev = (const struct rte_flow_action_ethdev *)action->conf;
3295 		idx = ethdev->port_id;
3296 	} else {
3297 		return NULL;
3298 	}
3299 
3300 	if (idx >= 0) {
3301 		if (!rte_eth_dev_is_valid_port(idx))
3302 			return NULL;
3303 		dest_dev = &rte_eth_devices[idx];
3304 	} else {
3305 		dest_dev = priv->eth_dev;
3306 	}
3307 
3308 	if (!dpaa2_dev_is_dpaa2(dest_dev))
3309 		return NULL;
3310 
3311 	return dest_dev;
3312 }
3313 
3314 static inline int
3315 dpaa2_flow_verify_action(
3316 	struct dpaa2_dev_priv *priv,
3317 	const struct rte_flow_attr *attr,
3318 	const struct rte_flow_action actions[])
3319 {
3320 	int end_of_list = 0, i, j = 0;
3321 	const struct rte_flow_action_queue *dest_queue;
3322 	const struct rte_flow_action_rss *rss_conf;
3323 	struct dpaa2_queue *rxq;
3324 
3325 	while (!end_of_list) {
3326 		switch (actions[j].type) {
3327 		case RTE_FLOW_ACTION_TYPE_QUEUE:
3328 			dest_queue = (const struct rte_flow_action_queue *)
3329 					(actions[j].conf);
3330 			rxq = priv->rx_vq[dest_queue->index];
3331 			if (attr->group != rxq->tc_index) {
3332 				DPAA2_PMD_ERR(
3333 					"RXQ[%d] does not belong to the group %d",
3334 					dest_queue->index, attr->group);
3335 
3336 				return -1;
3337 			}
3338 			break;
3339 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
3340 		case RTE_FLOW_ACTION_TYPE_PORT_ID:
3341 			if (!dpaa2_flow_redirect_dev(priv, &actions[j])) {
3342 				DPAA2_PMD_ERR("Invalid port id of action");
3343 				return -ENOTSUP;
3344 			}
3345 			break;
3346 		case RTE_FLOW_ACTION_TYPE_RSS:
3347 			rss_conf = (const struct rte_flow_action_rss *)
3348 					(actions[j].conf);
3349 			if (rss_conf->queue_num > priv->dist_queues) {
3350 				DPAA2_PMD_ERR(
3351 					"RSS number exceeds the distribution size");
3352 				return -ENOTSUP;
3353 			}
3354 			for (i = 0; i < (int)rss_conf->queue_num; i++) {
3355 				if (rss_conf->queue[i] >= priv->nb_rx_queues) {
3356 					DPAA2_PMD_ERR(
3357 						"RSS queue index exceeds the number of RXQs");
3358 					return -ENOTSUP;
3359 				}
3360 				rxq = priv->rx_vq[rss_conf->queue[i]];
3361 				if (rxq->tc_index != attr->group) {
3362 					DPAA2_PMD_ERR(
3363 						"Queue/Group combination are not supported\n");
3364 					return -ENOTSUP;
3365 				}
3366 			}
3367 
3368 			break;
3369 		case RTE_FLOW_ACTION_TYPE_END:
3370 			end_of_list = 1;
3371 			break;
3372 		default:
3373 			DPAA2_PMD_ERR("Invalid action type");
3374 			return -ENOTSUP;
3375 		}
3376 		j++;
3377 	}
3378 
3379 	return 0;
3380 }
3381 
/*
 * Build a flow rule from the (attr, pattern, actions) triple and program
 * it into the DPNI hardware tables.
 *
 * Pattern items are parsed first (each dpaa2_configure_flow_* helper fills
 * in the QoS/FS rule key and may flag a key-config change through
 * is_keycfg_configured).  The action list is then applied: QUEUE and
 * redirect actions program a QoS entry (when more than one TC exists) plus
 * an FS entry; RSS reconfigures the traffic-class distribution.  Finally,
 * when the extract key layout changed, all previously installed entries
 * are re-synchronized via dpaa2_flow_entry_update() and the flow is
 * appended to the tail of priv->flows.
 *
 * Returns 0 on success, a negative value on failure.
 */
static int
dpaa2_generic_flow_set(struct rte_flow *flow,
		       struct rte_eth_dev *dev,
		       const struct rte_flow_attr *attr,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[],
		       struct rte_flow_error *error)
{
	const struct rte_flow_action_queue *dest_queue;
	const struct rte_flow_action_rss *rss_conf;
	int is_keycfg_configured = 0, end_of_list = 0;
	int ret = 0, i = 0, j = 0;
	struct dpni_rx_dist_cfg tc_cfg;
	struct dpni_qos_tbl_cfg qos_cfg;
	struct dpni_fs_action_cfg action;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dest_q;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	size_t param;
	struct rte_flow *curr = LIST_FIRST(&priv->flows);
	uint16_t qos_index;
	struct rte_eth_dev *dest_dev;
	struct dpaa2_dev_priv *dest_priv;

	/* Reject duplicate (group, priority) slots and invalid actions
	 * before touching any hardware state.
	 */
	ret = dpaa2_flow_verify_attr(priv, attr);
	if (ret)
		return ret;

	ret = dpaa2_flow_verify_action(priv, attr, actions);
	if (ret)
		return ret;

	/* Parse pattern list to get the matching parameters */
	while (!end_of_list) {
		switch (pattern[i].type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			ret = dpaa2_configure_flow_eth(flow,
					dev, attr, &pattern[i], actions, error,
					&is_keycfg_configured);
			if (ret) {
				DPAA2_PMD_ERR("ETH flow configuration failed!");
				return ret;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			ret = dpaa2_configure_flow_vlan(flow,
					dev, attr, &pattern[i], actions, error,
					&is_keycfg_configured);
			if (ret) {
				DPAA2_PMD_ERR("vLan flow configuration failed!");
				return ret;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ret = dpaa2_configure_flow_generic_ip(flow,
					dev, attr, &pattern[i], actions, error,
					&is_keycfg_configured);
			if (ret) {
				DPAA2_PMD_ERR("IP flow configuration failed!");
				return ret;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP:
			ret = dpaa2_configure_flow_icmp(flow,
					dev, attr, &pattern[i], actions, error,
					&is_keycfg_configured);
			if (ret) {
				DPAA2_PMD_ERR("ICMP flow configuration failed!");
				return ret;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = dpaa2_configure_flow_udp(flow,
					dev, attr, &pattern[i], actions, error,
					&is_keycfg_configured);
			if (ret) {
				DPAA2_PMD_ERR("UDP flow configuration failed!");
				return ret;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			ret = dpaa2_configure_flow_tcp(flow,
					dev, attr, &pattern[i], actions, error,
					&is_keycfg_configured);
			if (ret) {
				DPAA2_PMD_ERR("TCP flow configuration failed!");
				return ret;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_SCTP:
			ret = dpaa2_configure_flow_sctp(flow,
					dev, attr, &pattern[i], actions, error,
					&is_keycfg_configured);
			if (ret) {
				DPAA2_PMD_ERR("SCTP flow configuration failed!");
				return ret;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			ret = dpaa2_configure_flow_gre(flow,
					dev, attr, &pattern[i], actions, error,
					&is_keycfg_configured);
			if (ret) {
				DPAA2_PMD_ERR("GRE flow configuration failed!");
				return ret;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_RAW:
			ret = dpaa2_configure_flow_raw(flow,
						       dev, attr, &pattern[i],
						       actions, error,
						       &is_keycfg_configured);
			if (ret) {
				DPAA2_PMD_ERR("RAW flow configuration failed!");
				return ret;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_END:
			end_of_list = 1;
			break; /*End of List*/
		default:
			DPAA2_PMD_ERR("Invalid action type");
			ret = -ENOTSUP;
			break;
		}
		i++;
	}

	/* Let's parse action on matching traffic */
	end_of_list = 0;
	while (!end_of_list) {
		switch (actions[j].type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE:
		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
		case RTE_FLOW_ACTION_TYPE_PORT_ID:
			memset(&action, 0, sizeof(struct dpni_fs_action_cfg));
			flow->action = actions[j].type;

			if (actions[j].type == RTE_FLOW_ACTION_TYPE_QUEUE) {
				dest_queue = (const struct rte_flow_action_queue *)
								(actions[j].conf);
				dest_q = priv->rx_vq[dest_queue->index];
				action.flow_id = dest_q->flow_id;
			} else {
				/* Redirect to another dpaa2 port's TX queue. */
				dest_dev = dpaa2_flow_redirect_dev(priv,
								   &actions[j]);
				if (!dest_dev) {
					DPAA2_PMD_ERR("Invalid destination device to redirect!");
					return -1;
				}

				dest_priv = dest_dev->data->dev_private;
				dest_q = dest_priv->tx_vq[0];
				action.options =
						DPNI_FS_OPT_REDIRECT_TO_DPNI_TX;
				action.redirect_obj_token = dest_priv->token;
				action.flow_id = dest_q->flow_id;
			}

			/* Configure FS table first*/
			if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
				dpaa2_flow_fs_table_extracts_log(priv, flow->tc_id);
				if (dpkg_prepare_key_cfg(
				&priv->extract.tc_key_extract[flow->tc_id].dpkg,
				(uint8_t *)(size_t)priv->extract
				.tc_extract_param[flow->tc_id]) < 0) {
					DPAA2_PMD_ERR(
					"Unable to prepare extract parameters");
					return -1;
				}

				/* Hash dist must be disabled for this TC
				 * before the exact-match (FS) distribution
				 * can be (re)enabled with the new key.
				 */
				memset(&tc_cfg, 0,
					sizeof(struct dpni_rx_dist_cfg));
				tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc;
				tc_cfg.key_cfg_iova =
					(uint64_t)priv->extract.tc_extract_param[flow->tc_id];
				tc_cfg.tc = flow->tc_id;
				tc_cfg.enable = false;
				ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW,
						priv->token, &tc_cfg);
				if (ret < 0) {
					DPAA2_PMD_ERR(
						"TC hash cannot be disabled.(%d)",
						ret);
					return -1;
				}
				tc_cfg.enable = true;
				tc_cfg.fs_miss_flow_id = dpaa2_flow_miss_flow_id;
				ret = dpni_set_rx_fs_dist(dpni, CMD_PRI_LOW,
							 priv->token, &tc_cfg);
				if (ret < 0) {
					DPAA2_PMD_ERR(
						"TC distribution cannot be configured.(%d)",
						ret);
					return -1;
				}
			}

			/* Configure QoS table then.*/
			if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
				dpaa2_flow_qos_table_extracts_log(priv);
				if (dpkg_prepare_key_cfg(
					&priv->extract.qos_key_extract.dpkg,
					(uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
					DPAA2_PMD_ERR(
						"Unable to prepare extract parameters");
					return -1;
				}

				memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
				qos_cfg.discard_on_miss = false;
				qos_cfg.default_tc = 0;
				qos_cfg.keep_entries = true;
				qos_cfg.key_cfg_iova =
					(size_t)priv->extract.qos_extract_param;
				/* QoS table is effective for multiple TCs. */
				if (priv->num_rx_tc > 1) {
					ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
						priv->token, &qos_cfg);
					if (ret < 0) {
						DPAA2_PMD_ERR(
						"RSS QoS table can not be configured(%d)\n",
							ret);
						return -1;
					}
				}
			}

			/* The real key size may exceed the extract total when
			 * an IP address field was placed at the key tail; take
			 * the larger of the src/dst end offsets in that case.
			 */
			flow->qos_real_key_size = priv->extract
				.qos_key_extract.key_info.key_total_size;
			if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR) {
				if (flow->ipaddr_rule.qos_ipdst_offset >=
					flow->ipaddr_rule.qos_ipsrc_offset) {
					flow->qos_real_key_size =
						flow->ipaddr_rule.qos_ipdst_offset +
						NH_FLD_IPV4_ADDR_SIZE;
				} else {
					flow->qos_real_key_size =
						flow->ipaddr_rule.qos_ipsrc_offset +
						NH_FLD_IPV4_ADDR_SIZE;
				}
			} else if (flow->ipaddr_rule.ipaddr_type ==
				FLOW_IPV6_ADDR) {
				if (flow->ipaddr_rule.qos_ipdst_offset >=
					flow->ipaddr_rule.qos_ipsrc_offset) {
					flow->qos_real_key_size =
						flow->ipaddr_rule.qos_ipdst_offset +
						NH_FLD_IPV6_ADDR_SIZE;
				} else {
					flow->qos_real_key_size =
						flow->ipaddr_rule.qos_ipsrc_offset +
						NH_FLD_IPV6_ADDR_SIZE;
				}
			}

			/* QoS entry added is only effective for multiple TCs.*/
			if (priv->num_rx_tc > 1) {
				qos_index = flow->tc_id * priv->fs_entries +
					flow->tc_index;
				if (qos_index >= priv->qos_entries) {
					DPAA2_PMD_ERR("QoS table with %d entries full",
						priv->qos_entries);
					return -1;
				}
				flow->qos_rule.key_size = FIXED_ENTRY_SIZE;

				dpaa2_flow_qos_entry_log("Start add", flow, qos_index);

				ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
						priv->token, &flow->qos_rule,
						flow->tc_id, qos_index,
						0, 0);
				if (ret < 0) {
					DPAA2_PMD_ERR(
						"Error in adding entry to QoS table(%d)", ret);
					return ret;
				}
			}

			if (flow->tc_index >= priv->fs_entries) {
				DPAA2_PMD_ERR("FS table with %d entries full",
					priv->fs_entries);
				return -1;
			}

			/* Same tail-IP-address adjustment for the FS key. */
			flow->fs_real_key_size =
				priv->extract.tc_key_extract[flow->tc_id]
				.key_info.key_total_size;

			if (flow->ipaddr_rule.ipaddr_type ==
				FLOW_IPV4_ADDR) {
				if (flow->ipaddr_rule.fs_ipdst_offset >=
					flow->ipaddr_rule.fs_ipsrc_offset) {
					flow->fs_real_key_size =
						flow->ipaddr_rule.fs_ipdst_offset +
						NH_FLD_IPV4_ADDR_SIZE;
				} else {
					flow->fs_real_key_size =
						flow->ipaddr_rule.fs_ipsrc_offset +
						NH_FLD_IPV4_ADDR_SIZE;
				}
			} else if (flow->ipaddr_rule.ipaddr_type ==
				FLOW_IPV6_ADDR) {
				if (flow->ipaddr_rule.fs_ipdst_offset >=
					flow->ipaddr_rule.fs_ipsrc_offset) {
					flow->fs_real_key_size =
						flow->ipaddr_rule.fs_ipdst_offset +
						NH_FLD_IPV6_ADDR_SIZE;
				} else {
					flow->fs_real_key_size =
						flow->ipaddr_rule.fs_ipsrc_offset +
						NH_FLD_IPV6_ADDR_SIZE;
				}
			}

			flow->fs_rule.key_size = FIXED_ENTRY_SIZE;

			dpaa2_flow_fs_entry_log("Start add", flow);

			ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token,
						flow->tc_id, flow->tc_index,
						&flow->fs_rule, &action);
			if (ret < 0) {
				DPAA2_PMD_ERR(
				"Error in adding entry to FS table(%d)", ret);
				return ret;
			}
			memcpy(&flow->action_cfg, &action,
				sizeof(struct dpni_fs_action_cfg));
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			rss_conf = (const struct rte_flow_action_rss *)(actions[j].conf);

			flow->action = RTE_FLOW_ACTION_TYPE_RSS;
			ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types,
					&priv->extract.tc_key_extract[flow->tc_id].dpkg);
			if (ret < 0) {
				DPAA2_PMD_ERR(
				"unable to set flow distribution.please check queue config\n");
				return ret;
			}

			/* Allocate DMA'ble memory to write the rules */
			param = (size_t)rte_malloc(NULL, 256, 64);
			if (!param) {
				DPAA2_PMD_ERR("Memory allocation failure\n");
				return -1;
			}

			if (dpkg_prepare_key_cfg(
				&priv->extract.tc_key_extract[flow->tc_id].dpkg,
				(uint8_t *)param) < 0) {
				DPAA2_PMD_ERR(
				"Unable to prepare extract parameters");
				rte_free((void *)param);
				return -1;
			}

			memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));
			tc_cfg.dist_size = rss_conf->queue_num;
			tc_cfg.key_cfg_iova = (size_t)param;
			tc_cfg.enable = true;
			tc_cfg.tc = flow->tc_id;
			ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW,
						 priv->token, &tc_cfg);
			if (ret < 0) {
				DPAA2_PMD_ERR(
					"RSS TC table cannot be configured: %d\n",
					ret);
				rte_free((void *)param);
				return -1;
			}

			rte_free((void *)param);
			if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
				if (dpkg_prepare_key_cfg(
					&priv->extract.qos_key_extract.dpkg,
					(uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
					DPAA2_PMD_ERR(
					"Unable to prepare extract parameters");
					return -1;
				}
				memset(&qos_cfg, 0,
					sizeof(struct dpni_qos_tbl_cfg));
				/* NOTE(review): unlike the QUEUE path above,
				 * the RSS path sets discard_on_miss = true.
				 */
				qos_cfg.discard_on_miss = true;
				qos_cfg.keep_entries = true;
				qos_cfg.key_cfg_iova =
					(size_t)priv->extract.qos_extract_param;
				ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
							 priv->token, &qos_cfg);
				if (ret < 0) {
					DPAA2_PMD_ERR(
					"RSS QoS dist can't be configured-%d\n",
					ret);
					return -1;
				}
			}

			/* Add Rule into QoS table */
			qos_index = flow->tc_id * priv->fs_entries +
				flow->tc_index;
			if (qos_index >= priv->qos_entries) {
				DPAA2_PMD_ERR("QoS table with %d entries full",
					priv->qos_entries);
				return -1;
			}

			flow->qos_real_key_size =
			  priv->extract.qos_key_extract.key_info.key_total_size;
			flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
			ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token,
						&flow->qos_rule, flow->tc_id,
						qos_index, 0, 0);
			if (ret < 0) {
				DPAA2_PMD_ERR(
				"Error in entry addition in QoS table(%d)",
				ret);
				return ret;
			}
			break;
		case RTE_FLOW_ACTION_TYPE_END:
			end_of_list = 1;
			break;
		default:
			DPAA2_PMD_ERR("Invalid action type");
			ret = -ENOTSUP;
			break;
		}
		j++;
	}

	if (!ret) {
		/* A key layout change invalidates previously installed
		 * entries; re-sync them before publishing this flow.
		 */
		if (is_keycfg_configured &
			(DPAA2_QOS_TABLE_RECONFIGURE |
			DPAA2_FS_TABLE_RECONFIGURE)) {
			ret = dpaa2_flow_entry_update(priv, flow->tc_id);
			if (ret) {
				DPAA2_PMD_ERR("Flow entry update failed.");

				return -1;
			}
		}
		/* New rules are inserted. */
		if (!curr) {
			LIST_INSERT_HEAD(&priv->flows, flow, next);
		} else {
			/* Append at the tail to keep insertion order. */
			while (LIST_NEXT(curr, next))
				curr = LIST_NEXT(curr, next);
			LIST_INSERT_AFTER(curr, flow, next);
		}
	}
	return ret;
}
3836 
3837 static inline int
3838 dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr,
3839 		      const struct rte_flow_attr *attr)
3840 {
3841 	int ret = 0;
3842 
3843 	if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) {
3844 		DPAA2_PMD_ERR("Priority group is out of range\n");
3845 		ret = -ENOTSUP;
3846 	}
3847 	if (unlikely(attr->priority >= dpni_attr->fs_entries)) {
3848 		DPAA2_PMD_ERR("Priority within the group is out of range\n");
3849 		ret = -ENOTSUP;
3850 	}
3851 	if (unlikely(attr->egress)) {
3852 		DPAA2_PMD_ERR(
3853 			"Flow configuration is not supported on egress side\n");
3854 		ret = -ENOTSUP;
3855 	}
3856 	if (unlikely(!attr->ingress)) {
3857 		DPAA2_PMD_ERR("Ingress flag must be configured\n");
3858 		ret = -EINVAL;
3859 	}
3860 	return ret;
3861 }
3862 
3863 static inline int
3864 dpaa2_dev_verify_patterns(const struct rte_flow_item pattern[])
3865 {
3866 	unsigned int i, j, is_found = 0;
3867 	int ret = 0;
3868 
3869 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3870 		for (i = 0; i < RTE_DIM(dpaa2_supported_pattern_type); i++) {
3871 			if (dpaa2_supported_pattern_type[i]
3872 					== pattern[j].type) {
3873 				is_found = 1;
3874 				break;
3875 			}
3876 		}
3877 		if (!is_found) {
3878 			ret = -ENOTSUP;
3879 			break;
3880 		}
3881 	}
3882 	/* Lets verify other combinations of given pattern rules */
3883 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3884 		if (!pattern[j].spec) {
3885 			ret = -EINVAL;
3886 			break;
3887 		}
3888 	}
3889 
3890 	return ret;
3891 }
3892 
3893 static inline int
3894 dpaa2_dev_verify_actions(const struct rte_flow_action actions[])
3895 {
3896 	unsigned int i, j, is_found = 0;
3897 	int ret = 0;
3898 
3899 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3900 		for (i = 0; i < RTE_DIM(dpaa2_supported_action_type); i++) {
3901 			if (dpaa2_supported_action_type[i] == actions[j].type) {
3902 				is_found = 1;
3903 				break;
3904 			}
3905 		}
3906 		if (!is_found) {
3907 			ret = -ENOTSUP;
3908 			break;
3909 		}
3910 	}
3911 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3912 		if (actions[j].type != RTE_FLOW_ACTION_TYPE_DROP &&
3913 				!actions[j].conf)
3914 			ret = -EINVAL;
3915 	}
3916 	return ret;
3917 }
3918 
3919 static
3920 int dpaa2_flow_validate(struct rte_eth_dev *dev,
3921 			const struct rte_flow_attr *flow_attr,
3922 			const struct rte_flow_item pattern[],
3923 			const struct rte_flow_action actions[],
3924 			struct rte_flow_error *error)
3925 {
3926 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3927 	struct dpni_attr dpni_attr;
3928 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3929 	uint16_t token = priv->token;
3930 	int ret = 0;
3931 
3932 	memset(&dpni_attr, 0, sizeof(struct dpni_attr));
3933 	ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr);
3934 	if (ret < 0) {
3935 		DPAA2_PMD_ERR(
3936 			"Failure to get dpni@%p attribute, err code  %d\n",
3937 			dpni, ret);
3938 		rte_flow_error_set(error, EPERM,
3939 			   RTE_FLOW_ERROR_TYPE_ATTR,
3940 			   flow_attr, "invalid");
3941 		return ret;
3942 	}
3943 
3944 	/* Verify input attributes */
3945 	ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr);
3946 	if (ret < 0) {
3947 		DPAA2_PMD_ERR(
3948 			"Invalid attributes are given\n");
3949 		rte_flow_error_set(error, EPERM,
3950 			   RTE_FLOW_ERROR_TYPE_ATTR,
3951 			   flow_attr, "invalid");
3952 		goto not_valid_params;
3953 	}
3954 	/* Verify input pattern list */
3955 	ret = dpaa2_dev_verify_patterns(pattern);
3956 	if (ret < 0) {
3957 		DPAA2_PMD_ERR(
3958 			"Invalid pattern list is given\n");
3959 		rte_flow_error_set(error, EPERM,
3960 			   RTE_FLOW_ERROR_TYPE_ITEM,
3961 			   pattern, "invalid");
3962 		goto not_valid_params;
3963 	}
3964 	/* Verify input action list */
3965 	ret = dpaa2_dev_verify_actions(actions);
3966 	if (ret < 0) {
3967 		DPAA2_PMD_ERR(
3968 			"Invalid action list is given\n");
3969 		rte_flow_error_set(error, EPERM,
3970 			   RTE_FLOW_ERROR_TYPE_ACTION,
3971 			   actions, "invalid");
3972 		goto not_valid_params;
3973 	}
3974 not_valid_params:
3975 	return ret;
3976 }
3977 
3978 static
3979 struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev,
3980 				   const struct rte_flow_attr *attr,
3981 				   const struct rte_flow_item pattern[],
3982 				   const struct rte_flow_action actions[],
3983 				   struct rte_flow_error *error)
3984 {
3985 	struct rte_flow *flow = NULL;
3986 	size_t key_iova = 0, mask_iova = 0;
3987 	int ret;
3988 
3989 	dpaa2_flow_control_log =
3990 		getenv("DPAA2_FLOW_CONTROL_LOG");
3991 
3992 	if (getenv("DPAA2_FLOW_CONTROL_MISS_FLOW")) {
3993 		struct dpaa2_dev_priv *priv = dev->data->dev_private;
3994 
3995 		dpaa2_flow_miss_flow_id =
3996 			atoi(getenv("DPAA2_FLOW_CONTROL_MISS_FLOW"));
3997 		if (dpaa2_flow_miss_flow_id >= priv->dist_queues) {
3998 			DPAA2_PMD_ERR(
3999 				"The missed flow ID %d exceeds the max flow ID %d",
4000 				dpaa2_flow_miss_flow_id,
4001 				priv->dist_queues - 1);
4002 			return NULL;
4003 		}
4004 	}
4005 
4006 	flow = rte_zmalloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE);
4007 	if (!flow) {
4008 		DPAA2_PMD_ERR("Failure to allocate memory for flow");
4009 		goto mem_failure;
4010 	}
4011 	/* Allocate DMA'ble memory to write the rules */
4012 	key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
4013 	if (!key_iova) {
4014 		DPAA2_PMD_ERR(
4015 			"Memory allocation failure for rule configuration\n");
4016 		goto mem_failure;
4017 	}
4018 	mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
4019 	if (!mask_iova) {
4020 		DPAA2_PMD_ERR(
4021 			"Memory allocation failure for rule configuration\n");
4022 		goto mem_failure;
4023 	}
4024 
4025 	flow->qos_rule.key_iova = key_iova;
4026 	flow->qos_rule.mask_iova = mask_iova;
4027 
4028 	/* Allocate DMA'ble memory to write the rules */
4029 	key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
4030 	if (!key_iova) {
4031 		DPAA2_PMD_ERR(
4032 			"Memory allocation failure for rule configuration\n");
4033 		goto mem_failure;
4034 	}
4035 	mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
4036 	if (!mask_iova) {
4037 		DPAA2_PMD_ERR(
4038 			"Memory allocation failure for rule configuration\n");
4039 		goto mem_failure;
4040 	}
4041 
4042 	flow->fs_rule.key_iova = key_iova;
4043 	flow->fs_rule.mask_iova = mask_iova;
4044 
4045 	flow->ipaddr_rule.ipaddr_type = FLOW_NONE_IPADDR;
4046 	flow->ipaddr_rule.qos_ipsrc_offset =
4047 		IP_ADDRESS_OFFSET_INVALID;
4048 	flow->ipaddr_rule.qos_ipdst_offset =
4049 		IP_ADDRESS_OFFSET_INVALID;
4050 	flow->ipaddr_rule.fs_ipsrc_offset =
4051 		IP_ADDRESS_OFFSET_INVALID;
4052 	flow->ipaddr_rule.fs_ipdst_offset =
4053 		IP_ADDRESS_OFFSET_INVALID;
4054 
4055 	ret = dpaa2_generic_flow_set(flow, dev, attr, pattern,
4056 			actions, error);
4057 	if (ret < 0) {
4058 		if (error && error->type > RTE_FLOW_ERROR_TYPE_ACTION)
4059 			rte_flow_error_set(error, EPERM,
4060 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4061 					attr, "unknown");
4062 		DPAA2_PMD_ERR("Failure to create flow, return code (%d)", ret);
4063 		goto creation_error;
4064 	}
4065 
4066 	return flow;
4067 mem_failure:
4068 	rte_flow_error_set(error, EPERM,
4069 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4070 			   NULL, "memory alloc");
4071 creation_error:
4072 	rte_free((void *)flow);
4073 	rte_free((void *)key_iova);
4074 	rte_free((void *)mask_iova);
4075 
4076 	return NULL;
4077 }
4078 
/* rte_flow destroy callback: remove the rule from the hardware tables,
 * unlink it from priv->flows, and free the flow and its rule buffers.
 *
 * Returns 0 on success; on a table-removal failure the flow is left
 * linked and allocated and a negative value is returned (with 'error'
 * set).
 *
 * NOTE(review): for an unrecognized flow->action the switch sets
 * ret = -ENOTSUP but does not jump to 'error', so the flow is still
 * unlinked and freed before the error is reported — confirm this is the
 * intended behavior.
 */
static
int dpaa2_flow_destroy(struct rte_eth_dev *dev,
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int ret = 0;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	switch (flow->action) {
	case RTE_FLOW_ACTION_TYPE_QUEUE:
	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
	case RTE_FLOW_ACTION_TYPE_PORT_ID:
		/* QoS entries only exist when multiple TCs are in use. */
		if (priv->num_rx_tc > 1) {
			/* Remove entry from QoS table first */
			ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
					&flow->qos_rule);
			if (ret < 0) {
				DPAA2_PMD_ERR(
					"Error in removing entry from QoS table(%d)", ret);
				goto error;
			}
		}

		/* Then remove entry from FS table */
		ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token,
					   flow->tc_id, &flow->fs_rule);
		if (ret < 0) {
			DPAA2_PMD_ERR(
				"Error in removing entry from FS table(%d)", ret);
			goto error;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_RSS:
		/* RSS flows only install a QoS entry (no FS entry). */
		if (priv->num_rx_tc > 1) {
			ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
					&flow->qos_rule);
			if (ret < 0) {
				DPAA2_PMD_ERR(
					"Error in entry addition in QoS table(%d)", ret);
				goto error;
			}
		}
		break;
	default:
		DPAA2_PMD_ERR(
		"Action type (%d) is not supported", flow->action);
		ret = -ENOTSUP;
		break;
	}

	LIST_REMOVE(flow, next);
	/* Free the DMA'ble rule buffers allocated in dpaa2_flow_create(). */
	rte_free((void *)(size_t)flow->qos_rule.key_iova);
	rte_free((void *)(size_t)flow->qos_rule.mask_iova);
	rte_free((void *)(size_t)flow->fs_rule.key_iova);
	rte_free((void *)(size_t)flow->fs_rule.mask_iova);
	/* Now free the flow */
	rte_free(flow);

error:
	/* Success path also falls through here; 'error' is only set
	 * when ret is non-zero.
	 */
	if (ret)
		rte_flow_error_set(error, EPERM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "unknown");
	return ret;
}
4145 
4146 /**
4147  * Destroy user-configured flow rules.
4148  *
4149  * This function skips internal flows rules.
4150  *
4151  * @see rte_flow_flush()
4152  * @see rte_flow_ops
4153  */
4154 static int
4155 dpaa2_flow_flush(struct rte_eth_dev *dev,
4156 		struct rte_flow_error *error)
4157 {
4158 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4159 	struct rte_flow *flow = LIST_FIRST(&priv->flows);
4160 
4161 	while (flow) {
4162 		struct rte_flow *next = LIST_NEXT(flow, next);
4163 
4164 		dpaa2_flow_destroy(dev, flow, error);
4165 		flow = next;
4166 	}
4167 	return 0;
4168 }
4169 
/* rte_flow query callback: flow statistics query is not implemented for
 * this PMD; this stub always reports success without filling 'data'.
 */
static int
dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused,
		struct rte_flow *flow __rte_unused,
		const struct rte_flow_action *actions __rte_unused,
		void *data __rte_unused,
		struct rte_flow_error *error __rte_unused)
{
	return 0;
}
4179 
4180 /**
4181  * Clean up all flow rules.
4182  *
4183  * Unlike dpaa2_flow_flush(), this function takes care of all remaining flow
4184  * rules regardless of whether they are internal or user-configured.
4185  *
4186  * @param priv
4187  *   Pointer to private structure.
4188  */
4189 void
4190 dpaa2_flow_clean(struct rte_eth_dev *dev)
4191 {
4192 	struct rte_flow *flow;
4193 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4194 
4195 	while ((flow = LIST_FIRST(&priv->flows)))
4196 		dpaa2_flow_destroy(dev, flow, NULL);
4197 }
4198 
/* rte_flow driver ops exported to the ethdev layer; query is a stub. */
const struct rte_flow_ops dpaa2_flow_ops = {
	.create	= dpaa2_flow_create,
	.validate = dpaa2_flow_validate,
	.destroy = dpaa2_flow_destroy,
	.flush	= dpaa2_flow_flush,
	.query	= dpaa2_flow_query,
};
4206