xref: /dpdk/drivers/net/dpaa2/dpaa2_flow.c (revision 068be45fb5363dc9f79821a133f13d8bd781d26d)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2021 NXP
3  */
4 
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 
13 #include <rte_ethdev.h>
14 #include <rte_log.h>
15 #include <rte_malloc.h>
16 #include <rte_flow_driver.h>
17 #include <rte_tailq.h>
18 
19 #include <fsl_dpni.h>
20 #include <fsl_dpkg.h>
21 
22 #include <dpaa2_ethdev.h>
23 #include <dpaa2_pmd_logs.h>
24 
25 /* Workaround to discriminate the UDP/TCP/SCTP
26  * with next protocol of l3.
27  * MC/WRIOP are not able to identify
28  * the l4 protocol with l4 ports.
29  */
/* Non-zero when MC/WRIOP can classify L4 by ports itself (see note above);
 * presumably set from a devarg — TODO confirm where it is assigned.
 */
int mc_l4_port_identification;

/* When non-NULL, enables verbose flow programming logs below
 * (source of the setting not visible in this file — likely a devarg).
 */
static char *dpaa2_flow_control_log;
static uint16_t dpaa2_flow_miss_flow_id; /* Default miss flow id is 0. */

/* All QoS/FS rule entries are programmed with this fixed key size. */
#define FIXED_ENTRY_SIZE DPNI_MAX_KEY_SIZE
36 
/* Which IP version (if any) contributes address fields to a rule key. */
enum flow_rule_ipaddr_type {
	FLOW_NONE_IPADDR,
	FLOW_IPV4_ADDR,
	FLOW_IPV6_ADDR
};

/* Byte offsets of the IP src/dst address fields inside a flow's QoS and
 * FS rule key/mask buffers; a negative offset means the address is not
 * part of that key (see dpaa2_flow_rule_move_ipaddr_tail()).
 */
struct flow_rule_ipaddr {
	enum flow_rule_ipaddr_type ipaddr_type;
	int qos_ipsrc_offset;
	int qos_ipdst_offset;
	int fs_ipsrc_offset;
	int fs_ipdst_offset;
};
50 
/* PMD-private flow object; one per created rte_flow rule. */
struct rte_flow {
	LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
	struct dpni_rule_cfg qos_rule; /** Key/mask buffers for the QoS table entry. */
	struct dpni_rule_cfg fs_rule; /** Key/mask buffers for the FS table entry. */
	uint8_t qos_real_key_size; /** Bytes of the QoS key actually populated. */
	uint8_t fs_real_key_size; /** Bytes of the FS key actually populated. */
	uint8_t tc_id; /** Traffic Class ID. */
	uint8_t tc_index; /** index within this Traffic Class. */
	enum rte_flow_action_type action; /** rte_flow action applied by this rule. */
	/* Special for IP address to specify the offset
	 * in key/mask.
	 */
	struct flow_rule_ipaddr ipaddr_rule;
	struct dpni_fs_action_cfg action_cfg; /** FS action (e.g. destination queue). */
};
66 
/* Pattern item types this PMD accepts in a flow's item list. */
static const
enum rte_flow_item_type dpaa2_supported_pattern_type[] = {
	RTE_FLOW_ITEM_TYPE_END,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_GRE,
};

/* Action types this PMD accepts in a flow's action list. */
static const
enum rte_flow_action_type dpaa2_supported_action_type[] = {
	RTE_FLOW_ACTION_TYPE_END,
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_PORT_ID,
	RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
	RTE_FLOW_ACTION_TYPE_RSS
};

/* Subset of actions that resolve to a flow-steering (FS) destination. */
static const
enum rte_flow_action_type dpaa2_supported_fs_action_type[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_PORT_ID,
	RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
};

/* Max of enum rte_flow_item_type + 1: a pseudo item type meaning
 * "IP, either IPv4 or IPv6" for shared handling of both.
 */
#define DPAA2_FLOW_ITEM_TYPE_GENERIC_IP (RTE_FLOW_ITEM_TYPE_META + 1)
99 
/* Per-item default masks: the full set of fields this PMD can extract,
 * used both as default when a pattern item carries no mask and as the
 * support check in dpaa2_flow_extract_support(). C-only, presumably
 * because the nested designated initializers are not valid C++ — the
 * header context is not visible here.
 */
#ifndef __cplusplus
static const struct rte_flow_item_eth dpaa2_flow_item_eth_mask = {
	.hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
	.hdr.src_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
	.hdr.ether_type = RTE_BE16(0xffff),
};

static const struct rte_flow_item_vlan dpaa2_flow_item_vlan_mask = {
	.hdr.vlan_tci = RTE_BE16(0xffff),
};

static const struct rte_flow_item_ipv4 dpaa2_flow_item_ipv4_mask = {
	.hdr.src_addr = RTE_BE32(0xffffffff),
	.hdr.dst_addr = RTE_BE32(0xffffffff),
	.hdr.next_proto_id = 0xff,
};

static const struct rte_flow_item_ipv6 dpaa2_flow_item_ipv6_mask = {
	.hdr = {
		.src_addr = RTE_IPV6_MASK_FULL,
		.dst_addr = RTE_IPV6_MASK_FULL,
		.proto = 0xff
	},
};

static const struct rte_flow_item_icmp dpaa2_flow_item_icmp_mask = {
	.hdr.icmp_type = 0xff,
	.hdr.icmp_code = 0xff,
};

static const struct rte_flow_item_udp dpaa2_flow_item_udp_mask = {
	.hdr = {
		.src_port = RTE_BE16(0xffff),
		.dst_port = RTE_BE16(0xffff),
	},
};

static const struct rte_flow_item_tcp dpaa2_flow_item_tcp_mask = {
	.hdr = {
		.src_port = RTE_BE16(0xffff),
		.dst_port = RTE_BE16(0xffff),
	},
};

static const struct rte_flow_item_sctp dpaa2_flow_item_sctp_mask = {
	.hdr = {
		.src_port = RTE_BE16(0xffff),
		.dst_port = RTE_BE16(0xffff),
	},
};

static const struct rte_flow_item_gre dpaa2_flow_item_gre_mask = {
	.protocol = RTE_BE16(0xffff),
};

#endif
156 
157 static inline void dpaa2_prot_field_string(
158 	enum net_prot prot, uint32_t field,
159 	char *string)
160 {
161 	if (!dpaa2_flow_control_log)
162 		return;
163 
164 	if (prot == NET_PROT_ETH) {
165 		strcpy(string, "eth");
166 		if (field == NH_FLD_ETH_DA)
167 			strcat(string, ".dst");
168 		else if (field == NH_FLD_ETH_SA)
169 			strcat(string, ".src");
170 		else if (field == NH_FLD_ETH_TYPE)
171 			strcat(string, ".type");
172 		else
173 			strcat(string, ".unknown field");
174 	} else if (prot == NET_PROT_VLAN) {
175 		strcpy(string, "vlan");
176 		if (field == NH_FLD_VLAN_TCI)
177 			strcat(string, ".tci");
178 		else
179 			strcat(string, ".unknown field");
180 	} else if (prot == NET_PROT_IP) {
181 		strcpy(string, "ip");
182 		if (field == NH_FLD_IP_SRC)
183 			strcat(string, ".src");
184 		else if (field == NH_FLD_IP_DST)
185 			strcat(string, ".dst");
186 		else if (field == NH_FLD_IP_PROTO)
187 			strcat(string, ".proto");
188 		else
189 			strcat(string, ".unknown field");
190 	} else if (prot == NET_PROT_TCP) {
191 		strcpy(string, "tcp");
192 		if (field == NH_FLD_TCP_PORT_SRC)
193 			strcat(string, ".src");
194 		else if (field == NH_FLD_TCP_PORT_DST)
195 			strcat(string, ".dst");
196 		else
197 			strcat(string, ".unknown field");
198 	} else if (prot == NET_PROT_UDP) {
199 		strcpy(string, "udp");
200 		if (field == NH_FLD_UDP_PORT_SRC)
201 			strcat(string, ".src");
202 		else if (field == NH_FLD_UDP_PORT_DST)
203 			strcat(string, ".dst");
204 		else
205 			strcat(string, ".unknown field");
206 	} else if (prot == NET_PROT_ICMP) {
207 		strcpy(string, "icmp");
208 		if (field == NH_FLD_ICMP_TYPE)
209 			strcat(string, ".type");
210 		else if (field == NH_FLD_ICMP_CODE)
211 			strcat(string, ".code");
212 		else
213 			strcat(string, ".unknown field");
214 	} else if (prot == NET_PROT_SCTP) {
215 		strcpy(string, "sctp");
216 		if (field == NH_FLD_SCTP_PORT_SRC)
217 			strcat(string, ".src");
218 		else if (field == NH_FLD_SCTP_PORT_DST)
219 			strcat(string, ".dst");
220 		else
221 			strcat(string, ".unknown field");
222 	} else if (prot == NET_PROT_GRE) {
223 		strcpy(string, "gre");
224 		if (field == NH_FLD_GRE_TYPE)
225 			strcat(string, ".type");
226 		else
227 			strcat(string, ".unknown field");
228 	} else {
229 		strcpy(string, "unknown protocol");
230 	}
231 }
232 
233 static inline void dpaa2_flow_qos_table_extracts_log(
234 	const struct dpaa2_dev_priv *priv, FILE *f)
235 {
236 	int idx;
237 	char string[32];
238 
239 	if (!dpaa2_flow_control_log)
240 		return;
241 
242 	fprintf(f, "Setup QoS table: number of extracts: %d\r\n",
243 			priv->extract.qos_key_extract.dpkg.num_extracts);
244 	for (idx = 0; idx < priv->extract.qos_key_extract.dpkg.num_extracts;
245 		idx++) {
246 		dpaa2_prot_field_string(priv->extract.qos_key_extract.dpkg
247 			.extracts[idx].extract.from_hdr.prot,
248 			priv->extract.qos_key_extract.dpkg.extracts[idx]
249 			.extract.from_hdr.field,
250 			string);
251 		fprintf(f, "%s", string);
252 		if ((idx + 1) < priv->extract.qos_key_extract.dpkg.num_extracts)
253 			fprintf(f, " / ");
254 	}
255 	fprintf(f, "\r\n");
256 }
257 
258 static inline void dpaa2_flow_fs_table_extracts_log(
259 	const struct dpaa2_dev_priv *priv, int tc_id, FILE *f)
260 {
261 	int idx;
262 	char string[32];
263 
264 	if (!dpaa2_flow_control_log)
265 		return;
266 
267 	fprintf(f, "Setup FS table: number of extracts of TC[%d]: %d\r\n",
268 			tc_id, priv->extract.tc_key_extract[tc_id]
269 			.dpkg.num_extracts);
270 	for (idx = 0; idx < priv->extract.tc_key_extract[tc_id]
271 		.dpkg.num_extracts; idx++) {
272 		dpaa2_prot_field_string(priv->extract.tc_key_extract[tc_id]
273 			.dpkg.extracts[idx].extract.from_hdr.prot,
274 			priv->extract.tc_key_extract[tc_id].dpkg.extracts[idx]
275 			.extract.from_hdr.field,
276 			string);
277 		fprintf(f, "%s", string);
278 		if ((idx + 1) < priv->extract.tc_key_extract[tc_id]
279 			.dpkg.num_extracts)
280 			fprintf(f, " / ");
281 	}
282 	fprintf(f, "\r\n");
283 }
284 
285 static inline void dpaa2_flow_qos_entry_log(
286 	const char *log_info, const struct rte_flow *flow, int qos_index, FILE *f)
287 {
288 	int idx;
289 	uint8_t *key, *mask;
290 
291 	if (!dpaa2_flow_control_log)
292 		return;
293 
294 	fprintf(f, "\r\n%s QoS entry[%d] for TC[%d], extracts size is %d\r\n",
295 		log_info, qos_index, flow->tc_id, flow->qos_real_key_size);
296 
297 	key = (uint8_t *)(size_t)flow->qos_rule.key_iova;
298 	mask = (uint8_t *)(size_t)flow->qos_rule.mask_iova;
299 
300 	fprintf(f, "key:\r\n");
301 	for (idx = 0; idx < flow->qos_real_key_size; idx++)
302 		fprintf(f, "%02x ", key[idx]);
303 
304 	fprintf(f, "\r\nmask:\r\n");
305 	for (idx = 0; idx < flow->qos_real_key_size; idx++)
306 		fprintf(f, "%02x ", mask[idx]);
307 
308 	fprintf(f, "\r\n%s QoS ipsrc: %d, ipdst: %d\r\n", log_info,
309 		flow->ipaddr_rule.qos_ipsrc_offset,
310 		flow->ipaddr_rule.qos_ipdst_offset);
311 }
312 
313 static inline void dpaa2_flow_fs_entry_log(
314 	const char *log_info, const struct rte_flow *flow, FILE *f)
315 {
316 	int idx;
317 	uint8_t *key, *mask;
318 
319 	if (!dpaa2_flow_control_log)
320 		return;
321 
322 	fprintf(f, "\r\n%s FS/TC entry[%d] of TC[%d], extracts size is %d\r\n",
323 		log_info, flow->tc_index, flow->tc_id, flow->fs_real_key_size);
324 
325 	key = (uint8_t *)(size_t)flow->fs_rule.key_iova;
326 	mask = (uint8_t *)(size_t)flow->fs_rule.mask_iova;
327 
328 	fprintf(f, "key:\r\n");
329 	for (idx = 0; idx < flow->fs_real_key_size; idx++)
330 		fprintf(f, "%02x ", key[idx]);
331 
332 	fprintf(f, "\r\nmask:\r\n");
333 	for (idx = 0; idx < flow->fs_real_key_size; idx++)
334 		fprintf(f, "%02x ", mask[idx]);
335 
336 	fprintf(f, "\r\n%s FS ipsrc: %d, ipdst: %d\r\n", log_info,
337 		flow->ipaddr_rule.fs_ipsrc_offset,
338 		flow->ipaddr_rule.fs_ipdst_offset);
339 }
340 
341 static inline void dpaa2_flow_extract_key_set(
342 	struct dpaa2_key_info *key_info, int index, uint8_t size)
343 {
344 	key_info->key_size[index] = size;
345 	if (index > 0) {
346 		key_info->key_offset[index] =
347 			key_info->key_offset[index - 1] +
348 			key_info->key_size[index - 1];
349 	} else {
350 		key_info->key_offset[index] = 0;
351 	}
352 	key_info->key_total_size += size;
353 }
354 
/* Append a header-field extract to @key_extract's DPKG profile.
 *
 * Invariant: IP SRC/DST extracts always occupy the LAST slot(s) of the
 * profile (their key size differs between IPv4 and IPv6, so they are
 * tracked by the separate ipv4/ipv6_*_offset fields and carry key size
 * 0 in key_size[]). A non-IP-address field is therefore inserted just
 * BEFORE any existing IP extract(s), which are then re-created one slot
 * later and have their offsets shifted by the new field's size.
 *
 * Returns 0 on success, -1 if the profile is already full.
 */
static int dpaa2_flow_extract_add(
	struct dpaa2_key_extract *key_extract,
	enum net_prot prot,
	uint32_t field, uint8_t field_size)
{
	int index, ip_src = -1, ip_dst = -1;
	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
	struct dpaa2_key_info *key_info = &key_extract->key_info;

	if (dpkg->num_extracts >=
		DPKG_MAX_NUM_OF_EXTRACTS) {
		DPAA2_PMD_WARN("Number of extracts overflows");
		return -1;
	}
	/* Before reorder, the IP SRC and IP DST are already last
	 * extract(s).
	 */
	for (index = 0; index < dpkg->num_extracts; index++) {
		if (dpkg->extracts[index].extract.from_hdr.prot ==
			NET_PROT_IP) {
			if (dpkg->extracts[index].extract.from_hdr.field ==
				NH_FLD_IP_SRC) {
				ip_src = index;
			}
			if (dpkg->extracts[index].extract.from_hdr.field ==
				NH_FLD_IP_DST) {
				ip_dst = index;
			}
		}
	}

	/* Sanity check the invariant: IP extracts sit in the last slots. */
	if (ip_src >= 0)
		RTE_ASSERT((ip_src + 2) >= dpkg->num_extracts);

	if (ip_dst >= 0)
		RTE_ASSERT((ip_dst + 2) >= dpkg->num_extracts);

	/* Pick the insertion slot: IP addresses append at the tail; other
	 * fields go before any existing IP address extract(s).
	 */
	if (prot == NET_PROT_IP &&
		(field == NH_FLD_IP_SRC ||
		field == NH_FLD_IP_DST)) {
		index = dpkg->num_extracts;
	} else {
		if (ip_src >= 0 && ip_dst >= 0)
			index = dpkg->num_extracts - 2;
		else if (ip_src >= 0 || ip_dst >= 0)
			index = dpkg->num_extracts - 1;
		else
			index = dpkg->num_extracts;
	}

	dpkg->extracts[index].type =	DPKG_EXTRACT_FROM_HDR;
	dpkg->extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
	dpkg->extracts[index].extract.from_hdr.prot = prot;
	dpkg->extracts[index].extract.from_hdr.field = field;
	/* IP addresses contribute size 0 to key_size[]; their real size is
	 * carried by the ipvX_src/dst offsets maintained below.
	 */
	if (prot == NET_PROT_IP &&
		(field == NH_FLD_IP_SRC ||
		field == NH_FLD_IP_DST)) {
		dpaa2_flow_extract_key_set(key_info, index, 0);
	} else {
		dpaa2_flow_extract_key_set(key_info, index, field_size);
	}

	/* Track both the IPv4 and IPv6 offset of the address just added:
	 * directly after its peer address when present, otherwise after
	 * the last fixed-size key field.
	 */
	if (prot == NET_PROT_IP) {
		if (field == NH_FLD_IP_SRC) {
			if (key_info->ipv4_dst_offset >= 0) {
				key_info->ipv4_src_offset =
					key_info->ipv4_dst_offset +
					NH_FLD_IPV4_ADDR_SIZE;
			} else {
				key_info->ipv4_src_offset =
					key_info->key_offset[index - 1] +
						key_info->key_size[index - 1];
			}
			if (key_info->ipv6_dst_offset >= 0) {
				key_info->ipv6_src_offset =
					key_info->ipv6_dst_offset +
					NH_FLD_IPV6_ADDR_SIZE;
			} else {
				key_info->ipv6_src_offset =
					key_info->key_offset[index - 1] +
						key_info->key_size[index - 1];
			}
		} else if (field == NH_FLD_IP_DST) {
			if (key_info->ipv4_src_offset >= 0) {
				key_info->ipv4_dst_offset =
					key_info->ipv4_src_offset +
					NH_FLD_IPV4_ADDR_SIZE;
			} else {
				key_info->ipv4_dst_offset =
					key_info->key_offset[index - 1] +
						key_info->key_size[index - 1];
			}
			if (key_info->ipv6_src_offset >= 0) {
				key_info->ipv6_dst_offset =
					key_info->ipv6_src_offset +
					NH_FLD_IPV6_ADDR_SIZE;
			} else {
				key_info->ipv6_dst_offset =
					key_info->key_offset[index - 1] +
						key_info->key_size[index - 1];
			}
		}
	}

	/* Appended at the tail: no IP extract reorder needed. */
	if (index == dpkg->num_extracts) {
		dpkg->num_extracts++;
		return 0;
	}

	/* Inserted before the IP extract(s): re-create them one slot later
	 * and shift their key offsets by the newly added field's size.
	 */
	if (ip_src >= 0) {
		ip_src++;
		dpkg->extracts[ip_src].type =
			DPKG_EXTRACT_FROM_HDR;
		dpkg->extracts[ip_src].extract.from_hdr.type =
			DPKG_FULL_FIELD;
		dpkg->extracts[ip_src].extract.from_hdr.prot =
			NET_PROT_IP;
		dpkg->extracts[ip_src].extract.from_hdr.field =
			NH_FLD_IP_SRC;
		dpaa2_flow_extract_key_set(key_info, ip_src, 0);
		key_info->ipv4_src_offset += field_size;
		key_info->ipv6_src_offset += field_size;
	}
	if (ip_dst >= 0) {
		ip_dst++;
		dpkg->extracts[ip_dst].type =
			DPKG_EXTRACT_FROM_HDR;
		dpkg->extracts[ip_dst].extract.from_hdr.type =
			DPKG_FULL_FIELD;
		dpkg->extracts[ip_dst].extract.from_hdr.prot =
			NET_PROT_IP;
		dpkg->extracts[ip_dst].extract.from_hdr.field =
			NH_FLD_IP_DST;
		dpaa2_flow_extract_key_set(key_info, ip_dst, 0);
		key_info->ipv4_dst_offset += field_size;
		key_info->ipv6_dst_offset += field_size;
	}

	dpkg->num_extracts++;

	return 0;
}
497 
498 static int dpaa2_flow_extract_add_raw(struct dpaa2_key_extract *key_extract,
499 				      int size)
500 {
501 	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
502 	struct dpaa2_key_info *key_info = &key_extract->key_info;
503 	int last_extract_size, index;
504 
505 	if (dpkg->num_extracts != 0 && dpkg->extracts[0].type !=
506 	    DPKG_EXTRACT_FROM_DATA) {
507 		DPAA2_PMD_WARN("RAW extract cannot be combined with others");
508 		return -1;
509 	}
510 
511 	last_extract_size = (size % DPAA2_FLOW_MAX_KEY_SIZE);
512 	dpkg->num_extracts = (size / DPAA2_FLOW_MAX_KEY_SIZE);
513 	if (last_extract_size)
514 		dpkg->num_extracts++;
515 	else
516 		last_extract_size = DPAA2_FLOW_MAX_KEY_SIZE;
517 
518 	for (index = 0; index < dpkg->num_extracts; index++) {
519 		dpkg->extracts[index].type = DPKG_EXTRACT_FROM_DATA;
520 		if (index == dpkg->num_extracts - 1)
521 			dpkg->extracts[index].extract.from_data.size =
522 				last_extract_size;
523 		else
524 			dpkg->extracts[index].extract.from_data.size =
525 				DPAA2_FLOW_MAX_KEY_SIZE;
526 		dpkg->extracts[index].extract.from_data.offset =
527 			DPAA2_FLOW_MAX_KEY_SIZE * index;
528 	}
529 
530 	key_info->key_total_size = size;
531 	return 0;
532 }
533 
534 /* Protocol discrimination.
535  * Discriminate IPv4/IPv6/vLan by Eth type.
536  * Discriminate UDP/TCP/ICMP by next proto of IP.
537  */
538 static inline int
539 dpaa2_flow_proto_discrimination_extract(
540 	struct dpaa2_key_extract *key_extract,
541 	enum rte_flow_item_type type)
542 {
543 	if (type == RTE_FLOW_ITEM_TYPE_ETH) {
544 		return dpaa2_flow_extract_add(
545 				key_extract, NET_PROT_ETH,
546 				NH_FLD_ETH_TYPE,
547 				sizeof(rte_be16_t));
548 	} else if (type == (enum rte_flow_item_type)
549 		DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
550 		return dpaa2_flow_extract_add(
551 				key_extract, NET_PROT_IP,
552 				NH_FLD_IP_PROTO,
553 				NH_FLD_IP_PROTO_SIZE);
554 	}
555 
556 	return -1;
557 }
558 
559 static inline int dpaa2_flow_extract_search(
560 	struct dpkg_profile_cfg *dpkg,
561 	enum net_prot prot, uint32_t field)
562 {
563 	int i;
564 
565 	for (i = 0; i < dpkg->num_extracts; i++) {
566 		if (dpkg->extracts[i].extract.from_hdr.prot == prot &&
567 			dpkg->extracts[i].extract.from_hdr.field == field) {
568 			return i;
569 		}
570 	}
571 
572 	return -1;
573 }
574 
575 static inline int dpaa2_flow_extract_key_offset(
576 	struct dpaa2_key_extract *key_extract,
577 	enum net_prot prot, uint32_t field)
578 {
579 	int i;
580 	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
581 	struct dpaa2_key_info *key_info = &key_extract->key_info;
582 
583 	if (prot == NET_PROT_IPV4 ||
584 		prot == NET_PROT_IPV6)
585 		i = dpaa2_flow_extract_search(dpkg, NET_PROT_IP, field);
586 	else
587 		i = dpaa2_flow_extract_search(dpkg, prot, field);
588 
589 	if (i >= 0) {
590 		if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_SRC)
591 			return key_info->ipv4_src_offset;
592 		else if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_DST)
593 			return key_info->ipv4_dst_offset;
594 		else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_SRC)
595 			return key_info->ipv6_src_offset;
596 		else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_DST)
597 			return key_info->ipv6_dst_offset;
598 		else
599 			return key_info->key_offset[i];
600 	} else {
601 		return -1;
602 	}
603 }
604 
/* Next-protocol discriminator value: for an ETH item it carries the
 * ether_type, for the generic-IP pseudo item the L4 protocol number
 * (see dpaa2_flow_proto_discrimination_rule()).
 */
struct proto_discrimination {
	enum rte_flow_item_type type; /* ETH or DPAA2_FLOW_ITEM_TYPE_GENERIC_IP */
	union {
		rte_be16_t eth_type;
		uint8_t ip_proto;
	};
};
612 
613 static int
614 dpaa2_flow_proto_discrimination_rule(
615 	struct dpaa2_dev_priv *priv, struct rte_flow *flow,
616 	struct proto_discrimination proto, int group)
617 {
618 	enum net_prot prot;
619 	uint32_t field;
620 	int offset;
621 	size_t key_iova;
622 	size_t mask_iova;
623 	rte_be16_t eth_type;
624 	uint8_t ip_proto;
625 
626 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
627 		prot = NET_PROT_ETH;
628 		field = NH_FLD_ETH_TYPE;
629 	} else if (proto.type == DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
630 		prot = NET_PROT_IP;
631 		field = NH_FLD_IP_PROTO;
632 	} else {
633 		DPAA2_PMD_ERR(
634 			"Only Eth and IP support to discriminate next proto.");
635 		return -1;
636 	}
637 
638 	offset = dpaa2_flow_extract_key_offset(&priv->extract.qos_key_extract,
639 			prot, field);
640 	if (offset < 0) {
641 		DPAA2_PMD_ERR("QoS prot %d field %d extract failed",
642 				prot, field);
643 		return -1;
644 	}
645 	key_iova = flow->qos_rule.key_iova + offset;
646 	mask_iova = flow->qos_rule.mask_iova + offset;
647 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
648 		eth_type = proto.eth_type;
649 		memcpy((void *)key_iova, (const void *)(&eth_type),
650 			sizeof(rte_be16_t));
651 		eth_type = 0xffff;
652 		memcpy((void *)mask_iova, (const void *)(&eth_type),
653 			sizeof(rte_be16_t));
654 	} else {
655 		ip_proto = proto.ip_proto;
656 		memcpy((void *)key_iova, (const void *)(&ip_proto),
657 			sizeof(uint8_t));
658 		ip_proto = 0xff;
659 		memcpy((void *)mask_iova, (const void *)(&ip_proto),
660 			sizeof(uint8_t));
661 	}
662 
663 	offset = dpaa2_flow_extract_key_offset(
664 			&priv->extract.tc_key_extract[group],
665 			prot, field);
666 	if (offset < 0) {
667 		DPAA2_PMD_ERR("FS prot %d field %d extract failed",
668 				prot, field);
669 		return -1;
670 	}
671 	key_iova = flow->fs_rule.key_iova + offset;
672 	mask_iova = flow->fs_rule.mask_iova + offset;
673 
674 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
675 		eth_type = proto.eth_type;
676 		memcpy((void *)key_iova, (const void *)(&eth_type),
677 			sizeof(rte_be16_t));
678 		eth_type = 0xffff;
679 		memcpy((void *)mask_iova, (const void *)(&eth_type),
680 			sizeof(rte_be16_t));
681 	} else {
682 		ip_proto = proto.ip_proto;
683 		memcpy((void *)key_iova, (const void *)(&ip_proto),
684 			sizeof(uint8_t));
685 		ip_proto = 0xff;
686 		memcpy((void *)mask_iova, (const void *)(&ip_proto),
687 			sizeof(uint8_t));
688 	}
689 
690 	return 0;
691 }
692 
693 static inline int
694 dpaa2_flow_rule_data_set(
695 	struct dpaa2_key_extract *key_extract,
696 	struct dpni_rule_cfg *rule,
697 	enum net_prot prot, uint32_t field,
698 	const void *key, const void *mask, int size)
699 {
700 	int offset = dpaa2_flow_extract_key_offset(key_extract,
701 				prot, field);
702 
703 	if (offset < 0) {
704 		DPAA2_PMD_ERR("prot %d, field %d extract failed",
705 			prot, field);
706 		return -1;
707 	}
708 
709 	memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
710 	memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
711 
712 	return 0;
713 }
714 
715 static inline int
716 dpaa2_flow_rule_data_set_raw(struct dpni_rule_cfg *rule,
717 			     const void *key, const void *mask, int size)
718 {
719 	int offset = 0;
720 
721 	memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
722 	memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
723 
724 	return 0;
725 }
726 
/* Relocate one IP address (SRC or DST) inside @rule's key and mask
 * buffers: copy @len bytes from @src_offset to the offset the extract
 * layout assigns to (@field, IPv4/IPv6), zeroing the old location.
 * The copy goes through a temporary buffer and the source is zeroed
 * before the destination is written, so partially overlapping old/new
 * ranges still end up with the full value at the destination.
 *
 * Returns 0 on success, -1 on a non-address @field or when the address
 * is missing from the extract profile.
 */
static inline int
_dpaa2_flow_rule_move_ipaddr_tail(
	struct dpaa2_key_extract *key_extract,
	struct dpni_rule_cfg *rule, int src_offset,
	uint32_t field, bool ipv4)
{
	size_t key_src;
	size_t mask_src;
	size_t key_dst;
	size_t mask_dst;
	int dst_offset, len;
	enum net_prot prot;
	char tmp[NH_FLD_IPV6_ADDR_SIZE];

	if (field != NH_FLD_IP_SRC &&
		field != NH_FLD_IP_DST) {
		DPAA2_PMD_ERR("Field of IP addr reorder must be IP SRC/DST");
		return -1;
	}
	if (ipv4)
		prot = NET_PROT_IPV4;
	else
		prot = NET_PROT_IPV6;
	dst_offset = dpaa2_flow_extract_key_offset(key_extract,
				prot, field);
	if (dst_offset < 0) {
		DPAA2_PMD_ERR("Field %d reorder extract failed", field);
		return -1;
	}
	key_src = rule->key_iova + src_offset;
	mask_src = rule->mask_iova + src_offset;
	key_dst = rule->key_iova + dst_offset;
	mask_dst = rule->mask_iova + dst_offset;
	/* IPv4 moves 4 bytes, IPv6 the full 16-byte address. */
	if (ipv4)
		len = sizeof(rte_be32_t);
	else
		len = NH_FLD_IPV6_ADDR_SIZE;

	memcpy(tmp, (char *)key_src, len);
	memset((char *)key_src, 0, len);
	memcpy((char *)key_dst, tmp, len);

	memcpy(tmp, (char *)mask_src, len);
	memset((char *)mask_src, 0, len);
	memcpy((char *)mask_dst, tmp, len);

	return 0;
}
775 
776 static inline int
777 dpaa2_flow_rule_move_ipaddr_tail(
778 	struct rte_flow *flow, struct dpaa2_dev_priv *priv,
779 	int fs_group)
780 {
781 	int ret;
782 	enum net_prot prot;
783 
784 	if (flow->ipaddr_rule.ipaddr_type == FLOW_NONE_IPADDR)
785 		return 0;
786 
787 	if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR)
788 		prot = NET_PROT_IPV4;
789 	else
790 		prot = NET_PROT_IPV6;
791 
792 	if (flow->ipaddr_rule.qos_ipsrc_offset >= 0) {
793 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
794 				&priv->extract.qos_key_extract,
795 				&flow->qos_rule,
796 				flow->ipaddr_rule.qos_ipsrc_offset,
797 				NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
798 		if (ret) {
799 			DPAA2_PMD_ERR("QoS src address reorder failed");
800 			return -1;
801 		}
802 		flow->ipaddr_rule.qos_ipsrc_offset =
803 			dpaa2_flow_extract_key_offset(
804 				&priv->extract.qos_key_extract,
805 				prot, NH_FLD_IP_SRC);
806 	}
807 
808 	if (flow->ipaddr_rule.qos_ipdst_offset >= 0) {
809 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
810 				&priv->extract.qos_key_extract,
811 				&flow->qos_rule,
812 				flow->ipaddr_rule.qos_ipdst_offset,
813 				NH_FLD_IP_DST, prot == NET_PROT_IPV4);
814 		if (ret) {
815 			DPAA2_PMD_ERR("QoS dst address reorder failed");
816 			return -1;
817 		}
818 		flow->ipaddr_rule.qos_ipdst_offset =
819 			dpaa2_flow_extract_key_offset(
820 				&priv->extract.qos_key_extract,
821 				prot, NH_FLD_IP_DST);
822 	}
823 
824 	if (flow->ipaddr_rule.fs_ipsrc_offset >= 0) {
825 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
826 				&priv->extract.tc_key_extract[fs_group],
827 				&flow->fs_rule,
828 				flow->ipaddr_rule.fs_ipsrc_offset,
829 				NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
830 		if (ret) {
831 			DPAA2_PMD_ERR("FS src address reorder failed");
832 			return -1;
833 		}
834 		flow->ipaddr_rule.fs_ipsrc_offset =
835 			dpaa2_flow_extract_key_offset(
836 				&priv->extract.tc_key_extract[fs_group],
837 				prot, NH_FLD_IP_SRC);
838 	}
839 	if (flow->ipaddr_rule.fs_ipdst_offset >= 0) {
840 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
841 				&priv->extract.tc_key_extract[fs_group],
842 				&flow->fs_rule,
843 				flow->ipaddr_rule.fs_ipdst_offset,
844 				NH_FLD_IP_DST, prot == NET_PROT_IPV4);
845 		if (ret) {
846 			DPAA2_PMD_ERR("FS dst address reorder failed");
847 			return -1;
848 		}
849 		flow->ipaddr_rule.fs_ipdst_offset =
850 			dpaa2_flow_extract_key_offset(
851 				&priv->extract.tc_key_extract[fs_group],
852 				prot, NH_FLD_IP_DST);
853 	}
854 
855 	return 0;
856 }
857 
858 static int
859 dpaa2_flow_extract_support(
860 	const uint8_t *mask_src,
861 	enum rte_flow_item_type type)
862 {
863 	char mask[64];
864 	int i, size = 0;
865 	const char *mask_support = 0;
866 
867 	switch (type) {
868 	case RTE_FLOW_ITEM_TYPE_ETH:
869 		mask_support = (const char *)&dpaa2_flow_item_eth_mask;
870 		size = sizeof(struct rte_flow_item_eth);
871 		break;
872 	case RTE_FLOW_ITEM_TYPE_VLAN:
873 		mask_support = (const char *)&dpaa2_flow_item_vlan_mask;
874 		size = sizeof(struct rte_flow_item_vlan);
875 		break;
876 	case RTE_FLOW_ITEM_TYPE_IPV4:
877 		mask_support = (const char *)&dpaa2_flow_item_ipv4_mask;
878 		size = sizeof(struct rte_flow_item_ipv4);
879 		break;
880 	case RTE_FLOW_ITEM_TYPE_IPV6:
881 		mask_support = (const char *)&dpaa2_flow_item_ipv6_mask;
882 		size = sizeof(struct rte_flow_item_ipv6);
883 		break;
884 	case RTE_FLOW_ITEM_TYPE_ICMP:
885 		mask_support = (const char *)&dpaa2_flow_item_icmp_mask;
886 		size = sizeof(struct rte_flow_item_icmp);
887 		break;
888 	case RTE_FLOW_ITEM_TYPE_UDP:
889 		mask_support = (const char *)&dpaa2_flow_item_udp_mask;
890 		size = sizeof(struct rte_flow_item_udp);
891 		break;
892 	case RTE_FLOW_ITEM_TYPE_TCP:
893 		mask_support = (const char *)&dpaa2_flow_item_tcp_mask;
894 		size = sizeof(struct rte_flow_item_tcp);
895 		break;
896 	case RTE_FLOW_ITEM_TYPE_SCTP:
897 		mask_support = (const char *)&dpaa2_flow_item_sctp_mask;
898 		size = sizeof(struct rte_flow_item_sctp);
899 		break;
900 	case RTE_FLOW_ITEM_TYPE_GRE:
901 		mask_support = (const char *)&dpaa2_flow_item_gre_mask;
902 		size = sizeof(struct rte_flow_item_gre);
903 		break;
904 	default:
905 		return -1;
906 	}
907 
908 	memcpy(mask, mask_support, size);
909 
910 	for (i = 0; i < size; i++)
911 		mask[i] = (mask[i] | mask_src[i]);
912 
913 	if (memcmp(mask, mask_support, size))
914 		return -1;
915 
916 	return 0;
917 }
918 
919 static int
920 dpaa2_configure_flow_eth(struct rte_flow *flow,
921 			 struct rte_eth_dev *dev,
922 			 const struct rte_flow_attr *attr,
923 			 const struct rte_flow_item *pattern,
924 			 const struct rte_flow_action actions[] __rte_unused,
925 			 struct rte_flow_error *error __rte_unused,
926 			 int *device_configured)
927 {
928 	int index, ret;
929 	int local_cfg = 0;
930 	uint32_t group;
931 	const struct rte_flow_item_eth *spec, *mask;
932 
933 	/* TODO: Currently upper bound of range parameter is not implemented */
934 	const struct rte_flow_item_eth *last __rte_unused;
935 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
936 	const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0};
937 
938 	group = attr->group;
939 
940 	/* Parse pattern list to get the matching parameters */
941 	spec    = (const struct rte_flow_item_eth *)pattern->spec;
942 	last    = (const struct rte_flow_item_eth *)pattern->last;
943 	mask    = (const struct rte_flow_item_eth *)
944 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_eth_mask);
945 	if (!spec) {
946 		/* Don't care any field of eth header,
947 		 * only care eth protocol.
948 		 */
949 		DPAA2_PMD_WARN("No pattern spec for Eth flow, just skip");
950 		return 0;
951 	}
952 
953 	/* Get traffic class index and flow id to be configured */
954 	flow->tc_id = group;
955 	flow->tc_index = attr->priority;
956 
957 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
958 		RTE_FLOW_ITEM_TYPE_ETH)) {
959 		DPAA2_PMD_WARN("Extract field(s) of ethernet not support.");
960 
961 		return -1;
962 	}
963 
964 	if (memcmp((const char *)&mask->hdr.src_addr, zero_cmp, RTE_ETHER_ADDR_LEN)) {
965 		index = dpaa2_flow_extract_search(
966 				&priv->extract.qos_key_extract.dpkg,
967 				NET_PROT_ETH, NH_FLD_ETH_SA);
968 		if (index < 0) {
969 			ret = dpaa2_flow_extract_add(
970 					&priv->extract.qos_key_extract,
971 					NET_PROT_ETH, NH_FLD_ETH_SA,
972 					RTE_ETHER_ADDR_LEN);
973 			if (ret) {
974 				DPAA2_PMD_ERR("QoS Extract add ETH_SA failed.");
975 
976 				return -1;
977 			}
978 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
979 		}
980 		index = dpaa2_flow_extract_search(
981 				&priv->extract.tc_key_extract[group].dpkg,
982 				NET_PROT_ETH, NH_FLD_ETH_SA);
983 		if (index < 0) {
984 			ret = dpaa2_flow_extract_add(
985 					&priv->extract.tc_key_extract[group],
986 					NET_PROT_ETH, NH_FLD_ETH_SA,
987 					RTE_ETHER_ADDR_LEN);
988 			if (ret) {
989 				DPAA2_PMD_ERR("FS Extract add ETH_SA failed.");
990 				return -1;
991 			}
992 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
993 		}
994 
995 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
996 		if (ret) {
997 			DPAA2_PMD_ERR(
998 				"Move ipaddr before ETH_SA rule set failed");
999 			return -1;
1000 		}
1001 
1002 		ret = dpaa2_flow_rule_data_set(
1003 				&priv->extract.qos_key_extract,
1004 				&flow->qos_rule,
1005 				NET_PROT_ETH,
1006 				NH_FLD_ETH_SA,
1007 				&spec->hdr.src_addr.addr_bytes,
1008 				&mask->hdr.src_addr.addr_bytes,
1009 				sizeof(struct rte_ether_addr));
1010 		if (ret) {
1011 			DPAA2_PMD_ERR("QoS NH_FLD_ETH_SA rule data set failed");
1012 			return -1;
1013 		}
1014 
1015 		ret = dpaa2_flow_rule_data_set(
1016 				&priv->extract.tc_key_extract[group],
1017 				&flow->fs_rule,
1018 				NET_PROT_ETH,
1019 				NH_FLD_ETH_SA,
1020 				&spec->hdr.src_addr.addr_bytes,
1021 				&mask->hdr.src_addr.addr_bytes,
1022 				sizeof(struct rte_ether_addr));
1023 		if (ret) {
1024 			DPAA2_PMD_ERR("FS NH_FLD_ETH_SA rule data set failed");
1025 			return -1;
1026 		}
1027 	}
1028 
1029 	if (memcmp((const char *)&mask->hdr.dst_addr, zero_cmp, RTE_ETHER_ADDR_LEN)) {
1030 		index = dpaa2_flow_extract_search(
1031 				&priv->extract.qos_key_extract.dpkg,
1032 				NET_PROT_ETH, NH_FLD_ETH_DA);
1033 		if (index < 0) {
1034 			ret = dpaa2_flow_extract_add(
1035 					&priv->extract.qos_key_extract,
1036 					NET_PROT_ETH, NH_FLD_ETH_DA,
1037 					RTE_ETHER_ADDR_LEN);
1038 			if (ret) {
1039 				DPAA2_PMD_ERR("QoS Extract add ETH_DA failed.");
1040 
1041 				return -1;
1042 			}
1043 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1044 		}
1045 
1046 		index = dpaa2_flow_extract_search(
1047 				&priv->extract.tc_key_extract[group].dpkg,
1048 				NET_PROT_ETH, NH_FLD_ETH_DA);
1049 		if (index < 0) {
1050 			ret = dpaa2_flow_extract_add(
1051 					&priv->extract.tc_key_extract[group],
1052 					NET_PROT_ETH, NH_FLD_ETH_DA,
1053 					RTE_ETHER_ADDR_LEN);
1054 			if (ret) {
1055 				DPAA2_PMD_ERR("FS Extract add ETH_DA failed.");
1056 
1057 				return -1;
1058 			}
1059 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1060 		}
1061 
1062 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1063 		if (ret) {
1064 			DPAA2_PMD_ERR(
1065 				"Move ipaddr before ETH DA rule set failed");
1066 			return -1;
1067 		}
1068 
1069 		ret = dpaa2_flow_rule_data_set(
1070 				&priv->extract.qos_key_extract,
1071 				&flow->qos_rule,
1072 				NET_PROT_ETH,
1073 				NH_FLD_ETH_DA,
1074 				&spec->hdr.dst_addr.addr_bytes,
1075 				&mask->hdr.dst_addr.addr_bytes,
1076 				sizeof(struct rte_ether_addr));
1077 		if (ret) {
1078 			DPAA2_PMD_ERR("QoS NH_FLD_ETH_DA rule data set failed");
1079 			return -1;
1080 		}
1081 
1082 		ret = dpaa2_flow_rule_data_set(
1083 				&priv->extract.tc_key_extract[group],
1084 				&flow->fs_rule,
1085 				NET_PROT_ETH,
1086 				NH_FLD_ETH_DA,
1087 				&spec->hdr.dst_addr.addr_bytes,
1088 				&mask->hdr.dst_addr.addr_bytes,
1089 				sizeof(struct rte_ether_addr));
1090 		if (ret) {
1091 			DPAA2_PMD_ERR("FS NH_FLD_ETH_DA rule data set failed");
1092 			return -1;
1093 		}
1094 	}
1095 
1096 	if (memcmp((const char *)&mask->hdr.ether_type, zero_cmp, sizeof(rte_be16_t))) {
1097 		index = dpaa2_flow_extract_search(
1098 				&priv->extract.qos_key_extract.dpkg,
1099 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1100 		if (index < 0) {
1101 			ret = dpaa2_flow_extract_add(
1102 					&priv->extract.qos_key_extract,
1103 					NET_PROT_ETH, NH_FLD_ETH_TYPE,
1104 					RTE_ETHER_TYPE_LEN);
1105 			if (ret) {
1106 				DPAA2_PMD_ERR("QoS Extract add ETH_TYPE failed.");
1107 
1108 				return -1;
1109 			}
1110 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1111 		}
1112 		index = dpaa2_flow_extract_search(
1113 				&priv->extract.tc_key_extract[group].dpkg,
1114 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1115 		if (index < 0) {
1116 			ret = dpaa2_flow_extract_add(
1117 					&priv->extract.tc_key_extract[group],
1118 					NET_PROT_ETH, NH_FLD_ETH_TYPE,
1119 					RTE_ETHER_TYPE_LEN);
1120 			if (ret) {
1121 				DPAA2_PMD_ERR("FS Extract add ETH_TYPE failed.");
1122 
1123 				return -1;
1124 			}
1125 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1126 		}
1127 
1128 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1129 		if (ret) {
1130 			DPAA2_PMD_ERR(
1131 				"Move ipaddr before ETH TYPE rule set failed");
1132 				return -1;
1133 		}
1134 
1135 		ret = dpaa2_flow_rule_data_set(
1136 				&priv->extract.qos_key_extract,
1137 				&flow->qos_rule,
1138 				NET_PROT_ETH,
1139 				NH_FLD_ETH_TYPE,
1140 				&spec->hdr.ether_type,
1141 				&mask->hdr.ether_type,
1142 				sizeof(rte_be16_t));
1143 		if (ret) {
1144 			DPAA2_PMD_ERR("QoS NH_FLD_ETH_TYPE rule data set failed");
1145 			return -1;
1146 		}
1147 
1148 		ret = dpaa2_flow_rule_data_set(
1149 				&priv->extract.tc_key_extract[group],
1150 				&flow->fs_rule,
1151 				NET_PROT_ETH,
1152 				NH_FLD_ETH_TYPE,
1153 				&spec->hdr.ether_type,
1154 				&mask->hdr.ether_type,
1155 				sizeof(rte_be16_t));
1156 		if (ret) {
1157 			DPAA2_PMD_ERR("FS NH_FLD_ETH_TYPE rule data set failed");
1158 			return -1;
1159 		}
1160 	}
1161 
1162 	(*device_configured) |= local_cfg;
1163 
1164 	return 0;
1165 }
1166 
/* Build QoS and FS (TC) classification rules for a VLAN pattern item.
 *
 * @flow:  flow being built; qos_rule/fs_rule and tc_id/tc_index are updated.
 * @dev:   ethernet device whose private extract tables are modified.
 * @attr:  flow attributes; attr->group selects the traffic class,
 *         attr->priority the index within it.
 * @device_configured: OR-accumulates DPAA2_QOS/FS_TABLE_RECONFIGURE bits
 *         when a new extract had to be added (tables must be re-applied).
 *
 * With no spec, only VLAN presence matters, so the rule discriminates on
 * ETH_TYPE == 0x8100 instead of any VLAN field.  With a spec, only the
 * TCI field is supported for matching.
 *
 * Returns 0 on success, -1 on any failure.
 */
static int
dpaa2_configure_flow_vlan(struct rte_flow *flow,
			  struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item *pattern,
			  const struct rte_flow_action actions[] __rte_unused,
			  struct rte_flow_error *error __rte_unused,
			  int *device_configured)
{
	int index, ret;
	int local_cfg = 0;
	uint32_t group;
	const struct rte_flow_item_vlan *spec, *mask;

	const struct rte_flow_item_vlan *last __rte_unused;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	group = attr->group;

	/* Parse pattern list to get the matching parameters */
	spec    = (const struct rte_flow_item_vlan *)pattern->spec;
	last    = (const struct rte_flow_item_vlan *)pattern->last;
	/* Fall back to the driver default mask when none is given. */
	mask    = (const struct rte_flow_item_vlan *)
		(pattern->mask ? pattern->mask : &dpaa2_flow_item_vlan_mask);

	/* Get traffic class index and flow id to be configured */
	flow->tc_id = group;
	flow->tc_index = attr->priority;

	if (!spec) {
		/* Don't care any field of vlan header,
		 * only care vlan protocol.
		 */
		/* Eth type is actually used for vLan classification.
		 */
		struct proto_discrimination proto;

		/* Make sure ETH_TYPE is part of the QoS key; add it if
		 * missing and flag the QoS table for reconfiguration.
		 */
		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_ETH, NH_FLD_ETH_TYPE);
		if (index < 0) {
			ret = dpaa2_flow_proto_discrimination_extract(
						&priv->extract.qos_key_extract,
						RTE_FLOW_ITEM_TYPE_ETH);
			if (ret) {
				DPAA2_PMD_ERR(
				"QoS Ext ETH_TYPE to discriminate vLan failed");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		/* Same for the per-TC (FS) key of this group. */
		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_ETH, NH_FLD_ETH_TYPE);
		if (index < 0) {
			ret = dpaa2_flow_proto_discrimination_extract(
					&priv->extract.tc_key_extract[group],
					RTE_FLOW_ITEM_TYPE_ETH);
			if (ret) {
				DPAA2_PMD_ERR(
				"FS Ext ETH_TYPE to discriminate vLan failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		/* IP address fields must stay at the tail of the key;
		 * re-position them before setting this rule's data.
		 */
		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
			"Move ipaddr before vLan discrimination set failed");
			return -1;
		}

		proto.type = RTE_FLOW_ITEM_TYPE_ETH;
		proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
							proto, group);
		if (ret) {
			DPAA2_PMD_ERR("vLan discrimination rule set failed");
			return -1;
		}

		(*device_configured) |= local_cfg;

		return 0;
	}

	/* Reject masks touching VLAN fields the hardware cannot extract. */
	if (dpaa2_flow_extract_support((const uint8_t *)mask,
		RTE_FLOW_ITEM_TYPE_VLAN)) {
		DPAA2_PMD_WARN("Extract field(s) of vlan not support.");

		return -1;
	}

	/* Spec present but TCI fully wildcarded: nothing to match on. */
	if (!mask->hdr.vlan_tci)
		return 0;

	/* Ensure VLAN_TCI is extracted into the QoS key. */
	index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_VLAN, NH_FLD_VLAN_TCI);
	if (index < 0) {
		ret = dpaa2_flow_extract_add(
						&priv->extract.qos_key_extract,
						NET_PROT_VLAN,
						NH_FLD_VLAN_TCI,
						sizeof(rte_be16_t));
		if (ret) {
			DPAA2_PMD_ERR("QoS Extract add VLAN_TCI failed.");

			return -1;
		}
		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
	}

	/* Ensure VLAN_TCI is extracted into this group's FS key. */
	index = dpaa2_flow_extract_search(
			&priv->extract.tc_key_extract[group].dpkg,
			NET_PROT_VLAN, NH_FLD_VLAN_TCI);
	if (index < 0) {
		ret = dpaa2_flow_extract_add(
				&priv->extract.tc_key_extract[group],
				NET_PROT_VLAN,
				NH_FLD_VLAN_TCI,
				sizeof(rte_be16_t));
		if (ret) {
			DPAA2_PMD_ERR("FS Extract add VLAN_TCI failed.");

			return -1;
		}
		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
	}

	/* Keep IP address fields at the tail of the key layout. */
	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
	if (ret) {
		DPAA2_PMD_ERR(
			"Move ipaddr before VLAN TCI rule set failed");
		return -1;
	}

	/* Write TCI spec/mask bytes into the QoS rule... */
	ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
				&flow->qos_rule,
				NET_PROT_VLAN,
				NH_FLD_VLAN_TCI,
				&spec->hdr.vlan_tci,
				&mask->hdr.vlan_tci,
				sizeof(rte_be16_t));
	if (ret) {
		DPAA2_PMD_ERR("QoS NH_FLD_VLAN_TCI rule data set failed");
		return -1;
	}

	/* ...and into the FS rule of the selected traffic class. */
	ret = dpaa2_flow_rule_data_set(
			&priv->extract.tc_key_extract[group],
			&flow->fs_rule,
			NET_PROT_VLAN,
			NH_FLD_VLAN_TCI,
			&spec->hdr.vlan_tci,
			&mask->hdr.vlan_tci,
			sizeof(rte_be16_t));
	if (ret) {
		DPAA2_PMD_ERR("FS NH_FLD_VLAN_TCI rule data set failed");
		return -1;
	}

	(*device_configured) |= local_cfg;

	return 0;
}
1337 
1338 static int
1339 dpaa2_configure_flow_ip_discrimation(
1340 	struct dpaa2_dev_priv *priv, struct rte_flow *flow,
1341 	const struct rte_flow_item *pattern,
1342 	int *local_cfg,	int *device_configured,
1343 	uint32_t group)
1344 {
1345 	int index, ret;
1346 	struct proto_discrimination proto;
1347 
1348 	index = dpaa2_flow_extract_search(
1349 			&priv->extract.qos_key_extract.dpkg,
1350 			NET_PROT_ETH, NH_FLD_ETH_TYPE);
1351 	if (index < 0) {
1352 		ret = dpaa2_flow_proto_discrimination_extract(
1353 				&priv->extract.qos_key_extract,
1354 				RTE_FLOW_ITEM_TYPE_ETH);
1355 		if (ret) {
1356 			DPAA2_PMD_ERR(
1357 			"QoS Extract ETH_TYPE to discriminate IP failed.");
1358 			return -1;
1359 		}
1360 		(*local_cfg) |= DPAA2_QOS_TABLE_RECONFIGURE;
1361 	}
1362 
1363 	index = dpaa2_flow_extract_search(
1364 			&priv->extract.tc_key_extract[group].dpkg,
1365 			NET_PROT_ETH, NH_FLD_ETH_TYPE);
1366 	if (index < 0) {
1367 		ret = dpaa2_flow_proto_discrimination_extract(
1368 				&priv->extract.tc_key_extract[group],
1369 				RTE_FLOW_ITEM_TYPE_ETH);
1370 		if (ret) {
1371 			DPAA2_PMD_ERR(
1372 			"FS Extract ETH_TYPE to discriminate IP failed.");
1373 			return -1;
1374 		}
1375 		(*local_cfg) |= DPAA2_FS_TABLE_RECONFIGURE;
1376 	}
1377 
1378 	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1379 	if (ret) {
1380 		DPAA2_PMD_ERR(
1381 			"Move ipaddr before IP discrimination set failed");
1382 		return -1;
1383 	}
1384 
1385 	proto.type = RTE_FLOW_ITEM_TYPE_ETH;
1386 	if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4)
1387 		proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
1388 	else
1389 		proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
1390 	ret = dpaa2_flow_proto_discrimination_rule(priv, flow, proto, group);
1391 	if (ret) {
1392 		DPAA2_PMD_ERR("IP discrimination rule set failed");
1393 		return -1;
1394 	}
1395 
1396 	(*device_configured) |= (*local_cfg);
1397 
1398 	return 0;
1399 }
1400 
1401 
1402 static int
1403 dpaa2_configure_flow_generic_ip(
1404 	struct rte_flow *flow,
1405 	struct rte_eth_dev *dev,
1406 	const struct rte_flow_attr *attr,
1407 	const struct rte_flow_item *pattern,
1408 	const struct rte_flow_action actions[] __rte_unused,
1409 	struct rte_flow_error *error __rte_unused,
1410 	int *device_configured)
1411 {
1412 	int index, ret;
1413 	int local_cfg = 0;
1414 	uint32_t group;
1415 	const struct rte_flow_item_ipv4 *spec_ipv4 = 0,
1416 		*mask_ipv4 = 0;
1417 	const struct rte_flow_item_ipv6 *spec_ipv6 = 0,
1418 		*mask_ipv6 = 0;
1419 	const void *key, *mask;
1420 	enum net_prot prot;
1421 
1422 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1423 	const char zero_cmp[NH_FLD_IPV6_ADDR_SIZE] = {0};
1424 	int size;
1425 
1426 	group = attr->group;
1427 
1428 	/* Parse pattern list to get the matching parameters */
1429 	if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1430 		spec_ipv4 = (const struct rte_flow_item_ipv4 *)pattern->spec;
1431 		mask_ipv4 = (const struct rte_flow_item_ipv4 *)
1432 			(pattern->mask ? pattern->mask :
1433 					&dpaa2_flow_item_ipv4_mask);
1434 	} else {
1435 		spec_ipv6 = (const struct rte_flow_item_ipv6 *)pattern->spec;
1436 		mask_ipv6 = (const struct rte_flow_item_ipv6 *)
1437 			(pattern->mask ? pattern->mask :
1438 					&dpaa2_flow_item_ipv6_mask);
1439 	}
1440 
1441 	/* Get traffic class index and flow id to be configured */
1442 	flow->tc_id = group;
1443 	flow->tc_index = attr->priority;
1444 
1445 	ret = dpaa2_configure_flow_ip_discrimation(priv,
1446 			flow, pattern, &local_cfg,
1447 			device_configured, group);
1448 	if (ret) {
1449 		DPAA2_PMD_ERR("IP discrimination failed!");
1450 		return -1;
1451 	}
1452 
1453 	if (!spec_ipv4 && !spec_ipv6)
1454 		return 0;
1455 
1456 	if (mask_ipv4) {
1457 		if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv4,
1458 			RTE_FLOW_ITEM_TYPE_IPV4)) {
1459 			DPAA2_PMD_WARN("Extract field(s) of IPv4 not support.");
1460 
1461 			return -1;
1462 		}
1463 	}
1464 
1465 	if (mask_ipv6) {
1466 		if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv6,
1467 			RTE_FLOW_ITEM_TYPE_IPV6)) {
1468 			DPAA2_PMD_WARN("Extract field(s) of IPv6 not support.");
1469 
1470 			return -1;
1471 		}
1472 	}
1473 
1474 	if (mask_ipv4 && (mask_ipv4->hdr.src_addr ||
1475 		mask_ipv4->hdr.dst_addr)) {
1476 		flow->ipaddr_rule.ipaddr_type = FLOW_IPV4_ADDR;
1477 	} else if (mask_ipv6 &&
1478 		(memcmp(&mask_ipv6->hdr.src_addr,
1479 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE) ||
1480 		memcmp(&mask_ipv6->hdr.dst_addr,
1481 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1482 		flow->ipaddr_rule.ipaddr_type = FLOW_IPV6_ADDR;
1483 	}
1484 
1485 	if ((mask_ipv4 && mask_ipv4->hdr.src_addr) ||
1486 		(mask_ipv6 &&
1487 			memcmp(&mask_ipv6->hdr.src_addr,
1488 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1489 		index = dpaa2_flow_extract_search(
1490 				&priv->extract.qos_key_extract.dpkg,
1491 				NET_PROT_IP, NH_FLD_IP_SRC);
1492 		if (index < 0) {
1493 			ret = dpaa2_flow_extract_add(
1494 					&priv->extract.qos_key_extract,
1495 					NET_PROT_IP,
1496 					NH_FLD_IP_SRC,
1497 					0);
1498 			if (ret) {
1499 				DPAA2_PMD_ERR("QoS Extract add IP_SRC failed.");
1500 
1501 				return -1;
1502 			}
1503 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1504 		}
1505 
1506 		index = dpaa2_flow_extract_search(
1507 				&priv->extract.tc_key_extract[group].dpkg,
1508 				NET_PROT_IP, NH_FLD_IP_SRC);
1509 		if (index < 0) {
1510 			ret = dpaa2_flow_extract_add(
1511 					&priv->extract.tc_key_extract[group],
1512 					NET_PROT_IP,
1513 					NH_FLD_IP_SRC,
1514 					0);
1515 			if (ret) {
1516 				DPAA2_PMD_ERR("FS Extract add IP_SRC failed.");
1517 
1518 				return -1;
1519 			}
1520 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1521 		}
1522 
1523 		if (spec_ipv4)
1524 			key = &spec_ipv4->hdr.src_addr;
1525 		else
1526 			key = &spec_ipv6->hdr.src_addr;
1527 		if (mask_ipv4) {
1528 			mask = &mask_ipv4->hdr.src_addr;
1529 			size = NH_FLD_IPV4_ADDR_SIZE;
1530 			prot = NET_PROT_IPV4;
1531 		} else {
1532 			mask = &mask_ipv6->hdr.src_addr;
1533 			size = NH_FLD_IPV6_ADDR_SIZE;
1534 			prot = NET_PROT_IPV6;
1535 		}
1536 
1537 		ret = dpaa2_flow_rule_data_set(
1538 				&priv->extract.qos_key_extract,
1539 				&flow->qos_rule,
1540 				prot, NH_FLD_IP_SRC,
1541 				key,	mask, size);
1542 		if (ret) {
1543 			DPAA2_PMD_ERR("QoS NH_FLD_IP_SRC rule data set failed");
1544 			return -1;
1545 		}
1546 
1547 		ret = dpaa2_flow_rule_data_set(
1548 				&priv->extract.tc_key_extract[group],
1549 				&flow->fs_rule,
1550 				prot, NH_FLD_IP_SRC,
1551 				key,	mask, size);
1552 		if (ret) {
1553 			DPAA2_PMD_ERR("FS NH_FLD_IP_SRC rule data set failed");
1554 			return -1;
1555 		}
1556 
1557 		flow->ipaddr_rule.qos_ipsrc_offset =
1558 			dpaa2_flow_extract_key_offset(
1559 				&priv->extract.qos_key_extract,
1560 				prot, NH_FLD_IP_SRC);
1561 		flow->ipaddr_rule.fs_ipsrc_offset =
1562 			dpaa2_flow_extract_key_offset(
1563 				&priv->extract.tc_key_extract[group],
1564 				prot, NH_FLD_IP_SRC);
1565 	}
1566 
1567 	if ((mask_ipv4 && mask_ipv4->hdr.dst_addr) ||
1568 		(mask_ipv6 &&
1569 			memcmp(&mask_ipv6->hdr.dst_addr,
1570 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1571 		index = dpaa2_flow_extract_search(
1572 				&priv->extract.qos_key_extract.dpkg,
1573 				NET_PROT_IP, NH_FLD_IP_DST);
1574 		if (index < 0) {
1575 			if (mask_ipv4)
1576 				size = NH_FLD_IPV4_ADDR_SIZE;
1577 			else
1578 				size = NH_FLD_IPV6_ADDR_SIZE;
1579 			ret = dpaa2_flow_extract_add(
1580 					&priv->extract.qos_key_extract,
1581 					NET_PROT_IP,
1582 					NH_FLD_IP_DST,
1583 					size);
1584 			if (ret) {
1585 				DPAA2_PMD_ERR("QoS Extract add IP_DST failed.");
1586 
1587 				return -1;
1588 			}
1589 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1590 		}
1591 
1592 		index = dpaa2_flow_extract_search(
1593 				&priv->extract.tc_key_extract[group].dpkg,
1594 				NET_PROT_IP, NH_FLD_IP_DST);
1595 		if (index < 0) {
1596 			if (mask_ipv4)
1597 				size = NH_FLD_IPV4_ADDR_SIZE;
1598 			else
1599 				size = NH_FLD_IPV6_ADDR_SIZE;
1600 			ret = dpaa2_flow_extract_add(
1601 					&priv->extract.tc_key_extract[group],
1602 					NET_PROT_IP,
1603 					NH_FLD_IP_DST,
1604 					size);
1605 			if (ret) {
1606 				DPAA2_PMD_ERR("FS Extract add IP_DST failed.");
1607 
1608 				return -1;
1609 			}
1610 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1611 		}
1612 
1613 		if (spec_ipv4)
1614 			key = &spec_ipv4->hdr.dst_addr;
1615 		else
1616 			key = &spec_ipv6->hdr.dst_addr;
1617 		if (mask_ipv4) {
1618 			mask = &mask_ipv4->hdr.dst_addr;
1619 			size = NH_FLD_IPV4_ADDR_SIZE;
1620 			prot = NET_PROT_IPV4;
1621 		} else {
1622 			mask = &mask_ipv6->hdr.dst_addr;
1623 			size = NH_FLD_IPV6_ADDR_SIZE;
1624 			prot = NET_PROT_IPV6;
1625 		}
1626 
1627 		ret = dpaa2_flow_rule_data_set(
1628 				&priv->extract.qos_key_extract,
1629 				&flow->qos_rule,
1630 				prot, NH_FLD_IP_DST,
1631 				key,	mask, size);
1632 		if (ret) {
1633 			DPAA2_PMD_ERR("QoS NH_FLD_IP_DST rule data set failed");
1634 			return -1;
1635 		}
1636 
1637 		ret = dpaa2_flow_rule_data_set(
1638 				&priv->extract.tc_key_extract[group],
1639 				&flow->fs_rule,
1640 				prot, NH_FLD_IP_DST,
1641 				key,	mask, size);
1642 		if (ret) {
1643 			DPAA2_PMD_ERR("FS NH_FLD_IP_DST rule data set failed");
1644 			return -1;
1645 		}
1646 		flow->ipaddr_rule.qos_ipdst_offset =
1647 			dpaa2_flow_extract_key_offset(
1648 				&priv->extract.qos_key_extract,
1649 				prot, NH_FLD_IP_DST);
1650 		flow->ipaddr_rule.fs_ipdst_offset =
1651 			dpaa2_flow_extract_key_offset(
1652 				&priv->extract.tc_key_extract[group],
1653 				prot, NH_FLD_IP_DST);
1654 	}
1655 
1656 	if ((mask_ipv4 && mask_ipv4->hdr.next_proto_id) ||
1657 		(mask_ipv6 && mask_ipv6->hdr.proto)) {
1658 		index = dpaa2_flow_extract_search(
1659 				&priv->extract.qos_key_extract.dpkg,
1660 				NET_PROT_IP, NH_FLD_IP_PROTO);
1661 		if (index < 0) {
1662 			ret = dpaa2_flow_extract_add(
1663 				&priv->extract.qos_key_extract,
1664 				NET_PROT_IP,
1665 				NH_FLD_IP_PROTO,
1666 				NH_FLD_IP_PROTO_SIZE);
1667 			if (ret) {
1668 				DPAA2_PMD_ERR("QoS Extract add IP_DST failed.");
1669 
1670 				return -1;
1671 			}
1672 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1673 		}
1674 
1675 		index = dpaa2_flow_extract_search(
1676 				&priv->extract.tc_key_extract[group].dpkg,
1677 				NET_PROT_IP, NH_FLD_IP_PROTO);
1678 		if (index < 0) {
1679 			ret = dpaa2_flow_extract_add(
1680 					&priv->extract.tc_key_extract[group],
1681 					NET_PROT_IP,
1682 					NH_FLD_IP_PROTO,
1683 					NH_FLD_IP_PROTO_SIZE);
1684 			if (ret) {
1685 				DPAA2_PMD_ERR("FS Extract add IP_DST failed.");
1686 
1687 				return -1;
1688 			}
1689 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1690 		}
1691 
1692 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1693 		if (ret) {
1694 			DPAA2_PMD_ERR(
1695 				"Move ipaddr after NH_FLD_IP_PROTO rule set failed");
1696 			return -1;
1697 		}
1698 
1699 		if (spec_ipv4)
1700 			key = &spec_ipv4->hdr.next_proto_id;
1701 		else
1702 			key = &spec_ipv6->hdr.proto;
1703 		if (mask_ipv4)
1704 			mask = &mask_ipv4->hdr.next_proto_id;
1705 		else
1706 			mask = &mask_ipv6->hdr.proto;
1707 
1708 		ret = dpaa2_flow_rule_data_set(
1709 				&priv->extract.qos_key_extract,
1710 				&flow->qos_rule,
1711 				NET_PROT_IP,
1712 				NH_FLD_IP_PROTO,
1713 				key,	mask, NH_FLD_IP_PROTO_SIZE);
1714 		if (ret) {
1715 			DPAA2_PMD_ERR("QoS NH_FLD_IP_PROTO rule data set failed");
1716 			return -1;
1717 		}
1718 
1719 		ret = dpaa2_flow_rule_data_set(
1720 				&priv->extract.tc_key_extract[group],
1721 				&flow->fs_rule,
1722 				NET_PROT_IP,
1723 				NH_FLD_IP_PROTO,
1724 				key,	mask, NH_FLD_IP_PROTO_SIZE);
1725 		if (ret) {
1726 			DPAA2_PMD_ERR("FS NH_FLD_IP_PROTO rule data set failed");
1727 			return -1;
1728 		}
1729 	}
1730 
1731 	(*device_configured) |= local_cfg;
1732 
1733 	return 0;
1734 }
1735 
1736 static int
1737 dpaa2_configure_flow_icmp(struct rte_flow *flow,
1738 			  struct rte_eth_dev *dev,
1739 			  const struct rte_flow_attr *attr,
1740 			  const struct rte_flow_item *pattern,
1741 			  const struct rte_flow_action actions[] __rte_unused,
1742 			  struct rte_flow_error *error __rte_unused,
1743 			  int *device_configured)
1744 {
1745 	int index, ret;
1746 	int local_cfg = 0;
1747 	uint32_t group;
1748 	const struct rte_flow_item_icmp *spec, *mask;
1749 
1750 	const struct rte_flow_item_icmp *last __rte_unused;
1751 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1752 
1753 	group = attr->group;
1754 
1755 	/* Parse pattern list to get the matching parameters */
1756 	spec    = (const struct rte_flow_item_icmp *)pattern->spec;
1757 	last    = (const struct rte_flow_item_icmp *)pattern->last;
1758 	mask    = (const struct rte_flow_item_icmp *)
1759 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_icmp_mask);
1760 
1761 	/* Get traffic class index and flow id to be configured */
1762 	flow->tc_id = group;
1763 	flow->tc_index = attr->priority;
1764 
1765 	if (!spec) {
1766 		/* Don't care any field of ICMP header,
1767 		 * only care ICMP protocol.
1768 		 * Example: flow create 0 ingress pattern icmp /
1769 		 */
1770 		/* Next proto of Generical IP is actually used
1771 		 * for ICMP identification.
1772 		 */
1773 		struct proto_discrimination proto;
1774 
1775 		index = dpaa2_flow_extract_search(
1776 				&priv->extract.qos_key_extract.dpkg,
1777 				NET_PROT_IP, NH_FLD_IP_PROTO);
1778 		if (index < 0) {
1779 			ret = dpaa2_flow_proto_discrimination_extract(
1780 					&priv->extract.qos_key_extract,
1781 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1782 			if (ret) {
1783 				DPAA2_PMD_ERR(
1784 					"QoS Extract IP protocol to discriminate ICMP failed.");
1785 
1786 				return -1;
1787 			}
1788 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1789 		}
1790 
1791 		index = dpaa2_flow_extract_search(
1792 				&priv->extract.tc_key_extract[group].dpkg,
1793 				NET_PROT_IP, NH_FLD_IP_PROTO);
1794 		if (index < 0) {
1795 			ret = dpaa2_flow_proto_discrimination_extract(
1796 					&priv->extract.tc_key_extract[group],
1797 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1798 			if (ret) {
1799 				DPAA2_PMD_ERR(
1800 					"FS Extract IP protocol to discriminate ICMP failed.");
1801 
1802 				return -1;
1803 			}
1804 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1805 		}
1806 
1807 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1808 		if (ret) {
1809 			DPAA2_PMD_ERR(
1810 				"Move IP addr before ICMP discrimination set failed");
1811 			return -1;
1812 		}
1813 
1814 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
1815 		proto.ip_proto = IPPROTO_ICMP;
1816 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1817 							proto, group);
1818 		if (ret) {
1819 			DPAA2_PMD_ERR("ICMP discrimination rule set failed");
1820 			return -1;
1821 		}
1822 
1823 		(*device_configured) |= local_cfg;
1824 
1825 		return 0;
1826 	}
1827 
1828 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
1829 		RTE_FLOW_ITEM_TYPE_ICMP)) {
1830 		DPAA2_PMD_WARN("Extract field(s) of ICMP not support.");
1831 
1832 		return -1;
1833 	}
1834 
1835 	if (mask->hdr.icmp_type) {
1836 		index = dpaa2_flow_extract_search(
1837 				&priv->extract.qos_key_extract.dpkg,
1838 				NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
1839 		if (index < 0) {
1840 			ret = dpaa2_flow_extract_add(
1841 					&priv->extract.qos_key_extract,
1842 					NET_PROT_ICMP,
1843 					NH_FLD_ICMP_TYPE,
1844 					NH_FLD_ICMP_TYPE_SIZE);
1845 			if (ret) {
1846 				DPAA2_PMD_ERR("QoS Extract add ICMP_TYPE failed.");
1847 
1848 				return -1;
1849 			}
1850 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1851 		}
1852 
1853 		index = dpaa2_flow_extract_search(
1854 				&priv->extract.tc_key_extract[group].dpkg,
1855 				NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
1856 		if (index < 0) {
1857 			ret = dpaa2_flow_extract_add(
1858 					&priv->extract.tc_key_extract[group],
1859 					NET_PROT_ICMP,
1860 					NH_FLD_ICMP_TYPE,
1861 					NH_FLD_ICMP_TYPE_SIZE);
1862 			if (ret) {
1863 				DPAA2_PMD_ERR("FS Extract add ICMP_TYPE failed.");
1864 
1865 				return -1;
1866 			}
1867 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1868 		}
1869 
1870 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1871 		if (ret) {
1872 			DPAA2_PMD_ERR(
1873 				"Move ipaddr before ICMP TYPE set failed");
1874 			return -1;
1875 		}
1876 
1877 		ret = dpaa2_flow_rule_data_set(
1878 				&priv->extract.qos_key_extract,
1879 				&flow->qos_rule,
1880 				NET_PROT_ICMP,
1881 				NH_FLD_ICMP_TYPE,
1882 				&spec->hdr.icmp_type,
1883 				&mask->hdr.icmp_type,
1884 				NH_FLD_ICMP_TYPE_SIZE);
1885 		if (ret) {
1886 			DPAA2_PMD_ERR("QoS NH_FLD_ICMP_TYPE rule data set failed");
1887 			return -1;
1888 		}
1889 
1890 		ret = dpaa2_flow_rule_data_set(
1891 				&priv->extract.tc_key_extract[group],
1892 				&flow->fs_rule,
1893 				NET_PROT_ICMP,
1894 				NH_FLD_ICMP_TYPE,
1895 				&spec->hdr.icmp_type,
1896 				&mask->hdr.icmp_type,
1897 				NH_FLD_ICMP_TYPE_SIZE);
1898 		if (ret) {
1899 			DPAA2_PMD_ERR("FS NH_FLD_ICMP_TYPE rule data set failed");
1900 			return -1;
1901 		}
1902 	}
1903 
1904 	if (mask->hdr.icmp_code) {
1905 		index = dpaa2_flow_extract_search(
1906 				&priv->extract.qos_key_extract.dpkg,
1907 				NET_PROT_ICMP, NH_FLD_ICMP_CODE);
1908 		if (index < 0) {
1909 			ret = dpaa2_flow_extract_add(
1910 					&priv->extract.qos_key_extract,
1911 					NET_PROT_ICMP,
1912 					NH_FLD_ICMP_CODE,
1913 					NH_FLD_ICMP_CODE_SIZE);
1914 			if (ret) {
1915 				DPAA2_PMD_ERR("QoS Extract add ICMP_CODE failed.");
1916 
1917 				return -1;
1918 			}
1919 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1920 		}
1921 
1922 		index = dpaa2_flow_extract_search(
1923 				&priv->extract.tc_key_extract[group].dpkg,
1924 				NET_PROT_ICMP, NH_FLD_ICMP_CODE);
1925 		if (index < 0) {
1926 			ret = dpaa2_flow_extract_add(
1927 					&priv->extract.tc_key_extract[group],
1928 					NET_PROT_ICMP,
1929 					NH_FLD_ICMP_CODE,
1930 					NH_FLD_ICMP_CODE_SIZE);
1931 			if (ret) {
1932 				DPAA2_PMD_ERR("FS Extract add ICMP_CODE failed.");
1933 
1934 				return -1;
1935 			}
1936 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1937 		}
1938 
1939 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1940 		if (ret) {
1941 			DPAA2_PMD_ERR(
1942 				"Move ipaddr after ICMP CODE set failed");
1943 			return -1;
1944 		}
1945 
1946 		ret = dpaa2_flow_rule_data_set(
1947 				&priv->extract.qos_key_extract,
1948 				&flow->qos_rule,
1949 				NET_PROT_ICMP,
1950 				NH_FLD_ICMP_CODE,
1951 				&spec->hdr.icmp_code,
1952 				&mask->hdr.icmp_code,
1953 				NH_FLD_ICMP_CODE_SIZE);
1954 		if (ret) {
1955 			DPAA2_PMD_ERR("QoS NH_FLD_ICMP_CODE rule data set failed");
1956 			return -1;
1957 		}
1958 
1959 		ret = dpaa2_flow_rule_data_set(
1960 				&priv->extract.tc_key_extract[group],
1961 				&flow->fs_rule,
1962 				NET_PROT_ICMP,
1963 				NH_FLD_ICMP_CODE,
1964 				&spec->hdr.icmp_code,
1965 				&mask->hdr.icmp_code,
1966 				NH_FLD_ICMP_CODE_SIZE);
1967 		if (ret) {
1968 			DPAA2_PMD_ERR("FS NH_FLD_ICMP_CODE rule data set failed");
1969 			return -1;
1970 		}
1971 	}
1972 
1973 	(*device_configured) |= local_cfg;
1974 
1975 	return 0;
1976 }
1977 
1978 static int
1979 dpaa2_configure_flow_udp(struct rte_flow *flow,
1980 			 struct rte_eth_dev *dev,
1981 			  const struct rte_flow_attr *attr,
1982 			  const struct rte_flow_item *pattern,
1983 			  const struct rte_flow_action actions[] __rte_unused,
1984 			  struct rte_flow_error *error __rte_unused,
1985 			  int *device_configured)
1986 {
1987 	int index, ret;
1988 	int local_cfg = 0;
1989 	uint32_t group;
1990 	const struct rte_flow_item_udp *spec, *mask;
1991 
1992 	const struct rte_flow_item_udp *last __rte_unused;
1993 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1994 
1995 	group = attr->group;
1996 
1997 	/* Parse pattern list to get the matching parameters */
1998 	spec    = (const struct rte_flow_item_udp *)pattern->spec;
1999 	last    = (const struct rte_flow_item_udp *)pattern->last;
2000 	mask    = (const struct rte_flow_item_udp *)
2001 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_udp_mask);
2002 
2003 	/* Get traffic class index and flow id to be configured */
2004 	flow->tc_id = group;
2005 	flow->tc_index = attr->priority;
2006 
2007 	if (!spec || !mc_l4_port_identification) {
2008 		struct proto_discrimination proto;
2009 
2010 		index = dpaa2_flow_extract_search(
2011 				&priv->extract.qos_key_extract.dpkg,
2012 				NET_PROT_IP, NH_FLD_IP_PROTO);
2013 		if (index < 0) {
2014 			ret = dpaa2_flow_proto_discrimination_extract(
2015 					&priv->extract.qos_key_extract,
2016 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2017 			if (ret) {
2018 				DPAA2_PMD_ERR(
2019 					"QoS Extract IP protocol to discriminate UDP failed.");
2020 
2021 				return -1;
2022 			}
2023 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2024 		}
2025 
2026 		index = dpaa2_flow_extract_search(
2027 				&priv->extract.tc_key_extract[group].dpkg,
2028 				NET_PROT_IP, NH_FLD_IP_PROTO);
2029 		if (index < 0) {
2030 			ret = dpaa2_flow_proto_discrimination_extract(
2031 				&priv->extract.tc_key_extract[group],
2032 				DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2033 			if (ret) {
2034 				DPAA2_PMD_ERR(
2035 					"FS Extract IP protocol to discriminate UDP failed.");
2036 
2037 				return -1;
2038 			}
2039 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2040 		}
2041 
2042 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2043 		if (ret) {
2044 			DPAA2_PMD_ERR(
2045 				"Move IP addr before UDP discrimination set failed");
2046 			return -1;
2047 		}
2048 
2049 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2050 		proto.ip_proto = IPPROTO_UDP;
2051 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2052 							proto, group);
2053 		if (ret) {
2054 			DPAA2_PMD_ERR("UDP discrimination rule set failed");
2055 			return -1;
2056 		}
2057 
2058 		(*device_configured) |= local_cfg;
2059 
2060 		if (!spec)
2061 			return 0;
2062 	}
2063 
2064 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2065 		RTE_FLOW_ITEM_TYPE_UDP)) {
2066 		DPAA2_PMD_WARN("Extract field(s) of UDP not support.");
2067 
2068 		return -1;
2069 	}
2070 
2071 	if (mask->hdr.src_port) {
2072 		index = dpaa2_flow_extract_search(
2073 				&priv->extract.qos_key_extract.dpkg,
2074 				NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
2075 		if (index < 0) {
2076 			ret = dpaa2_flow_extract_add(
2077 					&priv->extract.qos_key_extract,
2078 				NET_PROT_UDP,
2079 				NH_FLD_UDP_PORT_SRC,
2080 				NH_FLD_UDP_PORT_SIZE);
2081 			if (ret) {
2082 				DPAA2_PMD_ERR("QoS Extract add UDP_SRC failed.");
2083 
2084 				return -1;
2085 			}
2086 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2087 		}
2088 
2089 		index = dpaa2_flow_extract_search(
2090 				&priv->extract.tc_key_extract[group].dpkg,
2091 				NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
2092 		if (index < 0) {
2093 			ret = dpaa2_flow_extract_add(
2094 					&priv->extract.tc_key_extract[group],
2095 					NET_PROT_UDP,
2096 					NH_FLD_UDP_PORT_SRC,
2097 					NH_FLD_UDP_PORT_SIZE);
2098 			if (ret) {
2099 				DPAA2_PMD_ERR("FS Extract add UDP_SRC failed.");
2100 
2101 				return -1;
2102 			}
2103 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2104 		}
2105 
2106 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2107 		if (ret) {
2108 			DPAA2_PMD_ERR(
2109 				"Move ipaddr before UDP_PORT_SRC set failed");
2110 			return -1;
2111 		}
2112 
2113 		ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
2114 				&flow->qos_rule,
2115 				NET_PROT_UDP,
2116 				NH_FLD_UDP_PORT_SRC,
2117 				&spec->hdr.src_port,
2118 				&mask->hdr.src_port,
2119 				NH_FLD_UDP_PORT_SIZE);
2120 		if (ret) {
2121 			DPAA2_PMD_ERR(
2122 				"QoS NH_FLD_UDP_PORT_SRC rule data set failed");
2123 			return -1;
2124 		}
2125 
2126 		ret = dpaa2_flow_rule_data_set(
2127 				&priv->extract.tc_key_extract[group],
2128 				&flow->fs_rule,
2129 				NET_PROT_UDP,
2130 				NH_FLD_UDP_PORT_SRC,
2131 				&spec->hdr.src_port,
2132 				&mask->hdr.src_port,
2133 				NH_FLD_UDP_PORT_SIZE);
2134 		if (ret) {
2135 			DPAA2_PMD_ERR(
2136 				"FS NH_FLD_UDP_PORT_SRC rule data set failed");
2137 			return -1;
2138 		}
2139 	}
2140 
2141 	if (mask->hdr.dst_port) {
2142 		index = dpaa2_flow_extract_search(
2143 				&priv->extract.qos_key_extract.dpkg,
2144 				NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
2145 		if (index < 0) {
2146 			ret = dpaa2_flow_extract_add(
2147 					&priv->extract.qos_key_extract,
2148 					NET_PROT_UDP,
2149 					NH_FLD_UDP_PORT_DST,
2150 					NH_FLD_UDP_PORT_SIZE);
2151 			if (ret) {
2152 				DPAA2_PMD_ERR("QoS Extract add UDP_DST failed.");
2153 
2154 				return -1;
2155 			}
2156 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2157 		}
2158 
2159 		index = dpaa2_flow_extract_search(
2160 				&priv->extract.tc_key_extract[group].dpkg,
2161 				NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
2162 		if (index < 0) {
2163 			ret = dpaa2_flow_extract_add(
2164 					&priv->extract.tc_key_extract[group],
2165 					NET_PROT_UDP,
2166 					NH_FLD_UDP_PORT_DST,
2167 					NH_FLD_UDP_PORT_SIZE);
2168 			if (ret) {
2169 				DPAA2_PMD_ERR("FS Extract add UDP_DST failed.");
2170 
2171 				return -1;
2172 			}
2173 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2174 		}
2175 
2176 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2177 		if (ret) {
2178 			DPAA2_PMD_ERR(
2179 				"Move ipaddr before UDP_PORT_DST set failed");
2180 			return -1;
2181 		}
2182 
2183 		ret = dpaa2_flow_rule_data_set(
2184 				&priv->extract.qos_key_extract,
2185 				&flow->qos_rule,
2186 				NET_PROT_UDP,
2187 				NH_FLD_UDP_PORT_DST,
2188 				&spec->hdr.dst_port,
2189 				&mask->hdr.dst_port,
2190 				NH_FLD_UDP_PORT_SIZE);
2191 		if (ret) {
2192 			DPAA2_PMD_ERR(
2193 				"QoS NH_FLD_UDP_PORT_DST rule data set failed");
2194 			return -1;
2195 		}
2196 
2197 		ret = dpaa2_flow_rule_data_set(
2198 				&priv->extract.tc_key_extract[group],
2199 				&flow->fs_rule,
2200 				NET_PROT_UDP,
2201 				NH_FLD_UDP_PORT_DST,
2202 				&spec->hdr.dst_port,
2203 				&mask->hdr.dst_port,
2204 				NH_FLD_UDP_PORT_SIZE);
2205 		if (ret) {
2206 			DPAA2_PMD_ERR(
2207 				"FS NH_FLD_UDP_PORT_DST rule data set failed");
2208 			return -1;
2209 		}
2210 	}
2211 
2212 	(*device_configured) |= local_cfg;
2213 
2214 	return 0;
2215 }
2216 
/* Configure QoS and FS (flow-steering) classification for a TCP pattern item.
 *
 * Two matching strategies are combined:
 *  - If no spec is given, or if MC/WRIOP cannot identify the L4 protocol
 *    from the ports alone (see mc_l4_port_identification), TCP traffic is
 *    discriminated via the IP header's next-protocol field (IPPROTO_TCP).
 *  - If a spec is given, rules matching the requested source/destination
 *    ports are additionally installed in both the QoS and per-TC FS tables.
 *
 * @param flow               Flow being built; tc_id/tc_index and the
 *                           qos_rule/fs_rule key data are filled in here.
 * @param dev                Ethernet device owning the tables.
 * @param attr               Flow attributes; group selects the traffic
 *                           class, priority the index within it.
 * @param pattern            The TCP pattern item (spec/mask/last).
 * @param device_configured  OR'ed with DPAA2_QOS/FS_TABLE_RECONFIGURE
 *                           flags when a table layout changed.
 *
 * @return 0 on success, -1 on any failure.
 */
static int
dpaa2_configure_flow_tcp(struct rte_flow *flow,
			 struct rte_eth_dev *dev,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_item *pattern,
			 const struct rte_flow_action actions[] __rte_unused,
			 struct rte_flow_error *error __rte_unused,
			 int *device_configured)
{
	int index, ret;
	int local_cfg = 0;
	uint32_t group;
	const struct rte_flow_item_tcp *spec, *mask;

	const struct rte_flow_item_tcp *last __rte_unused;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	group = attr->group;

	/* Parse pattern list to get the matching parameters */
	spec    = (const struct rte_flow_item_tcp *)pattern->spec;
	last    = (const struct rte_flow_item_tcp *)pattern->last;
	mask    = (const struct rte_flow_item_tcp *)
		(pattern->mask ? pattern->mask : &dpaa2_flow_item_tcp_mask);

	/* Get traffic class index and flow id to be configured */
	flow->tc_id = group;
	flow->tc_index = attr->priority;

	/* Discriminate TCP via IP next-protocol when there is no spec or
	 * the hardware cannot identify L4 protocols by port fields alone.
	 */
	if (!spec || !mc_l4_port_identification) {
		struct proto_discrimination proto;

		/* Ensure the QoS key extracts the IP protocol field. */
		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_IP, NH_FLD_IP_PROTO);
		if (index < 0) {
			ret = dpaa2_flow_proto_discrimination_extract(
					&priv->extract.qos_key_extract,
					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
			if (ret) {
				DPAA2_PMD_ERR(
					"QoS Extract IP protocol to discriminate TCP failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		/* Ensure the FS key of this TC extracts the IP protocol. */
		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_IP, NH_FLD_IP_PROTO);
		if (index < 0) {
			ret = dpaa2_flow_proto_discrimination_extract(
				&priv->extract.tc_key_extract[group],
				DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
			if (ret) {
				DPAA2_PMD_ERR(
					"FS Extract IP protocol to discriminate TCP failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		/* IP address extracts must stay at the key tail; move them
		 * past any extract just inserted before them.
		 */
		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move IP addr before TCP discrimination set failed");
			return -1;
		}

		/* Install the "IP proto == TCP" rule data. */
		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
		proto.ip_proto = IPPROTO_TCP;
		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
							proto, group);
		if (ret) {
			DPAA2_PMD_ERR("TCP discrimination rule set failed");
			return -1;
		}

		(*device_configured) |= local_cfg;

		/* Without a spec there are no port fields to match on. */
		if (!spec)
			return 0;
	}

	/* Reject masks containing TCP fields this driver cannot extract. */
	if (dpaa2_flow_extract_support((const uint8_t *)mask,
		RTE_FLOW_ITEM_TYPE_TCP)) {
		DPAA2_PMD_WARN("Extract field(s) of TCP not support.");

		return -1;
	}

	/* Match on TCP source port if requested by the mask. */
	if (mask->hdr.src_port) {
		/* Add the SRC-port extract to the QoS key if missing. */
		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.qos_key_extract,
					NET_PROT_TCP,
					NH_FLD_TCP_PORT_SRC,
					NH_FLD_TCP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("QoS Extract add TCP_SRC failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		/* Add the SRC-port extract to this TC's FS key if missing. */
		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.tc_key_extract[group],
					NET_PROT_TCP,
					NH_FLD_TCP_PORT_SRC,
					NH_FLD_TCP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("FS Extract add TCP_SRC failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		/* Keep IP address extracts at the key tail. */
		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move ipaddr before TCP_PORT_SRC set failed");
			return -1;
		}

		/* Write the SRC-port spec/mask bytes into the QoS rule key. */
		ret = dpaa2_flow_rule_data_set(
				&priv->extract.qos_key_extract,
				&flow->qos_rule,
				NET_PROT_TCP,
				NH_FLD_TCP_PORT_SRC,
				&spec->hdr.src_port,
				&mask->hdr.src_port,
				NH_FLD_TCP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"QoS NH_FLD_TCP_PORT_SRC rule data set failed");
			return -1;
		}

		/* And into this TC's FS rule key. */
		ret = dpaa2_flow_rule_data_set(
				&priv->extract.tc_key_extract[group],
				&flow->fs_rule,
				NET_PROT_TCP,
				NH_FLD_TCP_PORT_SRC,
				&spec->hdr.src_port,
				&mask->hdr.src_port,
				NH_FLD_TCP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"FS NH_FLD_TCP_PORT_SRC rule data set failed");
			return -1;
		}
	}

	/* Match on TCP destination port if requested by the mask;
	 * mirrors the source-port handling above.
	 */
	if (mask->hdr.dst_port) {
		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.qos_key_extract,
					NET_PROT_TCP,
					NH_FLD_TCP_PORT_DST,
					NH_FLD_TCP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("QoS Extract add TCP_DST failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.tc_key_extract[group],
					NET_PROT_TCP,
					NH_FLD_TCP_PORT_DST,
					NH_FLD_TCP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("FS Extract add TCP_DST failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move ipaddr before TCP_PORT_DST set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.qos_key_extract,
				&flow->qos_rule,
				NET_PROT_TCP,
				NH_FLD_TCP_PORT_DST,
				&spec->hdr.dst_port,
				&mask->hdr.dst_port,
				NH_FLD_TCP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"QoS NH_FLD_TCP_PORT_DST rule data set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.tc_key_extract[group],
				&flow->fs_rule,
				NET_PROT_TCP,
				NH_FLD_TCP_PORT_DST,
				&spec->hdr.dst_port,
				&mask->hdr.dst_port,
				NH_FLD_TCP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"FS NH_FLD_TCP_PORT_DST rule data set failed");
			return -1;
		}
	}

	(*device_configured) |= local_cfg;

	return 0;
}
2456 
/* Configure QoS and FS (flow-steering) classification for an SCTP pattern
 * item.
 *
 * Mirrors dpaa2_configure_flow_tcp(): SCTP traffic is discriminated via
 * the IP header's next-protocol field (IPPROTO_SCTP) when there is no spec
 * or when MC/WRIOP cannot identify the L4 protocol from ports alone
 * (mc_l4_port_identification); with a spec, source/destination port match
 * rules are installed in both the QoS and per-TC FS tables.
 *
 * @param flow               Flow being built; tc_id/tc_index and the
 *                           qos_rule/fs_rule key data are filled in here.
 * @param dev                Ethernet device owning the tables.
 * @param attr               Flow attributes; group selects the traffic
 *                           class, priority the index within it.
 * @param pattern            The SCTP pattern item (spec/mask/last).
 * @param device_configured  OR'ed with DPAA2_QOS/FS_TABLE_RECONFIGURE
 *                           flags when a table layout changed.
 *
 * @return 0 on success, -1 on any failure.
 */
static int
dpaa2_configure_flow_sctp(struct rte_flow *flow,
			  struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item *pattern,
			  const struct rte_flow_action actions[] __rte_unused,
			  struct rte_flow_error *error __rte_unused,
			  int *device_configured)
{
	int index, ret;
	int local_cfg = 0;
	uint32_t group;
	const struct rte_flow_item_sctp *spec, *mask;

	const struct rte_flow_item_sctp *last __rte_unused;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	group = attr->group;

	/* Parse pattern list to get the matching parameters */
	spec    = (const struct rte_flow_item_sctp *)pattern->spec;
	last    = (const struct rte_flow_item_sctp *)pattern->last;
	mask    = (const struct rte_flow_item_sctp *)
			(pattern->mask ? pattern->mask :
				&dpaa2_flow_item_sctp_mask);

	/* Get traffic class index and flow id to be configured */
	flow->tc_id = group;
	flow->tc_index = attr->priority;

	/* Discriminate SCTP via IP next-protocol when there is no spec or
	 * the hardware cannot identify L4 protocols by port fields alone.
	 */
	if (!spec || !mc_l4_port_identification) {
		struct proto_discrimination proto;

		/* Ensure the QoS key extracts the IP protocol field. */
		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_IP, NH_FLD_IP_PROTO);
		if (index < 0) {
			ret = dpaa2_flow_proto_discrimination_extract(
					&priv->extract.qos_key_extract,
					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
			if (ret) {
				DPAA2_PMD_ERR(
					"QoS Extract IP protocol to discriminate SCTP failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		/* Ensure the FS key of this TC extracts the IP protocol. */
		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_IP, NH_FLD_IP_PROTO);
		if (index < 0) {
			ret = dpaa2_flow_proto_discrimination_extract(
					&priv->extract.tc_key_extract[group],
					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
			if (ret) {
				DPAA2_PMD_ERR(
					"FS Extract IP protocol to discriminate SCTP failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		/* IP address extracts must stay at the key tail; move them
		 * past any extract just inserted before them.
		 */
		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move ipaddr before SCTP discrimination set failed");
			return -1;
		}

		/* Install the "IP proto == SCTP" rule data. */
		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
		proto.ip_proto = IPPROTO_SCTP;
		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
							proto, group);
		if (ret) {
			DPAA2_PMD_ERR("SCTP discrimination rule set failed");
			return -1;
		}

		(*device_configured) |= local_cfg;

		/* Without a spec there are no port fields to match on. */
		if (!spec)
			return 0;
	}

	/* Reject masks containing SCTP fields this driver cannot extract. */
	if (dpaa2_flow_extract_support((const uint8_t *)mask,
		RTE_FLOW_ITEM_TYPE_SCTP)) {
		DPAA2_PMD_WARN("Extract field(s) of SCTP not support.");

		return -1;
	}

	/* Match on SCTP source port if requested by the mask. */
	if (mask->hdr.src_port) {
		/* Add the SRC-port extract to the QoS key if missing. */
		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.qos_key_extract,
					NET_PROT_SCTP,
					NH_FLD_SCTP_PORT_SRC,
					NH_FLD_SCTP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("QoS Extract add SCTP_SRC failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		/* Add the SRC-port extract to this TC's FS key if missing. */
		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.tc_key_extract[group],
					NET_PROT_SCTP,
					NH_FLD_SCTP_PORT_SRC,
					NH_FLD_SCTP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("FS Extract add SCTP_SRC failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		/* Keep IP address extracts at the key tail. */
		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move ipaddr before SCTP_PORT_SRC set failed");
			return -1;
		}

		/* Write the SRC-port spec/mask bytes into the QoS rule key. */
		ret = dpaa2_flow_rule_data_set(
				&priv->extract.qos_key_extract,
				&flow->qos_rule,
				NET_PROT_SCTP,
				NH_FLD_SCTP_PORT_SRC,
				&spec->hdr.src_port,
				&mask->hdr.src_port,
				NH_FLD_SCTP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"QoS NH_FLD_SCTP_PORT_SRC rule data set failed");
			return -1;
		}

		/* And into this TC's FS rule key. */
		ret = dpaa2_flow_rule_data_set(
				&priv->extract.tc_key_extract[group],
				&flow->fs_rule,
				NET_PROT_SCTP,
				NH_FLD_SCTP_PORT_SRC,
				&spec->hdr.src_port,
				&mask->hdr.src_port,
				NH_FLD_SCTP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"FS NH_FLD_SCTP_PORT_SRC rule data set failed");
			return -1;
		}
	}

	/* Match on SCTP destination port if requested by the mask;
	 * mirrors the source-port handling above.
	 */
	if (mask->hdr.dst_port) {
		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.qos_key_extract,
					NET_PROT_SCTP,
					NH_FLD_SCTP_PORT_DST,
					NH_FLD_SCTP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("QoS Extract add SCTP_DST failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.tc_key_extract[group],
					NET_PROT_SCTP,
					NH_FLD_SCTP_PORT_DST,
					NH_FLD_SCTP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("FS Extract add SCTP_DST failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move ipaddr before SCTP_PORT_DST set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.qos_key_extract,
				&flow->qos_rule,
				NET_PROT_SCTP,
				NH_FLD_SCTP_PORT_DST,
				&spec->hdr.dst_port,
				&mask->hdr.dst_port,
				NH_FLD_SCTP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"QoS NH_FLD_SCTP_PORT_DST rule data set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.tc_key_extract[group],
				&flow->fs_rule,
				NET_PROT_SCTP,
				NH_FLD_SCTP_PORT_DST,
				&spec->hdr.dst_port,
				&mask->hdr.dst_port,
				NH_FLD_SCTP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"FS NH_FLD_SCTP_PORT_DST rule data set failed");
			return -1;
		}
	}

	(*device_configured) |= local_cfg;

	return 0;
}
2697 
/* Configure QoS and FS (flow-steering) classification for a GRE pattern
 * item.
 *
 * With no spec, GRE traffic is discriminated solely via the IP header's
 * next-protocol field (IPPROTO_GRE). With a spec, only the GRE protocol
 * (ether-type of the payload) field is supported as a match field; a zero
 * protocol mask leaves nothing to match and returns success immediately.
 *
 * @param flow               Flow being built; tc_id/tc_index and the
 *                           qos_rule/fs_rule key data are filled in here.
 * @param dev                Ethernet device owning the tables.
 * @param attr               Flow attributes; group selects the traffic
 *                           class, priority the index within it.
 * @param pattern            The GRE pattern item (spec/mask/last).
 * @param device_configured  OR'ed with DPAA2_QOS/FS_TABLE_RECONFIGURE
 *                           flags when a table layout changed.
 *
 * @return 0 on success, -1 on any failure.
 */
static int
dpaa2_configure_flow_gre(struct rte_flow *flow,
			 struct rte_eth_dev *dev,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_item *pattern,
			 const struct rte_flow_action actions[] __rte_unused,
			 struct rte_flow_error *error __rte_unused,
			 int *device_configured)
{
	int index, ret;
	int local_cfg = 0;
	uint32_t group;
	const struct rte_flow_item_gre *spec, *mask;

	const struct rte_flow_item_gre *last __rte_unused;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	group = attr->group;

	/* Parse pattern list to get the matching parameters */
	spec    = (const struct rte_flow_item_gre *)pattern->spec;
	last    = (const struct rte_flow_item_gre *)pattern->last;
	mask    = (const struct rte_flow_item_gre *)
		(pattern->mask ? pattern->mask : &dpaa2_flow_item_gre_mask);

	/* Get traffic class index and flow id to be configured */
	flow->tc_id = group;
	flow->tc_index = attr->priority;

	/* No spec: match any GRE packet via the IP next-protocol field. */
	if (!spec) {
		struct proto_discrimination proto;

		/* Ensure the QoS key extracts the IP protocol field. */
		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_IP, NH_FLD_IP_PROTO);
		if (index < 0) {
			ret = dpaa2_flow_proto_discrimination_extract(
					&priv->extract.qos_key_extract,
					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
			if (ret) {
				DPAA2_PMD_ERR(
					"QoS Extract IP protocol to discriminate GRE failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		/* Ensure the FS key of this TC extracts the IP protocol. */
		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_IP, NH_FLD_IP_PROTO);
		if (index < 0) {
			ret = dpaa2_flow_proto_discrimination_extract(
					&priv->extract.tc_key_extract[group],
					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
			if (ret) {
				DPAA2_PMD_ERR(
					"FS Extract IP protocol to discriminate GRE failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		/* IP address extracts must stay at the key tail; move them
		 * past any extract just inserted before them.
		 */
		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move IP addr before GRE discrimination set failed");
			return -1;
		}

		/* Install the "IP proto == GRE" rule data. */
		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
		proto.ip_proto = IPPROTO_GRE;
		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
							proto, group);
		if (ret) {
			DPAA2_PMD_ERR("GRE discrimination rule set failed");
			return -1;
		}

		(*device_configured) |= local_cfg;

		return 0;
	}

	/* Reject masks containing GRE fields this driver cannot extract. */
	if (dpaa2_flow_extract_support((const uint8_t *)mask,
		RTE_FLOW_ITEM_TYPE_GRE)) {
		DPAA2_PMD_WARN("Extract field(s) of GRE not support.");

		return -1;
	}

	/* Only the GRE protocol field is matchable; nothing to do if the
	 * mask does not select it.
	 */
	if (!mask->protocol)
		return 0;

	/* Add the GRE-type extract to the QoS key if missing. */
	index = dpaa2_flow_extract_search(
			&priv->extract.qos_key_extract.dpkg,
			NET_PROT_GRE, NH_FLD_GRE_TYPE);
	if (index < 0) {
		ret = dpaa2_flow_extract_add(
				&priv->extract.qos_key_extract,
				NET_PROT_GRE,
				NH_FLD_GRE_TYPE,
				sizeof(rte_be16_t));
		if (ret) {
			DPAA2_PMD_ERR("QoS Extract add GRE_TYPE failed.");

			return -1;
		}
		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
	}

	/* Add the GRE-type extract to this TC's FS key if missing. */
	index = dpaa2_flow_extract_search(
			&priv->extract.tc_key_extract[group].dpkg,
			NET_PROT_GRE, NH_FLD_GRE_TYPE);
	if (index < 0) {
		ret = dpaa2_flow_extract_add(
				&priv->extract.tc_key_extract[group],
				NET_PROT_GRE,
				NH_FLD_GRE_TYPE,
				sizeof(rte_be16_t));
		if (ret) {
			DPAA2_PMD_ERR("FS Extract add GRE_TYPE failed.");

			return -1;
		}
		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
	}

	/* Keep IP address extracts at the key tail. */
	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
	if (ret) {
		DPAA2_PMD_ERR(
			"Move ipaddr before GRE_TYPE set failed");
		return -1;
	}

	/* Write the GRE-type spec/mask bytes into the QoS rule key. */
	ret = dpaa2_flow_rule_data_set(
				&priv->extract.qos_key_extract,
				&flow->qos_rule,
				NET_PROT_GRE,
				NH_FLD_GRE_TYPE,
				&spec->protocol,
				&mask->protocol,
				sizeof(rte_be16_t));
	if (ret) {
		DPAA2_PMD_ERR(
			"QoS NH_FLD_GRE_TYPE rule data set failed");
		return -1;
	}

	/* And into this TC's FS rule key. */
	ret = dpaa2_flow_rule_data_set(
			&priv->extract.tc_key_extract[group],
			&flow->fs_rule,
			NET_PROT_GRE,
			NH_FLD_GRE_TYPE,
			&spec->protocol,
			&mask->protocol,
			sizeof(rte_be16_t));
	if (ret) {
		DPAA2_PMD_ERR(
			"FS NH_FLD_GRE_TYPE rule data set failed");
		return -1;
	}

	(*device_configured) |= local_cfg;

	return 0;
}
2866 
2867 static int
2868 dpaa2_configure_flow_raw(struct rte_flow *flow,
2869 			 struct rte_eth_dev *dev,
2870 			 const struct rte_flow_attr *attr,
2871 			 const struct rte_flow_item *pattern,
2872 			 const struct rte_flow_action actions[] __rte_unused,
2873 			 struct rte_flow_error *error __rte_unused,
2874 			 int *device_configured)
2875 {
2876 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2877 	const struct rte_flow_item_raw *spec = pattern->spec;
2878 	const struct rte_flow_item_raw *mask = pattern->mask;
2879 	int prev_key_size =
2880 		priv->extract.qos_key_extract.key_info.key_total_size;
2881 	int local_cfg = 0, ret;
2882 	uint32_t group;
2883 
2884 	/* Need both spec and mask */
2885 	if (!spec || !mask) {
2886 		DPAA2_PMD_ERR("spec or mask not present.");
2887 		return -EINVAL;
2888 	}
2889 	/* Only supports non-relative with offset 0 */
2890 	if (spec->relative || spec->offset != 0 ||
2891 	    spec->search || spec->limit) {
2892 		DPAA2_PMD_ERR("relative and non zero offset not supported.");
2893 		return -EINVAL;
2894 	}
2895 	/* Spec len and mask len should be same */
2896 	if (spec->length != mask->length) {
2897 		DPAA2_PMD_ERR("Spec len and mask len mismatch.");
2898 		return -EINVAL;
2899 	}
2900 
2901 	/* Get traffic class index and flow id to be configured */
2902 	group = attr->group;
2903 	flow->tc_id = group;
2904 	flow->tc_index = attr->priority;
2905 
2906 	if (prev_key_size <= spec->length) {
2907 		ret = dpaa2_flow_extract_add_raw(&priv->extract.qos_key_extract,
2908 						 spec->length);
2909 		if (ret) {
2910 			DPAA2_PMD_ERR("QoS Extract RAW add failed.");
2911 			return -1;
2912 		}
2913 		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2914 
2915 		ret = dpaa2_flow_extract_add_raw(
2916 					&priv->extract.tc_key_extract[group],
2917 					spec->length);
2918 		if (ret) {
2919 			DPAA2_PMD_ERR("FS Extract RAW add failed.");
2920 			return -1;
2921 		}
2922 		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2923 	}
2924 
2925 	ret = dpaa2_flow_rule_data_set_raw(&flow->qos_rule, spec->pattern,
2926 					   mask->pattern, spec->length);
2927 	if (ret) {
2928 		DPAA2_PMD_ERR("QoS RAW rule data set failed");
2929 		return -1;
2930 	}
2931 
2932 	ret = dpaa2_flow_rule_data_set_raw(&flow->fs_rule, spec->pattern,
2933 					   mask->pattern, spec->length);
2934 	if (ret) {
2935 		DPAA2_PMD_ERR("FS RAW rule data set failed");
2936 		return -1;
2937 	}
2938 
2939 	(*device_configured) |= local_cfg;
2940 
2941 	return 0;
2942 }
2943 
2944 static inline int
2945 dpaa2_fs_action_supported(enum rte_flow_action_type action)
2946 {
2947 	int i;
2948 
2949 	for (i = 0; i < (int)(sizeof(dpaa2_supported_fs_action_type) /
2950 					sizeof(enum rte_flow_action_type)); i++) {
2951 		if (action == dpaa2_supported_fs_action_type[i])
2952 			return 1;
2953 	}
2954 
2955 	return 0;
2956 }
/* Existing QoS/FS entries that match on IP address(es) must be
 * updated whenever new extract(s) are inserted ahead of the IP
 * address extract(s), because the insertion shifts the IP address
 * offsets within the key.
 */
2962 static int
2963 dpaa2_flow_entry_update(
2964 	struct dpaa2_dev_priv *priv, uint8_t tc_id)
2965 {
2966 	struct rte_flow *curr = LIST_FIRST(&priv->flows);
2967 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
2968 	int ret;
2969 	int qos_ipsrc_offset = -1, qos_ipdst_offset = -1;
2970 	int fs_ipsrc_offset = -1, fs_ipdst_offset = -1;
2971 	struct dpaa2_key_extract *qos_key_extract =
2972 		&priv->extract.qos_key_extract;
2973 	struct dpaa2_key_extract *tc_key_extract =
2974 		&priv->extract.tc_key_extract[tc_id];
2975 	char ipsrc_key[NH_FLD_IPV6_ADDR_SIZE];
2976 	char ipdst_key[NH_FLD_IPV6_ADDR_SIZE];
2977 	char ipsrc_mask[NH_FLD_IPV6_ADDR_SIZE];
2978 	char ipdst_mask[NH_FLD_IPV6_ADDR_SIZE];
2979 	int extend = -1, extend1, size = -1;
2980 	uint16_t qos_index;
2981 
2982 	while (curr) {
2983 		if (curr->ipaddr_rule.ipaddr_type ==
2984 			FLOW_NONE_IPADDR) {
2985 			curr = LIST_NEXT(curr, next);
2986 			continue;
2987 		}
2988 
2989 		if (curr->ipaddr_rule.ipaddr_type ==
2990 			FLOW_IPV4_ADDR) {
2991 			qos_ipsrc_offset =
2992 				qos_key_extract->key_info.ipv4_src_offset;
2993 			qos_ipdst_offset =
2994 				qos_key_extract->key_info.ipv4_dst_offset;
2995 			fs_ipsrc_offset =
2996 				tc_key_extract->key_info.ipv4_src_offset;
2997 			fs_ipdst_offset =
2998 				tc_key_extract->key_info.ipv4_dst_offset;
2999 			size = NH_FLD_IPV4_ADDR_SIZE;
3000 		} else {
3001 			qos_ipsrc_offset =
3002 				qos_key_extract->key_info.ipv6_src_offset;
3003 			qos_ipdst_offset =
3004 				qos_key_extract->key_info.ipv6_dst_offset;
3005 			fs_ipsrc_offset =
3006 				tc_key_extract->key_info.ipv6_src_offset;
3007 			fs_ipdst_offset =
3008 				tc_key_extract->key_info.ipv6_dst_offset;
3009 			size = NH_FLD_IPV6_ADDR_SIZE;
3010 		}
3011 
3012 		qos_index = curr->tc_id * priv->fs_entries +
3013 			curr->tc_index;
3014 
3015 		dpaa2_flow_qos_entry_log("Before update", curr, qos_index, stdout);
3016 
3017 		if (priv->num_rx_tc > 1) {
3018 			ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW,
3019 					priv->token, &curr->qos_rule);
3020 			if (ret) {
3021 				DPAA2_PMD_ERR("Qos entry remove failed.");
3022 				return -1;
3023 			}
3024 		}
3025 
3026 		extend = -1;
3027 
3028 		if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
3029 			RTE_ASSERT(qos_ipsrc_offset >=
3030 				curr->ipaddr_rule.qos_ipsrc_offset);
3031 			extend1 = qos_ipsrc_offset -
3032 				curr->ipaddr_rule.qos_ipsrc_offset;
3033 			if (extend >= 0)
3034 				RTE_ASSERT(extend == extend1);
3035 			else
3036 				extend = extend1;
3037 
3038 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3039 				(size == NH_FLD_IPV6_ADDR_SIZE));
3040 
3041 			memcpy(ipsrc_key,
3042 				(char *)(size_t)curr->qos_rule.key_iova +
3043 				curr->ipaddr_rule.qos_ipsrc_offset,
3044 				size);
3045 			memset((char *)(size_t)curr->qos_rule.key_iova +
3046 				curr->ipaddr_rule.qos_ipsrc_offset,
3047 				0, size);
3048 
3049 			memcpy(ipsrc_mask,
3050 				(char *)(size_t)curr->qos_rule.mask_iova +
3051 				curr->ipaddr_rule.qos_ipsrc_offset,
3052 				size);
3053 			memset((char *)(size_t)curr->qos_rule.mask_iova +
3054 				curr->ipaddr_rule.qos_ipsrc_offset,
3055 				0, size);
3056 
3057 			curr->ipaddr_rule.qos_ipsrc_offset = qos_ipsrc_offset;
3058 		}
3059 
3060 		if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
3061 			RTE_ASSERT(qos_ipdst_offset >=
3062 				curr->ipaddr_rule.qos_ipdst_offset);
3063 			extend1 = qos_ipdst_offset -
3064 				curr->ipaddr_rule.qos_ipdst_offset;
3065 			if (extend >= 0)
3066 				RTE_ASSERT(extend == extend1);
3067 			else
3068 				extend = extend1;
3069 
3070 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3071 				(size == NH_FLD_IPV6_ADDR_SIZE));
3072 
3073 			memcpy(ipdst_key,
3074 				(char *)(size_t)curr->qos_rule.key_iova +
3075 				curr->ipaddr_rule.qos_ipdst_offset,
3076 				size);
3077 			memset((char *)(size_t)curr->qos_rule.key_iova +
3078 				curr->ipaddr_rule.qos_ipdst_offset,
3079 				0, size);
3080 
3081 			memcpy(ipdst_mask,
3082 				(char *)(size_t)curr->qos_rule.mask_iova +
3083 				curr->ipaddr_rule.qos_ipdst_offset,
3084 				size);
3085 			memset((char *)(size_t)curr->qos_rule.mask_iova +
3086 				curr->ipaddr_rule.qos_ipdst_offset,
3087 				0, size);
3088 
3089 			curr->ipaddr_rule.qos_ipdst_offset = qos_ipdst_offset;
3090 		}
3091 
3092 		if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
3093 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3094 				(size == NH_FLD_IPV6_ADDR_SIZE));
3095 			memcpy((char *)(size_t)curr->qos_rule.key_iova +
3096 				curr->ipaddr_rule.qos_ipsrc_offset,
3097 				ipsrc_key,
3098 				size);
3099 			memcpy((char *)(size_t)curr->qos_rule.mask_iova +
3100 				curr->ipaddr_rule.qos_ipsrc_offset,
3101 				ipsrc_mask,
3102 				size);
3103 		}
3104 		if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
3105 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3106 				(size == NH_FLD_IPV6_ADDR_SIZE));
3107 			memcpy((char *)(size_t)curr->qos_rule.key_iova +
3108 				curr->ipaddr_rule.qos_ipdst_offset,
3109 				ipdst_key,
3110 				size);
3111 			memcpy((char *)(size_t)curr->qos_rule.mask_iova +
3112 				curr->ipaddr_rule.qos_ipdst_offset,
3113 				ipdst_mask,
3114 				size);
3115 		}
3116 
3117 		if (extend >= 0)
3118 			curr->qos_real_key_size += extend;
3119 
3120 		curr->qos_rule.key_size = FIXED_ENTRY_SIZE;
3121 
3122 		dpaa2_flow_qos_entry_log("Start update", curr, qos_index, stdout);
3123 
3124 		if (priv->num_rx_tc > 1) {
3125 			ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
3126 					priv->token, &curr->qos_rule,
3127 					curr->tc_id, qos_index,
3128 					0, 0);
3129 			if (ret) {
3130 				DPAA2_PMD_ERR("Qos entry update failed.");
3131 				return -1;
3132 			}
3133 		}
3134 
3135 		if (!dpaa2_fs_action_supported(curr->action)) {
3136 			curr = LIST_NEXT(curr, next);
3137 			continue;
3138 		}
3139 
3140 		dpaa2_flow_fs_entry_log("Before update", curr, stdout);
3141 		extend = -1;
3142 
3143 		ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW,
3144 				priv->token, curr->tc_id, &curr->fs_rule);
3145 		if (ret) {
3146 			DPAA2_PMD_ERR("FS entry remove failed.");
3147 			return -1;
3148 		}
3149 
3150 		if (curr->ipaddr_rule.fs_ipsrc_offset >= 0 &&
3151 			tc_id == curr->tc_id) {
3152 			RTE_ASSERT(fs_ipsrc_offset >=
3153 				curr->ipaddr_rule.fs_ipsrc_offset);
3154 			extend1 = fs_ipsrc_offset -
3155 				curr->ipaddr_rule.fs_ipsrc_offset;
3156 			if (extend >= 0)
3157 				RTE_ASSERT(extend == extend1);
3158 			else
3159 				extend = extend1;
3160 
3161 			memcpy(ipsrc_key,
3162 				(char *)(size_t)curr->fs_rule.key_iova +
3163 				curr->ipaddr_rule.fs_ipsrc_offset,
3164 				size);
3165 			memset((char *)(size_t)curr->fs_rule.key_iova +
3166 				curr->ipaddr_rule.fs_ipsrc_offset,
3167 				0, size);
3168 
3169 			memcpy(ipsrc_mask,
3170 				(char *)(size_t)curr->fs_rule.mask_iova +
3171 				curr->ipaddr_rule.fs_ipsrc_offset,
3172 				size);
3173 			memset((char *)(size_t)curr->fs_rule.mask_iova +
3174 				curr->ipaddr_rule.fs_ipsrc_offset,
3175 				0, size);
3176 
3177 			curr->ipaddr_rule.fs_ipsrc_offset = fs_ipsrc_offset;
3178 		}
3179 
3180 		if (curr->ipaddr_rule.fs_ipdst_offset >= 0 &&
3181 			tc_id == curr->tc_id) {
3182 			RTE_ASSERT(fs_ipdst_offset >=
3183 				curr->ipaddr_rule.fs_ipdst_offset);
3184 			extend1 = fs_ipdst_offset -
3185 				curr->ipaddr_rule.fs_ipdst_offset;
3186 			if (extend >= 0)
3187 				RTE_ASSERT(extend == extend1);
3188 			else
3189 				extend = extend1;
3190 
3191 			memcpy(ipdst_key,
3192 				(char *)(size_t)curr->fs_rule.key_iova +
3193 				curr->ipaddr_rule.fs_ipdst_offset,
3194 				size);
3195 			memset((char *)(size_t)curr->fs_rule.key_iova +
3196 				curr->ipaddr_rule.fs_ipdst_offset,
3197 				0, size);
3198 
3199 			memcpy(ipdst_mask,
3200 				(char *)(size_t)curr->fs_rule.mask_iova +
3201 				curr->ipaddr_rule.fs_ipdst_offset,
3202 				size);
3203 			memset((char *)(size_t)curr->fs_rule.mask_iova +
3204 				curr->ipaddr_rule.fs_ipdst_offset,
3205 				0, size);
3206 
3207 			curr->ipaddr_rule.fs_ipdst_offset = fs_ipdst_offset;
3208 		}
3209 
3210 		if (curr->ipaddr_rule.fs_ipsrc_offset >= 0) {
3211 			memcpy((char *)(size_t)curr->fs_rule.key_iova +
3212 				curr->ipaddr_rule.fs_ipsrc_offset,
3213 				ipsrc_key,
3214 				size);
3215 			memcpy((char *)(size_t)curr->fs_rule.mask_iova +
3216 				curr->ipaddr_rule.fs_ipsrc_offset,
3217 				ipsrc_mask,
3218 				size);
3219 		}
3220 		if (curr->ipaddr_rule.fs_ipdst_offset >= 0) {
3221 			memcpy((char *)(size_t)curr->fs_rule.key_iova +
3222 				curr->ipaddr_rule.fs_ipdst_offset,
3223 				ipdst_key,
3224 				size);
3225 			memcpy((char *)(size_t)curr->fs_rule.mask_iova +
3226 				curr->ipaddr_rule.fs_ipdst_offset,
3227 				ipdst_mask,
3228 				size);
3229 		}
3230 
3231 		if (extend >= 0)
3232 			curr->fs_real_key_size += extend;
3233 		curr->fs_rule.key_size = FIXED_ENTRY_SIZE;
3234 
3235 		dpaa2_flow_fs_entry_log("Start update", curr, stdout);
3236 
3237 		ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW,
3238 				priv->token, curr->tc_id, curr->tc_index,
3239 				&curr->fs_rule, &curr->action_cfg);
3240 		if (ret) {
3241 			DPAA2_PMD_ERR("FS entry update failed.");
3242 			return -1;
3243 		}
3244 
3245 		curr = LIST_NEXT(curr, next);
3246 	}
3247 
3248 	return 0;
3249 }
3250 
3251 static inline int
3252 dpaa2_flow_verify_attr(
3253 	struct dpaa2_dev_priv *priv,
3254 	const struct rte_flow_attr *attr)
3255 {
3256 	struct rte_flow *curr = LIST_FIRST(&priv->flows);
3257 
3258 	while (curr) {
3259 		if (curr->tc_id == attr->group &&
3260 			curr->tc_index == attr->priority) {
3261 			DPAA2_PMD_ERR(
3262 				"Flow with group %d and priority %d already exists.",
3263 				attr->group, attr->priority);
3264 
3265 			return -1;
3266 		}
3267 		curr = LIST_NEXT(curr, next);
3268 	}
3269 
3270 	return 0;
3271 }
3272 
3273 static inline struct rte_eth_dev *
3274 dpaa2_flow_redirect_dev(struct dpaa2_dev_priv *priv,
3275 	const struct rte_flow_action *action)
3276 {
3277 	const struct rte_flow_action_port_id *port_id;
3278 	int idx = -1;
3279 	struct rte_eth_dev *dest_dev;
3280 
3281 	if (action->type == RTE_FLOW_ACTION_TYPE_PORT_ID) {
3282 		port_id = (const struct rte_flow_action_port_id *)
3283 					action->conf;
3284 		if (!port_id->original)
3285 			idx = port_id->id;
3286 	} else if (action->type == RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT) {
3287 		const struct rte_flow_action_ethdev *ethdev;
3288 
3289 		ethdev = (const struct rte_flow_action_ethdev *)action->conf;
3290 		idx = ethdev->port_id;
3291 	} else {
3292 		return NULL;
3293 	}
3294 
3295 	if (idx >= 0) {
3296 		if (!rte_eth_dev_is_valid_port(idx))
3297 			return NULL;
3298 		if (!rte_pmd_dpaa2_dev_is_dpaa2(idx))
3299 			return NULL;
3300 		dest_dev = &rte_eth_devices[idx];
3301 	} else {
3302 		dest_dev = priv->eth_dev;
3303 	}
3304 
3305 	return dest_dev;
3306 }
3307 
3308 static inline int
3309 dpaa2_flow_verify_action(
3310 	struct dpaa2_dev_priv *priv,
3311 	const struct rte_flow_attr *attr,
3312 	const struct rte_flow_action actions[])
3313 {
3314 	int end_of_list = 0, i, j = 0;
3315 	const struct rte_flow_action_queue *dest_queue;
3316 	const struct rte_flow_action_rss *rss_conf;
3317 	struct dpaa2_queue *rxq;
3318 
3319 	while (!end_of_list) {
3320 		switch (actions[j].type) {
3321 		case RTE_FLOW_ACTION_TYPE_QUEUE:
3322 			dest_queue = (const struct rte_flow_action_queue *)
3323 					(actions[j].conf);
3324 			rxq = priv->rx_vq[dest_queue->index];
3325 			if (attr->group != rxq->tc_index) {
3326 				DPAA2_PMD_ERR(
3327 					"RXQ[%d] does not belong to the group %d",
3328 					dest_queue->index, attr->group);
3329 
3330 				return -1;
3331 			}
3332 			break;
3333 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
3334 		case RTE_FLOW_ACTION_TYPE_PORT_ID:
3335 			if (!dpaa2_flow_redirect_dev(priv, &actions[j])) {
3336 				DPAA2_PMD_ERR("Invalid port id of action");
3337 				return -ENOTSUP;
3338 			}
3339 			break;
3340 		case RTE_FLOW_ACTION_TYPE_RSS:
3341 			rss_conf = (const struct rte_flow_action_rss *)
3342 					(actions[j].conf);
3343 			if (rss_conf->queue_num > priv->dist_queues) {
3344 				DPAA2_PMD_ERR(
3345 					"RSS number exceeds the distribution size");
3346 				return -ENOTSUP;
3347 			}
3348 			for (i = 0; i < (int)rss_conf->queue_num; i++) {
3349 				if (rss_conf->queue[i] >= priv->nb_rx_queues) {
3350 					DPAA2_PMD_ERR(
3351 						"RSS queue index exceeds the number of RXQs");
3352 					return -ENOTSUP;
3353 				}
3354 				rxq = priv->rx_vq[rss_conf->queue[i]];
3355 				if (rxq->tc_index != attr->group) {
3356 					DPAA2_PMD_ERR(
3357 						"Queue/Group combination are not supported");
3358 					return -ENOTSUP;
3359 				}
3360 			}
3361 
3362 			break;
3363 		case RTE_FLOW_ACTION_TYPE_END:
3364 			end_of_list = 1;
3365 			break;
3366 		default:
3367 			DPAA2_PMD_ERR("Invalid action type");
3368 			return -ENOTSUP;
3369 		}
3370 		j++;
3371 	}
3372 
3373 	return 0;
3374 }
3375 
/* Parse pattern items into QoS/FS extract keys and program the flow
 * into hardware: for QUEUE/redirect actions an entry is added to the
 * QoS table (TC selection) and the FS table (queue selection within the
 * TC); for RSS the TC's hash distribution is reconfigured. On success
 * the flow is linked at the tail of priv->flows.
 * Returns 0 on success, negative value on failure.
 */
static int
dpaa2_generic_flow_set(struct rte_flow *flow,
		       struct rte_eth_dev *dev,
		       const struct rte_flow_attr *attr,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[],
		       struct rte_flow_error *error)
{
	const struct rte_flow_action_queue *dest_queue;
	const struct rte_flow_action_rss *rss_conf;
	int is_keycfg_configured = 0, end_of_list = 0;
	int ret = 0, i = 0, j = 0;
	struct dpni_rx_dist_cfg tc_cfg;
	struct dpni_qos_tbl_cfg qos_cfg;
	struct dpni_fs_action_cfg action;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dest_q;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	size_t param;
	struct rte_flow *curr = LIST_FIRST(&priv->flows);
	uint16_t qos_index;
	struct rte_eth_dev *dest_dev;
	struct dpaa2_dev_priv *dest_priv;

	/* Validate attr/actions up front so no HW state is touched on
	 * obviously bad input.
	 */
	ret = dpaa2_flow_verify_attr(priv, attr);
	if (ret)
		return ret;

	ret = dpaa2_flow_verify_action(priv, attr, actions);
	if (ret)
		return ret;

	/* Parse pattern list to get the matching parameters.
	 * Each helper fills flow->qos_rule/fs_rule key+mask and flags
	 * (via is_keycfg_configured) whether the QoS/FS key layouts
	 * need to be (re)programmed to the DPNI.
	 */
	while (!end_of_list) {
		switch (pattern[i].type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			ret = dpaa2_configure_flow_eth(flow,
					dev, attr, &pattern[i], actions, error,
					&is_keycfg_configured);
			if (ret) {
				DPAA2_PMD_ERR("ETH flow configuration failed!");
				return ret;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			ret = dpaa2_configure_flow_vlan(flow,
					dev, attr, &pattern[i], actions, error,
					&is_keycfg_configured);
			if (ret) {
				DPAA2_PMD_ERR("vLan flow configuration failed!");
				return ret;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ret = dpaa2_configure_flow_generic_ip(flow,
					dev, attr, &pattern[i], actions, error,
					&is_keycfg_configured);
			if (ret) {
				DPAA2_PMD_ERR("IP flow configuration failed!");
				return ret;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP:
			ret = dpaa2_configure_flow_icmp(flow,
					dev, attr, &pattern[i], actions, error,
					&is_keycfg_configured);
			if (ret) {
				DPAA2_PMD_ERR("ICMP flow configuration failed!");
				return ret;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = dpaa2_configure_flow_udp(flow,
					dev, attr, &pattern[i], actions, error,
					&is_keycfg_configured);
			if (ret) {
				DPAA2_PMD_ERR("UDP flow configuration failed!");
				return ret;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			ret = dpaa2_configure_flow_tcp(flow,
					dev, attr, &pattern[i], actions, error,
					&is_keycfg_configured);
			if (ret) {
				DPAA2_PMD_ERR("TCP flow configuration failed!");
				return ret;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_SCTP:
			ret = dpaa2_configure_flow_sctp(flow,
					dev, attr, &pattern[i], actions, error,
					&is_keycfg_configured);
			if (ret) {
				DPAA2_PMD_ERR("SCTP flow configuration failed!");
				return ret;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			ret = dpaa2_configure_flow_gre(flow,
					dev, attr, &pattern[i], actions, error,
					&is_keycfg_configured);
			if (ret) {
				DPAA2_PMD_ERR("GRE flow configuration failed!");
				return ret;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_RAW:
			ret = dpaa2_configure_flow_raw(flow,
						       dev, attr, &pattern[i],
						       actions, error,
						       &is_keycfg_configured);
			if (ret) {
				DPAA2_PMD_ERR("RAW flow configuration failed!");
				return ret;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_END:
			end_of_list = 1;
			break; /*End of List*/
		default:
			/* Note: an unsupported item only records -ENOTSUP;
			 * the loop keeps scanning until the END item.
			 */
			DPAA2_PMD_ERR("Invalid action type");
			ret = -ENOTSUP;
			break;
		}
		i++;
	}

	/* Let's parse action on matching traffic */
	end_of_list = 0;
	while (!end_of_list) {
		switch (actions[j].type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE:
		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
		case RTE_FLOW_ACTION_TYPE_PORT_ID:
			memset(&action, 0, sizeof(struct dpni_fs_action_cfg));
			flow->action = actions[j].type;

			if (actions[j].type == RTE_FLOW_ACTION_TYPE_QUEUE) {
				/* Deliver to a local Rx queue of this port. */
				dest_queue = (const struct rte_flow_action_queue *)
								(actions[j].conf);
				dest_q = priv->rx_vq[dest_queue->index];
				action.flow_id = dest_q->flow_id;
			} else {
				/* Redirect matched traffic to the Tx side of
				 * another dpaa2 port (or this one).
				 */
				dest_dev = dpaa2_flow_redirect_dev(priv,
								   &actions[j]);
				if (!dest_dev) {
					DPAA2_PMD_ERR("Invalid destination device to redirect!");
					return -1;
				}

				dest_priv = dest_dev->data->dev_private;
				dest_q = dest_priv->tx_vq[0];
				action.options =
						DPNI_FS_OPT_REDIRECT_TO_DPNI_TX;
				action.redirect_obj_token = dest_priv->token;
				action.flow_id = dest_q->flow_id;
			}

			/* Configure FS table first*/
			if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
				dpaa2_flow_fs_table_extracts_log(priv,
							flow->tc_id, stdout);
				if (dpkg_prepare_key_cfg(
				&priv->extract.tc_key_extract[flow->tc_id].dpkg,
				(uint8_t *)(size_t)priv->extract
				.tc_extract_param[flow->tc_id]) < 0) {
					DPAA2_PMD_ERR(
					"Unable to prepare extract parameters");
					return -1;
				}

				/* Hash distribution must be disabled on this
				 * TC before exact-match FS distribution can be
				 * enabled with the new key layout.
				 */
				memset(&tc_cfg, 0,
					sizeof(struct dpni_rx_dist_cfg));
				tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc;
				tc_cfg.key_cfg_iova =
					(uint64_t)priv->extract.tc_extract_param[flow->tc_id];
				tc_cfg.tc = flow->tc_id;
				tc_cfg.enable = false;
				ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW,
						priv->token, &tc_cfg);
				if (ret < 0) {
					DPAA2_PMD_ERR(
						"TC hash cannot be disabled.(%d)",
						ret);
					return -1;
				}
				tc_cfg.enable = true;
				tc_cfg.fs_miss_flow_id = dpaa2_flow_miss_flow_id;
				ret = dpni_set_rx_fs_dist(dpni, CMD_PRI_LOW,
							 priv->token, &tc_cfg);
				if (ret < 0) {
					DPAA2_PMD_ERR(
						"TC distribution cannot be configured.(%d)",
						ret);
					return -1;
				}
			}

			/* Configure QoS table then.*/
			if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
				dpaa2_flow_qos_table_extracts_log(priv, stdout);
				if (dpkg_prepare_key_cfg(
					&priv->extract.qos_key_extract.dpkg,
					(uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
					DPAA2_PMD_ERR(
						"Unable to prepare extract parameters");
					return -1;
				}

				memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
				qos_cfg.discard_on_miss = false;
				qos_cfg.default_tc = 0;
				qos_cfg.keep_entries = true;
				qos_cfg.key_cfg_iova =
					(size_t)priv->extract.qos_extract_param;
				/* QoS table is effective for multiple TCs. */
				if (priv->num_rx_tc > 1) {
					ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
						priv->token, &qos_cfg);
					if (ret < 0) {
						DPAA2_PMD_ERR(
						"RSS QoS table can not be configured(%d)",
							ret);
						return -1;
					}
				}
			}

			/* Real key size is the extract total, possibly
			 * extended so the key still covers the trailing
			 * IP src/dst fields placed at the end of the key.
			 */
			flow->qos_real_key_size = priv->extract
				.qos_key_extract.key_info.key_total_size;
			if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR) {
				if (flow->ipaddr_rule.qos_ipdst_offset >=
					flow->ipaddr_rule.qos_ipsrc_offset) {
					flow->qos_real_key_size =
						flow->ipaddr_rule.qos_ipdst_offset +
						NH_FLD_IPV4_ADDR_SIZE;
				} else {
					flow->qos_real_key_size =
						flow->ipaddr_rule.qos_ipsrc_offset +
						NH_FLD_IPV4_ADDR_SIZE;
				}
			} else if (flow->ipaddr_rule.ipaddr_type ==
				FLOW_IPV6_ADDR) {
				if (flow->ipaddr_rule.qos_ipdst_offset >=
					flow->ipaddr_rule.qos_ipsrc_offset) {
					flow->qos_real_key_size =
						flow->ipaddr_rule.qos_ipdst_offset +
						NH_FLD_IPV6_ADDR_SIZE;
				} else {
					flow->qos_real_key_size =
						flow->ipaddr_rule.qos_ipsrc_offset +
						NH_FLD_IPV6_ADDR_SIZE;
				}
			}

			/* QoS entry added is only effective for multiple TCs.*/
			if (priv->num_rx_tc > 1) {
				/* Each TC owns fs_entries slots in the QoS
				 * table; tc_index selects the slot within.
				 */
				qos_index = flow->tc_id * priv->fs_entries +
					flow->tc_index;
				if (qos_index >= priv->qos_entries) {
					DPAA2_PMD_ERR("QoS table with %d entries full",
						priv->qos_entries);
					return -1;
				}
				flow->qos_rule.key_size = FIXED_ENTRY_SIZE;

				dpaa2_flow_qos_entry_log("Start add", flow,
							qos_index, stdout);

				ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
						priv->token, &flow->qos_rule,
						flow->tc_id, qos_index,
						0, 0);
				if (ret < 0) {
					DPAA2_PMD_ERR(
						"Error in adding entry to QoS table(%d)", ret);
					return ret;
				}
			}

			if (flow->tc_index >= priv->fs_entries) {
				DPAA2_PMD_ERR("FS table with %d entries full",
					priv->fs_entries);
				return -1;
			}

			flow->fs_real_key_size =
				priv->extract.tc_key_extract[flow->tc_id]
				.key_info.key_total_size;

			if (flow->ipaddr_rule.ipaddr_type ==
				FLOW_IPV4_ADDR) {
				if (flow->ipaddr_rule.fs_ipdst_offset >=
					flow->ipaddr_rule.fs_ipsrc_offset) {
					flow->fs_real_key_size =
						flow->ipaddr_rule.fs_ipdst_offset +
						NH_FLD_IPV4_ADDR_SIZE;
				} else {
					flow->fs_real_key_size =
						flow->ipaddr_rule.fs_ipsrc_offset +
						NH_FLD_IPV4_ADDR_SIZE;
				}
			} else if (flow->ipaddr_rule.ipaddr_type ==
				FLOW_IPV6_ADDR) {
				if (flow->ipaddr_rule.fs_ipdst_offset >=
					flow->ipaddr_rule.fs_ipsrc_offset) {
					flow->fs_real_key_size =
						flow->ipaddr_rule.fs_ipdst_offset +
						NH_FLD_IPV6_ADDR_SIZE;
				} else {
					flow->fs_real_key_size =
						flow->ipaddr_rule.fs_ipsrc_offset +
						NH_FLD_IPV6_ADDR_SIZE;
				}
			}

			flow->fs_rule.key_size = FIXED_ENTRY_SIZE;

			dpaa2_flow_fs_entry_log("Start add", flow, stdout);

			ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token,
						flow->tc_id, flow->tc_index,
						&flow->fs_rule, &action);
			if (ret < 0) {
				DPAA2_PMD_ERR(
				"Error in adding entry to FS table(%d)", ret);
				return ret;
			}
			/* Keep a copy so the entry can be re-added when key
			 * layouts are extended by later flows.
			 */
			memcpy(&flow->action_cfg, &action,
				sizeof(struct dpni_fs_action_cfg));
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			rss_conf = (const struct rte_flow_action_rss *)(actions[j].conf);

			flow->action = RTE_FLOW_ACTION_TYPE_RSS;
			ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types,
					&priv->extract.tc_key_extract[flow->tc_id].dpkg);
			if (ret < 0) {
				DPAA2_PMD_ERR(
				"unable to set flow distribution.please check queue config");
				return ret;
			}

			/* Allocate DMA'ble memory to write the rules */
			param = (size_t)rte_malloc(NULL, 256, 64);
			if (!param) {
				DPAA2_PMD_ERR("Memory allocation failure");
				return -1;
			}

			if (dpkg_prepare_key_cfg(
				&priv->extract.tc_key_extract[flow->tc_id].dpkg,
				(uint8_t *)param) < 0) {
				DPAA2_PMD_ERR(
				"Unable to prepare extract parameters");
				rte_free((void *)param);
				return -1;
			}

			memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));
			tc_cfg.dist_size = rss_conf->queue_num;
			tc_cfg.key_cfg_iova = (size_t)param;
			tc_cfg.enable = true;
			tc_cfg.tc = flow->tc_id;
			ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW,
						 priv->token, &tc_cfg);
			if (ret < 0) {
				DPAA2_PMD_ERR(
					"RSS TC table cannot be configured: %d",
					ret);
				rte_free((void *)param);
				return -1;
			}

			/* param is only needed while MC consumes it above. */
			rte_free((void *)param);
			if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
				if (dpkg_prepare_key_cfg(
					&priv->extract.qos_key_extract.dpkg,
					(uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
					DPAA2_PMD_ERR(
					"Unable to prepare extract parameters");
					return -1;
				}
				memset(&qos_cfg, 0,
					sizeof(struct dpni_qos_tbl_cfg));
				qos_cfg.discard_on_miss = true;
				qos_cfg.keep_entries = true;
				qos_cfg.key_cfg_iova =
					(size_t)priv->extract.qos_extract_param;
				ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
							 priv->token, &qos_cfg);
				if (ret < 0) {
					DPAA2_PMD_ERR(
					"RSS QoS dist can't be configured-%d",
					ret);
					return -1;
				}
			}

			/* Add Rule into QoS table */
			qos_index = flow->tc_id * priv->fs_entries +
				flow->tc_index;
			if (qos_index >= priv->qos_entries) {
				DPAA2_PMD_ERR("QoS table with %d entries full",
					priv->qos_entries);
				return -1;
			}

			flow->qos_real_key_size =
			  priv->extract.qos_key_extract.key_info.key_total_size;
			flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
			ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token,
						&flow->qos_rule, flow->tc_id,
						qos_index, 0, 0);
			if (ret < 0) {
				DPAA2_PMD_ERR(
				"Error in entry addition in QoS table(%d)",
				ret);
				return ret;
			}
			break;
		case RTE_FLOW_ACTION_TYPE_END:
			end_of_list = 1;
			break;
		default:
			DPAA2_PMD_ERR("Invalid action type");
			ret = -ENOTSUP;
			break;
		}
		j++;
	}

	if (!ret) {
		if (is_keycfg_configured &
			(DPAA2_QOS_TABLE_RECONFIGURE |
			DPAA2_FS_TABLE_RECONFIGURE)) {
			/* Key layout changed: existing entries must be
			 * re-written to match the new key offsets.
			 */
			ret = dpaa2_flow_entry_update(priv, flow->tc_id);
			if (ret) {
				DPAA2_PMD_ERR("Flow entry update failed.");

				return -1;
			}
		}
		/* New rules are inserted. */
		if (!curr) {
			LIST_INSERT_HEAD(&priv->flows, flow, next);
		} else {
			/* Append at the tail to preserve creation order. */
			while (LIST_NEXT(curr, next))
				curr = LIST_NEXT(curr, next);
			LIST_INSERT_AFTER(curr, flow, next);
		}
	}
	return ret;
}
3832 
3833 static inline int
3834 dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr,
3835 		      const struct rte_flow_attr *attr)
3836 {
3837 	int ret = 0;
3838 
3839 	if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) {
3840 		DPAA2_PMD_ERR("Priority group is out of range");
3841 		ret = -ENOTSUP;
3842 	}
3843 	if (unlikely(attr->priority >= dpni_attr->fs_entries)) {
3844 		DPAA2_PMD_ERR("Priority within the group is out of range");
3845 		ret = -ENOTSUP;
3846 	}
3847 	if (unlikely(attr->egress)) {
3848 		DPAA2_PMD_ERR(
3849 			"Flow configuration is not supported on egress side");
3850 		ret = -ENOTSUP;
3851 	}
3852 	if (unlikely(!attr->ingress)) {
3853 		DPAA2_PMD_ERR("Ingress flag must be configured");
3854 		ret = -EINVAL;
3855 	}
3856 	return ret;
3857 }
3858 
3859 static inline int
3860 dpaa2_dev_verify_patterns(const struct rte_flow_item pattern[])
3861 {
3862 	unsigned int i, j, is_found = 0;
3863 	int ret = 0;
3864 
3865 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3866 		for (i = 0; i < RTE_DIM(dpaa2_supported_pattern_type); i++) {
3867 			if (dpaa2_supported_pattern_type[i]
3868 					== pattern[j].type) {
3869 				is_found = 1;
3870 				break;
3871 			}
3872 		}
3873 		if (!is_found) {
3874 			ret = -ENOTSUP;
3875 			break;
3876 		}
3877 	}
3878 	/* Lets verify other combinations of given pattern rules */
3879 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3880 		if (!pattern[j].spec) {
3881 			ret = -EINVAL;
3882 			break;
3883 		}
3884 	}
3885 
3886 	return ret;
3887 }
3888 
3889 static inline int
3890 dpaa2_dev_verify_actions(const struct rte_flow_action actions[])
3891 {
3892 	unsigned int i, j, is_found = 0;
3893 	int ret = 0;
3894 
3895 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3896 		for (i = 0; i < RTE_DIM(dpaa2_supported_action_type); i++) {
3897 			if (dpaa2_supported_action_type[i] == actions[j].type) {
3898 				is_found = 1;
3899 				break;
3900 			}
3901 		}
3902 		if (!is_found) {
3903 			ret = -ENOTSUP;
3904 			break;
3905 		}
3906 	}
3907 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3908 		if (actions[j].type != RTE_FLOW_ACTION_TYPE_DROP &&
3909 				!actions[j].conf)
3910 			ret = -EINVAL;
3911 	}
3912 	return ret;
3913 }
3914 
3915 static
3916 int dpaa2_flow_validate(struct rte_eth_dev *dev,
3917 			const struct rte_flow_attr *flow_attr,
3918 			const struct rte_flow_item pattern[],
3919 			const struct rte_flow_action actions[],
3920 			struct rte_flow_error *error)
3921 {
3922 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3923 	struct dpni_attr dpni_attr;
3924 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3925 	uint16_t token = priv->token;
3926 	int ret = 0;
3927 
3928 	memset(&dpni_attr, 0, sizeof(struct dpni_attr));
3929 	ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr);
3930 	if (ret < 0) {
3931 		DPAA2_PMD_ERR(
3932 			"Failure to get dpni@%p attribute, err code  %d",
3933 			dpni, ret);
3934 		rte_flow_error_set(error, EPERM,
3935 			   RTE_FLOW_ERROR_TYPE_ATTR,
3936 			   flow_attr, "invalid");
3937 		return ret;
3938 	}
3939 
3940 	/* Verify input attributes */
3941 	ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr);
3942 	if (ret < 0) {
3943 		DPAA2_PMD_ERR(
3944 			"Invalid attributes are given");
3945 		rte_flow_error_set(error, EPERM,
3946 			   RTE_FLOW_ERROR_TYPE_ATTR,
3947 			   flow_attr, "invalid");
3948 		goto not_valid_params;
3949 	}
3950 	/* Verify input pattern list */
3951 	ret = dpaa2_dev_verify_patterns(pattern);
3952 	if (ret < 0) {
3953 		DPAA2_PMD_ERR(
3954 			"Invalid pattern list is given");
3955 		rte_flow_error_set(error, EPERM,
3956 			   RTE_FLOW_ERROR_TYPE_ITEM,
3957 			   pattern, "invalid");
3958 		goto not_valid_params;
3959 	}
3960 	/* Verify input action list */
3961 	ret = dpaa2_dev_verify_actions(actions);
3962 	if (ret < 0) {
3963 		DPAA2_PMD_ERR(
3964 			"Invalid action list is given");
3965 		rte_flow_error_set(error, EPERM,
3966 			   RTE_FLOW_ERROR_TYPE_ACTION,
3967 			   actions, "invalid");
3968 		goto not_valid_params;
3969 	}
3970 not_valid_params:
3971 	return ret;
3972 }
3973 
3974 static
3975 struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev,
3976 				   const struct rte_flow_attr *attr,
3977 				   const struct rte_flow_item pattern[],
3978 				   const struct rte_flow_action actions[],
3979 				   struct rte_flow_error *error)
3980 {
3981 	struct rte_flow *flow = NULL;
3982 	size_t key_iova = 0, mask_iova = 0;
3983 	int ret;
3984 
3985 	dpaa2_flow_control_log =
3986 		getenv("DPAA2_FLOW_CONTROL_LOG");
3987 
3988 	if (getenv("DPAA2_FLOW_CONTROL_MISS_FLOW")) {
3989 		struct dpaa2_dev_priv *priv = dev->data->dev_private;
3990 
3991 		dpaa2_flow_miss_flow_id =
3992 			(uint16_t)atoi(getenv("DPAA2_FLOW_CONTROL_MISS_FLOW"));
3993 		if (dpaa2_flow_miss_flow_id >= priv->dist_queues) {
3994 			DPAA2_PMD_ERR(
3995 				"The missed flow ID %d exceeds the max flow ID %d",
3996 				dpaa2_flow_miss_flow_id,
3997 				priv->dist_queues - 1);
3998 			return NULL;
3999 		}
4000 	}
4001 
4002 	flow = rte_zmalloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE);
4003 	if (!flow) {
4004 		DPAA2_PMD_ERR("Failure to allocate memory for flow");
4005 		goto mem_failure;
4006 	}
4007 	/* Allocate DMA'ble memory to write the rules */
4008 	key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
4009 	if (!key_iova) {
4010 		DPAA2_PMD_ERR(
4011 			"Memory allocation failure for rule configuration");
4012 		goto mem_failure;
4013 	}
4014 	mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
4015 	if (!mask_iova) {
4016 		DPAA2_PMD_ERR(
4017 			"Memory allocation failure for rule configuration");
4018 		goto mem_failure;
4019 	}
4020 
4021 	flow->qos_rule.key_iova = key_iova;
4022 	flow->qos_rule.mask_iova = mask_iova;
4023 
4024 	/* Allocate DMA'ble memory to write the rules */
4025 	key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
4026 	if (!key_iova) {
4027 		DPAA2_PMD_ERR(
4028 			"Memory allocation failure for rule configuration");
4029 		goto mem_failure;
4030 	}
4031 	mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
4032 	if (!mask_iova) {
4033 		DPAA2_PMD_ERR(
4034 			"Memory allocation failure for rule configuration");
4035 		goto mem_failure;
4036 	}
4037 
4038 	flow->fs_rule.key_iova = key_iova;
4039 	flow->fs_rule.mask_iova = mask_iova;
4040 
4041 	flow->ipaddr_rule.ipaddr_type = FLOW_NONE_IPADDR;
4042 	flow->ipaddr_rule.qos_ipsrc_offset =
4043 		IP_ADDRESS_OFFSET_INVALID;
4044 	flow->ipaddr_rule.qos_ipdst_offset =
4045 		IP_ADDRESS_OFFSET_INVALID;
4046 	flow->ipaddr_rule.fs_ipsrc_offset =
4047 		IP_ADDRESS_OFFSET_INVALID;
4048 	flow->ipaddr_rule.fs_ipdst_offset =
4049 		IP_ADDRESS_OFFSET_INVALID;
4050 
4051 	ret = dpaa2_generic_flow_set(flow, dev, attr, pattern,
4052 			actions, error);
4053 	if (ret < 0) {
4054 		if (error && error->type > RTE_FLOW_ERROR_TYPE_ACTION)
4055 			rte_flow_error_set(error, EPERM,
4056 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4057 					attr, "unknown");
4058 		DPAA2_PMD_ERR("Failure to create flow, return code (%d)", ret);
4059 		goto creation_error;
4060 	}
4061 
4062 	return flow;
4063 mem_failure:
4064 	rte_flow_error_set(error, EPERM,
4065 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4066 			   NULL, "memory alloc");
4067 creation_error:
4068 	rte_free((void *)flow);
4069 	rte_free((void *)key_iova);
4070 	rte_free((void *)mask_iova);
4071 
4072 	return NULL;
4073 }
4074 
/* rte_flow destroy callback: remove the flow's entries from the QoS
 * and/or FS tables, unlink it from priv->flows and free it.
 * On a hardware removal failure the flow is deliberately left in the
 * list (and not freed) so its rule state remains consistent with HW.
 */
static
int dpaa2_flow_destroy(struct rte_eth_dev *dev,
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int ret = 0;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	switch (flow->action) {
	case RTE_FLOW_ACTION_TYPE_QUEUE:
	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
	case RTE_FLOW_ACTION_TYPE_PORT_ID:
		/* A QoS entry was only added when multiple TCs exist
		 * (mirrors the add path in dpaa2_generic_flow_set).
		 */
		if (priv->num_rx_tc > 1) {
			/* Remove entry from QoS table first */
			ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
					&flow->qos_rule);
			if (ret < 0) {
				DPAA2_PMD_ERR(
					"Error in removing entry from QoS table(%d)", ret);
				goto error;
			}
		}

		/* Then remove entry from FS table */
		ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token,
					   flow->tc_id, &flow->fs_rule);
		if (ret < 0) {
			DPAA2_PMD_ERR(
				"Error in removing entry from FS table(%d)", ret);
			goto error;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_RSS:
		if (priv->num_rx_tc > 1) {
			ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
					&flow->qos_rule);
			if (ret < 0) {
				DPAA2_PMD_ERR(
					"Error in entry addition in QoS table(%d)", ret);
				goto error;
			}
		}
		break;
	default:
		/* NOTE(review): an unsupported action type still falls
		 * through to the unlink/free below while returning
		 * -ENOTSUP — verify this is the intended cleanup.
		 */
		DPAA2_PMD_ERR(
		"Action type (%d) is not supported", flow->action);
		ret = -ENOTSUP;
		break;
	}

	LIST_REMOVE(flow, next);
	/* Free the DMA'ble key/mask buffers allocated at create time. */
	rte_free((void *)(size_t)flow->qos_rule.key_iova);
	rte_free((void *)(size_t)flow->qos_rule.mask_iova);
	rte_free((void *)(size_t)flow->fs_rule.key_iova);
	rte_free((void *)(size_t)flow->fs_rule.mask_iova);
	/* Now free the flow */
	rte_free(flow);

error:
	if (ret)
		rte_flow_error_set(error, EPERM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "unknown");
	return ret;
}
4141 
4142 /**
4143  * Destroy user-configured flow rules.
4144  *
4145  * This function skips internal flows rules.
4146  *
4147  * @see rte_flow_flush()
4148  * @see rte_flow_ops
4149  */
4150 static int
4151 dpaa2_flow_flush(struct rte_eth_dev *dev,
4152 		struct rte_flow_error *error)
4153 {
4154 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4155 	struct rte_flow *flow = LIST_FIRST(&priv->flows);
4156 
4157 	while (flow) {
4158 		struct rte_flow *next = LIST_NEXT(flow, next);
4159 
4160 		dpaa2_flow_destroy(dev, flow, error);
4161 		flow = next;
4162 	}
4163 	return 0;
4164 }
4165 
/* rte_flow query callback: statistics/state queries are not
 * implemented for this PMD; always reports success with no data.
 */
static int
dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused,
		struct rte_flow *flow __rte_unused,
		const struct rte_flow_action *actions __rte_unused,
		void *data __rte_unused,
		struct rte_flow_error *error __rte_unused)
{
	return 0;
}
4175 
4176 /**
4177  * Clean up all flow rules.
4178  *
4179  * Unlike dpaa2_flow_flush(), this function takes care of all remaining flow
4180  * rules regardless of whether they are internal or user-configured.
4181  *
4182  * @param priv
4183  *   Pointer to private structure.
4184  */
4185 void
4186 dpaa2_flow_clean(struct rte_eth_dev *dev)
4187 {
4188 	struct rte_flow *flow;
4189 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4190 
4191 	while ((flow = LIST_FIRST(&priv->flows)))
4192 		dpaa2_flow_destroy(dev, flow, NULL);
4193 }
4194 
4195 const struct rte_flow_ops dpaa2_flow_ops = {
4196 	.create	= dpaa2_flow_create,
4197 	.validate = dpaa2_flow_validate,
4198 	.destroy = dpaa2_flow_destroy,
4199 	.flush	= dpaa2_flow_flush,
4200 	.query	= dpaa2_flow_query,
4201 };
4202