xref: /dpdk/drivers/net/dpaa2/dpaa2_flow.c (revision 7917b0d38e92e8b9ec5a870415b791420e10f11a)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2021 NXP
3  */
4 
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 
13 #include <rte_ethdev.h>
14 #include <rte_log.h>
15 #include <rte_malloc.h>
16 #include <rte_flow_driver.h>
17 #include <rte_tailq.h>
18 
19 #include <fsl_dpni.h>
20 #include <fsl_dpkg.h>
21 
22 #include <dpaa2_ethdev.h>
23 #include <dpaa2_pmd_logs.h>
24 
25 /* Workaround to discriminate UDP/TCP/SCTP
26  * by the next-protocol field of L3.
27  * MC/WRIOP cannot identify the L4 protocol
28  * from the L4 ports alone.
29  */
30 int mc_l4_port_identification;
31 
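/* Debug/tuning knobs: when dpaa2_flow_control_log is non-NULL the
 * driver dumps the generated extracts and the rule key/mask, and
 * dpaa2_flow_miss_flow_id selects where FS lookup misses are steered
 * (dropped by default). Both are expected to be set elsewhere in this
 * file, presumably from the environment at flow-create time (not shown
 * in this excerpt).
 */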
32 static char *dpaa2_flow_control_log;
33 static uint16_t dpaa2_flow_miss_flow_id =
34 	DPNI_FS_MISS_DROP;
35 
36 #define FIXED_ENTRY_SIZE DPNI_MAX_KEY_SIZE
37 
38 enum flow_rule_ipaddr_type {
39 	FLOW_NONE_IPADDR,
40 	FLOW_IPV4_ADDR,
41 	FLOW_IPV6_ADDR
42 };
43 
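/* Per-flow record of where the IP source/destination address bytes
 * currently sit inside the QoS and FS rule key/mask. IP addresses are
 * kept at the tail of the key (see dpaa2_flow_rule_move_ipaddr_tail()
 * below), so these offsets are refreshed whenever the key layout
 * changes.
 */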
44 struct flow_rule_ipaddr {
45 	enum flow_rule_ipaddr_type ipaddr_type;
46 	int qos_ipsrc_offset;
47 	int qos_ipdst_offset;
48 	int fs_ipsrc_offset;
49 	int fs_ipdst_offset;
50 };
51 
52 struct rte_flow {
53 	LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
54 	struct dpni_rule_cfg qos_rule;
55 	struct dpni_rule_cfg fs_rule;
56 	uint8_t qos_real_key_size;
57 	uint8_t fs_real_key_size;
58 	uint8_t tc_id; /**< Traffic Class ID. */
59 	uint8_t tc_index; /**< Index within this Traffic Class. */
60 	enum rte_flow_action_type action;
61 	/* Special case for IP addresses: record their
62 	 * offsets within the key/mask.
63 	 */
64 	struct flow_rule_ipaddr ipaddr_rule;
65 	struct dpni_fs_action_cfg action_cfg;
66 };
67 
68 static const
69 enum rte_flow_item_type dpaa2_supported_pattern_type[] = {
70 	RTE_FLOW_ITEM_TYPE_END,
71 	RTE_FLOW_ITEM_TYPE_ETH,
72 	RTE_FLOW_ITEM_TYPE_VLAN,
73 	RTE_FLOW_ITEM_TYPE_IPV4,
74 	RTE_FLOW_ITEM_TYPE_IPV6,
75 	RTE_FLOW_ITEM_TYPE_ICMP,
76 	RTE_FLOW_ITEM_TYPE_UDP,
77 	RTE_FLOW_ITEM_TYPE_TCP,
78 	RTE_FLOW_ITEM_TYPE_SCTP,
79 	RTE_FLOW_ITEM_TYPE_GRE,
80 };
81 
82 static const
83 enum rte_flow_action_type dpaa2_supported_action_type[] = {
84 	RTE_FLOW_ACTION_TYPE_END,
85 	RTE_FLOW_ACTION_TYPE_QUEUE,
86 	RTE_FLOW_ACTION_TYPE_PORT_ID,
87 	RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
88 	RTE_FLOW_ACTION_TYPE_RSS
89 };
90 
91 static const
92 enum rte_flow_action_type dpaa2_supported_fs_action_type[] = {
93 	RTE_FLOW_ACTION_TYPE_QUEUE,
94 	RTE_FLOW_ACTION_TYPE_PORT_ID,
95 	RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
96 };
97 
98 /* Max of enum rte_flow_item_type + 1, for both IPv4 and IPv6 */
99 #define DPAA2_FLOW_ITEM_TYPE_GENERIC_IP (RTE_FLOW_ITEM_TYPE_META + 1)
100 
101 #ifndef __cplusplus
102 static const struct rte_flow_item_eth dpaa2_flow_item_eth_mask = {
103 	.hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
104 	.hdr.src_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
105 	.hdr.ether_type = RTE_BE16(0xffff),
106 };
107 
108 static const struct rte_flow_item_vlan dpaa2_flow_item_vlan_mask = {
109 	.hdr.vlan_tci = RTE_BE16(0xffff),
110 };
111 
112 static const struct rte_flow_item_ipv4 dpaa2_flow_item_ipv4_mask = {
113 	.hdr.src_addr = RTE_BE32(0xffffffff),
114 	.hdr.dst_addr = RTE_BE32(0xffffffff),
115 	.hdr.next_proto_id = 0xff,
116 };
117 
118 static const struct rte_flow_item_ipv6 dpaa2_flow_item_ipv6_mask = {
119 	.hdr = {
120 		.src_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
121 			      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
122 		.dst_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
123 			      0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
124 		.proto = 0xff
125 	},
126 };
127 
128 static const struct rte_flow_item_icmp dpaa2_flow_item_icmp_mask = {
129 	.hdr.icmp_type = 0xff,
130 	.hdr.icmp_code = 0xff,
131 };
132 
133 static const struct rte_flow_item_udp dpaa2_flow_item_udp_mask = {
134 	.hdr = {
135 		.src_port = RTE_BE16(0xffff),
136 		.dst_port = RTE_BE16(0xffff),
137 	},
138 };
139 
140 static const struct rte_flow_item_tcp dpaa2_flow_item_tcp_mask = {
141 	.hdr = {
142 		.src_port = RTE_BE16(0xffff),
143 		.dst_port = RTE_BE16(0xffff),
144 	},
145 };
146 
147 static const struct rte_flow_item_sctp dpaa2_flow_item_sctp_mask = {
148 	.hdr = {
149 		.src_port = RTE_BE16(0xffff),
150 		.dst_port = RTE_BE16(0xffff),
151 	},
152 };
153 
154 static const struct rte_flow_item_gre dpaa2_flow_item_gre_mask = {
155 	.protocol = RTE_BE16(0xffff),
156 };
157 
158 #endif
159 
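/* Build a human readable "prot.field" string (for example "ip.src")
 * for the flow control log below; only used when logging is enabled.
 */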
160 static inline void dpaa2_prot_field_string(
161 	enum net_prot prot, uint32_t field,
162 	char *string)
163 {
164 	if (!dpaa2_flow_control_log)
165 		return;
166 
167 	if (prot == NET_PROT_ETH) {
168 		strcpy(string, "eth");
169 		if (field == NH_FLD_ETH_DA)
170 			strcat(string, ".dst");
171 		else if (field == NH_FLD_ETH_SA)
172 			strcat(string, ".src");
173 		else if (field == NH_FLD_ETH_TYPE)
174 			strcat(string, ".type");
175 		else
176 			strcat(string, ".unknown field");
177 	} else if (prot == NET_PROT_VLAN) {
178 		strcpy(string, "vlan");
179 		if (field == NH_FLD_VLAN_TCI)
180 			strcat(string, ".tci");
181 		else
182 			strcat(string, ".unknown field");
183 	} else if (prot == NET_PROT_IP) {
184 		strcpy(string, "ip");
185 		if (field == NH_FLD_IP_SRC)
186 			strcat(string, ".src");
187 		else if (field == NH_FLD_IP_DST)
188 			strcat(string, ".dst");
189 		else if (field == NH_FLD_IP_PROTO)
190 			strcat(string, ".proto");
191 		else
192 			strcat(string, ".unknown field");
193 	} else if (prot == NET_PROT_TCP) {
194 		strcpy(string, "tcp");
195 		if (field == NH_FLD_TCP_PORT_SRC)
196 			strcat(string, ".src");
197 		else if (field == NH_FLD_TCP_PORT_DST)
198 			strcat(string, ".dst");
199 		else
200 			strcat(string, ".unknown field");
201 	} else if (prot == NET_PROT_UDP) {
202 		strcpy(string, "udp");
203 		if (field == NH_FLD_UDP_PORT_SRC)
204 			strcat(string, ".src");
205 		else if (field == NH_FLD_UDP_PORT_DST)
206 			strcat(string, ".dst");
207 		else
208 			strcat(string, ".unknown field");
209 	} else if (prot == NET_PROT_ICMP) {
210 		strcpy(string, "icmp");
211 		if (field == NH_FLD_ICMP_TYPE)
212 			strcat(string, ".type");
213 		else if (field == NH_FLD_ICMP_CODE)
214 			strcat(string, ".code");
215 		else
216 			strcat(string, ".unknown field");
217 	} else if (prot == NET_PROT_SCTP) {
218 		strcpy(string, "sctp");
219 		if (field == NH_FLD_SCTP_PORT_SRC)
220 			strcat(string, ".src");
221 		else if (field == NH_FLD_SCTP_PORT_DST)
222 			strcat(string, ".dst");
223 		else
224 			strcat(string, ".unknown field");
225 	} else if (prot == NET_PROT_GRE) {
226 		strcpy(string, "gre");
227 		if (field == NH_FLD_GRE_TYPE)
228 			strcat(string, ".type");
229 		else
230 			strcat(string, ".unknown field");
231 	} else {
232 		strcpy(string, "unknown protocol");
233 	}
234 }
235 
236 static inline void dpaa2_flow_qos_table_extracts_log(
237 	const struct dpaa2_dev_priv *priv, FILE *f)
238 {
239 	int idx;
240 	char string[32];
241 
242 	if (!dpaa2_flow_control_log)
243 		return;
244 
245 	fprintf(f, "Setup QoS table: number of extracts: %d\r\n",
246 			priv->extract.qos_key_extract.dpkg.num_extracts);
247 	for (idx = 0; idx < priv->extract.qos_key_extract.dpkg.num_extracts;
248 		idx++) {
249 		dpaa2_prot_field_string(priv->extract.qos_key_extract.dpkg
250 			.extracts[idx].extract.from_hdr.prot,
251 			priv->extract.qos_key_extract.dpkg.extracts[idx]
252 			.extract.from_hdr.field,
253 			string);
254 		fprintf(f, "%s", string);
255 		if ((idx + 1) < priv->extract.qos_key_extract.dpkg.num_extracts)
256 			fprintf(f, " / ");
257 	}
258 	fprintf(f, "\r\n");
259 }
260 
261 static inline void dpaa2_flow_fs_table_extracts_log(
262 	const struct dpaa2_dev_priv *priv, int tc_id, FILE *f)
263 {
264 	int idx;
265 	char string[32];
266 
267 	if (!dpaa2_flow_control_log)
268 		return;
269 
270 	fprintf(f, "Setup FS table: number of extracts of TC[%d]: %d\r\n",
271 			tc_id, priv->extract.tc_key_extract[tc_id]
272 			.dpkg.num_extracts);
273 	for (idx = 0; idx < priv->extract.tc_key_extract[tc_id]
274 		.dpkg.num_extracts; idx++) {
275 		dpaa2_prot_field_string(priv->extract.tc_key_extract[tc_id]
276 			.dpkg.extracts[idx].extract.from_hdr.prot,
277 			priv->extract.tc_key_extract[tc_id].dpkg.extracts[idx]
278 			.extract.from_hdr.field,
279 			string);
280 		fprintf(f, "%s", string);
281 		if ((idx + 1) < priv->extract.tc_key_extract[tc_id]
282 			.dpkg.num_extracts)
283 			fprintf(f, " / ");
284 	}
285 	fprintf(f, "\r\n");
286 }
287 
288 static inline void dpaa2_flow_qos_entry_log(
289 	const char *log_info, const struct rte_flow *flow, int qos_index, FILE *f)
290 {
291 	int idx;
292 	uint8_t *key, *mask;
293 
294 	if (!dpaa2_flow_control_log)
295 		return;
296 
297 	fprintf(f, "\r\n%s QoS entry[%d] for TC[%d], extracts size is %d\r\n",
298 		log_info, qos_index, flow->tc_id, flow->qos_real_key_size);
299 
300 	key = (uint8_t *)(size_t)flow->qos_rule.key_iova;
301 	mask = (uint8_t *)(size_t)flow->qos_rule.mask_iova;
302 
303 	fprintf(f, "key:\r\n");
304 	for (idx = 0; idx < flow->qos_real_key_size; idx++)
305 		fprintf(f, "%02x ", key[idx]);
306 
307 	fprintf(f, "\r\nmask:\r\n");
308 	for (idx = 0; idx < flow->qos_real_key_size; idx++)
309 		fprintf(f, "%02x ", mask[idx]);
310 
311 	fprintf(f, "\r\n%s QoS ipsrc: %d, ipdst: %d\r\n", log_info,
312 		flow->ipaddr_rule.qos_ipsrc_offset,
313 		flow->ipaddr_rule.qos_ipdst_offset);
314 }
315 
316 static inline void dpaa2_flow_fs_entry_log(
317 	const char *log_info, const struct rte_flow *flow, FILE *f)
318 {
319 	int idx;
320 	uint8_t *key, *mask;
321 
322 	if (!dpaa2_flow_control_log)
323 		return;
324 
325 	fprintf(f, "\r\n%s FS/TC entry[%d] of TC[%d], extracts size is %d\r\n",
326 		log_info, flow->tc_index, flow->tc_id, flow->fs_real_key_size);
327 
328 	key = (uint8_t *)(size_t)flow->fs_rule.key_iova;
329 	mask = (uint8_t *)(size_t)flow->fs_rule.mask_iova;
330 
331 	fprintf(f, "key:\r\n");
332 	for (idx = 0; idx < flow->fs_real_key_size; idx++)
333 		fprintf(f, "%02x ", key[idx]);
334 
335 	fprintf(f, "\r\nmask:\r\n");
336 	for (idx = 0; idx < flow->fs_real_key_size; idx++)
337 		fprintf(f, "%02x ", mask[idx]);
338 
339 	fprintf(f, "\r\n%s FS ipsrc: %d, ipdst: %d\r\n", log_info,
340 		flow->ipaddr_rule.fs_ipsrc_offset,
341 		flow->ipaddr_rule.fs_ipdst_offset);
342 }
343 
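/* Record the size and cumulative byte offset of extract 'index' in the
 * key layout, e.g. sizes {6, 6, 2} give offsets {0, 6, 12} and a
 * key_total_size of 14.
 */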
344 static inline void dpaa2_flow_extract_key_set(
345 	struct dpaa2_key_info *key_info, int index, uint8_t size)
346 {
347 	key_info->key_size[index] = size;
348 	if (index > 0) {
349 		key_info->key_offset[index] =
350 			key_info->key_offset[index - 1] +
351 			key_info->key_size[index - 1];
352 	} else {
353 		key_info->key_offset[index] = 0;
354 	}
355 	key_info->key_total_size += size;
356 }
357 
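/* Append a full-field header extract to the profile. IP SRC/DST
 * extracts are always kept as the last entries (their key size is
 * tracked separately since it differs between IPv4 and IPv6), so a new
 * non-IP extract is inserted in front of them and the IP extract(s)
 * are shifted back one slot with their key offsets adjusted.
 */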
358 static int dpaa2_flow_extract_add(
359 	struct dpaa2_key_extract *key_extract,
360 	enum net_prot prot,
361 	uint32_t field, uint8_t field_size)
362 {
363 	int index, ip_src = -1, ip_dst = -1;
364 	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
365 	struct dpaa2_key_info *key_info = &key_extract->key_info;
366 
367 	if (dpkg->num_extracts >=
368 		DPKG_MAX_NUM_OF_EXTRACTS) {
369 		DPAA2_PMD_WARN("Number of extracts overflows");
370 		return -1;
371 	}
372 	/* Before reordering, the IP SRC and IP DST extracts
373 	 * (if present) are already the last extract(s).
374 	 */
375 	for (index = 0; index < dpkg->num_extracts; index++) {
376 		if (dpkg->extracts[index].extract.from_hdr.prot ==
377 			NET_PROT_IP) {
378 			if (dpkg->extracts[index].extract.from_hdr.field ==
379 				NH_FLD_IP_SRC) {
380 				ip_src = index;
381 			}
382 			if (dpkg->extracts[index].extract.from_hdr.field ==
383 				NH_FLD_IP_DST) {
384 				ip_dst = index;
385 			}
386 		}
387 	}
388 
389 	if (ip_src >= 0)
390 		RTE_ASSERT((ip_src + 2) >= dpkg->num_extracts);
391 
392 	if (ip_dst >= 0)
393 		RTE_ASSERT((ip_dst + 2) >= dpkg->num_extracts);
394 
395 	if (prot == NET_PROT_IP &&
396 		(field == NH_FLD_IP_SRC ||
397 		field == NH_FLD_IP_DST)) {
398 		index = dpkg->num_extracts;
399 	} else {
400 		if (ip_src >= 0 && ip_dst >= 0)
401 			index = dpkg->num_extracts - 2;
402 		else if (ip_src >= 0 || ip_dst >= 0)
403 			index = dpkg->num_extracts - 1;
404 		else
405 			index = dpkg->num_extracts;
406 	}
407 
408 	dpkg->extracts[index].type = DPKG_EXTRACT_FROM_HDR;
409 	dpkg->extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
410 	dpkg->extracts[index].extract.from_hdr.prot = prot;
411 	dpkg->extracts[index].extract.from_hdr.field = field;
412 	if (prot == NET_PROT_IP &&
413 		(field == NH_FLD_IP_SRC ||
414 		field == NH_FLD_IP_DST)) {
415 		dpaa2_flow_extract_key_set(key_info, index, 0);
416 	} else {
417 		dpaa2_flow_extract_key_set(key_info, index, field_size);
418 	}
419 
420 	if (prot == NET_PROT_IP) {
421 		if (field == NH_FLD_IP_SRC) {
422 			if (key_info->ipv4_dst_offset >= 0) {
423 				key_info->ipv4_src_offset =
424 					key_info->ipv4_dst_offset +
425 					NH_FLD_IPV4_ADDR_SIZE;
426 			} else {
427 				key_info->ipv4_src_offset =
428 					key_info->key_offset[index - 1] +
429 						key_info->key_size[index - 1];
430 			}
431 			if (key_info->ipv6_dst_offset >= 0) {
432 				key_info->ipv6_src_offset =
433 					key_info->ipv6_dst_offset +
434 					NH_FLD_IPV6_ADDR_SIZE;
435 			} else {
436 				key_info->ipv6_src_offset =
437 					key_info->key_offset[index - 1] +
438 						key_info->key_size[index - 1];
439 			}
440 		} else if (field == NH_FLD_IP_DST) {
441 			if (key_info->ipv4_src_offset >= 0) {
442 				key_info->ipv4_dst_offset =
443 					key_info->ipv4_src_offset +
444 					NH_FLD_IPV4_ADDR_SIZE;
445 			} else {
446 				key_info->ipv4_dst_offset =
447 					key_info->key_offset[index - 1] +
448 						key_info->key_size[index - 1];
449 			}
450 			if (key_info->ipv6_src_offset >= 0) {
451 				key_info->ipv6_dst_offset =
452 					key_info->ipv6_src_offset +
453 					NH_FLD_IPV6_ADDR_SIZE;
454 			} else {
455 				key_info->ipv6_dst_offset =
456 					key_info->key_offset[index - 1] +
457 						key_info->key_size[index - 1];
458 			}
459 		}
460 	}
461 
462 	if (index == dpkg->num_extracts) {
463 		dpkg->num_extracts++;
464 		return 0;
465 	}
466 
467 	if (ip_src >= 0) {
468 		ip_src++;
469 		dpkg->extracts[ip_src].type =
470 			DPKG_EXTRACT_FROM_HDR;
471 		dpkg->extracts[ip_src].extract.from_hdr.type =
472 			DPKG_FULL_FIELD;
473 		dpkg->extracts[ip_src].extract.from_hdr.prot =
474 			NET_PROT_IP;
475 		dpkg->extracts[ip_src].extract.from_hdr.field =
476 			NH_FLD_IP_SRC;
477 		dpaa2_flow_extract_key_set(key_info, ip_src, 0);
478 		key_info->ipv4_src_offset += field_size;
479 		key_info->ipv6_src_offset += field_size;
480 	}
481 	if (ip_dst >= 0) {
482 		ip_dst++;
483 		dpkg->extracts[ip_dst].type =
484 			DPKG_EXTRACT_FROM_HDR;
485 		dpkg->extracts[ip_dst].extract.from_hdr.type =
486 			DPKG_FULL_FIELD;
487 		dpkg->extracts[ip_dst].extract.from_hdr.prot =
488 			NET_PROT_IP;
489 		dpkg->extracts[ip_dst].extract.from_hdr.field =
490 			NH_FLD_IP_DST;
491 		dpaa2_flow_extract_key_set(key_info, ip_dst, 0);
492 		key_info->ipv4_dst_offset += field_size;
493 		key_info->ipv6_dst_offset += field_size;
494 	}
495 
496 	dpkg->num_extracts++;
497 
498 	return 0;
499 }
500 
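/* Build a raw key of 'size' bytes starting at offset 0 of the frame
 * data, split into DPKG extracts of at most DPAA2_FLOW_MAX_KEY_SIZE
 * bytes each; raw extraction cannot be mixed with header extracts.
 */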
501 static int dpaa2_flow_extract_add_raw(struct dpaa2_key_extract *key_extract,
502 				      int size)
503 {
504 	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
505 	struct dpaa2_key_info *key_info = &key_extract->key_info;
506 	int last_extract_size, index;
507 
508 	if (dpkg->num_extracts != 0 && dpkg->extracts[0].type !=
509 	    DPKG_EXTRACT_FROM_DATA) {
510 		DPAA2_PMD_WARN("RAW extract cannot be combined with others");
511 		return -1;
512 	}
513 
514 	last_extract_size = (size % DPAA2_FLOW_MAX_KEY_SIZE);
515 	dpkg->num_extracts = (size / DPAA2_FLOW_MAX_KEY_SIZE);
516 	if (last_extract_size)
517 		dpkg->num_extracts++;
518 	else
519 		last_extract_size = DPAA2_FLOW_MAX_KEY_SIZE;
520 
521 	for (index = 0; index < dpkg->num_extracts; index++) {
522 		dpkg->extracts[index].type = DPKG_EXTRACT_FROM_DATA;
523 		if (index == dpkg->num_extracts - 1)
524 			dpkg->extracts[index].extract.from_data.size =
525 				last_extract_size;
526 		else
527 			dpkg->extracts[index].extract.from_data.size =
528 				DPAA2_FLOW_MAX_KEY_SIZE;
529 		dpkg->extracts[index].extract.from_data.offset =
530 			DPAA2_FLOW_MAX_KEY_SIZE * index;
531 	}
532 
533 	key_info->key_total_size = size;
534 	return 0;
535 }
536 
537 /* Protocol discrimination.
538  * Discriminate IPv4/IPv6/VLAN by the Eth type.
539  * Discriminate UDP/TCP/ICMP by the next protocol of IP.
540  */
541 static inline int
542 dpaa2_flow_proto_discrimination_extract(
543 	struct dpaa2_key_extract *key_extract,
544 	enum rte_flow_item_type type)
545 {
546 	if (type == RTE_FLOW_ITEM_TYPE_ETH) {
547 		return dpaa2_flow_extract_add(
548 				key_extract, NET_PROT_ETH,
549 				NH_FLD_ETH_TYPE,
550 				sizeof(rte_be16_t));
551 	} else if (type == (enum rte_flow_item_type)
552 		DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
553 		return dpaa2_flow_extract_add(
554 				key_extract, NET_PROT_IP,
555 				NH_FLD_IP_PROTO,
556 				NH_FLD_IP_PROTO_SIZE);
557 	}
558 
559 	return -1;
560 }
561 
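/* Return the extract index of (prot, field) in the profile, or -1 if
 * that field is not extracted yet.
 */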
562 static inline int dpaa2_flow_extract_search(
563 	struct dpkg_profile_cfg *dpkg,
564 	enum net_prot prot, uint32_t field)
565 {
566 	int i;
567 
568 	for (i = 0; i < dpkg->num_extracts; i++) {
569 		if (dpkg->extracts[i].extract.from_hdr.prot == prot &&
570 			dpkg->extracts[i].extract.from_hdr.field == field) {
571 			return i;
572 		}
573 	}
574 
575 	return -1;
576 }
577 
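/* Return the byte offset of (prot, field) within the generated key.
 * IPv4/IPv6 addresses use the dedicated tail offsets tracked in
 * key_info, since their extract slots are created with size 0.
 */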
578 static inline int dpaa2_flow_extract_key_offset(
579 	struct dpaa2_key_extract *key_extract,
580 	enum net_prot prot, uint32_t field)
581 {
582 	int i;
583 	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
584 	struct dpaa2_key_info *key_info = &key_extract->key_info;
585 
586 	if (prot == NET_PROT_IPV4 ||
587 		prot == NET_PROT_IPV6)
588 		i = dpaa2_flow_extract_search(dpkg, NET_PROT_IP, field);
589 	else
590 		i = dpaa2_flow_extract_search(dpkg, prot, field);
591 
592 	if (i >= 0) {
593 		if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_SRC)
594 			return key_info->ipv4_src_offset;
595 		else if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_DST)
596 			return key_info->ipv4_dst_offset;
597 		else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_SRC)
598 			return key_info->ipv6_src_offset;
599 		else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_DST)
600 			return key_info->ipv6_dst_offset;
601 		else
602 			return key_info->key_offset[i];
603 	} else {
604 		return -1;
605 	}
606 }
607 
608 struct proto_discrimination {
609 	enum rte_flow_item_type type;
610 	union {
611 		rte_be16_t eth_type;
612 		uint8_t ip_proto;
613 	};
614 };
615 
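/* Write the discrimination value (Eth type, or IP next protocol) and
 * an all-ones mask into both the QoS rule and the FS rule of the flow,
 * at the offsets reported by the corresponding extract layouts.
 */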
616 static int
617 dpaa2_flow_proto_discrimination_rule(
618 	struct dpaa2_dev_priv *priv, struct rte_flow *flow,
619 	struct proto_discrimination proto, int group)
620 {
621 	enum net_prot prot;
622 	uint32_t field;
623 	int offset;
624 	size_t key_iova;
625 	size_t mask_iova;
626 	rte_be16_t eth_type;
627 	uint8_t ip_proto;
628 
629 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
630 		prot = NET_PROT_ETH;
631 		field = NH_FLD_ETH_TYPE;
632 	} else if (proto.type == DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
633 		prot = NET_PROT_IP;
634 		field = NH_FLD_IP_PROTO;
635 	} else {
636 		DPAA2_PMD_ERR(
637 			"Only Eth and IP are supported to discriminate the next proto.");
638 		return -1;
639 	}
640 
641 	offset = dpaa2_flow_extract_key_offset(&priv->extract.qos_key_extract,
642 			prot, field);
643 	if (offset < 0) {
644 		DPAA2_PMD_ERR("QoS prot %d field %d extract failed",
645 				prot, field);
646 		return -1;
647 	}
648 	key_iova = flow->qos_rule.key_iova + offset;
649 	mask_iova = flow->qos_rule.mask_iova + offset;
650 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
651 		eth_type = proto.eth_type;
652 		memcpy((void *)key_iova, (const void *)(&eth_type),
653 			sizeof(rte_be16_t));
654 		eth_type = 0xffff;
655 		memcpy((void *)mask_iova, (const void *)(&eth_type),
656 			sizeof(rte_be16_t));
657 	} else {
658 		ip_proto = proto.ip_proto;
659 		memcpy((void *)key_iova, (const void *)(&ip_proto),
660 			sizeof(uint8_t));
661 		ip_proto = 0xff;
662 		memcpy((void *)mask_iova, (const void *)(&ip_proto),
663 			sizeof(uint8_t));
664 	}
665 
666 	offset = dpaa2_flow_extract_key_offset(
667 			&priv->extract.tc_key_extract[group],
668 			prot, field);
669 	if (offset < 0) {
670 		DPAA2_PMD_ERR("FS prot %d field %d extract failed",
671 				prot, field);
672 		return -1;
673 	}
674 	key_iova = flow->fs_rule.key_iova + offset;
675 	mask_iova = flow->fs_rule.mask_iova + offset;
676 
677 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
678 		eth_type = proto.eth_type;
679 		memcpy((void *)key_iova, (const void *)(&eth_type),
680 			sizeof(rte_be16_t));
681 		eth_type = 0xffff;
682 		memcpy((void *)mask_iova, (const void *)(&eth_type),
683 			sizeof(rte_be16_t));
684 	} else {
685 		ip_proto = proto.ip_proto;
686 		memcpy((void *)key_iova, (const void *)(&ip_proto),
687 			sizeof(uint8_t));
688 		ip_proto = 0xff;
689 		memcpy((void *)mask_iova, (const void *)(&ip_proto),
690 			sizeof(uint8_t));
691 	}
692 
693 	return 0;
694 }
695 
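/* Copy 'size' bytes of key and mask data for (prot, field) into the
 * rule buffers at the offset of the corresponding extract.
 */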
696 static inline int
697 dpaa2_flow_rule_data_set(
698 	struct dpaa2_key_extract *key_extract,
699 	struct dpni_rule_cfg *rule,
700 	enum net_prot prot, uint32_t field,
701 	const void *key, const void *mask, int size)
702 {
703 	int offset = dpaa2_flow_extract_key_offset(key_extract,
704 				prot, field);
705 
706 	if (offset < 0) {
707 		DPAA2_PMD_ERR("prot %d, field %d extract failed",
708 			prot, field);
709 		return -1;
710 	}
711 
712 	memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
713 	memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
714 
715 	return 0;
716 }
717 
718 static inline int
719 dpaa2_flow_rule_data_set_raw(struct dpni_rule_cfg *rule,
720 			     const void *key, const void *mask, int size)
721 {
722 	int offset = 0;
723 
724 	memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
725 	memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
726 
727 	return 0;
728 }
729 
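/* Move the key/mask bytes of one IP source or destination address from
 * 'src_offset' to the tail position computed from the current extract
 * layout, zeroing the old location.
 */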
730 static inline int
731 _dpaa2_flow_rule_move_ipaddr_tail(
732 	struct dpaa2_key_extract *key_extract,
733 	struct dpni_rule_cfg *rule, int src_offset,
734 	uint32_t field, bool ipv4)
735 {
736 	size_t key_src;
737 	size_t mask_src;
738 	size_t key_dst;
739 	size_t mask_dst;
740 	int dst_offset, len;
741 	enum net_prot prot;
742 	char tmp[NH_FLD_IPV6_ADDR_SIZE];
743 
744 	if (field != NH_FLD_IP_SRC &&
745 		field != NH_FLD_IP_DST) {
746 		DPAA2_PMD_ERR("Field of IP addr reorder must be IP SRC/DST");
747 		return -1;
748 	}
749 	if (ipv4)
750 		prot = NET_PROT_IPV4;
751 	else
752 		prot = NET_PROT_IPV6;
753 	dst_offset = dpaa2_flow_extract_key_offset(key_extract,
754 				prot, field);
755 	if (dst_offset < 0) {
756 		DPAA2_PMD_ERR("Field %d reorder extract failed", field);
757 		return -1;
758 	}
759 	key_src = rule->key_iova + src_offset;
760 	mask_src = rule->mask_iova + src_offset;
761 	key_dst = rule->key_iova + dst_offset;
762 	mask_dst = rule->mask_iova + dst_offset;
763 	if (ipv4)
764 		len = sizeof(rte_be32_t);
765 	else
766 		len = NH_FLD_IPV6_ADDR_SIZE;
767 
768 	memcpy(tmp, (char *)key_src, len);
769 	memset((char *)key_src, 0, len);
770 	memcpy((char *)key_dst, tmp, len);
771 
772 	memcpy(tmp, (char *)mask_src, len);
773 	memset((char *)mask_src, 0, len);
774 	memcpy((char *)mask_dst, tmp, len);
775 
776 	return 0;
777 }
778 
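/* Re-apply the "IP addresses last" key layout to an existing flow:
 * after new non-IP extracts are added, the address bytes already
 * written into the QoS/FS rules are moved to their new tail offsets
 * and the recorded offsets are refreshed.
 */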
779 static inline int
780 dpaa2_flow_rule_move_ipaddr_tail(
781 	struct rte_flow *flow, struct dpaa2_dev_priv *priv,
782 	int fs_group)
783 {
784 	int ret;
785 	enum net_prot prot;
786 
787 	if (flow->ipaddr_rule.ipaddr_type == FLOW_NONE_IPADDR)
788 		return 0;
789 
790 	if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR)
791 		prot = NET_PROT_IPV4;
792 	else
793 		prot = NET_PROT_IPV6;
794 
795 	if (flow->ipaddr_rule.qos_ipsrc_offset >= 0) {
796 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
797 				&priv->extract.qos_key_extract,
798 				&flow->qos_rule,
799 				flow->ipaddr_rule.qos_ipsrc_offset,
800 				NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
801 		if (ret) {
802 			DPAA2_PMD_ERR("QoS src address reorder failed");
803 			return -1;
804 		}
805 		flow->ipaddr_rule.qos_ipsrc_offset =
806 			dpaa2_flow_extract_key_offset(
807 				&priv->extract.qos_key_extract,
808 				prot, NH_FLD_IP_SRC);
809 	}
810 
811 	if (flow->ipaddr_rule.qos_ipdst_offset >= 0) {
812 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
813 				&priv->extract.qos_key_extract,
814 				&flow->qos_rule,
815 				flow->ipaddr_rule.qos_ipdst_offset,
816 				NH_FLD_IP_DST, prot == NET_PROT_IPV4);
817 		if (ret) {
818 			DPAA2_PMD_ERR("QoS dst address reorder failed");
819 			return -1;
820 		}
821 		flow->ipaddr_rule.qos_ipdst_offset =
822 			dpaa2_flow_extract_key_offset(
823 				&priv->extract.qos_key_extract,
824 				prot, NH_FLD_IP_DST);
825 	}
826 
827 	if (flow->ipaddr_rule.fs_ipsrc_offset >= 0) {
828 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
829 				&priv->extract.tc_key_extract[fs_group],
830 				&flow->fs_rule,
831 				flow->ipaddr_rule.fs_ipsrc_offset,
832 				NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
833 		if (ret) {
834 			DPAA2_PMD_ERR("FS src address reorder failed");
835 			return -1;
836 		}
837 		flow->ipaddr_rule.fs_ipsrc_offset =
838 			dpaa2_flow_extract_key_offset(
839 				&priv->extract.tc_key_extract[fs_group],
840 				prot, NH_FLD_IP_SRC);
841 	}
842 	if (flow->ipaddr_rule.fs_ipdst_offset >= 0) {
843 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
844 				&priv->extract.tc_key_extract[fs_group],
845 				&flow->fs_rule,
846 				flow->ipaddr_rule.fs_ipdst_offset,
847 				NH_FLD_IP_DST, prot == NET_PROT_IPV4);
848 		if (ret) {
849 			DPAA2_PMD_ERR("FS dst address reorder failed");
850 			return -1;
851 		}
852 		flow->ipaddr_rule.fs_ipdst_offset =
853 			dpaa2_flow_extract_key_offset(
854 				&priv->extract.tc_key_extract[fs_group],
855 				prot, NH_FLD_IP_DST);
856 	}
857 
858 	return 0;
859 }
860 
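/* Check that the requested mask only covers fields the driver can
 * extract for this item type: OR the user mask into the supported mask
 * and fail if any bit falls outside it.
 */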
861 static int
862 dpaa2_flow_extract_support(
863 	const uint8_t *mask_src,
864 	enum rte_flow_item_type type)
865 {
866 	char mask[64];
867 	int i, size = 0;
868 	const char *mask_support = 0;
869 
870 	switch (type) {
871 	case RTE_FLOW_ITEM_TYPE_ETH:
872 		mask_support = (const char *)&dpaa2_flow_item_eth_mask;
873 		size = sizeof(struct rte_flow_item_eth);
874 		break;
875 	case RTE_FLOW_ITEM_TYPE_VLAN:
876 		mask_support = (const char *)&dpaa2_flow_item_vlan_mask;
877 		size = sizeof(struct rte_flow_item_vlan);
878 		break;
879 	case RTE_FLOW_ITEM_TYPE_IPV4:
880 		mask_support = (const char *)&dpaa2_flow_item_ipv4_mask;
881 		size = sizeof(struct rte_flow_item_ipv4);
882 		break;
883 	case RTE_FLOW_ITEM_TYPE_IPV6:
884 		mask_support = (const char *)&dpaa2_flow_item_ipv6_mask;
885 		size = sizeof(struct rte_flow_item_ipv6);
886 		break;
887 	case RTE_FLOW_ITEM_TYPE_ICMP:
888 		mask_support = (const char *)&dpaa2_flow_item_icmp_mask;
889 		size = sizeof(struct rte_flow_item_icmp);
890 		break;
891 	case RTE_FLOW_ITEM_TYPE_UDP:
892 		mask_support = (const char *)&dpaa2_flow_item_udp_mask;
893 		size = sizeof(struct rte_flow_item_udp);
894 		break;
895 	case RTE_FLOW_ITEM_TYPE_TCP:
896 		mask_support = (const char *)&dpaa2_flow_item_tcp_mask;
897 		size = sizeof(struct rte_flow_item_tcp);
898 		break;
899 	case RTE_FLOW_ITEM_TYPE_SCTP:
900 		mask_support = (const char *)&dpaa2_flow_item_sctp_mask;
901 		size = sizeof(struct rte_flow_item_sctp);
902 		break;
903 	case RTE_FLOW_ITEM_TYPE_GRE:
904 		mask_support = (const char *)&dpaa2_flow_item_gre_mask;
905 		size = sizeof(struct rte_flow_item_gre);
906 		break;
907 	default:
908 		return -1;
909 	}
910 
911 	memcpy(mask, mask_support, size);
912 
913 	for (i = 0; i < size; i++)
914 		mask[i] = (mask[i] | mask_src[i]);
915 
916 	if (memcmp(mask, mask_support, size))
917 		return -1;
918 
919 	return 0;
920 }
921 
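/* Translate an ETH pattern item into QoS and FS table extracts plus
 * rule data for whichever of source address, destination address and
 * ether type are present in the mask. Illustrative testpmd usage
 * (assumed syntax, for illustration only):
 *   flow create 0 ingress pattern eth dst is 00:11:22:33:44:55 / end
 *        actions queue index 1 / end
 */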
922 static int
923 dpaa2_configure_flow_eth(struct rte_flow *flow,
924 			 struct rte_eth_dev *dev,
925 			 const struct rte_flow_attr *attr,
926 			 const struct rte_flow_item *pattern,
927 			 const struct rte_flow_action actions[] __rte_unused,
928 			 struct rte_flow_error *error __rte_unused,
929 			 int *device_configured)
930 {
931 	int index, ret;
932 	int local_cfg = 0;
933 	uint32_t group;
934 	const struct rte_flow_item_eth *spec, *mask;
935 
936 	/* TODO: Currently upper bound of range parameter is not implemented */
937 	const struct rte_flow_item_eth *last __rte_unused;
938 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
939 	const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0};
940 
941 	group = attr->group;
942 
943 	/* Parse pattern list to get the matching parameters */
944 	spec    = (const struct rte_flow_item_eth *)pattern->spec;
945 	last    = (const struct rte_flow_item_eth *)pattern->last;
946 	mask    = (const struct rte_flow_item_eth *)
947 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_eth_mask);
948 	if (!spec) {
949 		/* No field of the eth header is matched,
950 		 * only the eth protocol matters.
951 		 */
952 		DPAA2_PMD_WARN("No pattern spec for Eth flow, just skip");
953 		return 0;
954 	}
955 
956 	/* Get traffic class index and flow id to be configured */
957 	flow->tc_id = group;
958 	flow->tc_index = attr->priority;
959 
960 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
961 		RTE_FLOW_ITEM_TYPE_ETH)) {
962 		DPAA2_PMD_WARN("Extract field(s) of Ethernet not supported.");
963 
964 		return -1;
965 	}
966 
967 	if (memcmp((const char *)&mask->hdr.src_addr, zero_cmp, RTE_ETHER_ADDR_LEN)) {
968 		index = dpaa2_flow_extract_search(
969 				&priv->extract.qos_key_extract.dpkg,
970 				NET_PROT_ETH, NH_FLD_ETH_SA);
971 		if (index < 0) {
972 			ret = dpaa2_flow_extract_add(
973 					&priv->extract.qos_key_extract,
974 					NET_PROT_ETH, NH_FLD_ETH_SA,
975 					RTE_ETHER_ADDR_LEN);
976 			if (ret) {
977 				DPAA2_PMD_ERR("QoS Extract add ETH_SA failed.");
978 
979 				return -1;
980 			}
981 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
982 		}
983 		index = dpaa2_flow_extract_search(
984 				&priv->extract.tc_key_extract[group].dpkg,
985 				NET_PROT_ETH, NH_FLD_ETH_SA);
986 		if (index < 0) {
987 			ret = dpaa2_flow_extract_add(
988 					&priv->extract.tc_key_extract[group],
989 					NET_PROT_ETH, NH_FLD_ETH_SA,
990 					RTE_ETHER_ADDR_LEN);
991 			if (ret) {
992 				DPAA2_PMD_ERR("FS Extract add ETH_SA failed.");
993 				return -1;
994 			}
995 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
996 		}
997 
998 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
999 		if (ret) {
1000 			DPAA2_PMD_ERR(
1001 				"Move ipaddr before ETH_SA rule set failed");
1002 			return -1;
1003 		}
1004 
1005 		ret = dpaa2_flow_rule_data_set(
1006 				&priv->extract.qos_key_extract,
1007 				&flow->qos_rule,
1008 				NET_PROT_ETH,
1009 				NH_FLD_ETH_SA,
1010 				&spec->hdr.src_addr.addr_bytes,
1011 				&mask->hdr.src_addr.addr_bytes,
1012 				sizeof(struct rte_ether_addr));
1013 		if (ret) {
1014 			DPAA2_PMD_ERR("QoS NH_FLD_ETH_SA rule data set failed");
1015 			return -1;
1016 		}
1017 
1018 		ret = dpaa2_flow_rule_data_set(
1019 				&priv->extract.tc_key_extract[group],
1020 				&flow->fs_rule,
1021 				NET_PROT_ETH,
1022 				NH_FLD_ETH_SA,
1023 				&spec->hdr.src_addr.addr_bytes,
1024 				&mask->hdr.src_addr.addr_bytes,
1025 				sizeof(struct rte_ether_addr));
1026 		if (ret) {
1027 			DPAA2_PMD_ERR("FS NH_FLD_ETH_SA rule data set failed");
1028 			return -1;
1029 		}
1030 	}
1031 
1032 	if (memcmp((const char *)&mask->hdr.dst_addr, zero_cmp, RTE_ETHER_ADDR_LEN)) {
1033 		index = dpaa2_flow_extract_search(
1034 				&priv->extract.qos_key_extract.dpkg,
1035 				NET_PROT_ETH, NH_FLD_ETH_DA);
1036 		if (index < 0) {
1037 			ret = dpaa2_flow_extract_add(
1038 					&priv->extract.qos_key_extract,
1039 					NET_PROT_ETH, NH_FLD_ETH_DA,
1040 					RTE_ETHER_ADDR_LEN);
1041 			if (ret) {
1042 				DPAA2_PMD_ERR("QoS Extract add ETH_DA failed.");
1043 
1044 				return -1;
1045 			}
1046 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1047 		}
1048 
1049 		index = dpaa2_flow_extract_search(
1050 				&priv->extract.tc_key_extract[group].dpkg,
1051 				NET_PROT_ETH, NH_FLD_ETH_DA);
1052 		if (index < 0) {
1053 			ret = dpaa2_flow_extract_add(
1054 					&priv->extract.tc_key_extract[group],
1055 					NET_PROT_ETH, NH_FLD_ETH_DA,
1056 					RTE_ETHER_ADDR_LEN);
1057 			if (ret) {
1058 				DPAA2_PMD_ERR("FS Extract add ETH_DA failed.");
1059 
1060 				return -1;
1061 			}
1062 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1063 		}
1064 
1065 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1066 		if (ret) {
1067 			DPAA2_PMD_ERR(
1068 				"Move ipaddr before ETH DA rule set failed");
1069 			return -1;
1070 		}
1071 
1072 		ret = dpaa2_flow_rule_data_set(
1073 				&priv->extract.qos_key_extract,
1074 				&flow->qos_rule,
1075 				NET_PROT_ETH,
1076 				NH_FLD_ETH_DA,
1077 				&spec->hdr.dst_addr.addr_bytes,
1078 				&mask->hdr.dst_addr.addr_bytes,
1079 				sizeof(struct rte_ether_addr));
1080 		if (ret) {
1081 			DPAA2_PMD_ERR("QoS NH_FLD_ETH_DA rule data set failed");
1082 			return -1;
1083 		}
1084 
1085 		ret = dpaa2_flow_rule_data_set(
1086 				&priv->extract.tc_key_extract[group],
1087 				&flow->fs_rule,
1088 				NET_PROT_ETH,
1089 				NH_FLD_ETH_DA,
1090 				&spec->hdr.dst_addr.addr_bytes,
1091 				&mask->hdr.dst_addr.addr_bytes,
1092 				sizeof(struct rte_ether_addr));
1093 		if (ret) {
1094 			DPAA2_PMD_ERR("FS NH_FLD_ETH_DA rule data set failed");
1095 			return -1;
1096 		}
1097 	}
1098 
1099 	if (memcmp((const char *)&mask->hdr.ether_type, zero_cmp, sizeof(rte_be16_t))) {
1100 		index = dpaa2_flow_extract_search(
1101 				&priv->extract.qos_key_extract.dpkg,
1102 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1103 		if (index < 0) {
1104 			ret = dpaa2_flow_extract_add(
1105 					&priv->extract.qos_key_extract,
1106 					NET_PROT_ETH, NH_FLD_ETH_TYPE,
1107 					RTE_ETHER_TYPE_LEN);
1108 			if (ret) {
1109 				DPAA2_PMD_ERR("QoS Extract add ETH_TYPE failed.");
1110 
1111 				return -1;
1112 			}
1113 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1114 		}
1115 		index = dpaa2_flow_extract_search(
1116 				&priv->extract.tc_key_extract[group].dpkg,
1117 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1118 		if (index < 0) {
1119 			ret = dpaa2_flow_extract_add(
1120 					&priv->extract.tc_key_extract[group],
1121 					NET_PROT_ETH, NH_FLD_ETH_TYPE,
1122 					RTE_ETHER_TYPE_LEN);
1123 			if (ret) {
1124 				DPAA2_PMD_ERR("FS Extract add ETH_TYPE failed.");
1125 
1126 				return -1;
1127 			}
1128 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1129 		}
1130 
1131 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1132 		if (ret) {
1133 			DPAA2_PMD_ERR(
1134 				"Move ipaddr before ETH TYPE rule set failed");
1135 			return -1;
1136 		}
1137 
1138 		ret = dpaa2_flow_rule_data_set(
1139 				&priv->extract.qos_key_extract,
1140 				&flow->qos_rule,
1141 				NET_PROT_ETH,
1142 				NH_FLD_ETH_TYPE,
1143 				&spec->hdr.ether_type,
1144 				&mask->hdr.ether_type,
1145 				sizeof(rte_be16_t));
1146 		if (ret) {
1147 			DPAA2_PMD_ERR("QoS NH_FLD_ETH_TYPE rule data set failed");
1148 			return -1;
1149 		}
1150 
1151 		ret = dpaa2_flow_rule_data_set(
1152 				&priv->extract.tc_key_extract[group],
1153 				&flow->fs_rule,
1154 				NET_PROT_ETH,
1155 				NH_FLD_ETH_TYPE,
1156 				&spec->hdr.ether_type,
1157 				&mask->hdr.ether_type,
1158 				sizeof(rte_be16_t));
1159 		if (ret) {
1160 			DPAA2_PMD_ERR("FS NH_FLD_ETH_TYPE rule data set failed");
1161 			return -1;
1162 		}
1163 	}
1164 
1165 	(*device_configured) |= local_cfg;
1166 
1167 	return 0;
1168 }
1169 
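/* Translate a VLAN pattern item. Without a spec the flow only matches
 * the VLAN ether type (0x8100); otherwise the TCI field is matched.
 */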
1170 static int
1171 dpaa2_configure_flow_vlan(struct rte_flow *flow,
1172 			  struct rte_eth_dev *dev,
1173 			  const struct rte_flow_attr *attr,
1174 			  const struct rte_flow_item *pattern,
1175 			  const struct rte_flow_action actions[] __rte_unused,
1176 			  struct rte_flow_error *error __rte_unused,
1177 			  int *device_configured)
1178 {
1179 	int index, ret;
1180 	int local_cfg = 0;
1181 	uint32_t group;
1182 	const struct rte_flow_item_vlan *spec, *mask;
1183 
1184 	const struct rte_flow_item_vlan *last __rte_unused;
1185 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1186 
1187 	group = attr->group;
1188 
1189 	/* Parse pattern list to get the matching parameters */
1190 	spec    = (const struct rte_flow_item_vlan *)pattern->spec;
1191 	last    = (const struct rte_flow_item_vlan *)pattern->last;
1192 	mask    = (const struct rte_flow_item_vlan *)
1193 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_vlan_mask);
1194 
1195 	/* Get traffic class index and flow id to be configured */
1196 	flow->tc_id = group;
1197 	flow->tc_index = attr->priority;
1198 
1199 	if (!spec) {
1200 		/* No field of the vlan header is matched,
1201 		 * only the vlan protocol matters.
1202 		 */
1203 		/* The Eth type is what actually identifies a VLAN frame.
1204 		 */
1205 		struct proto_discrimination proto;
1206 
1207 		index = dpaa2_flow_extract_search(
1208 				&priv->extract.qos_key_extract.dpkg,
1209 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1210 		if (index < 0) {
1211 			ret = dpaa2_flow_proto_discrimination_extract(
1212 						&priv->extract.qos_key_extract,
1213 						RTE_FLOW_ITEM_TYPE_ETH);
1214 			if (ret) {
1215 				DPAA2_PMD_ERR(
1216 				"QoS Ext ETH_TYPE to discriminate vLan failed");
1217 
1218 				return -1;
1219 			}
1220 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1221 		}
1222 
1223 		index = dpaa2_flow_extract_search(
1224 				&priv->extract.tc_key_extract[group].dpkg,
1225 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1226 		if (index < 0) {
1227 			ret = dpaa2_flow_proto_discrimination_extract(
1228 					&priv->extract.tc_key_extract[group],
1229 					RTE_FLOW_ITEM_TYPE_ETH);
1230 			if (ret) {
1231 				DPAA2_PMD_ERR(
1232 				"FS Ext ETH_TYPE to discriminate vLan failed.");
1233 
1234 				return -1;
1235 			}
1236 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1237 		}
1238 
1239 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1240 		if (ret) {
1241 			DPAA2_PMD_ERR(
1242 			"Move ipaddr before vLan discrimination set failed");
1243 			return -1;
1244 		}
1245 
1246 		proto.type = RTE_FLOW_ITEM_TYPE_ETH;
1247 		proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
1248 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1249 							proto, group);
1250 		if (ret) {
1251 			DPAA2_PMD_ERR("vLan discrimination rule set failed");
1252 			return -1;
1253 		}
1254 
1255 		(*device_configured) |= local_cfg;
1256 
1257 		return 0;
1258 	}
1259 
1260 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
1261 		RTE_FLOW_ITEM_TYPE_VLAN)) {
1262 		DPAA2_PMD_WARN("Extract field(s) of VLAN not supported.");
1263 
1264 		return -1;
1265 	}
1266 
1267 	if (!mask->hdr.vlan_tci)
1268 		return 0;
1269 
1270 	index = dpaa2_flow_extract_search(
1271 				&priv->extract.qos_key_extract.dpkg,
1272 				NET_PROT_VLAN, NH_FLD_VLAN_TCI);
1273 	if (index < 0) {
1274 		ret = dpaa2_flow_extract_add(
1275 						&priv->extract.qos_key_extract,
1276 						NET_PROT_VLAN,
1277 						NH_FLD_VLAN_TCI,
1278 						sizeof(rte_be16_t));
1279 		if (ret) {
1280 			DPAA2_PMD_ERR("QoS Extract add VLAN_TCI failed.");
1281 
1282 			return -1;
1283 		}
1284 		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1285 	}
1286 
1287 	index = dpaa2_flow_extract_search(
1288 			&priv->extract.tc_key_extract[group].dpkg,
1289 			NET_PROT_VLAN, NH_FLD_VLAN_TCI);
1290 	if (index < 0) {
1291 		ret = dpaa2_flow_extract_add(
1292 				&priv->extract.tc_key_extract[group],
1293 				NET_PROT_VLAN,
1294 				NH_FLD_VLAN_TCI,
1295 				sizeof(rte_be16_t));
1296 		if (ret) {
1297 			DPAA2_PMD_ERR("FS Extract add VLAN_TCI failed.");
1298 
1299 			return -1;
1300 		}
1301 		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1302 	}
1303 
1304 	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1305 	if (ret) {
1306 		DPAA2_PMD_ERR(
1307 			"Move ipaddr before VLAN TCI rule set failed");
1308 		return -1;
1309 	}
1310 
1311 	ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
1312 				&flow->qos_rule,
1313 				NET_PROT_VLAN,
1314 				NH_FLD_VLAN_TCI,
1315 				&spec->hdr.vlan_tci,
1316 				&mask->hdr.vlan_tci,
1317 				sizeof(rte_be16_t));
1318 	if (ret) {
1319 		DPAA2_PMD_ERR("QoS NH_FLD_VLAN_TCI rule data set failed");
1320 		return -1;
1321 	}
1322 
1323 	ret = dpaa2_flow_rule_data_set(
1324 			&priv->extract.tc_key_extract[group],
1325 			&flow->fs_rule,
1326 			NET_PROT_VLAN,
1327 			NH_FLD_VLAN_TCI,
1328 			&spec->hdr.vlan_tci,
1329 			&mask->hdr.vlan_tci,
1330 			sizeof(rte_be16_t));
1331 	if (ret) {
1332 		DPAA2_PMD_ERR("FS NH_FLD_VLAN_TCI rule data set failed");
1333 		return -1;
1334 	}
1335 
1336 	(*device_configured) |= local_cfg;
1337 
1338 	return 0;
1339 }
1340 
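/* Discriminate IPv4 vs IPv6 traffic for a flow by matching the ether
 * type (0x0800 or 0x86DD) in both the QoS and FS tables.
 */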
1341 static int
1342 dpaa2_configure_flow_ip_discrimination(
1343 	struct dpaa2_dev_priv *priv, struct rte_flow *flow,
1344 	const struct rte_flow_item *pattern,
1345 	int *local_cfg,	int *device_configured,
1346 	uint32_t group)
1347 {
1348 	int index, ret;
1349 	struct proto_discrimination proto;
1350 
1351 	index = dpaa2_flow_extract_search(
1352 			&priv->extract.qos_key_extract.dpkg,
1353 			NET_PROT_ETH, NH_FLD_ETH_TYPE);
1354 	if (index < 0) {
1355 		ret = dpaa2_flow_proto_discrimination_extract(
1356 				&priv->extract.qos_key_extract,
1357 				RTE_FLOW_ITEM_TYPE_ETH);
1358 		if (ret) {
1359 			DPAA2_PMD_ERR(
1360 			"QoS Extract ETH_TYPE to discriminate IP failed.");
1361 			return -1;
1362 		}
1363 		(*local_cfg) |= DPAA2_QOS_TABLE_RECONFIGURE;
1364 	}
1365 
1366 	index = dpaa2_flow_extract_search(
1367 			&priv->extract.tc_key_extract[group].dpkg,
1368 			NET_PROT_ETH, NH_FLD_ETH_TYPE);
1369 	if (index < 0) {
1370 		ret = dpaa2_flow_proto_discrimination_extract(
1371 				&priv->extract.tc_key_extract[group],
1372 				RTE_FLOW_ITEM_TYPE_ETH);
1373 		if (ret) {
1374 			DPAA2_PMD_ERR(
1375 			"FS Extract ETH_TYPE to discriminate IP failed.");
1376 			return -1;
1377 		}
1378 		(*local_cfg) |= DPAA2_FS_TABLE_RECONFIGURE;
1379 	}
1380 
1381 	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1382 	if (ret) {
1383 		DPAA2_PMD_ERR(
1384 			"Move ipaddr before IP discrimination set failed");
1385 		return -1;
1386 	}
1387 
1388 	proto.type = RTE_FLOW_ITEM_TYPE_ETH;
1389 	if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4)
1390 		proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
1391 	else
1392 		proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
1393 	ret = dpaa2_flow_proto_discrimination_rule(priv, flow, proto, group);
1394 	if (ret) {
1395 		DPAA2_PMD_ERR("IP discrimination rule set failed");
1396 		return -1;
1397 	}
1398 
1399 	(*device_configured) |= (*local_cfg);
1400 
1401 	return 0;
1402 }
1403 
1404 
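/* Common handler for both IPV4 and IPV6 pattern items: set up the
 * ether-type discrimination, then add extracts and rule data for the
 * source address, destination address and next-protocol fields present
 * in the mask. Address extracts are created with size 0 and kept at
 * the tail of the key, since their actual size (4 or 16 bytes) depends
 * on the IP version.
 */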
1405 static int
1406 dpaa2_configure_flow_generic_ip(
1407 	struct rte_flow *flow,
1408 	struct rte_eth_dev *dev,
1409 	const struct rte_flow_attr *attr,
1410 	const struct rte_flow_item *pattern,
1411 	const struct rte_flow_action actions[] __rte_unused,
1412 	struct rte_flow_error *error __rte_unused,
1413 	int *device_configured)
1414 {
1415 	int index, ret;
1416 	int local_cfg = 0;
1417 	uint32_t group;
1418 	const struct rte_flow_item_ipv4 *spec_ipv4 = 0,
1419 		*mask_ipv4 = 0;
1420 	const struct rte_flow_item_ipv6 *spec_ipv6 = 0,
1421 		*mask_ipv6 = 0;
1422 	const void *key, *mask;
1423 	enum net_prot prot;
1424 
1425 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1426 	const char zero_cmp[NH_FLD_IPV6_ADDR_SIZE] = {0};
1427 	int size;
1428 
1429 	group = attr->group;
1430 
1431 	/* Parse pattern list to get the matching parameters */
1432 	if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1433 		spec_ipv4 = (const struct rte_flow_item_ipv4 *)pattern->spec;
1434 		mask_ipv4 = (const struct rte_flow_item_ipv4 *)
1435 			(pattern->mask ? pattern->mask :
1436 					&dpaa2_flow_item_ipv4_mask);
1437 	} else {
1438 		spec_ipv6 = (const struct rte_flow_item_ipv6 *)pattern->spec;
1439 		mask_ipv6 = (const struct rte_flow_item_ipv6 *)
1440 			(pattern->mask ? pattern->mask :
1441 					&dpaa2_flow_item_ipv6_mask);
1442 	}
1443 
1444 	/* Get traffic class index and flow id to be configured */
1445 	flow->tc_id = group;
1446 	flow->tc_index = attr->priority;
1447 
1448 	ret = dpaa2_configure_flow_ip_discrimination(priv,
1449 			flow, pattern, &local_cfg,
1450 			device_configured, group);
1451 	if (ret) {
1452 		DPAA2_PMD_ERR("IP discrimination failed!");
1453 		return -1;
1454 	}
1455 
1456 	if (!spec_ipv4 && !spec_ipv6)
1457 		return 0;
1458 
1459 	if (mask_ipv4) {
1460 		if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv4,
1461 			RTE_FLOW_ITEM_TYPE_IPV4)) {
1462 			DPAA2_PMD_WARN("Extract field(s) of IPv4 not supported.");
1463 
1464 			return -1;
1465 		}
1466 	}
1467 
1468 	if (mask_ipv6) {
1469 		if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv6,
1470 			RTE_FLOW_ITEM_TYPE_IPV6)) {
1471 			DPAA2_PMD_WARN("Extract field(s) of IPv6 not supported.");
1472 
1473 			return -1;
1474 		}
1475 	}
1476 
1477 	if (mask_ipv4 && (mask_ipv4->hdr.src_addr ||
1478 		mask_ipv4->hdr.dst_addr)) {
1479 		flow->ipaddr_rule.ipaddr_type = FLOW_IPV4_ADDR;
1480 	} else if (mask_ipv6 &&
1481 		(memcmp((const char *)mask_ipv6->hdr.src_addr,
1482 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE) ||
1483 		memcmp((const char *)mask_ipv6->hdr.dst_addr,
1484 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1485 		flow->ipaddr_rule.ipaddr_type = FLOW_IPV6_ADDR;
1486 	}
1487 
1488 	if ((mask_ipv4 && mask_ipv4->hdr.src_addr) ||
1489 		(mask_ipv6 &&
1490 			memcmp((const char *)mask_ipv6->hdr.src_addr,
1491 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1492 		index = dpaa2_flow_extract_search(
1493 				&priv->extract.qos_key_extract.dpkg,
1494 				NET_PROT_IP, NH_FLD_IP_SRC);
1495 		if (index < 0) {
1496 			ret = dpaa2_flow_extract_add(
1497 					&priv->extract.qos_key_extract,
1498 					NET_PROT_IP,
1499 					NH_FLD_IP_SRC,
1500 					0);
1501 			if (ret) {
1502 				DPAA2_PMD_ERR("QoS Extract add IP_SRC failed.");
1503 
1504 				return -1;
1505 			}
1506 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1507 		}
1508 
1509 		index = dpaa2_flow_extract_search(
1510 				&priv->extract.tc_key_extract[group].dpkg,
1511 				NET_PROT_IP, NH_FLD_IP_SRC);
1512 		if (index < 0) {
1513 			ret = dpaa2_flow_extract_add(
1514 					&priv->extract.tc_key_extract[group],
1515 					NET_PROT_IP,
1516 					NH_FLD_IP_SRC,
1517 					0);
1518 			if (ret) {
1519 				DPAA2_PMD_ERR("FS Extract add IP_SRC failed.");
1520 
1521 				return -1;
1522 			}
1523 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1524 		}
1525 
1526 		if (spec_ipv4)
1527 			key = &spec_ipv4->hdr.src_addr;
1528 		else
1529 			key = &spec_ipv6->hdr.src_addr[0];
1530 		if (mask_ipv4) {
1531 			mask = &mask_ipv4->hdr.src_addr;
1532 			size = NH_FLD_IPV4_ADDR_SIZE;
1533 			prot = NET_PROT_IPV4;
1534 		} else {
1535 			mask = &mask_ipv6->hdr.src_addr[0];
1536 			size = NH_FLD_IPV6_ADDR_SIZE;
1537 			prot = NET_PROT_IPV6;
1538 		}
1539 
1540 		ret = dpaa2_flow_rule_data_set(
1541 				&priv->extract.qos_key_extract,
1542 				&flow->qos_rule,
1543 				prot, NH_FLD_IP_SRC,
1544 				key, mask, size);
1545 		if (ret) {
1546 			DPAA2_PMD_ERR("QoS NH_FLD_IP_SRC rule data set failed");
1547 			return -1;
1548 		}
1549 
1550 		ret = dpaa2_flow_rule_data_set(
1551 				&priv->extract.tc_key_extract[group],
1552 				&flow->fs_rule,
1553 				prot, NH_FLD_IP_SRC,
1554 				key, mask, size);
1555 		if (ret) {
1556 			DPAA2_PMD_ERR("FS NH_FLD_IP_SRC rule data set failed");
1557 			return -1;
1558 		}
1559 
1560 		flow->ipaddr_rule.qos_ipsrc_offset =
1561 			dpaa2_flow_extract_key_offset(
1562 				&priv->extract.qos_key_extract,
1563 				prot, NH_FLD_IP_SRC);
1564 		flow->ipaddr_rule.fs_ipsrc_offset =
1565 			dpaa2_flow_extract_key_offset(
1566 				&priv->extract.tc_key_extract[group],
1567 				prot, NH_FLD_IP_SRC);
1568 	}
1569 
1570 	if ((mask_ipv4 && mask_ipv4->hdr.dst_addr) ||
1571 		(mask_ipv6 &&
1572 			memcmp((const char *)mask_ipv6->hdr.dst_addr,
1573 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1574 		index = dpaa2_flow_extract_search(
1575 				&priv->extract.qos_key_extract.dpkg,
1576 				NET_PROT_IP, NH_FLD_IP_DST);
1577 		if (index < 0) {
1578 			if (mask_ipv4)
1579 				size = NH_FLD_IPV4_ADDR_SIZE;
1580 			else
1581 				size = NH_FLD_IPV6_ADDR_SIZE;
1582 			ret = dpaa2_flow_extract_add(
1583 					&priv->extract.qos_key_extract,
1584 					NET_PROT_IP,
1585 					NH_FLD_IP_DST,
1586 					size);
1587 			if (ret) {
1588 				DPAA2_PMD_ERR("QoS Extract add IP_DST failed.");
1589 
1590 				return -1;
1591 			}
1592 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1593 		}
1594 
1595 		index = dpaa2_flow_extract_search(
1596 				&priv->extract.tc_key_extract[group].dpkg,
1597 				NET_PROT_IP, NH_FLD_IP_DST);
1598 		if (index < 0) {
1599 			if (mask_ipv4)
1600 				size = NH_FLD_IPV4_ADDR_SIZE;
1601 			else
1602 				size = NH_FLD_IPV6_ADDR_SIZE;
1603 			ret = dpaa2_flow_extract_add(
1604 					&priv->extract.tc_key_extract[group],
1605 					NET_PROT_IP,
1606 					NH_FLD_IP_DST,
1607 					size);
1608 			if (ret) {
1609 				DPAA2_PMD_ERR("FS Extract add IP_DST failed.");
1610 
1611 				return -1;
1612 			}
1613 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1614 		}
1615 
1616 		if (spec_ipv4)
1617 			key = &spec_ipv4->hdr.dst_addr;
1618 		else
1619 			key = spec_ipv6->hdr.dst_addr;
1620 		if (mask_ipv4) {
1621 			mask = &mask_ipv4->hdr.dst_addr;
1622 			size = NH_FLD_IPV4_ADDR_SIZE;
1623 			prot = NET_PROT_IPV4;
1624 		} else {
1625 			mask = &mask_ipv6->hdr.dst_addr[0];
1626 			size = NH_FLD_IPV6_ADDR_SIZE;
1627 			prot = NET_PROT_IPV6;
1628 		}
1629 
1630 		ret = dpaa2_flow_rule_data_set(
1631 				&priv->extract.qos_key_extract,
1632 				&flow->qos_rule,
1633 				prot, NH_FLD_IP_DST,
1634 				key, mask, size);
1635 		if (ret) {
1636 			DPAA2_PMD_ERR("QoS NH_FLD_IP_DST rule data set failed");
1637 			return -1;
1638 		}
1639 
1640 		ret = dpaa2_flow_rule_data_set(
1641 				&priv->extract.tc_key_extract[group],
1642 				&flow->fs_rule,
1643 				prot, NH_FLD_IP_DST,
1644 				key, mask, size);
1645 		if (ret) {
1646 			DPAA2_PMD_ERR("FS NH_FLD_IP_DST rule data set failed");
1647 			return -1;
1648 		}
1649 		flow->ipaddr_rule.qos_ipdst_offset =
1650 			dpaa2_flow_extract_key_offset(
1651 				&priv->extract.qos_key_extract,
1652 				prot, NH_FLD_IP_DST);
1653 		flow->ipaddr_rule.fs_ipdst_offset =
1654 			dpaa2_flow_extract_key_offset(
1655 				&priv->extract.tc_key_extract[group],
1656 				prot, NH_FLD_IP_DST);
1657 	}
1658 
1659 	if ((mask_ipv4 && mask_ipv4->hdr.next_proto_id) ||
1660 		(mask_ipv6 && mask_ipv6->hdr.proto)) {
1661 		index = dpaa2_flow_extract_search(
1662 				&priv->extract.qos_key_extract.dpkg,
1663 				NET_PROT_IP, NH_FLD_IP_PROTO);
1664 		if (index < 0) {
1665 			ret = dpaa2_flow_extract_add(
1666 				&priv->extract.qos_key_extract,
1667 				NET_PROT_IP,
1668 				NH_FLD_IP_PROTO,
1669 				NH_FLD_IP_PROTO_SIZE);
1670 			if (ret) {
1671 				DPAA2_PMD_ERR("QoS Extract add IP_PROTO failed.");
1672 
1673 				return -1;
1674 			}
1675 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1676 		}
1677 
1678 		index = dpaa2_flow_extract_search(
1679 				&priv->extract.tc_key_extract[group].dpkg,
1680 				NET_PROT_IP, NH_FLD_IP_PROTO);
1681 		if (index < 0) {
1682 			ret = dpaa2_flow_extract_add(
1683 					&priv->extract.tc_key_extract[group],
1684 					NET_PROT_IP,
1685 					NH_FLD_IP_PROTO,
1686 					NH_FLD_IP_PROTO_SIZE);
1687 			if (ret) {
1688 				DPAA2_PMD_ERR("FS Extract add IP_PROTO failed.");
1689 
1690 				return -1;
1691 			}
1692 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1693 		}
1694 
1695 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1696 		if (ret) {
1697 			DPAA2_PMD_ERR(
1698 				"Move ipaddr before NH_FLD_IP_PROTO rule set failed");
1699 			return -1;
1700 		}
1701 
1702 		if (spec_ipv4)
1703 			key = &spec_ipv4->hdr.next_proto_id;
1704 		else
1705 			key = &spec_ipv6->hdr.proto;
1706 		if (mask_ipv4)
1707 			mask = &mask_ipv4->hdr.next_proto_id;
1708 		else
1709 			mask = &mask_ipv6->hdr.proto;
1710 
1711 		ret = dpaa2_flow_rule_data_set(
1712 				&priv->extract.qos_key_extract,
1713 				&flow->qos_rule,
1714 				NET_PROT_IP,
1715 				NH_FLD_IP_PROTO,
1716 				key, mask, NH_FLD_IP_PROTO_SIZE);
1717 		if (ret) {
1718 			DPAA2_PMD_ERR("QoS NH_FLD_IP_PROTO rule data set failed");
1719 			return -1;
1720 		}
1721 
1722 		ret = dpaa2_flow_rule_data_set(
1723 				&priv->extract.tc_key_extract[group],
1724 				&flow->fs_rule,
1725 				NET_PROT_IP,
1726 				NH_FLD_IP_PROTO,
1727 				key, mask, NH_FLD_IP_PROTO_SIZE);
1728 		if (ret) {
1729 			DPAA2_PMD_ERR("FS NH_FLD_IP_PROTO rule data set failed");
1730 			return -1;
1731 		}
1732 	}
1733 
1734 	(*device_configured) |= local_cfg;
1735 
1736 	return 0;
1737 }
1738 
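/* Translate an ICMP pattern item. Without a spec the flow matches
 * IP next protocol == IPPROTO_ICMP; otherwise the ICMP type and code
 * fields are matched.
 */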
1739 static int
1740 dpaa2_configure_flow_icmp(struct rte_flow *flow,
1741 			  struct rte_eth_dev *dev,
1742 			  const struct rte_flow_attr *attr,
1743 			  const struct rte_flow_item *pattern,
1744 			  const struct rte_flow_action actions[] __rte_unused,
1745 			  struct rte_flow_error *error __rte_unused,
1746 			  int *device_configured)
1747 {
1748 	int index, ret;
1749 	int local_cfg = 0;
1750 	uint32_t group;
1751 	const struct rte_flow_item_icmp *spec, *mask;
1752 
1753 	const struct rte_flow_item_icmp *last __rte_unused;
1754 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1755 
1756 	group = attr->group;
1757 
1758 	/* Parse pattern list to get the matching parameters */
1759 	spec    = (const struct rte_flow_item_icmp *)pattern->spec;
1760 	last    = (const struct rte_flow_item_icmp *)pattern->last;
1761 	mask    = (const struct rte_flow_item_icmp *)
1762 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_icmp_mask);
1763 
1764 	/* Get traffic class index and flow id to be configured */
1765 	flow->tc_id = group;
1766 	flow->tc_index = attr->priority;
1767 
1768 	if (!spec) {
1769 		/* No field of the ICMP header is matched,
1770 		 * only the ICMP protocol matters.
1771 		 * Example: flow create 0 ingress pattern icmp /
1772 		 */
1773 		/* The next protocol of the generic IP header is
1774 		 * what actually identifies ICMP.
1775 		 */
1776 		struct proto_discrimination proto;
1777 
1778 		index = dpaa2_flow_extract_search(
1779 				&priv->extract.qos_key_extract.dpkg,
1780 				NET_PROT_IP, NH_FLD_IP_PROTO);
1781 		if (index < 0) {
1782 			ret = dpaa2_flow_proto_discrimination_extract(
1783 					&priv->extract.qos_key_extract,
1784 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1785 			if (ret) {
1786 				DPAA2_PMD_ERR(
1787 					"QoS Extract IP protocol to discriminate ICMP failed.");
1788 
1789 				return -1;
1790 			}
1791 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1792 		}
1793 
1794 		index = dpaa2_flow_extract_search(
1795 				&priv->extract.tc_key_extract[group].dpkg,
1796 				NET_PROT_IP, NH_FLD_IP_PROTO);
1797 		if (index < 0) {
1798 			ret = dpaa2_flow_proto_discrimination_extract(
1799 					&priv->extract.tc_key_extract[group],
1800 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1801 			if (ret) {
1802 				DPAA2_PMD_ERR(
1803 					"FS Extract IP protocol to discriminate ICMP failed.");
1804 
1805 				return -1;
1806 			}
1807 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1808 		}
1809 
1810 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1811 		if (ret) {
1812 			DPAA2_PMD_ERR(
1813 				"Move IP addr before ICMP discrimination set failed");
1814 			return -1;
1815 		}
1816 
1817 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
1818 		proto.ip_proto = IPPROTO_ICMP;
1819 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1820 							proto, group);
1821 		if (ret) {
1822 			DPAA2_PMD_ERR("ICMP discrimination rule set failed");
1823 			return -1;
1824 		}
1825 
1826 		(*device_configured) |= local_cfg;
1827 
1828 		return 0;
1829 	}
1830 
1831 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
1832 		RTE_FLOW_ITEM_TYPE_ICMP)) {
1833 		DPAA2_PMD_WARN("Extract field(s) of ICMP not supported.");
1834 
1835 		return -1;
1836 	}
1837 
1838 	if (mask->hdr.icmp_type) {
1839 		index = dpaa2_flow_extract_search(
1840 				&priv->extract.qos_key_extract.dpkg,
1841 				NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
1842 		if (index < 0) {
1843 			ret = dpaa2_flow_extract_add(
1844 					&priv->extract.qos_key_extract,
1845 					NET_PROT_ICMP,
1846 					NH_FLD_ICMP_TYPE,
1847 					NH_FLD_ICMP_TYPE_SIZE);
1848 			if (ret) {
1849 				DPAA2_PMD_ERR("QoS Extract add ICMP_TYPE failed.");
1850 
1851 				return -1;
1852 			}
1853 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1854 		}
1855 
1856 		index = dpaa2_flow_extract_search(
1857 				&priv->extract.tc_key_extract[group].dpkg,
1858 				NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
1859 		if (index < 0) {
1860 			ret = dpaa2_flow_extract_add(
1861 					&priv->extract.tc_key_extract[group],
1862 					NET_PROT_ICMP,
1863 					NH_FLD_ICMP_TYPE,
1864 					NH_FLD_ICMP_TYPE_SIZE);
1865 			if (ret) {
1866 				DPAA2_PMD_ERR("FS Extract add ICMP_TYPE failed.");
1867 
1868 				return -1;
1869 			}
1870 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1871 		}
1872 
1873 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1874 		if (ret) {
1875 			DPAA2_PMD_ERR(
1876 				"Move ipaddr before ICMP TYPE set failed");
1877 			return -1;
1878 		}
1879 
1880 		ret = dpaa2_flow_rule_data_set(
1881 				&priv->extract.qos_key_extract,
1882 				&flow->qos_rule,
1883 				NET_PROT_ICMP,
1884 				NH_FLD_ICMP_TYPE,
1885 				&spec->hdr.icmp_type,
1886 				&mask->hdr.icmp_type,
1887 				NH_FLD_ICMP_TYPE_SIZE);
1888 		if (ret) {
1889 			DPAA2_PMD_ERR("QoS NH_FLD_ICMP_TYPE rule data set failed");
1890 			return -1;
1891 		}
1892 
1893 		ret = dpaa2_flow_rule_data_set(
1894 				&priv->extract.tc_key_extract[group],
1895 				&flow->fs_rule,
1896 				NET_PROT_ICMP,
1897 				NH_FLD_ICMP_TYPE,
1898 				&spec->hdr.icmp_type,
1899 				&mask->hdr.icmp_type,
1900 				NH_FLD_ICMP_TYPE_SIZE);
1901 		if (ret) {
1902 			DPAA2_PMD_ERR("FS NH_FLD_ICMP_TYPE rule data set failed");
1903 			return -1;
1904 		}
1905 	}
1906 
1907 	if (mask->hdr.icmp_code) {
1908 		index = dpaa2_flow_extract_search(
1909 				&priv->extract.qos_key_extract.dpkg,
1910 				NET_PROT_ICMP, NH_FLD_ICMP_CODE);
1911 		if (index < 0) {
1912 			ret = dpaa2_flow_extract_add(
1913 					&priv->extract.qos_key_extract,
1914 					NET_PROT_ICMP,
1915 					NH_FLD_ICMP_CODE,
1916 					NH_FLD_ICMP_CODE_SIZE);
1917 			if (ret) {
1918 				DPAA2_PMD_ERR("QoS Extract add ICMP_CODE failed.");
1919 
1920 				return -1;
1921 			}
1922 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1923 		}
1924 
1925 		index = dpaa2_flow_extract_search(
1926 				&priv->extract.tc_key_extract[group].dpkg,
1927 				NET_PROT_ICMP, NH_FLD_ICMP_CODE);
1928 		if (index < 0) {
1929 			ret = dpaa2_flow_extract_add(
1930 					&priv->extract.tc_key_extract[group],
1931 					NET_PROT_ICMP,
1932 					NH_FLD_ICMP_CODE,
1933 					NH_FLD_ICMP_CODE_SIZE);
1934 			if (ret) {
1935 				DPAA2_PMD_ERR("FS Extract add ICMP_CODE failed.");
1936 
1937 				return -1;
1938 			}
1939 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1940 		}
1941 
1942 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1943 		if (ret) {
1944 			DPAA2_PMD_ERR(
1945 				"Move ipaddr before ICMP CODE set failed");
1946 			return -1;
1947 		}
1948 
1949 		ret = dpaa2_flow_rule_data_set(
1950 				&priv->extract.qos_key_extract,
1951 				&flow->qos_rule,
1952 				NET_PROT_ICMP,
1953 				NH_FLD_ICMP_CODE,
1954 				&spec->hdr.icmp_code,
1955 				&mask->hdr.icmp_code,
1956 				NH_FLD_ICMP_CODE_SIZE);
1957 		if (ret) {
1958 			DPAA2_PMD_ERR("QoS NH_FLD_ICMP_CODE rule data set failed");
1959 			return -1;
1960 		}
1961 
1962 		ret = dpaa2_flow_rule_data_set(
1963 				&priv->extract.tc_key_extract[group],
1964 				&flow->fs_rule,
1965 				NET_PROT_ICMP,
1966 				NH_FLD_ICMP_CODE,
1967 				&spec->hdr.icmp_code,
1968 				&mask->hdr.icmp_code,
1969 				NH_FLD_ICMP_CODE_SIZE);
1970 		if (ret) {
1971 			DPAA2_PMD_ERR("FS NH_FLD_ICMP_CODE rule data set failed");
1972 			return -1;
1973 		}
1974 	}
1975 
1976 	(*device_configured) |= local_cfg;
1977 
1978 	return 0;
1979 }
1980 
1981 static int
1982 dpaa2_configure_flow_udp(struct rte_flow *flow,
1983 			 struct rte_eth_dev *dev,
1984 			  const struct rte_flow_attr *attr,
1985 			  const struct rte_flow_item *pattern,
1986 			  const struct rte_flow_action actions[] __rte_unused,
1987 			  struct rte_flow_error *error __rte_unused,
1988 			  int *device_configured)
1989 {
1990 	int index, ret;
1991 	int local_cfg = 0;
1992 	uint32_t group;
1993 	const struct rte_flow_item_udp *spec, *mask;
1994 
1995 	const struct rte_flow_item_udp *last __rte_unused;
1996 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1997 
1998 	group = attr->group;
1999 
2000 	/* Parse pattern list to get the matching parameters */
2001 	spec    = (const struct rte_flow_item_udp *)pattern->spec;
2002 	last    = (const struct rte_flow_item_udp *)pattern->last;
2003 	mask    = (const struct rte_flow_item_udp *)
2004 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_udp_mask);
2005 
2006 	/* Get traffic class index and flow id to be configured */
2007 	flow->tc_id = group;
2008 	flow->tc_index = attr->priority;
2009 
2010 	if (!spec || !mc_l4_port_identification) {
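		/* Without a UDP spec, or when mc_l4_port_identification is
		 * disabled, UDP is identified only via the next-protocol
		 * field of the generic IP extract (IPPROTO_UDP).
		 * Illustrative testpmd usage (syntax and queue assumed):
		 * flow create 0 ingress pattern udp / end
		 *	actions queue index 1 / end
		 */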
2011 		struct proto_discrimination proto;
2012 
2013 		index = dpaa2_flow_extract_search(
2014 				&priv->extract.qos_key_extract.dpkg,
2015 				NET_PROT_IP, NH_FLD_IP_PROTO);
2016 		if (index < 0) {
2017 			ret = dpaa2_flow_proto_discrimination_extract(
2018 					&priv->extract.qos_key_extract,
2019 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2020 			if (ret) {
2021 				DPAA2_PMD_ERR(
2022 					"QoS Extract IP protocol to discriminate UDP failed.");
2023 
2024 				return -1;
2025 			}
2026 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2027 		}
2028 
2029 		index = dpaa2_flow_extract_search(
2030 				&priv->extract.tc_key_extract[group].dpkg,
2031 				NET_PROT_IP, NH_FLD_IP_PROTO);
2032 		if (index < 0) {
2033 			ret = dpaa2_flow_proto_discrimination_extract(
2034 				&priv->extract.tc_key_extract[group],
2035 				DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2036 			if (ret) {
2037 				DPAA2_PMD_ERR(
2038 					"FS Extract IP protocol to discriminate UDP failed.");
2039 
2040 				return -1;
2041 			}
2042 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2043 		}
2044 
2045 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2046 		if (ret) {
2047 			DPAA2_PMD_ERR(
2048 				"Move IP addr before UDP discrimination set failed");
2049 			return -1;
2050 		}
2051 
2052 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2053 		proto.ip_proto = IPPROTO_UDP;
2054 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2055 							proto, group);
2056 		if (ret) {
2057 			DPAA2_PMD_ERR("UDP discrimination rule set failed");
2058 			return -1;
2059 		}
2060 
2061 		(*device_configured) |= local_cfg;
2062 
2063 		if (!spec)
2064 			return 0;
2065 	}
2066 
2067 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2068 		RTE_FLOW_ITEM_TYPE_UDP)) {
2069 		DPAA2_PMD_WARN("Extract field(s) of UDP not supported.");
2070 
2071 		return -1;
2072 	}
2073 
2074 	if (mask->hdr.src_port) {
2075 		index = dpaa2_flow_extract_search(
2076 				&priv->extract.qos_key_extract.dpkg,
2077 				NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
2078 		if (index < 0) {
2079 			ret = dpaa2_flow_extract_add(
2080 					&priv->extract.qos_key_extract,
2081 				NET_PROT_UDP,
2082 				NH_FLD_UDP_PORT_SRC,
2083 				NH_FLD_UDP_PORT_SIZE);
2084 			if (ret) {
2085 				DPAA2_PMD_ERR("QoS Extract add UDP_SRC failed.");
2086 
2087 				return -1;
2088 			}
2089 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2090 		}
2091 
2092 		index = dpaa2_flow_extract_search(
2093 				&priv->extract.tc_key_extract[group].dpkg,
2094 				NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
2095 		if (index < 0) {
2096 			ret = dpaa2_flow_extract_add(
2097 					&priv->extract.tc_key_extract[group],
2098 					NET_PROT_UDP,
2099 					NH_FLD_UDP_PORT_SRC,
2100 					NH_FLD_UDP_PORT_SIZE);
2101 			if (ret) {
2102 				DPAA2_PMD_ERR("FS Extract add UDP_SRC failed.");
2103 
2104 				return -1;
2105 			}
2106 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2107 		}
2108 
2109 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2110 		if (ret) {
2111 			DPAA2_PMD_ERR(
2112 				"Move ipaddr before UDP_PORT_SRC set failed");
2113 			return -1;
2114 		}
2115 
2116 		ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
2117 				&flow->qos_rule,
2118 				NET_PROT_UDP,
2119 				NH_FLD_UDP_PORT_SRC,
2120 				&spec->hdr.src_port,
2121 				&mask->hdr.src_port,
2122 				NH_FLD_UDP_PORT_SIZE);
2123 		if (ret) {
2124 			DPAA2_PMD_ERR(
2125 				"QoS NH_FLD_UDP_PORT_SRC rule data set failed");
2126 			return -1;
2127 		}
2128 
2129 		ret = dpaa2_flow_rule_data_set(
2130 				&priv->extract.tc_key_extract[group],
2131 				&flow->fs_rule,
2132 				NET_PROT_UDP,
2133 				NH_FLD_UDP_PORT_SRC,
2134 				&spec->hdr.src_port,
2135 				&mask->hdr.src_port,
2136 				NH_FLD_UDP_PORT_SIZE);
2137 		if (ret) {
2138 			DPAA2_PMD_ERR(
2139 				"FS NH_FLD_UDP_PORT_SRC rule data set failed");
2140 			return -1;
2141 		}
2142 	}
2143 
2144 	if (mask->hdr.dst_port) {
2145 		index = dpaa2_flow_extract_search(
2146 				&priv->extract.qos_key_extract.dpkg,
2147 				NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
2148 		if (index < 0) {
2149 			ret = dpaa2_flow_extract_add(
2150 					&priv->extract.qos_key_extract,
2151 					NET_PROT_UDP,
2152 					NH_FLD_UDP_PORT_DST,
2153 					NH_FLD_UDP_PORT_SIZE);
2154 			if (ret) {
2155 				DPAA2_PMD_ERR("QoS Extract add UDP_DST failed.");
2156 
2157 				return -1;
2158 			}
2159 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2160 		}
2161 
2162 		index = dpaa2_flow_extract_search(
2163 				&priv->extract.tc_key_extract[group].dpkg,
2164 				NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
2165 		if (index < 0) {
2166 			ret = dpaa2_flow_extract_add(
2167 					&priv->extract.tc_key_extract[group],
2168 					NET_PROT_UDP,
2169 					NH_FLD_UDP_PORT_DST,
2170 					NH_FLD_UDP_PORT_SIZE);
2171 			if (ret) {
2172 				DPAA2_PMD_ERR("FS Extract add UDP_DST failed.");
2173 
2174 				return -1;
2175 			}
2176 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2177 		}
2178 
2179 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2180 		if (ret) {
2181 			DPAA2_PMD_ERR(
2182 				"Move ipaddr before UDP_PORT_DST set failed");
2183 			return -1;
2184 		}
2185 
2186 		ret = dpaa2_flow_rule_data_set(
2187 				&priv->extract.qos_key_extract,
2188 				&flow->qos_rule,
2189 				NET_PROT_UDP,
2190 				NH_FLD_UDP_PORT_DST,
2191 				&spec->hdr.dst_port,
2192 				&mask->hdr.dst_port,
2193 				NH_FLD_UDP_PORT_SIZE);
2194 		if (ret) {
2195 			DPAA2_PMD_ERR(
2196 				"QoS NH_FLD_UDP_PORT_DST rule data set failed");
2197 			return -1;
2198 		}
2199 
2200 		ret = dpaa2_flow_rule_data_set(
2201 				&priv->extract.tc_key_extract[group],
2202 				&flow->fs_rule,
2203 				NET_PROT_UDP,
2204 				NH_FLD_UDP_PORT_DST,
2205 				&spec->hdr.dst_port,
2206 				&mask->hdr.dst_port,
2207 				NH_FLD_UDP_PORT_SIZE);
2208 		if (ret) {
2209 			DPAA2_PMD_ERR(
2210 				"FS NH_FLD_UDP_PORT_DST rule data set failed");
2211 			return -1;
2212 		}
2213 	}
2214 
2215 	(*device_configured) |= local_cfg;
2216 
2217 	return 0;
2218 }
2219 
2220 static int
2221 dpaa2_configure_flow_tcp(struct rte_flow *flow,
2222 			 struct rte_eth_dev *dev,
2223 			 const struct rte_flow_attr *attr,
2224 			 const struct rte_flow_item *pattern,
2225 			 const struct rte_flow_action actions[] __rte_unused,
2226 			 struct rte_flow_error *error __rte_unused,
2227 			 int *device_configured)
2228 {
2229 	int index, ret;
2230 	int local_cfg = 0;
2231 	uint32_t group;
2232 	const struct rte_flow_item_tcp *spec, *mask;
2233 
2234 	const struct rte_flow_item_tcp *last __rte_unused;
2235 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2236 
2237 	group = attr->group;
2238 
2239 	/* Parse pattern list to get the matching parameters */
2240 	spec    = (const struct rte_flow_item_tcp *)pattern->spec;
2241 	last    = (const struct rte_flow_item_tcp *)pattern->last;
2242 	mask    = (const struct rte_flow_item_tcp *)
2243 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_tcp_mask);
2244 
2245 	/* Get traffic class index and flow id to be configured */
2246 	flow->tc_id = group;
2247 	flow->tc_index = attr->priority;
2248 
2249 	if (!spec || !mc_l4_port_identification) {
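		/* Without a TCP spec, or when mc_l4_port_identification is
		 * disabled, TCP is identified only via the next-protocol
		 * field of the generic IP extract (IPPROTO_TCP).
		 * Illustrative testpmd usage (syntax and queue assumed):
		 * flow create 0 ingress pattern tcp / end
		 *	actions queue index 1 / end
		 */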
2250 		struct proto_discrimination proto;
2251 
2252 		index = dpaa2_flow_extract_search(
2253 				&priv->extract.qos_key_extract.dpkg,
2254 				NET_PROT_IP, NH_FLD_IP_PROTO);
2255 		if (index < 0) {
2256 			ret = dpaa2_flow_proto_discrimination_extract(
2257 					&priv->extract.qos_key_extract,
2258 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2259 			if (ret) {
2260 				DPAA2_PMD_ERR(
2261 					"QoS Extract IP protocol to discriminate TCP failed.");
2262 
2263 				return -1;
2264 			}
2265 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2266 		}
2267 
2268 		index = dpaa2_flow_extract_search(
2269 				&priv->extract.tc_key_extract[group].dpkg,
2270 				NET_PROT_IP, NH_FLD_IP_PROTO);
2271 		if (index < 0) {
2272 			ret = dpaa2_flow_proto_discrimination_extract(
2273 				&priv->extract.tc_key_extract[group],
2274 				DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2275 			if (ret) {
2276 				DPAA2_PMD_ERR(
2277 					"FS Extract IP protocol to discriminate TCP failed.");
2278 
2279 				return -1;
2280 			}
2281 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2282 		}
2283 
2284 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2285 		if (ret) {
2286 			DPAA2_PMD_ERR(
2287 				"Move IP addr before TCP discrimination set failed");
2288 			return -1;
2289 		}
2290 
2291 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2292 		proto.ip_proto = IPPROTO_TCP;
2293 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2294 							proto, group);
2295 		if (ret) {
2296 			DPAA2_PMD_ERR("TCP discrimination rule set failed");
2297 			return -1;
2298 		}
2299 
2300 		(*device_configured) |= local_cfg;
2301 
2302 		if (!spec)
2303 			return 0;
2304 	}
2305 
2306 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2307 		RTE_FLOW_ITEM_TYPE_TCP)) {
2308 		DPAA2_PMD_WARN("Extract field(s) of TCP not supported.");
2309 
2310 		return -1;
2311 	}
2312 
2313 	if (mask->hdr.src_port) {
2314 		index = dpaa2_flow_extract_search(
2315 				&priv->extract.qos_key_extract.dpkg,
2316 				NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
2317 		if (index < 0) {
2318 			ret = dpaa2_flow_extract_add(
2319 					&priv->extract.qos_key_extract,
2320 					NET_PROT_TCP,
2321 					NH_FLD_TCP_PORT_SRC,
2322 					NH_FLD_TCP_PORT_SIZE);
2323 			if (ret) {
2324 				DPAA2_PMD_ERR("QoS Extract add TCP_SRC failed.");
2325 
2326 				return -1;
2327 			}
2328 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2329 		}
2330 
2331 		index = dpaa2_flow_extract_search(
2332 				&priv->extract.tc_key_extract[group].dpkg,
2333 				NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
2334 		if (index < 0) {
2335 			ret = dpaa2_flow_extract_add(
2336 					&priv->extract.tc_key_extract[group],
2337 					NET_PROT_TCP,
2338 					NH_FLD_TCP_PORT_SRC,
2339 					NH_FLD_TCP_PORT_SIZE);
2340 			if (ret) {
2341 				DPAA2_PMD_ERR("FS Extract add TCP_SRC failed.");
2342 
2343 				return -1;
2344 			}
2345 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2346 		}
2347 
2348 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2349 		if (ret) {
2350 			DPAA2_PMD_ERR(
2351 				"Move ipaddr before TCP_PORT_SRC set failed");
2352 			return -1;
2353 		}
2354 
2355 		ret = dpaa2_flow_rule_data_set(
2356 				&priv->extract.qos_key_extract,
2357 				&flow->qos_rule,
2358 				NET_PROT_TCP,
2359 				NH_FLD_TCP_PORT_SRC,
2360 				&spec->hdr.src_port,
2361 				&mask->hdr.src_port,
2362 				NH_FLD_TCP_PORT_SIZE);
2363 		if (ret) {
2364 			DPAA2_PMD_ERR(
2365 				"QoS NH_FLD_TCP_PORT_SRC rule data set failed");
2366 			return -1;
2367 		}
2368 
2369 		ret = dpaa2_flow_rule_data_set(
2370 				&priv->extract.tc_key_extract[group],
2371 				&flow->fs_rule,
2372 				NET_PROT_TCP,
2373 				NH_FLD_TCP_PORT_SRC,
2374 				&spec->hdr.src_port,
2375 				&mask->hdr.src_port,
2376 				NH_FLD_TCP_PORT_SIZE);
2377 		if (ret) {
2378 			DPAA2_PMD_ERR(
2379 				"FS NH_FLD_TCP_PORT_SRC rule data set failed");
2380 			return -1;
2381 		}
2382 	}
2383 
2384 	if (mask->hdr.dst_port) {
2385 		index = dpaa2_flow_extract_search(
2386 				&priv->extract.qos_key_extract.dpkg,
2387 				NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
2388 		if (index < 0) {
2389 			ret = dpaa2_flow_extract_add(
2390 					&priv->extract.qos_key_extract,
2391 					NET_PROT_TCP,
2392 					NH_FLD_TCP_PORT_DST,
2393 					NH_FLD_TCP_PORT_SIZE);
2394 			if (ret) {
2395 				DPAA2_PMD_ERR("QoS Extract add TCP_DST failed.");
2396 
2397 				return -1;
2398 			}
2399 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2400 		}
2401 
2402 		index = dpaa2_flow_extract_search(
2403 				&priv->extract.tc_key_extract[group].dpkg,
2404 				NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
2405 		if (index < 0) {
2406 			ret = dpaa2_flow_extract_add(
2407 					&priv->extract.tc_key_extract[group],
2408 					NET_PROT_TCP,
2409 					NH_FLD_TCP_PORT_DST,
2410 					NH_FLD_TCP_PORT_SIZE);
2411 			if (ret) {
2412 				DPAA2_PMD_ERR("FS Extract add TCP_DST failed.");
2413 
2414 				return -1;
2415 			}
2416 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2417 		}
2418 
2419 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2420 		if (ret) {
2421 			DPAA2_PMD_ERR(
2422 				"Move ipaddr before TCP_PORT_DST set failed");
2423 			return -1;
2424 		}
2425 
2426 		ret = dpaa2_flow_rule_data_set(
2427 				&priv->extract.qos_key_extract,
2428 				&flow->qos_rule,
2429 				NET_PROT_TCP,
2430 				NH_FLD_TCP_PORT_DST,
2431 				&spec->hdr.dst_port,
2432 				&mask->hdr.dst_port,
2433 				NH_FLD_TCP_PORT_SIZE);
2434 		if (ret) {
2435 			DPAA2_PMD_ERR(
2436 				"QoS NH_FLD_TCP_PORT_DST rule data set failed");
2437 			return -1;
2438 		}
2439 
2440 		ret = dpaa2_flow_rule_data_set(
2441 				&priv->extract.tc_key_extract[group],
2442 				&flow->fs_rule,
2443 				NET_PROT_TCP,
2444 				NH_FLD_TCP_PORT_DST,
2445 				&spec->hdr.dst_port,
2446 				&mask->hdr.dst_port,
2447 				NH_FLD_TCP_PORT_SIZE);
2448 		if (ret) {
2449 			DPAA2_PMD_ERR(
2450 				"FS NH_FLD_TCP_PORT_DST rule data set failed");
2451 			return -1;
2452 		}
2453 	}
2454 
2455 	(*device_configured) |= local_cfg;
2456 
2457 	return 0;
2458 }
2459 
2460 static int
2461 dpaa2_configure_flow_sctp(struct rte_flow *flow,
2462 			  struct rte_eth_dev *dev,
2463 			  const struct rte_flow_attr *attr,
2464 			  const struct rte_flow_item *pattern,
2465 			  const struct rte_flow_action actions[] __rte_unused,
2466 			  struct rte_flow_error *error __rte_unused,
2467 			  int *device_configured)
2468 {
2469 	int index, ret;
2470 	int local_cfg = 0;
2471 	uint32_t group;
2472 	const struct rte_flow_item_sctp *spec, *mask;
2473 
2474 	const struct rte_flow_item_sctp *last __rte_unused;
2475 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2476 
2477 	group = attr->group;
2478 
2479 	/* Parse pattern list to get the matching parameters */
2480 	spec    = (const struct rte_flow_item_sctp *)pattern->spec;
2481 	last    = (const struct rte_flow_item_sctp *)pattern->last;
2482 	mask    = (const struct rte_flow_item_sctp *)
2483 			(pattern->mask ? pattern->mask :
2484 				&dpaa2_flow_item_sctp_mask);
2485 
2486 	/* Get traffic class index and flow id to be configured */
2487 	flow->tc_id = group;
2488 	flow->tc_index = attr->priority;
2489 
2490 	if (!spec || !mc_l4_port_identification) {
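		/* Without an SCTP spec, or when mc_l4_port_identification is
		 * disabled, SCTP is identified only via the next-protocol
		 * field of the generic IP extract (IPPROTO_SCTP).
		 * Illustrative testpmd usage (syntax and queue assumed):
		 * flow create 0 ingress pattern sctp / end
		 *	actions queue index 1 / end
		 */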
2491 		struct proto_discrimination proto;
2492 
2493 		index = dpaa2_flow_extract_search(
2494 				&priv->extract.qos_key_extract.dpkg,
2495 				NET_PROT_IP, NH_FLD_IP_PROTO);
2496 		if (index < 0) {
2497 			ret = dpaa2_flow_proto_discrimination_extract(
2498 					&priv->extract.qos_key_extract,
2499 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2500 			if (ret) {
2501 				DPAA2_PMD_ERR(
2502 					"QoS Extract IP protocol to discriminate SCTP failed.");
2503 
2504 				return -1;
2505 			}
2506 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2507 		}
2508 
2509 		index = dpaa2_flow_extract_search(
2510 				&priv->extract.tc_key_extract[group].dpkg,
2511 				NET_PROT_IP, NH_FLD_IP_PROTO);
2512 		if (index < 0) {
2513 			ret = dpaa2_flow_proto_discrimination_extract(
2514 					&priv->extract.tc_key_extract[group],
2515 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2516 			if (ret) {
2517 				DPAA2_PMD_ERR(
2518 					"FS Extract IP protocol to discriminate SCTP failed.");
2519 
2520 				return -1;
2521 			}
2522 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2523 		}
2524 
2525 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2526 		if (ret) {
2527 			DPAA2_PMD_ERR(
2528 				"Move ipaddr before SCTP discrimination set failed");
2529 			return -1;
2530 		}
2531 
2532 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2533 		proto.ip_proto = IPPROTO_SCTP;
2534 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2535 							proto, group);
2536 		if (ret) {
2537 			DPAA2_PMD_ERR("SCTP discrimination rule set failed");
2538 			return -1;
2539 		}
2540 
2541 		(*device_configured) |= local_cfg;
2542 
2543 		if (!spec)
2544 			return 0;
2545 	}
2546 
2547 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2548 		RTE_FLOW_ITEM_TYPE_SCTP)) {
2549 		DPAA2_PMD_WARN("Extract field(s) of SCTP not supported.");
2550 
2551 		return -1;
2552 	}
2553 
2554 	if (mask->hdr.src_port) {
2555 		index = dpaa2_flow_extract_search(
2556 				&priv->extract.qos_key_extract.dpkg,
2557 				NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
2558 		if (index < 0) {
2559 			ret = dpaa2_flow_extract_add(
2560 					&priv->extract.qos_key_extract,
2561 					NET_PROT_SCTP,
2562 					NH_FLD_SCTP_PORT_SRC,
2563 					NH_FLD_SCTP_PORT_SIZE);
2564 			if (ret) {
2565 				DPAA2_PMD_ERR("QoS Extract add SCTP_SRC failed.");
2566 
2567 				return -1;
2568 			}
2569 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2570 		}
2571 
2572 		index = dpaa2_flow_extract_search(
2573 				&priv->extract.tc_key_extract[group].dpkg,
2574 				NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
2575 		if (index < 0) {
2576 			ret = dpaa2_flow_extract_add(
2577 					&priv->extract.tc_key_extract[group],
2578 					NET_PROT_SCTP,
2579 					NH_FLD_SCTP_PORT_SRC,
2580 					NH_FLD_SCTP_PORT_SIZE);
2581 			if (ret) {
2582 				DPAA2_PMD_ERR("FS Extract add SCTP_SRC failed.");
2583 
2584 				return -1;
2585 			}
2586 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2587 		}
2588 
2589 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2590 		if (ret) {
2591 			DPAA2_PMD_ERR(
2592 				"Move ipaddr before SCTP_PORT_SRC set failed");
2593 			return -1;
2594 		}
2595 
2596 		ret = dpaa2_flow_rule_data_set(
2597 				&priv->extract.qos_key_extract,
2598 				&flow->qos_rule,
2599 				NET_PROT_SCTP,
2600 				NH_FLD_SCTP_PORT_SRC,
2601 				&spec->hdr.src_port,
2602 				&mask->hdr.src_port,
2603 				NH_FLD_SCTP_PORT_SIZE);
2604 		if (ret) {
2605 			DPAA2_PMD_ERR(
2606 				"QoS NH_FLD_SCTP_PORT_SRC rule data set failed");
2607 			return -1;
2608 		}
2609 
2610 		ret = dpaa2_flow_rule_data_set(
2611 				&priv->extract.tc_key_extract[group],
2612 				&flow->fs_rule,
2613 				NET_PROT_SCTP,
2614 				NH_FLD_SCTP_PORT_SRC,
2615 				&spec->hdr.src_port,
2616 				&mask->hdr.src_port,
2617 				NH_FLD_SCTP_PORT_SIZE);
2618 		if (ret) {
2619 			DPAA2_PMD_ERR(
2620 				"FS NH_FLD_SCTP_PORT_SRC rule data set failed");
2621 			return -1;
2622 		}
2623 	}
2624 
2625 	if (mask->hdr.dst_port) {
2626 		index = dpaa2_flow_extract_search(
2627 				&priv->extract.qos_key_extract.dpkg,
2628 				NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
2629 		if (index < 0) {
2630 			ret = dpaa2_flow_extract_add(
2631 					&priv->extract.qos_key_extract,
2632 					NET_PROT_SCTP,
2633 					NH_FLD_SCTP_PORT_DST,
2634 					NH_FLD_SCTP_PORT_SIZE);
2635 			if (ret) {
2636 				DPAA2_PMD_ERR("QoS Extract add SCTP_DST failed.");
2637 
2638 				return -1;
2639 			}
2640 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2641 		}
2642 
2643 		index = dpaa2_flow_extract_search(
2644 				&priv->extract.tc_key_extract[group].dpkg,
2645 				NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
2646 		if (index < 0) {
2647 			ret = dpaa2_flow_extract_add(
2648 					&priv->extract.tc_key_extract[group],
2649 					NET_PROT_SCTP,
2650 					NH_FLD_SCTP_PORT_DST,
2651 					NH_FLD_SCTP_PORT_SIZE);
2652 			if (ret) {
2653 				DPAA2_PMD_ERR("FS Extract add SCTP_DST failed.");
2654 
2655 				return -1;
2656 			}
2657 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2658 		}
2659 
2660 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2661 		if (ret) {
2662 			DPAA2_PMD_ERR(
2663 				"Move ipaddr before SCTP_PORT_DST set failed");
2664 			return -1;
2665 		}
2666 
2667 		ret = dpaa2_flow_rule_data_set(
2668 				&priv->extract.qos_key_extract,
2669 				&flow->qos_rule,
2670 				NET_PROT_SCTP,
2671 				NH_FLD_SCTP_PORT_DST,
2672 				&spec->hdr.dst_port,
2673 				&mask->hdr.dst_port,
2674 				NH_FLD_SCTP_PORT_SIZE);
2675 		if (ret) {
2676 			DPAA2_PMD_ERR(
2677 				"QoS NH_FLD_SCTP_PORT_DST rule data set failed");
2678 			return -1;
2679 		}
2680 
2681 		ret = dpaa2_flow_rule_data_set(
2682 				&priv->extract.tc_key_extract[group],
2683 				&flow->fs_rule,
2684 				NET_PROT_SCTP,
2685 				NH_FLD_SCTP_PORT_DST,
2686 				&spec->hdr.dst_port,
2687 				&mask->hdr.dst_port,
2688 				NH_FLD_SCTP_PORT_SIZE);
2689 		if (ret) {
2690 			DPAA2_PMD_ERR(
2691 				"FS NH_FLD_SCTP_PORT_DST rule data set failed");
2692 			return -1;
2693 		}
2694 	}
2695 
2696 	(*device_configured) |= local_cfg;
2697 
2698 	return 0;
2699 }
2700 
2701 static int
2702 dpaa2_configure_flow_gre(struct rte_flow *flow,
2703 			 struct rte_eth_dev *dev,
2704 			 const struct rte_flow_attr *attr,
2705 			 const struct rte_flow_item *pattern,
2706 			 const struct rte_flow_action actions[] __rte_unused,
2707 			 struct rte_flow_error *error __rte_unused,
2708 			 int *device_configured)
2709 {
2710 	int index, ret;
2711 	int local_cfg = 0;
2712 	uint32_t group;
2713 	const struct rte_flow_item_gre *spec, *mask;
2714 
2715 	const struct rte_flow_item_gre *last __rte_unused;
2716 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2717 
2718 	group = attr->group;
2719 
2720 	/* Parse pattern list to get the matching parameters */
2721 	spec    = (const struct rte_flow_item_gre *)pattern->spec;
2722 	last    = (const struct rte_flow_item_gre *)pattern->last;
2723 	mask    = (const struct rte_flow_item_gre *)
2724 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_gre_mask);
2725 
2726 	/* Get traffic class index and flow id to be configured */
2727 	flow->tc_id = group;
2728 	flow->tc_index = attr->priority;
2729 
2730 	if (!spec) {
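		/* Without a GRE spec, GRE is identified only via the
		 * next-protocol field of the generic IP extract (IPPROTO_GRE).
		 * Illustrative testpmd usage (syntax and queue assumed):
		 * flow create 0 ingress pattern gre / end
		 *	actions queue index 1 / end
		 */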
2731 		struct proto_discrimination proto;
2732 
2733 		index = dpaa2_flow_extract_search(
2734 				&priv->extract.qos_key_extract.dpkg,
2735 				NET_PROT_IP, NH_FLD_IP_PROTO);
2736 		if (index < 0) {
2737 			ret = dpaa2_flow_proto_discrimination_extract(
2738 					&priv->extract.qos_key_extract,
2739 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2740 			if (ret) {
2741 				DPAA2_PMD_ERR(
2742 					"QoS Extract IP protocol to discriminate GRE failed.");
2743 
2744 				return -1;
2745 			}
2746 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2747 		}
2748 
2749 		index = dpaa2_flow_extract_search(
2750 				&priv->extract.tc_key_extract[group].dpkg,
2751 				NET_PROT_IP, NH_FLD_IP_PROTO);
2752 		if (index < 0) {
2753 			ret = dpaa2_flow_proto_discrimination_extract(
2754 					&priv->extract.tc_key_extract[group],
2755 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2756 			if (ret) {
2757 				DPAA2_PMD_ERR(
2758 					"FS Extract IP protocol to discriminate GRE failed.");
2759 
2760 				return -1;
2761 			}
2762 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2763 		}
2764 
2765 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2766 		if (ret) {
2767 			DPAA2_PMD_ERR(
2768 				"Move IP addr before GRE discrimination set failed");
2769 			return -1;
2770 		}
2771 
2772 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2773 		proto.ip_proto = IPPROTO_GRE;
2774 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2775 							proto, group);
2776 		if (ret) {
2777 			DPAA2_PMD_ERR("GRE discrimination rule set failed");
2778 			return -1;
2779 		}
2780 
2781 		(*device_configured) |= local_cfg;
2782 
2783 		return 0;
2784 	}
2785 
2786 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2787 		RTE_FLOW_ITEM_TYPE_GRE)) {
2788 		DPAA2_PMD_WARN("Extract field(s) of GRE not supported.");
2789 
2790 		return -1;
2791 	}
2792 
2793 	if (!mask->protocol)
2794 		return 0;
2795 
2796 	index = dpaa2_flow_extract_search(
2797 			&priv->extract.qos_key_extract.dpkg,
2798 			NET_PROT_GRE, NH_FLD_GRE_TYPE);
2799 	if (index < 0) {
2800 		ret = dpaa2_flow_extract_add(
2801 				&priv->extract.qos_key_extract,
2802 				NET_PROT_GRE,
2803 				NH_FLD_GRE_TYPE,
2804 				sizeof(rte_be16_t));
2805 		if (ret) {
2806 			DPAA2_PMD_ERR("QoS Extract add GRE_TYPE failed.");
2807 
2808 			return -1;
2809 		}
2810 		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2811 	}
2812 
2813 	index = dpaa2_flow_extract_search(
2814 			&priv->extract.tc_key_extract[group].dpkg,
2815 			NET_PROT_GRE, NH_FLD_GRE_TYPE);
2816 	if (index < 0) {
2817 		ret = dpaa2_flow_extract_add(
2818 				&priv->extract.tc_key_extract[group],
2819 				NET_PROT_GRE,
2820 				NH_FLD_GRE_TYPE,
2821 				sizeof(rte_be16_t));
2822 		if (ret) {
2823 			DPAA2_PMD_ERR("FS Extract add GRE_TYPE failed.");
2824 
2825 			return -1;
2826 		}
2827 		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2828 	}
2829 
2830 	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2831 	if (ret) {
2832 		DPAA2_PMD_ERR(
2833 			"Move ipaddr before GRE_TYPE set failed");
2834 		return -1;
2835 	}
2836 
2837 	ret = dpaa2_flow_rule_data_set(
2838 				&priv->extract.qos_key_extract,
2839 				&flow->qos_rule,
2840 				NET_PROT_GRE,
2841 				NH_FLD_GRE_TYPE,
2842 				&spec->protocol,
2843 				&mask->protocol,
2844 				sizeof(rte_be16_t));
2845 	if (ret) {
2846 		DPAA2_PMD_ERR(
2847 			"QoS NH_FLD_GRE_TYPE rule data set failed");
2848 		return -1;
2849 	}
2850 
2851 	ret = dpaa2_flow_rule_data_set(
2852 			&priv->extract.tc_key_extract[group],
2853 			&flow->fs_rule,
2854 			NET_PROT_GRE,
2855 			NH_FLD_GRE_TYPE,
2856 			&spec->protocol,
2857 			&mask->protocol,
2858 			sizeof(rte_be16_t));
2859 	if (ret) {
2860 		DPAA2_PMD_ERR(
2861 			"FS NH_FLD_GRE_TYPE rule data set failed");
2862 		return -1;
2863 	}
2864 
2865 	(*device_configured) |= local_cfg;
2866 
2867 	return 0;
2868 }
2869 
2870 static int
2871 dpaa2_configure_flow_raw(struct rte_flow *flow,
2872 			 struct rte_eth_dev *dev,
2873 			 const struct rte_flow_attr *attr,
2874 			 const struct rte_flow_item *pattern,
2875 			 const struct rte_flow_action actions[] __rte_unused,
2876 			 struct rte_flow_error *error __rte_unused,
2877 			 int *device_configured)
2878 {
2879 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2880 	const struct rte_flow_item_raw *spec = pattern->spec;
2881 	const struct rte_flow_item_raw *mask = pattern->mask;
2882 	int prev_key_size =
2883 		priv->extract.qos_key_extract.key_info.key_total_size;
2884 	int local_cfg = 0, ret;
2885 	uint32_t group;
2886 
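	/* The raw item is matched from the start of the frame
	 * (non-relative, offset 0) with spec and mask of equal length.
	 * Illustrative testpmd usage (syntax and bytes assumed):
	 * flow create 0 ingress pattern raw relative is 0 offset is 0
	 *	pattern is <match bytes> / end actions queue index 1 / end
	 */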
2887 	/* Need both spec and mask */
2888 	if (!spec || !mask) {
2889 		DPAA2_PMD_ERR("spec or mask not present.");
2890 		return -EINVAL;
2891 	}
2892 	/* Only a non-relative match at offset 0, without search/limit, is supported */
2893 	if (spec->relative || spec->offset != 0 ||
2894 	    spec->search || spec->limit) {
2895 		DPAA2_PMD_ERR("Relative match, non-zero offset, search and limit are not supported.");
2896 		return -EINVAL;
2897 	}
2898 	/* Spec len and mask len should be same */
2899 	if (spec->length != mask->length) {
2900 		DPAA2_PMD_ERR("Spec len and mask len mismatch.");
2901 		return -EINVAL;
2902 	}
2903 
2904 	/* Get traffic class index and flow id to be configured */
2905 	group = attr->group;
2906 	flow->tc_id = group;
2907 	flow->tc_index = attr->priority;
2908 
2909 	if (prev_key_size <= spec->length) {
2910 		ret = dpaa2_flow_extract_add_raw(&priv->extract.qos_key_extract,
2911 						 spec->length);
2912 		if (ret) {
2913 			DPAA2_PMD_ERR("QoS Extract RAW add failed.");
2914 			return -1;
2915 		}
2916 		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2917 
2918 		ret = dpaa2_flow_extract_add_raw(
2919 					&priv->extract.tc_key_extract[group],
2920 					spec->length);
2921 		if (ret) {
2922 			DPAA2_PMD_ERR("FS Extract RAW add failed.");
2923 			return -1;
2924 		}
2925 		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2926 	}
2927 
2928 	ret = dpaa2_flow_rule_data_set_raw(&flow->qos_rule, spec->pattern,
2929 					   mask->pattern, spec->length);
2930 	if (ret) {
2931 		DPAA2_PMD_ERR("QoS RAW rule data set failed");
2932 		return -1;
2933 	}
2934 
2935 	ret = dpaa2_flow_rule_data_set_raw(&flow->fs_rule, spec->pattern,
2936 					   mask->pattern, spec->length);
2937 	if (ret) {
2938 		DPAA2_PMD_ERR("FS RAW rule data set failed");
2939 		return -1;
2940 	}
2941 
2942 	(*device_configured) |= local_cfg;
2943 
2944 	return 0;
2945 }
2946 
2947 static inline int
2948 dpaa2_fs_action_supported(enum rte_flow_action_type action)
2949 {
2950 	int i;
2951 
2952 	for (i = 0; i < (int)(sizeof(dpaa2_supported_fs_action_type) /
2953 					sizeof(enum rte_flow_action_type)); i++) {
2954 		if (action == dpaa2_supported_fs_action_type[i])
2955 			return 1;
2956 	}
2957 
2958 	return 0;
2959 }
2960 /* Existing QoS/FS entries that match on IP address(es)
2961  * must be updated whenever new extract(s) are inserted
2962  * ahead of the IP address extract(s), because that shift
2963  * changes the IP address offsets inside the key and mask.
2964  */
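/* Illustrative example (layout only, offsets assumed):
 * if an entry's key held [IPv4 src][IPv4 dst] and a 2-byte extract
 * is later inserted ahead of them, the stored address bytes and
 * masks are copied from the old offsets to the new ones, the old
 * locations are cleared, and the real key size grows by the shift
 * tracked in 'extend' below.
 */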
2965 static int
2966 dpaa2_flow_entry_update(
2967 	struct dpaa2_dev_priv *priv, uint8_t tc_id)
2968 {
2969 	struct rte_flow *curr = LIST_FIRST(&priv->flows);
2970 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
2971 	int ret;
2972 	int qos_ipsrc_offset = -1, qos_ipdst_offset = -1;
2973 	int fs_ipsrc_offset = -1, fs_ipdst_offset = -1;
2974 	struct dpaa2_key_extract *qos_key_extract =
2975 		&priv->extract.qos_key_extract;
2976 	struct dpaa2_key_extract *tc_key_extract =
2977 		&priv->extract.tc_key_extract[tc_id];
2978 	char ipsrc_key[NH_FLD_IPV6_ADDR_SIZE];
2979 	char ipdst_key[NH_FLD_IPV6_ADDR_SIZE];
2980 	char ipsrc_mask[NH_FLD_IPV6_ADDR_SIZE];
2981 	char ipdst_mask[NH_FLD_IPV6_ADDR_SIZE];
2982 	int extend = -1, extend1, size = -1;
2983 	uint16_t qos_index;
2984 
2985 	while (curr) {
2986 		if (curr->ipaddr_rule.ipaddr_type ==
2987 			FLOW_NONE_IPADDR) {
2988 			curr = LIST_NEXT(curr, next);
2989 			continue;
2990 		}
2991 
2992 		if (curr->ipaddr_rule.ipaddr_type ==
2993 			FLOW_IPV4_ADDR) {
2994 			qos_ipsrc_offset =
2995 				qos_key_extract->key_info.ipv4_src_offset;
2996 			qos_ipdst_offset =
2997 				qos_key_extract->key_info.ipv4_dst_offset;
2998 			fs_ipsrc_offset =
2999 				tc_key_extract->key_info.ipv4_src_offset;
3000 			fs_ipdst_offset =
3001 				tc_key_extract->key_info.ipv4_dst_offset;
3002 			size = NH_FLD_IPV4_ADDR_SIZE;
3003 		} else {
3004 			qos_ipsrc_offset =
3005 				qos_key_extract->key_info.ipv6_src_offset;
3006 			qos_ipdst_offset =
3007 				qos_key_extract->key_info.ipv6_dst_offset;
3008 			fs_ipsrc_offset =
3009 				tc_key_extract->key_info.ipv6_src_offset;
3010 			fs_ipdst_offset =
3011 				tc_key_extract->key_info.ipv6_dst_offset;
3012 			size = NH_FLD_IPV6_ADDR_SIZE;
3013 		}
3014 
3015 		qos_index = curr->tc_id * priv->fs_entries +
3016 			curr->tc_index;
3017 
3018 		dpaa2_flow_qos_entry_log("Before update", curr, qos_index, stdout);
3019 
3020 		if (priv->num_rx_tc > 1) {
3021 			ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW,
3022 					priv->token, &curr->qos_rule);
3023 			if (ret) {
3024 				DPAA2_PMD_ERR("QoS entry remove failed.");
3025 				return -1;
3026 			}
3027 		}
3028 
3029 		extend = -1;
3030 
3031 		if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
3032 			RTE_ASSERT(qos_ipsrc_offset >=
3033 				curr->ipaddr_rule.qos_ipsrc_offset);
3034 			extend1 = qos_ipsrc_offset -
3035 				curr->ipaddr_rule.qos_ipsrc_offset;
3036 			if (extend >= 0)
3037 				RTE_ASSERT(extend == extend1);
3038 			else
3039 				extend = extend1;
3040 
3041 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3042 				(size == NH_FLD_IPV6_ADDR_SIZE));
3043 
3044 			memcpy(ipsrc_key,
3045 				(char *)(size_t)curr->qos_rule.key_iova +
3046 				curr->ipaddr_rule.qos_ipsrc_offset,
3047 				size);
3048 			memset((char *)(size_t)curr->qos_rule.key_iova +
3049 				curr->ipaddr_rule.qos_ipsrc_offset,
3050 				0, size);
3051 
3052 			memcpy(ipsrc_mask,
3053 				(char *)(size_t)curr->qos_rule.mask_iova +
3054 				curr->ipaddr_rule.qos_ipsrc_offset,
3055 				size);
3056 			memset((char *)(size_t)curr->qos_rule.mask_iova +
3057 				curr->ipaddr_rule.qos_ipsrc_offset,
3058 				0, size);
3059 
3060 			curr->ipaddr_rule.qos_ipsrc_offset = qos_ipsrc_offset;
3061 		}
3062 
3063 		if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
3064 			RTE_ASSERT(qos_ipdst_offset >=
3065 				curr->ipaddr_rule.qos_ipdst_offset);
3066 			extend1 = qos_ipdst_offset -
3067 				curr->ipaddr_rule.qos_ipdst_offset;
3068 			if (extend >= 0)
3069 				RTE_ASSERT(extend == extend1);
3070 			else
3071 				extend = extend1;
3072 
3073 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3074 				(size == NH_FLD_IPV6_ADDR_SIZE));
3075 
3076 			memcpy(ipdst_key,
3077 				(char *)(size_t)curr->qos_rule.key_iova +
3078 				curr->ipaddr_rule.qos_ipdst_offset,
3079 				size);
3080 			memset((char *)(size_t)curr->qos_rule.key_iova +
3081 				curr->ipaddr_rule.qos_ipdst_offset,
3082 				0, size);
3083 
3084 			memcpy(ipdst_mask,
3085 				(char *)(size_t)curr->qos_rule.mask_iova +
3086 				curr->ipaddr_rule.qos_ipdst_offset,
3087 				size);
3088 			memset((char *)(size_t)curr->qos_rule.mask_iova +
3089 				curr->ipaddr_rule.qos_ipdst_offset,
3090 				0, size);
3091 
3092 			curr->ipaddr_rule.qos_ipdst_offset = qos_ipdst_offset;
3093 		}
3094 
3095 		if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
3096 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3097 				(size == NH_FLD_IPV6_ADDR_SIZE));
3098 			memcpy((char *)(size_t)curr->qos_rule.key_iova +
3099 				curr->ipaddr_rule.qos_ipsrc_offset,
3100 				ipsrc_key,
3101 				size);
3102 			memcpy((char *)(size_t)curr->qos_rule.mask_iova +
3103 				curr->ipaddr_rule.qos_ipsrc_offset,
3104 				ipsrc_mask,
3105 				size);
3106 		}
3107 		if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
3108 			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3109 				(size == NH_FLD_IPV6_ADDR_SIZE));
3110 			memcpy((char *)(size_t)curr->qos_rule.key_iova +
3111 				curr->ipaddr_rule.qos_ipdst_offset,
3112 				ipdst_key,
3113 				size);
3114 			memcpy((char *)(size_t)curr->qos_rule.mask_iova +
3115 				curr->ipaddr_rule.qos_ipdst_offset,
3116 				ipdst_mask,
3117 				size);
3118 		}
3119 
3120 		if (extend >= 0)
3121 			curr->qos_real_key_size += extend;
3122 
3123 		curr->qos_rule.key_size = FIXED_ENTRY_SIZE;
3124 
3125 		dpaa2_flow_qos_entry_log("Start update", curr, qos_index, stdout);
3126 
3127 		if (priv->num_rx_tc > 1) {
3128 			ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
3129 					priv->token, &curr->qos_rule,
3130 					curr->tc_id, qos_index,
3131 					0, 0);
3132 			if (ret) {
3133 				DPAA2_PMD_ERR("QoS entry update failed.");
3134 				return -1;
3135 			}
3136 		}
3137 
3138 		if (!dpaa2_fs_action_supported(curr->action)) {
3139 			curr = LIST_NEXT(curr, next);
3140 			continue;
3141 		}
3142 
3143 		dpaa2_flow_fs_entry_log("Before update", curr, stdout);
3144 		extend = -1;
3145 
3146 		ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW,
3147 				priv->token, curr->tc_id, &curr->fs_rule);
3148 		if (ret) {
3149 			DPAA2_PMD_ERR("FS entry remove failed.");
3150 			return -1;
3151 		}
3152 
3153 		if (curr->ipaddr_rule.fs_ipsrc_offset >= 0 &&
3154 			tc_id == curr->tc_id) {
3155 			RTE_ASSERT(fs_ipsrc_offset >=
3156 				curr->ipaddr_rule.fs_ipsrc_offset);
3157 			extend1 = fs_ipsrc_offset -
3158 				curr->ipaddr_rule.fs_ipsrc_offset;
3159 			if (extend >= 0)
3160 				RTE_ASSERT(extend == extend1);
3161 			else
3162 				extend = extend1;
3163 
3164 			memcpy(ipsrc_key,
3165 				(char *)(size_t)curr->fs_rule.key_iova +
3166 				curr->ipaddr_rule.fs_ipsrc_offset,
3167 				size);
3168 			memset((char *)(size_t)curr->fs_rule.key_iova +
3169 				curr->ipaddr_rule.fs_ipsrc_offset,
3170 				0, size);
3171 
3172 			memcpy(ipsrc_mask,
3173 				(char *)(size_t)curr->fs_rule.mask_iova +
3174 				curr->ipaddr_rule.fs_ipsrc_offset,
3175 				size);
3176 			memset((char *)(size_t)curr->fs_rule.mask_iova +
3177 				curr->ipaddr_rule.fs_ipsrc_offset,
3178 				0, size);
3179 
3180 			curr->ipaddr_rule.fs_ipsrc_offset = fs_ipsrc_offset;
3181 		}
3182 
3183 		if (curr->ipaddr_rule.fs_ipdst_offset >= 0 &&
3184 			tc_id == curr->tc_id) {
3185 			RTE_ASSERT(fs_ipdst_offset >=
3186 				curr->ipaddr_rule.fs_ipdst_offset);
3187 			extend1 = fs_ipdst_offset -
3188 				curr->ipaddr_rule.fs_ipdst_offset;
3189 			if (extend >= 0)
3190 				RTE_ASSERT(extend == extend1);
3191 			else
3192 				extend = extend1;
3193 
3194 			memcpy(ipdst_key,
3195 				(char *)(size_t)curr->fs_rule.key_iova +
3196 				curr->ipaddr_rule.fs_ipdst_offset,
3197 				size);
3198 			memset((char *)(size_t)curr->fs_rule.key_iova +
3199 				curr->ipaddr_rule.fs_ipdst_offset,
3200 				0, size);
3201 
3202 			memcpy(ipdst_mask,
3203 				(char *)(size_t)curr->fs_rule.mask_iova +
3204 				curr->ipaddr_rule.fs_ipdst_offset,
3205 				size);
3206 			memset((char *)(size_t)curr->fs_rule.mask_iova +
3207 				curr->ipaddr_rule.fs_ipdst_offset,
3208 				0, size);
3209 
3210 			curr->ipaddr_rule.fs_ipdst_offset = fs_ipdst_offset;
3211 		}
3212 
3213 		if (curr->ipaddr_rule.fs_ipsrc_offset >= 0) {
3214 			memcpy((char *)(size_t)curr->fs_rule.key_iova +
3215 				curr->ipaddr_rule.fs_ipsrc_offset,
3216 				ipsrc_key,
3217 				size);
3218 			memcpy((char *)(size_t)curr->fs_rule.mask_iova +
3219 				curr->ipaddr_rule.fs_ipsrc_offset,
3220 				ipsrc_mask,
3221 				size);
3222 		}
3223 		if (curr->ipaddr_rule.fs_ipdst_offset >= 0) {
3224 			memcpy((char *)(size_t)curr->fs_rule.key_iova +
3225 				curr->ipaddr_rule.fs_ipdst_offset,
3226 				ipdst_key,
3227 				size);
3228 			memcpy((char *)(size_t)curr->fs_rule.mask_iova +
3229 				curr->ipaddr_rule.fs_ipdst_offset,
3230 				ipdst_mask,
3231 				size);
3232 		}
3233 
3234 		if (extend >= 0)
3235 			curr->fs_real_key_size += extend;
3236 		curr->fs_rule.key_size = FIXED_ENTRY_SIZE;
3237 
3238 		dpaa2_flow_fs_entry_log("Start update", curr, stdout);
3239 
3240 		ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW,
3241 				priv->token, curr->tc_id, curr->tc_index,
3242 				&curr->fs_rule, &curr->action_cfg);
3243 		if (ret) {
3244 			DPAA2_PMD_ERR("FS entry update failed.");
3245 			return -1;
3246 		}
3247 
3248 		curr = LIST_NEXT(curr, next);
3249 	}
3250 
3251 	return 0;
3252 }
3253 
3254 static inline int
3255 dpaa2_flow_verify_attr(
3256 	struct dpaa2_dev_priv *priv,
3257 	const struct rte_flow_attr *attr)
3258 {
3259 	struct rte_flow *curr = LIST_FIRST(&priv->flows);
3260 
3261 	while (curr) {
3262 		if (curr->tc_id == attr->group &&
3263 			curr->tc_index == attr->priority) {
3264 			DPAA2_PMD_ERR(
3265 				"Flow with group %d and priority %d already exists.",
3266 				attr->group, attr->priority);
3267 
3268 			return -1;
3269 		}
3270 		curr = LIST_NEXT(curr, next);
3271 	}
3272 
3273 	return 0;
3274 }
3275 
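/* Resolve the destination DPAA2 device of a PORT_ID or
 * REPRESENTED_PORT action; NULL is returned when the target
 * port is not a DPAA2 device.
 * Illustrative testpmd usage (syntax and port id assumed):
 * flow create 0 ingress pattern eth / end
 *	actions represented_port ethdev_port_id 1 / end
 */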
3276 static inline struct rte_eth_dev *
3277 dpaa2_flow_redirect_dev(struct dpaa2_dev_priv *priv,
3278 	const struct rte_flow_action *action)
3279 {
3280 	const struct rte_flow_action_port_id *port_id;
3281 	int idx = -1;
3282 	struct rte_eth_dev *dest_dev;
3283 
3284 	if (action->type == RTE_FLOW_ACTION_TYPE_PORT_ID) {
3285 		port_id = (const struct rte_flow_action_port_id *)
3286 					action->conf;
3287 		if (!port_id->original)
3288 			idx = port_id->id;
3289 	} else if (action->type == RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT) {
3290 		const struct rte_flow_action_ethdev *ethdev;
3291 
3292 		ethdev = (const struct rte_flow_action_ethdev *)action->conf;
3293 		idx = ethdev->port_id;
3294 	} else {
3295 		return NULL;
3296 	}
3297 
3298 	if (idx >= 0) {
3299 		if (!rte_eth_dev_is_valid_port(idx))
3300 			return NULL;
3301 		dest_dev = &rte_eth_devices[idx];
3302 	} else {
3303 		dest_dev = priv->eth_dev;
3304 	}
3305 
3306 	if (!dpaa2_dev_is_dpaa2(dest_dev))
3307 		return NULL;
3308 
3309 	return dest_dev;
3310 }
3311 
3312 static inline int
3313 dpaa2_flow_verify_action(
3314 	struct dpaa2_dev_priv *priv,
3315 	const struct rte_flow_attr *attr,
3316 	const struct rte_flow_action actions[])
3317 {
3318 	int end_of_list = 0, i, j = 0;
3319 	const struct rte_flow_action_queue *dest_queue;
3320 	const struct rte_flow_action_rss *rss_conf;
3321 	struct dpaa2_queue *rxq;
3322 
3323 	while (!end_of_list) {
3324 		switch (actions[j].type) {
3325 		case RTE_FLOW_ACTION_TYPE_QUEUE:
3326 			dest_queue = (const struct rte_flow_action_queue *)
3327 					(actions[j].conf);
3328 			rxq = priv->rx_vq[dest_queue->index];
3329 			if (attr->group != rxq->tc_index) {
3330 				DPAA2_PMD_ERR(
3331 					"RXQ[%d] does not belong to the group %d",
3332 					dest_queue->index, attr->group);
3333 
3334 				return -1;
3335 			}
3336 			break;
3337 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
3338 		case RTE_FLOW_ACTION_TYPE_PORT_ID:
3339 			if (!dpaa2_flow_redirect_dev(priv, &actions[j])) {
3340 				DPAA2_PMD_ERR("Invalid port id of action");
3341 				return -ENOTSUP;
3342 			}
3343 			break;
3344 		case RTE_FLOW_ACTION_TYPE_RSS:
3345 			rss_conf = (const struct rte_flow_action_rss *)
3346 					(actions[j].conf);
3347 			if (rss_conf->queue_num > priv->dist_queues) {
3348 				DPAA2_PMD_ERR(
3349 					"RSS queue number exceeds the distribution size");
3350 				return -ENOTSUP;
3351 			}
3352 			for (i = 0; i < (int)rss_conf->queue_num; i++) {
3353 				if (rss_conf->queue[i] >= priv->nb_rx_queues) {
3354 					DPAA2_PMD_ERR(
3355 						"RSS queue index exceeds the number of RXQs");
3356 					return -ENOTSUP;
3357 				}
3358 				rxq = priv->rx_vq[rss_conf->queue[i]];
3359 				if (rxq->tc_index != attr->group) {
3360 					DPAA2_PMD_ERR(
3361 						"Queue/Group combination is not supported",
3362 					return -ENOTSUP;
3363 				}
3364 			}
3365 
3366 			break;
3367 		case RTE_FLOW_ACTION_TYPE_END:
3368 			end_of_list = 1;
3369 			break;
3370 		default:
3371 			DPAA2_PMD_ERR("Invalid action type");
3372 			return -ENOTSUP;
3373 		}
3374 		j++;
3375 	}
3376 
3377 	return 0;
3378 }
3379 
3380 static int
3381 dpaa2_generic_flow_set(struct rte_flow *flow,
3382 		       struct rte_eth_dev *dev,
3383 		       const struct rte_flow_attr *attr,
3384 		       const struct rte_flow_item pattern[],
3385 		       const struct rte_flow_action actions[],
3386 		       struct rte_flow_error *error)
3387 {
3388 	const struct rte_flow_action_queue *dest_queue;
3389 	const struct rte_flow_action_rss *rss_conf;
3390 	int is_keycfg_configured = 0, end_of_list = 0;
3391 	int ret = 0, i = 0, j = 0;
3392 	struct dpni_rx_dist_cfg tc_cfg;
3393 	struct dpni_qos_tbl_cfg qos_cfg;
3394 	struct dpni_fs_action_cfg action;
3395 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3396 	struct dpaa2_queue *dest_q;
3397 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3398 	size_t param;
3399 	struct rte_flow *curr = LIST_FIRST(&priv->flows);
3400 	uint16_t qos_index;
3401 	struct rte_eth_dev *dest_dev;
3402 	struct dpaa2_dev_priv *dest_priv;
3403 
3404 	ret = dpaa2_flow_verify_attr(priv, attr);
3405 	if (ret)
3406 		return ret;
3407 
3408 	ret = dpaa2_flow_verify_action(priv, attr, actions);
3409 	if (ret)
3410 		return ret;
3411 
3412 	/* Parse pattern list to get the matching parameters */
3413 	while (!end_of_list) {
3414 		switch (pattern[i].type) {
3415 		case RTE_FLOW_ITEM_TYPE_ETH:
3416 			ret = dpaa2_configure_flow_eth(flow,
3417 					dev, attr, &pattern[i], actions, error,
3418 					&is_keycfg_configured);
3419 			if (ret) {
3420 				DPAA2_PMD_ERR("ETH flow configuration failed!");
3421 				return ret;
3422 			}
3423 			break;
3424 		case RTE_FLOW_ITEM_TYPE_VLAN:
3425 			ret = dpaa2_configure_flow_vlan(flow,
3426 					dev, attr, &pattern[i], actions, error,
3427 					&is_keycfg_configured);
3428 			if (ret) {
3429 				DPAA2_PMD_ERR("VLAN flow configuration failed!");
3430 				return ret;
3431 			}
3432 			break;
3433 		case RTE_FLOW_ITEM_TYPE_IPV4:
3434 		case RTE_FLOW_ITEM_TYPE_IPV6:
3435 			ret = dpaa2_configure_flow_generic_ip(flow,
3436 					dev, attr, &pattern[i], actions, error,
3437 					&is_keycfg_configured);
3438 			if (ret) {
3439 				DPAA2_PMD_ERR("IP flow configuration failed!");
3440 				return ret;
3441 			}
3442 			break;
3443 		case RTE_FLOW_ITEM_TYPE_ICMP:
3444 			ret = dpaa2_configure_flow_icmp(flow,
3445 					dev, attr, &pattern[i], actions, error,
3446 					&is_keycfg_configured);
3447 			if (ret) {
3448 				DPAA2_PMD_ERR("ICMP flow configuration failed!");
3449 				return ret;
3450 			}
3451 			break;
3452 		case RTE_FLOW_ITEM_TYPE_UDP:
3453 			ret = dpaa2_configure_flow_udp(flow,
3454 					dev, attr, &pattern[i], actions, error,
3455 					&is_keycfg_configured);
3456 			if (ret) {
3457 				DPAA2_PMD_ERR("UDP flow configuration failed!");
3458 				return ret;
3459 			}
3460 			break;
3461 		case RTE_FLOW_ITEM_TYPE_TCP:
3462 			ret = dpaa2_configure_flow_tcp(flow,
3463 					dev, attr, &pattern[i], actions, error,
3464 					&is_keycfg_configured);
3465 			if (ret) {
3466 				DPAA2_PMD_ERR("TCP flow configuration failed!");
3467 				return ret;
3468 			}
3469 			break;
3470 		case RTE_FLOW_ITEM_TYPE_SCTP:
3471 			ret = dpaa2_configure_flow_sctp(flow,
3472 					dev, attr, &pattern[i], actions, error,
3473 					&is_keycfg_configured);
3474 			if (ret) {
3475 				DPAA2_PMD_ERR("SCTP flow configuration failed!");
3476 				return ret;
3477 			}
3478 			break;
3479 		case RTE_FLOW_ITEM_TYPE_GRE:
3480 			ret = dpaa2_configure_flow_gre(flow,
3481 					dev, attr, &pattern[i], actions, error,
3482 					&is_keycfg_configured);
3483 			if (ret) {
3484 				DPAA2_PMD_ERR("GRE flow configuration failed!");
3485 				return ret;
3486 			}
3487 			break;
3488 		case RTE_FLOW_ITEM_TYPE_RAW:
3489 			ret = dpaa2_configure_flow_raw(flow,
3490 						       dev, attr, &pattern[i],
3491 						       actions, error,
3492 						       &is_keycfg_configured);
3493 			if (ret) {
3494 				DPAA2_PMD_ERR("RAW flow configuration failed!");
3495 				return ret;
3496 			}
3497 			break;
3498 		case RTE_FLOW_ITEM_TYPE_END:
3499 			end_of_list = 1;
3500 			break; /*End of List*/
3501 		default:
3502 			DPAA2_PMD_ERR("Invalid pattern item type");
3503 			ret = -ENOTSUP;
3504 			break;
3505 		}
3506 		i++;
3507 	}
3508 
3509 	/* Let's parse action on matching traffic */
3510 	end_of_list = 0;
3511 	while (!end_of_list) {
3512 		switch (actions[j].type) {
3513 		case RTE_FLOW_ACTION_TYPE_QUEUE:
3514 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
3515 		case RTE_FLOW_ACTION_TYPE_PORT_ID:
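			/* A QUEUE action steers to the flow id of the local
			 * Rx queue, while PORT_ID/REPRESENTED_PORT redirects
			 * to the Tx queue of another DPAA2 port via
			 * DPNI_FS_OPT_REDIRECT_TO_DPNI_TX.
			 * Illustrative testpmd usage (syntax and queue assumed):
			 * flow create 0 ingress pattern udp / end
			 *	actions queue index 2 / end
			 */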
3516 			memset(&action, 0, sizeof(struct dpni_fs_action_cfg));
3517 			flow->action = actions[j].type;
3518 
3519 			if (actions[j].type == RTE_FLOW_ACTION_TYPE_QUEUE) {
3520 				dest_queue = (const struct rte_flow_action_queue *)
3521 								(actions[j].conf);
3522 				dest_q = priv->rx_vq[dest_queue->index];
3523 				action.flow_id = dest_q->flow_id;
3524 			} else {
3525 				dest_dev = dpaa2_flow_redirect_dev(priv,
3526 								   &actions[j]);
3527 				if (!dest_dev) {
3528 					DPAA2_PMD_ERR("Invalid destination device to redirect!");
3529 					return -1;
3530 				}
3531 
3532 				dest_priv = dest_dev->data->dev_private;
3533 				dest_q = dest_priv->tx_vq[0];
3534 				action.options =
3535 						DPNI_FS_OPT_REDIRECT_TO_DPNI_TX;
3536 				action.redirect_obj_token = dest_priv->token;
3537 				action.flow_id = dest_q->flow_id;
3538 			}
3539 
3540 			/* Configure FS table first */
3541 			if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
3542 				dpaa2_flow_fs_table_extracts_log(priv,
3543 							flow->tc_id, stdout);
3544 				if (dpkg_prepare_key_cfg(
3545 				&priv->extract.tc_key_extract[flow->tc_id].dpkg,
3546 				(uint8_t *)(size_t)priv->extract
3547 				.tc_extract_param[flow->tc_id]) < 0) {
3548 					DPAA2_PMD_ERR(
3549 					"Unable to prepare extract parameters");
3550 					return -1;
3551 				}
3552 
3553 				memset(&tc_cfg, 0,
3554 					sizeof(struct dpni_rx_dist_cfg));
3555 				tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc;
3556 				tc_cfg.key_cfg_iova =
3557 					(uint64_t)priv->extract.tc_extract_param[flow->tc_id];
3558 				tc_cfg.tc = flow->tc_id;
3559 				tc_cfg.enable = false;
3560 				ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW,
3561 						priv->token, &tc_cfg);
3562 				if (ret < 0) {
3563 					DPAA2_PMD_ERR(
3564 						"TC hash cannot be disabled (%d)",
3565 						ret);
3566 					return -1;
3567 				}
3568 				tc_cfg.enable = true;
3569 				tc_cfg.fs_miss_flow_id = dpaa2_flow_miss_flow_id;
3570 				ret = dpni_set_rx_fs_dist(dpni, CMD_PRI_LOW,
3571 							 priv->token, &tc_cfg);
3572 				if (ret < 0) {
3573 					DPAA2_PMD_ERR(
3574 						"TC distribution cannot be configured (%d)",
3575 						ret);
3576 					return -1;
3577 				}
3578 			}
3579 
3580 			/* Then configure the QoS table. */
3581 			if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
3582 				dpaa2_flow_qos_table_extracts_log(priv, stdout);
3583 				if (dpkg_prepare_key_cfg(
3584 					&priv->extract.qos_key_extract.dpkg,
3585 					(uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
3586 					DPAA2_PMD_ERR(
3587 						"Unable to prepare extract parameters");
3588 					return -1;
3589 				}
3590 
3591 				memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
3592 				qos_cfg.discard_on_miss = false;
3593 				qos_cfg.default_tc = 0;
3594 				qos_cfg.keep_entries = true;
3595 				qos_cfg.key_cfg_iova =
3596 					(size_t)priv->extract.qos_extract_param;
3597 				/* QoS table is effective for multiple TCs. */
3598 				if (priv->num_rx_tc > 1) {
3599 					ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
3600 						priv->token, &qos_cfg);
3601 					if (ret < 0) {
3602 						DPAA2_PMD_ERR(
3603 						"RSS QoS table cannot be configured (%d)",
3604 							ret);
3605 						return -1;
3606 					}
3607 				}
3608 			}
3609 
3610 			flow->qos_real_key_size = priv->extract
3611 				.qos_key_extract.key_info.key_total_size;
3612 			if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR) {
3613 				if (flow->ipaddr_rule.qos_ipdst_offset >=
3614 					flow->ipaddr_rule.qos_ipsrc_offset) {
3615 					flow->qos_real_key_size =
3616 						flow->ipaddr_rule.qos_ipdst_offset +
3617 						NH_FLD_IPV4_ADDR_SIZE;
3618 				} else {
3619 					flow->qos_real_key_size =
3620 						flow->ipaddr_rule.qos_ipsrc_offset +
3621 						NH_FLD_IPV4_ADDR_SIZE;
3622 				}
3623 			} else if (flow->ipaddr_rule.ipaddr_type ==
3624 				FLOW_IPV6_ADDR) {
3625 				if (flow->ipaddr_rule.qos_ipdst_offset >=
3626 					flow->ipaddr_rule.qos_ipsrc_offset) {
3627 					flow->qos_real_key_size =
3628 						flow->ipaddr_rule.qos_ipdst_offset +
3629 						NH_FLD_IPV6_ADDR_SIZE;
3630 				} else {
3631 					flow->qos_real_key_size =
3632 						flow->ipaddr_rule.qos_ipsrc_offset +
3633 						NH_FLD_IPV6_ADDR_SIZE;
3634 				}
3635 			}
3636 
3637 			/* Adding a QoS entry is only effective when multiple TCs are in use. */
3638 			if (priv->num_rx_tc > 1) {
3639 				qos_index = flow->tc_id * priv->fs_entries +
3640 					flow->tc_index;
3641 				if (qos_index >= priv->qos_entries) {
3642 					DPAA2_PMD_ERR("QoS table with %d entries full",
3643 						priv->qos_entries);
3644 					return -1;
3645 				}
3646 				flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
3647 
3648 				dpaa2_flow_qos_entry_log("Start add", flow,
3649 							qos_index, stdout);
3650 
3651 				ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
3652 						priv->token, &flow->qos_rule,
3653 						flow->tc_id, qos_index,
3654 						0, 0);
3655 				if (ret < 0) {
3656 					DPAA2_PMD_ERR(
3657 						"Error in adding entry to QoS table(%d)", ret);
3658 					return ret;
3659 				}
3660 			}
3661 
3662 			if (flow->tc_index >= priv->fs_entries) {
3663 				DPAA2_PMD_ERR("FS table with %d entries full",
3664 					priv->fs_entries);
3665 				return -1;
3666 			}
3667 
3668 			flow->fs_real_key_size =
3669 				priv->extract.tc_key_extract[flow->tc_id]
3670 				.key_info.key_total_size;
3671 
3672 			if (flow->ipaddr_rule.ipaddr_type ==
3673 				FLOW_IPV4_ADDR) {
3674 				if (flow->ipaddr_rule.fs_ipdst_offset >=
3675 					flow->ipaddr_rule.fs_ipsrc_offset) {
3676 					flow->fs_real_key_size =
3677 						flow->ipaddr_rule.fs_ipdst_offset +
3678 						NH_FLD_IPV4_ADDR_SIZE;
3679 				} else {
3680 					flow->fs_real_key_size =
3681 						flow->ipaddr_rule.fs_ipsrc_offset +
3682 						NH_FLD_IPV4_ADDR_SIZE;
3683 				}
3684 			} else if (flow->ipaddr_rule.ipaddr_type ==
3685 				FLOW_IPV6_ADDR) {
3686 				if (flow->ipaddr_rule.fs_ipdst_offset >=
3687 					flow->ipaddr_rule.fs_ipsrc_offset) {
3688 					flow->fs_real_key_size =
3689 						flow->ipaddr_rule.fs_ipdst_offset +
3690 						NH_FLD_IPV6_ADDR_SIZE;
3691 				} else {
3692 					flow->fs_real_key_size =
3693 						flow->ipaddr_rule.fs_ipsrc_offset +
3694 						NH_FLD_IPV6_ADDR_SIZE;
3695 				}
3696 			}
3697 
3698 			flow->fs_rule.key_size = FIXED_ENTRY_SIZE;
3699 
3700 			dpaa2_flow_fs_entry_log("Start add", flow, stdout);
3701 
3702 			ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token,
3703 						flow->tc_id, flow->tc_index,
3704 						&flow->fs_rule, &action);
3705 			if (ret < 0) {
3706 				DPAA2_PMD_ERR(
3707 				"Error in adding entry to FS table(%d)", ret);
3708 				return ret;
3709 			}
3710 			memcpy(&flow->action_cfg, &action,
3711 				sizeof(struct dpni_fs_action_cfg));
3712 			break;
3713 		case RTE_FLOW_ACTION_TYPE_RSS:
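			/* RSS spreads matching traffic across the queues
			 * listed in the action, all within one TC.
			 * Illustrative testpmd usage (syntax and queues assumed):
			 * flow create 0 ingress pattern ipv4 / end
			 *	actions rss types ipv4 end queues 0 1 2 3 end / end
			 */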
3714 			rss_conf = (const struct rte_flow_action_rss *)(actions[j].conf);
3715 
3716 			flow->action = RTE_FLOW_ACTION_TYPE_RSS;
3717 			ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types,
3718 					&priv->extract.tc_key_extract[flow->tc_id].dpkg);
3719 			if (ret < 0) {
3720 				DPAA2_PMD_ERR(
3721 				"Unable to set flow distribution. Please check queue config");
3722 				return ret;
3723 			}
3724 
3725 			/* Allocate DMA'ble memory to write the rules */
3726 			param = (size_t)rte_malloc(NULL, 256, 64);
3727 			if (!param) {
3728 				DPAA2_PMD_ERR("Memory allocation failure");
3729 				return -1;
3730 			}
3731 
3732 			if (dpkg_prepare_key_cfg(
3733 				&priv->extract.tc_key_extract[flow->tc_id].dpkg,
3734 				(uint8_t *)param) < 0) {
3735 				DPAA2_PMD_ERR(
3736 				"Unable to prepare extract parameters");
3737 				rte_free((void *)param);
3738 				return -1;
3739 			}
3740 
3741 			memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));
3742 			tc_cfg.dist_size = rss_conf->queue_num;
3743 			tc_cfg.key_cfg_iova = (size_t)param;
3744 			tc_cfg.enable = true;
3745 			tc_cfg.tc = flow->tc_id;
3746 			ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW,
3747 						 priv->token, &tc_cfg);
3748 			if (ret < 0) {
3749 				DPAA2_PMD_ERR(
3750 					"RSS TC table cannot be configured: %d",
3751 					ret);
3752 				rte_free((void *)param);
3753 				return -1;
3754 			}
3755 
3756 			rte_free((void *)param);
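			/* If the QoS key extract layout changed, reprogram the QoS
			 * table before adding the entry below.
			 */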
3757 			if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
3758 				if (dpkg_prepare_key_cfg(
3759 					&priv->extract.qos_key_extract.dpkg,
3760 					(uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
3761 					DPAA2_PMD_ERR(
3762 					"Unable to prepare extract parameters");
3763 					return -1;
3764 				}
3765 				memset(&qos_cfg, 0,
3766 					sizeof(struct dpni_qos_tbl_cfg));
3767 				qos_cfg.discard_on_miss = true;
3768 				qos_cfg.keep_entries = true;
3769 				qos_cfg.key_cfg_iova =
3770 					(size_t)priv->extract.qos_extract_param;
3771 				ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
3772 							 priv->token, &qos_cfg);
3773 				if (ret < 0) {
3774 					DPAA2_PMD_ERR(
3775 					"RSS QoS dist can't be configured: %d",
3776 					ret);
3777 					return -1;
3778 				}
3779 			}
3780 
3781 			/* Add Rule into QoS table */
3782 			qos_index = flow->tc_id * priv->fs_entries +
3783 				flow->tc_index;
3784 			if (qos_index >= priv->qos_entries) {
3785 				DPAA2_PMD_ERR("QoS table with %d entries full",
3786 					priv->qos_entries);
3787 				return -1;
3788 			}
3789 
3790 			flow->qos_real_key_size =
3791 			  priv->extract.qos_key_extract.key_info.key_total_size;
3792 			flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
3793 			ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token,
3794 						&flow->qos_rule, flow->tc_id,
3795 						qos_index, 0, 0);
3796 			if (ret < 0) {
3797 				DPAA2_PMD_ERR(
3798 				"Error in adding entry to QoS table(%d)",
3799 				ret);
3800 				return ret;
3801 			}
3802 			break;
3803 		case RTE_FLOW_ACTION_TYPE_END:
3804 			end_of_list = 1;
3805 			break;
3806 		default:
3807 			DPAA2_PMD_ERR("Invalid action type");
3808 			ret = -ENOTSUP;
3809 			break;
3810 		}
3811 		j++;
3812 	}
3813 
3814 	if (!ret) {
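		/* If either table layout was reconfigured, update the entries
		 * already installed for this TC via dpaa2_flow_entry_update().
		 */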
3815 		if (is_keycfg_configured &
3816 			(DPAA2_QOS_TABLE_RECONFIGURE |
3817 			DPAA2_FS_TABLE_RECONFIGURE)) {
3818 			ret = dpaa2_flow_entry_update(priv, flow->tc_id);
3819 			if (ret) {
3820 				DPAA2_PMD_ERR("Flow entry update failed.");
3821 
3822 				return -1;
3823 			}
3824 		}
3825 		/* Insert the new rule at the tail of the flow list. */
3826 		if (!curr) {
3827 			LIST_INSERT_HEAD(&priv->flows, flow, next);
3828 		} else {
3829 			while (LIST_NEXT(curr, next))
3830 				curr = LIST_NEXT(curr, next);
3831 			LIST_INSERT_AFTER(curr, flow, next);
3832 		}
3833 	}
3834 	return ret;
3835 }
3836 
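/* Check the flow attributes against the DPNI capabilities: the group must
 * map to an existing traffic class, the priority to an FS entry, and only
 * ingress flows are supported.
 */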
3837 static inline int
3838 dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr,
3839 		      const struct rte_flow_attr *attr)
3840 {
3841 	int ret = 0;
3842 
3843 	if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) {
3844 		DPAA2_PMD_ERR("Priority group is out of range");
3845 		ret = -ENOTSUP;
3846 	}
3847 	if (unlikely(attr->priority >= dpni_attr->fs_entries)) {
3848 		DPAA2_PMD_ERR("Priority within the group is out of range");
3849 		ret = -ENOTSUP;
3850 	}
3851 	if (unlikely(attr->egress)) {
3852 		DPAA2_PMD_ERR(
3853 			"Flow configuration is not supported on egress side");
3854 		ret = -ENOTSUP;
3855 	}
3856 	if (unlikely(!attr->ingress)) {
3857 		DPAA2_PMD_ERR("Ingress flag must be configured");
3858 		ret = -EINVAL;
3859 	}
3860 	return ret;
3861 }
3862 
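/* Check that every pattern item is of a supported type and carries a spec. */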
3863 static inline int
3864 dpaa2_dev_verify_patterns(const struct rte_flow_item pattern[])
3865 {
3866 	unsigned int i, j, is_found = 0;
3867 	int ret = 0;
3868 
3869 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3870 		for (i = 0; i < RTE_DIM(dpaa2_supported_pattern_type); i++) {
3871 			if (dpaa2_supported_pattern_type[i]
3872 					== pattern[j].type) {
3873 				is_found = 1;
3874 				break;
3875 			}
3876 		}
3877 		if (!is_found) {
3878 			ret = -ENOTSUP;
3879 			break;
3880 		}
3881 	}
3882 	/* Verify that every given pattern item provides a spec. */
3883 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3884 		if (!pattern[j].spec) {
3885 			ret = -EINVAL;
3886 			break;
3887 		}
3888 	}
3889 
3890 	return ret;
3891 }
3892 
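/* Check that every action is of a supported type and, except for DROP,
 * carries a configuration.
 */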
3893 static inline int
3894 dpaa2_dev_verify_actions(const struct rte_flow_action actions[])
3895 {
3896 	unsigned int i, j, is_found = 0;
3897 	int ret = 0;
3898 
3899 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3900 		for (i = 0; i < RTE_DIM(dpaa2_supported_action_type); i++) {
3901 			if (dpaa2_supported_action_type[i] == actions[j].type) {
3902 				is_found = 1;
3903 				break;
3904 			}
3905 		}
3906 		if (!is_found) {
3907 			ret = -ENOTSUP;
3908 			break;
3909 		}
3910 	}
3911 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3912 		if (actions[j].type != RTE_FLOW_ACTION_TYPE_DROP &&
3913 				!actions[j].conf)
3914 			ret = -EINVAL;
3915 	}
3916 	return ret;
3917 }
3918 
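/**
 * Validate the attributes, pattern items and actions of a flow rule
 * without programming it.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */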
3919 static
3920 int dpaa2_flow_validate(struct rte_eth_dev *dev,
3921 			const struct rte_flow_attr *flow_attr,
3922 			const struct rte_flow_item pattern[],
3923 			const struct rte_flow_action actions[],
3924 			struct rte_flow_error *error)
3925 {
3926 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3927 	struct dpni_attr dpni_attr;
3928 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3929 	uint16_t token = priv->token;
3930 	int ret = 0;
3931 
3932 	memset(&dpni_attr, 0, sizeof(struct dpni_attr));
3933 	ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr);
3934 	if (ret < 0) {
3935 		DPAA2_PMD_ERR(
3936 			"Failure to get dpni@%p attribute, err code %d",
3937 			dpni, ret);
3938 		rte_flow_error_set(error, EPERM,
3939 			   RTE_FLOW_ERROR_TYPE_ATTR,
3940 			   flow_attr, "invalid");
3941 		return ret;
3942 	}
3943 
3944 	/* Verify input attributes */
3945 	ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr);
3946 	if (ret < 0) {
3947 		DPAA2_PMD_ERR(
3948 			"Invalid attributes are given");
3949 		rte_flow_error_set(error, EPERM,
3950 			   RTE_FLOW_ERROR_TYPE_ATTR,
3951 			   flow_attr, "invalid");
3952 		goto not_valid_params;
3953 	}
3954 	/* Verify input pattern list */
3955 	ret = dpaa2_dev_verify_patterns(pattern);
3956 	if (ret < 0) {
3957 		DPAA2_PMD_ERR(
3958 			"Invalid pattern list is given");
3959 		rte_flow_error_set(error, EPERM,
3960 			   RTE_FLOW_ERROR_TYPE_ITEM,
3961 			   pattern, "invalid");
3962 		goto not_valid_params;
3963 	}
3964 	/* Verify input action list */
3965 	ret = dpaa2_dev_verify_actions(actions);
3966 	if (ret < 0) {
3967 		DPAA2_PMD_ERR(
3968 			"Invalid action list is given");
3969 		rte_flow_error_set(error, EPERM,
3970 			   RTE_FLOW_ERROR_TYPE_ACTION,
3971 			   actions, "invalid");
3972 		goto not_valid_params;
3973 	}
3974 not_valid_params:
3975 	return ret;
3976 }
3977 
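/**
 * Create a flow rule.
 *
 * Allocates the rule key/mask buffers, translates the pattern and actions
 * into QoS/FS entries and appends the flow to the per-device list.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */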
3978 static
3979 struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev,
3980 				   const struct rte_flow_attr *attr,
3981 				   const struct rte_flow_item pattern[],
3982 				   const struct rte_flow_action actions[],
3983 				   struct rte_flow_error *error)
3984 {
3985 	struct rte_flow *flow = NULL;
3986 	size_t key_iova = 0, mask_iova = 0;
3987 	int ret;
3988 
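	/* Optional environment controls: DPAA2_FLOW_CONTROL_LOG enables flow
	 * rule logging; DPAA2_FLOW_CONTROL_MISS_FLOW redirects missed packets
	 * to the given flow ID instead of dropping them.
	 */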
3989 	dpaa2_flow_control_log =
3990 		getenv("DPAA2_FLOW_CONTROL_LOG");
3991 
3992 	if (getenv("DPAA2_FLOW_CONTROL_MISS_FLOW")) {
3993 		struct dpaa2_dev_priv *priv = dev->data->dev_private;
3994 
3995 		dpaa2_flow_miss_flow_id =
3996 			atoi(getenv("DPAA2_FLOW_CONTROL_MISS_FLOW"));
3997 		if (dpaa2_flow_miss_flow_id >= priv->dist_queues) {
3998 			DPAA2_PMD_ERR(
3999 				"The miss flow ID %d exceeds the max flow ID %d",
4000 				dpaa2_flow_miss_flow_id,
4001 				priv->dist_queues - 1);
4002 			return NULL;
4003 		}
4004 	}
4005 
4006 	flow = rte_zmalloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE);
4007 	if (!flow) {
4008 		DPAA2_PMD_ERR("Failure to allocate memory for flow");
4009 		goto mem_failure;
4010 	}
4011 	/* Allocate DMA'ble memory to write the rules */
4012 	key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
4013 	if (!key_iova) {
4014 		DPAA2_PMD_ERR(
4015 			"Memory allocation failure for rule configuration");
4016 		goto mem_failure;
4017 	}
4018 	mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
4019 	if (!mask_iova) {
4020 		DPAA2_PMD_ERR(
4021 			"Memory allocation failure for rule configuration");
4022 		goto mem_failure;
4023 	}
4024 
4025 	flow->qos_rule.key_iova = key_iova;
4026 	flow->qos_rule.mask_iova = mask_iova;
4027 
4028 	/* Allocate DMA'ble memory to write the rules */
4029 	key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
4030 	if (!key_iova) {
4031 		DPAA2_PMD_ERR(
4032 			"Memory allocation failure for rule configuration");
4033 		goto mem_failure;
4034 	}
4035 	mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
4036 	if (!mask_iova) {
4037 		DPAA2_PMD_ERR(
4038 			"Memory allocation failure for rule configuration");
4039 		goto mem_failure;
4040 	}
4041 
4042 	flow->fs_rule.key_iova = key_iova;
4043 	flow->fs_rule.mask_iova = mask_iova;
4044 
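	/* No IP address extract has been referenced for this rule yet. */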
4045 	flow->ipaddr_rule.ipaddr_type = FLOW_NONE_IPADDR;
4046 	flow->ipaddr_rule.qos_ipsrc_offset =
4047 		IP_ADDRESS_OFFSET_INVALID;
4048 	flow->ipaddr_rule.qos_ipdst_offset =
4049 		IP_ADDRESS_OFFSET_INVALID;
4050 	flow->ipaddr_rule.fs_ipsrc_offset =
4051 		IP_ADDRESS_OFFSET_INVALID;
4052 	flow->ipaddr_rule.fs_ipdst_offset =
4053 		IP_ADDRESS_OFFSET_INVALID;
4054 
4055 	ret = dpaa2_generic_flow_set(flow, dev, attr, pattern,
4056 			actions, error);
4057 	if (ret < 0) {
4058 		if (error && error->type > RTE_FLOW_ERROR_TYPE_ACTION)
4059 			rte_flow_error_set(error, EPERM,
4060 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4061 					attr, "unknown");
4062 		DPAA2_PMD_ERR("Failure to create flow, return code (%d)", ret);
4063 		goto creation_error;
4064 	}
4065 
4066 	return flow;
4067 mem_failure:
4068 	rte_flow_error_set(error, EPERM,
4069 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4070 			   NULL, "memory alloc");
4071 creation_error:
4072 	rte_free((void *)flow);
4073 	rte_free((void *)key_iova);
4074 	rte_free((void *)mask_iova);
4075 
4076 	return NULL;
4077 }
4078 
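/**
 * Destroy a flow rule.
 *
 * Removes the corresponding QoS and FS (or RSS) entries from hardware and
 * releases the rule memory.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */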
4079 static
4080 int dpaa2_flow_destroy(struct rte_eth_dev *dev,
4081 		       struct rte_flow *flow,
4082 		       struct rte_flow_error *error)
4083 {
4084 	int ret = 0;
4085 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4086 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
4087 
4088 	switch (flow->action) {
4089 	case RTE_FLOW_ACTION_TYPE_QUEUE:
4090 	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
4091 	case RTE_FLOW_ACTION_TYPE_PORT_ID:
4092 		if (priv->num_rx_tc > 1) {
4093 			/* Remove entry from QoS table first */
4094 			ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
4095 					&flow->qos_rule);
4096 			if (ret < 0) {
4097 				DPAA2_PMD_ERR(
4098 					"Error in removing entry from QoS table(%d)", ret);
4099 				goto error;
4100 			}
4101 		}
4102 
4103 		/* Then remove entry from FS table */
4104 		ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token,
4105 					   flow->tc_id, &flow->fs_rule);
4106 		if (ret < 0) {
4107 			DPAA2_PMD_ERR(
4108 				"Error in removing entry from FS table(%d)", ret);
4109 			goto error;
4110 		}
4111 		break;
4112 	case RTE_FLOW_ACTION_TYPE_RSS:
4113 		if (priv->num_rx_tc > 1) {
4114 			ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
4115 					&flow->qos_rule);
4116 			if (ret < 0) {
4117 				DPAA2_PMD_ERR(
4118 					"Error in removing entry from QoS table(%d)", ret);
4119 				goto error;
4120 			}
4121 		}
4122 		break;
4123 	default:
4124 		DPAA2_PMD_ERR(
4125 		"Action type (%d) is not supported", flow->action);
4126 		ret = -ENOTSUP;
4127 		break;
4128 	}
4129 
4130 	LIST_REMOVE(flow, next);
4131 	rte_free((void *)(size_t)flow->qos_rule.key_iova);
4132 	rte_free((void *)(size_t)flow->qos_rule.mask_iova);
4133 	rte_free((void *)(size_t)flow->fs_rule.key_iova);
4134 	rte_free((void *)(size_t)flow->fs_rule.mask_iova);
4135 	/* Now free the flow */
4136 	rte_free(flow);
4137 
4138 error:
4139 	if (ret)
4140 		rte_flow_error_set(error, EPERM,
4141 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4142 				   NULL, "unknown");
4143 	return ret;
4144 }
4145 
4146 /**
4147  * Destroy user-configured flow rules.
4148  *
4149  * This function skips internal flow rules.
4150  *
4151  * @see rte_flow_flush()
4152  * @see rte_flow_ops
4153  */
4154 static int
4155 dpaa2_flow_flush(struct rte_eth_dev *dev,
4156 		struct rte_flow_error *error)
4157 {
4158 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4159 	struct rte_flow *flow = LIST_FIRST(&priv->flows);
4160 
4161 	while (flow) {
4162 		struct rte_flow *next = LIST_NEXT(flow, next);
4163 
4164 		dpaa2_flow_destroy(dev, flow, error);
4165 		flow = next;
4166 	}
4167 	return 0;
4168 }
4169 
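/* Flow query is currently a no-op; it accepts any query and returns no data. */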
4170 static int
4171 dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused,
4172 		struct rte_flow *flow __rte_unused,
4173 		const struct rte_flow_action *actions __rte_unused,
4174 		void *data __rte_unused,
4175 		struct rte_flow_error *error __rte_unused)
4176 {
4177 	return 0;
4178 }
4179 
4180 /**
4181  * Clean up all flow rules.
4182  *
4183  * Unlike dpaa2_flow_flush(), this function takes care of all remaining flow
4184  * rules regardless of whether they are internal or user-configured.
4185  *
4186  * @param dev
4187  *   Pointer to the Ethernet device structure.
4188  */
4189 void
4190 dpaa2_flow_clean(struct rte_eth_dev *dev)
4191 {
4192 	struct rte_flow *flow;
4193 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4194 
4195 	while ((flow = LIST_FIRST(&priv->flows)))
4196 		dpaa2_flow_destroy(dev, flow, NULL);
4197 }
4198 
4199 const struct rte_flow_ops dpaa2_flow_ops = {
4200 	.create	= dpaa2_flow_create,
4201 	.validate = dpaa2_flow_validate,
4202 	.destroy = dpaa2_flow_destroy,
4203 	.flush	= dpaa2_flow_flush,
4204 	.query	= dpaa2_flow_query,
4205 };
4206