xref: /dpdk/drivers/net/dpaa2/dpaa2_flow.c (revision d0a77dc3d9d477a8fdf900dedf9538c0a6c4098f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2020 NXP
3  */
4 
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 
13 #include <rte_ethdev.h>
14 #include <rte_log.h>
15 #include <rte_malloc.h>
16 #include <rte_flow_driver.h>
17 #include <rte_tailq.h>
18 
19 #include <fsl_dpni.h>
20 #include <fsl_dpkg.h>
21 
22 #include <dpaa2_ethdev.h>
23 #include <dpaa2_pmd_logs.h>
24 
/* Workaround to discriminate the UDP/TCP/SCTP
 * with next protocol of l3.
 * MC/WRIOP are not able to identify
 * the l4 protocol with l4 ports.
 * Non-zero enables the workaround.
 * NOTE(review): set outside this chunk - confirm where it is enabled.
 */
int mc_l4_port_identification;

/* Fixed byte size reserved for each rule key/mask entry. */
#define FIXED_ENTRY_SIZE 54
33 
/* Which IP version (if any) a flow rule matches on. */
enum flow_rule_ipaddr_type {
	FLOW_NONE_IPADDR,
	FLOW_IPV4_ADDR,
	FLOW_IPV6_ADDR
};

/* Byte offsets of the IP src/dst fields within the QoS and FS rule
 * key/mask buffers; a negative value means the field is not present
 * in that key (callers test ">= 0" before using an offset).
 */
struct flow_rule_ipaddr {
	enum flow_rule_ipaddr_type ipaddr_type;
	int qos_ipsrc_offset;
	int qos_ipdst_offset;
	int fs_ipsrc_offset;
	int fs_ipdst_offset;
};
47 
/* Driver-private representation of one rte_flow rule, kept in a
 * per-device list.
 */
struct rte_flow {
	LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
	struct dpni_rule_cfg qos_rule; /* Key/mask for the QoS (TC selection) table. */
	struct dpni_rule_cfg fs_rule; /* Key/mask for the FS (flow steering) table. */
	uint8_t qos_real_key_size;
	uint8_t fs_real_key_size;
	uint8_t tc_id; /** Traffic Class ID. */
	uint8_t tc_index; /** index within this Traffic Class. */
	enum rte_flow_action_type action;
	uint16_t flow_id;
	/* Special for IP address to specify the offset
	 * in key/mask.
	 */
	struct flow_rule_ipaddr ipaddr_rule;
	struct dpni_fs_action_cfg action_cfg;
};
64 
/* Pattern item types this PMD can parse; END terminates a pattern. */
static const
enum rte_flow_item_type dpaa2_supported_pattern_type[] = {
	RTE_FLOW_ITEM_TYPE_END,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_GRE,
};

/* Flow actions this PMD implements. */
static const
enum rte_flow_action_type dpaa2_supported_action_type[] = {
	RTE_FLOW_ACTION_TYPE_END,
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_RSS
};
85 
/* Pseudo item type used internally to mean "IPv4 or IPv6".
 * Max of enum rte_flow_item_type + 1, for both IPv4 and IPv6
 */
#define DPAA2_FLOW_ITEM_TYPE_GENERIC_IP (RTE_FLOW_ITEM_TYPE_META + 1)

/* Currently configured filter type.
 * NOTE(review): updated outside this chunk - confirm writer.
 */
enum rte_filter_type dpaa2_filter_type = RTE_ETH_FILTER_NONE;
90 
#ifndef __cplusplus
/* Widest supportable mask per pattern item type: a user-supplied mask
 * may only set bits that are also set here (enforced by
 * dpaa2_flow_extract_support()).
 */
static const struct rte_flow_item_eth dpaa2_flow_item_eth_mask = {
	.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	.type = RTE_BE16(0xffff),
};

static const struct rte_flow_item_vlan dpaa2_flow_item_vlan_mask = {
	.tci = RTE_BE16(0xffff),
};

static const struct rte_flow_item_ipv4 dpaa2_flow_item_ipv4_mask = {
	.hdr.src_addr = RTE_BE32(0xffffffff),
	.hdr.dst_addr = RTE_BE32(0xffffffff),
	.hdr.next_proto_id = 0xff,
};

static const struct rte_flow_item_ipv6 dpaa2_flow_item_ipv6_mask = {
	.hdr = {
		.src_addr =
			"\xff\xff\xff\xff\xff\xff\xff\xff"
			"\xff\xff\xff\xff\xff\xff\xff\xff",
		.dst_addr =
			"\xff\xff\xff\xff\xff\xff\xff\xff"
			"\xff\xff\xff\xff\xff\xff\xff\xff",
		.proto = 0xff
	},
};

static const struct rte_flow_item_icmp dpaa2_flow_item_icmp_mask = {
	.hdr.icmp_type = 0xff,
	.hdr.icmp_code = 0xff,
};

static const struct rte_flow_item_udp dpaa2_flow_item_udp_mask = {
	.hdr = {
		.src_port = RTE_BE16(0xffff),
		.dst_port = RTE_BE16(0xffff),
	},
};

static const struct rte_flow_item_tcp dpaa2_flow_item_tcp_mask = {
	.hdr = {
		.src_port = RTE_BE16(0xffff),
		.dst_port = RTE_BE16(0xffff),
	},
};

static const struct rte_flow_item_sctp dpaa2_flow_item_sctp_mask = {
	.hdr = {
		.src_port = RTE_BE16(0xffff),
		.dst_port = RTE_BE16(0xffff),
	},
};

static const struct rte_flow_item_gre dpaa2_flow_item_gre_mask = {
	.protocol = RTE_BE16(0xffff),
};

#endif
151 
152 
153 static inline void dpaa2_flow_extract_key_set(
154 	struct dpaa2_key_info *key_info, int index, uint8_t size)
155 {
156 	key_info->key_size[index] = size;
157 	if (index > 0) {
158 		key_info->key_offset[index] =
159 			key_info->key_offset[index - 1] +
160 			key_info->key_size[index - 1];
161 	} else {
162 		key_info->key_offset[index] = 0;
163 	}
164 	key_info->key_total_size += size;
165 }
166 
/* Append an extract for (prot, field) to the key-generation profile,
 * keeping any IP SRC/DST extracts at the tail of the extract list so
 * fixed-size fields always precede the (variable-size) IP addresses.
 *
 * @param key_extract	Profile and key layout to update.
 * @param prot		Protocol of the header field to extract.
 * @param field		Field id within that protocol.
 * @param field_size	Size in bytes of the field in the key.
 *
 * @return 0 on success, -1 if the extract table is already full.
 */
static int dpaa2_flow_extract_add(
	struct dpaa2_key_extract *key_extract,
	enum net_prot prot,
	uint32_t field, uint8_t field_size)
{
	int index, ip_src = -1, ip_dst = -1;
	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
	struct dpaa2_key_info *key_info = &key_extract->key_info;

	if (dpkg->num_extracts >=
		DPKG_MAX_NUM_OF_EXTRACTS) {
		DPAA2_PMD_WARN("Number of extracts overflows");
		return -1;
	}
	/* Before reorder, the IP SRC and IP DST are already last
	 * extract(s).
	 */
	for (index = 0; index < dpkg->num_extracts; index++) {
		if (dpkg->extracts[index].extract.from_hdr.prot ==
			NET_PROT_IP) {
			if (dpkg->extracts[index].extract.from_hdr.field ==
				NH_FLD_IP_SRC) {
				ip_src = index;
			}
			if (dpkg->extracts[index].extract.from_hdr.field ==
				NH_FLD_IP_DST) {
				ip_dst = index;
			}
		}
	}

	if (ip_src >= 0)
		RTE_ASSERT((ip_src + 2) >= dpkg->num_extracts);

	if (ip_dst >= 0)
		RTE_ASSERT((ip_dst + 2) >= dpkg->num_extracts);

	/* IP addresses are appended at the tail; any other field is
	 * inserted just before the existing IP SRC/DST extract(s).
	 */
	if (prot == NET_PROT_IP &&
		(field == NH_FLD_IP_SRC ||
		field == NH_FLD_IP_DST)) {
		index = dpkg->num_extracts;
	} else {
		if (ip_src >= 0 && ip_dst >= 0)
			index = dpkg->num_extracts - 2;
		else if (ip_src >= 0 || ip_dst >= 0)
			index = dpkg->num_extracts - 1;
		else
			index = dpkg->num_extracts;
	}

	dpkg->extracts[index].type =	DPKG_EXTRACT_FROM_HDR;
	dpkg->extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
	dpkg->extracts[index].extract.from_hdr.prot = prot;
	dpkg->extracts[index].extract.from_hdr.field = field;
	/* IP address extracts carry size 0 in the generic layout; their
	 * real offsets are tracked in the dedicated fields below.
	 */
	if (prot == NET_PROT_IP &&
		(field == NH_FLD_IP_SRC ||
		field == NH_FLD_IP_DST)) {
		dpaa2_flow_extract_key_set(key_info, index, 0);
	} else {
		dpaa2_flow_extract_key_set(key_info, index, field_size);
	}

	/* Maintain IPv4/IPv6 src/dst offsets: a src and dst address are
	 * laid out back-to-back, otherwise the address follows the last
	 * fixed-size extract.
	 * NOTE(review): when index == 0 the key_offset[index - 1] read
	 * is out of bounds - presumably unreachable because an IP
	 * address extract is never the only entry; confirm.
	 */
	if (prot == NET_PROT_IP) {
		if (field == NH_FLD_IP_SRC) {
			if (key_info->ipv4_dst_offset >= 0) {
				key_info->ipv4_src_offset =
					key_info->ipv4_dst_offset +
					NH_FLD_IPV4_ADDR_SIZE;
			} else {
				key_info->ipv4_src_offset =
					key_info->key_offset[index - 1] +
						key_info->key_size[index - 1];
			}
			if (key_info->ipv6_dst_offset >= 0) {
				key_info->ipv6_src_offset =
					key_info->ipv6_dst_offset +
					NH_FLD_IPV6_ADDR_SIZE;
			} else {
				key_info->ipv6_src_offset =
					key_info->key_offset[index - 1] +
						key_info->key_size[index - 1];
			}
		} else if (field == NH_FLD_IP_DST) {
			if (key_info->ipv4_src_offset >= 0) {
				key_info->ipv4_dst_offset =
					key_info->ipv4_src_offset +
					NH_FLD_IPV4_ADDR_SIZE;
			} else {
				key_info->ipv4_dst_offset =
					key_info->key_offset[index - 1] +
						key_info->key_size[index - 1];
			}
			if (key_info->ipv6_src_offset >= 0) {
				key_info->ipv6_dst_offset =
					key_info->ipv6_src_offset +
					NH_FLD_IPV6_ADDR_SIZE;
			} else {
				key_info->ipv6_dst_offset =
					key_info->key_offset[index - 1] +
						key_info->key_size[index - 1];
			}
		}
	}

	/* New extract landed at the tail: nothing to re-append. */
	if (index == dpkg->num_extracts) {
		dpkg->num_extracts++;
		return 0;
	}

	/* Otherwise the new entry overwrote an IP extract slot:
	 * re-append the IP SRC/DST extract(s) after it and shift their
	 * key offsets by the inserted field size.
	 */
	if (ip_src >= 0) {
		ip_src++;
		dpkg->extracts[ip_src].type =
			DPKG_EXTRACT_FROM_HDR;
		dpkg->extracts[ip_src].extract.from_hdr.type =
			DPKG_FULL_FIELD;
		dpkg->extracts[ip_src].extract.from_hdr.prot =
			NET_PROT_IP;
		dpkg->extracts[ip_src].extract.from_hdr.field =
			NH_FLD_IP_SRC;
		dpaa2_flow_extract_key_set(key_info, ip_src, 0);
		key_info->ipv4_src_offset += field_size;
		key_info->ipv6_src_offset += field_size;
	}
	if (ip_dst >= 0) {
		ip_dst++;
		dpkg->extracts[ip_dst].type =
			DPKG_EXTRACT_FROM_HDR;
		dpkg->extracts[ip_dst].extract.from_hdr.type =
			DPKG_FULL_FIELD;
		dpkg->extracts[ip_dst].extract.from_hdr.prot =
			NET_PROT_IP;
		dpkg->extracts[ip_dst].extract.from_hdr.field =
			NH_FLD_IP_DST;
		dpaa2_flow_extract_key_set(key_info, ip_dst, 0);
		key_info->ipv4_dst_offset += field_size;
		key_info->ipv6_dst_offset += field_size;
	}

	dpkg->num_extracts++;

	return 0;
}
309 
310 /* Protocol discrimination.
311  * Discriminate IPv4/IPv6/vLan by Eth type.
312  * Discriminate UDP/TCP/ICMP by next proto of IP.
313  */
314 static inline int
315 dpaa2_flow_proto_discrimination_extract(
316 	struct dpaa2_key_extract *key_extract,
317 	enum rte_flow_item_type type)
318 {
319 	if (type == RTE_FLOW_ITEM_TYPE_ETH) {
320 		return dpaa2_flow_extract_add(
321 				key_extract, NET_PROT_ETH,
322 				NH_FLD_ETH_TYPE,
323 				sizeof(rte_be16_t));
324 	} else if (type == (enum rte_flow_item_type)
325 		DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
326 		return dpaa2_flow_extract_add(
327 				key_extract, NET_PROT_IP,
328 				NH_FLD_IP_PROTO,
329 				NH_FLD_IP_PROTO_SIZE);
330 	}
331 
332 	return -1;
333 }
334 
335 static inline int dpaa2_flow_extract_search(
336 	struct dpkg_profile_cfg *dpkg,
337 	enum net_prot prot, uint32_t field)
338 {
339 	int i;
340 
341 	for (i = 0; i < dpkg->num_extracts; i++) {
342 		if (dpkg->extracts[i].extract.from_hdr.prot == prot &&
343 			dpkg->extracts[i].extract.from_hdr.field == field) {
344 			return i;
345 		}
346 	}
347 
348 	return -1;
349 }
350 
351 static inline int dpaa2_flow_extract_key_offset(
352 	struct dpaa2_key_extract *key_extract,
353 	enum net_prot prot, uint32_t field)
354 {
355 	int i;
356 	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
357 	struct dpaa2_key_info *key_info = &key_extract->key_info;
358 
359 	if (prot == NET_PROT_IPV4 ||
360 		prot == NET_PROT_IPV6)
361 		i = dpaa2_flow_extract_search(dpkg, NET_PROT_IP, field);
362 	else
363 		i = dpaa2_flow_extract_search(dpkg, prot, field);
364 
365 	if (i >= 0) {
366 		if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_SRC)
367 			return key_info->ipv4_src_offset;
368 		else if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_DST)
369 			return key_info->ipv4_dst_offset;
370 		else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_SRC)
371 			return key_info->ipv6_src_offset;
372 		else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_DST)
373 			return key_info->ipv6_dst_offset;
374 		else
375 			return key_info->key_offset[i];
376 	} else {
377 		return -1;
378 	}
379 }
380 
/* Descriptor of a next-protocol discrimination value: the pattern item
 * type plus the value written into the corresponding extract.
 */
struct proto_discrimination {
	enum rte_flow_item_type type;
	union {
		rte_be16_t eth_type; /* Valid when type is ETH. */
		uint8_t ip_proto; /* Valid when type is the generic IP pseudo item. */
	};
};
388 
389 static int
390 dpaa2_flow_proto_discrimination_rule(
391 	struct dpaa2_dev_priv *priv, struct rte_flow *flow,
392 	struct proto_discrimination proto, int group)
393 {
394 	enum net_prot prot;
395 	uint32_t field;
396 	int offset;
397 	size_t key_iova;
398 	size_t mask_iova;
399 	rte_be16_t eth_type;
400 	uint8_t ip_proto;
401 
402 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
403 		prot = NET_PROT_ETH;
404 		field = NH_FLD_ETH_TYPE;
405 	} else if (proto.type == DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
406 		prot = NET_PROT_IP;
407 		field = NH_FLD_IP_PROTO;
408 	} else {
409 		DPAA2_PMD_ERR(
410 			"Only Eth and IP support to discriminate next proto.");
411 		return -1;
412 	}
413 
414 	offset = dpaa2_flow_extract_key_offset(&priv->extract.qos_key_extract,
415 			prot, field);
416 	if (offset < 0) {
417 		DPAA2_PMD_ERR("QoS prot %d field %d extract failed",
418 				prot, field);
419 		return -1;
420 	}
421 	key_iova = flow->qos_rule.key_iova + offset;
422 	mask_iova = flow->qos_rule.mask_iova + offset;
423 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
424 		eth_type = proto.eth_type;
425 		memcpy((void *)key_iova, (const void *)(&eth_type),
426 			sizeof(rte_be16_t));
427 		eth_type = 0xffff;
428 		memcpy((void *)mask_iova, (const void *)(&eth_type),
429 			sizeof(rte_be16_t));
430 	} else {
431 		ip_proto = proto.ip_proto;
432 		memcpy((void *)key_iova, (const void *)(&ip_proto),
433 			sizeof(uint8_t));
434 		ip_proto = 0xff;
435 		memcpy((void *)mask_iova, (const void *)(&ip_proto),
436 			sizeof(uint8_t));
437 	}
438 
439 	offset = dpaa2_flow_extract_key_offset(
440 			&priv->extract.tc_key_extract[group],
441 			prot, field);
442 	if (offset < 0) {
443 		DPAA2_PMD_ERR("FS prot %d field %d extract failed",
444 				prot, field);
445 		return -1;
446 	}
447 	key_iova = flow->fs_rule.key_iova + offset;
448 	mask_iova = flow->fs_rule.mask_iova + offset;
449 
450 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
451 		eth_type = proto.eth_type;
452 		memcpy((void *)key_iova, (const void *)(&eth_type),
453 			sizeof(rte_be16_t));
454 		eth_type = 0xffff;
455 		memcpy((void *)mask_iova, (const void *)(&eth_type),
456 			sizeof(rte_be16_t));
457 	} else {
458 		ip_proto = proto.ip_proto;
459 		memcpy((void *)key_iova, (const void *)(&ip_proto),
460 			sizeof(uint8_t));
461 		ip_proto = 0xff;
462 		memcpy((void *)mask_iova, (const void *)(&ip_proto),
463 			sizeof(uint8_t));
464 	}
465 
466 	return 0;
467 }
468 
469 static inline int
470 dpaa2_flow_rule_data_set(
471 	struct dpaa2_key_extract *key_extract,
472 	struct dpni_rule_cfg *rule,
473 	enum net_prot prot, uint32_t field,
474 	const void *key, const void *mask, int size)
475 {
476 	int offset = dpaa2_flow_extract_key_offset(key_extract,
477 				prot, field);
478 
479 	if (offset < 0) {
480 		DPAA2_PMD_ERR("prot %d, field %d extract failed",
481 			prot, field);
482 		return -1;
483 	}
484 
485 	memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
486 	memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
487 
488 	return 0;
489 }
490 
/* Move one IP address (src or dst) in a rule's key and mask from
 * 'src_offset' to the offset currently assigned to that address in the
 * extract layout, zeroing the old location. The bytes are staged
 * through a local buffer so partially overlapping old/new ranges are
 * handled correctly.
 *
 * @return 0 on success, -1 when 'field' is not an IP src/dst or the
 *         destination offset cannot be resolved.
 */
static inline int
_dpaa2_flow_rule_move_ipaddr_tail(
	struct dpaa2_key_extract *key_extract,
	struct dpni_rule_cfg *rule, int src_offset,
	uint32_t field, bool ipv4)
{
	size_t key_src;
	size_t mask_src;
	size_t key_dst;
	size_t mask_dst;
	int dst_offset, len;
	enum net_prot prot;
	char tmp[NH_FLD_IPV6_ADDR_SIZE];

	if (field != NH_FLD_IP_SRC &&
		field != NH_FLD_IP_DST) {
		DPAA2_PMD_ERR("Field of IP addr reorder must be IP SRC/DST");
		return -1;
	}
	if (ipv4)
		prot = NET_PROT_IPV4;
	else
		prot = NET_PROT_IPV6;
	dst_offset = dpaa2_flow_extract_key_offset(key_extract,
				prot, field);
	if (dst_offset < 0) {
		DPAA2_PMD_ERR("Field %d reorder extract failed", field);
		return -1;
	}
	key_src = rule->key_iova + src_offset;
	mask_src = rule->mask_iova + src_offset;
	key_dst = rule->key_iova + dst_offset;
	mask_dst = rule->mask_iova + dst_offset;
	if (ipv4)
		len = sizeof(rte_be32_t);
	else
		len = NH_FLD_IPV6_ADDR_SIZE;

	/* Stage through tmp: clear the old location before writing the
	 * new one so an overlap cannot corrupt the moved bytes.
	 */
	memcpy(tmp, (char *)key_src, len);
	memset((char *)key_src, 0, len);
	memcpy((char *)key_dst, tmp, len);

	memcpy(tmp, (char *)mask_src, len);
	memset((char *)mask_src, 0, len);
	memcpy((char *)mask_dst, tmp, len);

	return 0;
}
539 
540 static inline int
541 dpaa2_flow_rule_move_ipaddr_tail(
542 	struct rte_flow *flow, struct dpaa2_dev_priv *priv,
543 	int fs_group)
544 {
545 	int ret;
546 	enum net_prot prot;
547 
548 	if (flow->ipaddr_rule.ipaddr_type == FLOW_NONE_IPADDR)
549 		return 0;
550 
551 	if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR)
552 		prot = NET_PROT_IPV4;
553 	else
554 		prot = NET_PROT_IPV6;
555 
556 	if (flow->ipaddr_rule.qos_ipsrc_offset >= 0) {
557 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
558 				&priv->extract.qos_key_extract,
559 				&flow->qos_rule,
560 				flow->ipaddr_rule.qos_ipsrc_offset,
561 				NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
562 		if (ret) {
563 			DPAA2_PMD_ERR("QoS src address reorder failed");
564 			return -1;
565 		}
566 		flow->ipaddr_rule.qos_ipsrc_offset =
567 			dpaa2_flow_extract_key_offset(
568 				&priv->extract.qos_key_extract,
569 				prot, NH_FLD_IP_SRC);
570 	}
571 
572 	if (flow->ipaddr_rule.qos_ipdst_offset >= 0) {
573 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
574 				&priv->extract.qos_key_extract,
575 				&flow->qos_rule,
576 				flow->ipaddr_rule.qos_ipdst_offset,
577 				NH_FLD_IP_DST, prot == NET_PROT_IPV4);
578 		if (ret) {
579 			DPAA2_PMD_ERR("QoS dst address reorder failed");
580 			return -1;
581 		}
582 		flow->ipaddr_rule.qos_ipdst_offset =
583 			dpaa2_flow_extract_key_offset(
584 				&priv->extract.qos_key_extract,
585 				prot, NH_FLD_IP_DST);
586 	}
587 
588 	if (flow->ipaddr_rule.fs_ipsrc_offset >= 0) {
589 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
590 				&priv->extract.tc_key_extract[fs_group],
591 				&flow->fs_rule,
592 				flow->ipaddr_rule.fs_ipsrc_offset,
593 				NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
594 		if (ret) {
595 			DPAA2_PMD_ERR("FS src address reorder failed");
596 			return -1;
597 		}
598 		flow->ipaddr_rule.fs_ipsrc_offset =
599 			dpaa2_flow_extract_key_offset(
600 				&priv->extract.tc_key_extract[fs_group],
601 				prot, NH_FLD_IP_SRC);
602 	}
603 	if (flow->ipaddr_rule.fs_ipdst_offset >= 0) {
604 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
605 				&priv->extract.tc_key_extract[fs_group],
606 				&flow->fs_rule,
607 				flow->ipaddr_rule.fs_ipdst_offset,
608 				NH_FLD_IP_DST, prot == NET_PROT_IPV4);
609 		if (ret) {
610 			DPAA2_PMD_ERR("FS dst address reorder failed");
611 			return -1;
612 		}
613 		flow->ipaddr_rule.fs_ipdst_offset =
614 			dpaa2_flow_extract_key_offset(
615 				&priv->extract.tc_key_extract[fs_group],
616 				prot, NH_FLD_IP_DST);
617 	}
618 
619 	return 0;
620 }
621 
622 static int
623 dpaa2_flow_extract_support(
624 	const uint8_t *mask_src,
625 	enum rte_flow_item_type type)
626 {
627 	char mask[64];
628 	int i, size = 0;
629 	const char *mask_support = 0;
630 
631 	switch (type) {
632 	case RTE_FLOW_ITEM_TYPE_ETH:
633 		mask_support = (const char *)&dpaa2_flow_item_eth_mask;
634 		size = sizeof(struct rte_flow_item_eth);
635 		break;
636 	case RTE_FLOW_ITEM_TYPE_VLAN:
637 		mask_support = (const char *)&dpaa2_flow_item_vlan_mask;
638 		size = sizeof(struct rte_flow_item_vlan);
639 		break;
640 	case RTE_FLOW_ITEM_TYPE_IPV4:
641 		mask_support = (const char *)&dpaa2_flow_item_ipv4_mask;
642 		size = sizeof(struct rte_flow_item_ipv4);
643 		break;
644 	case RTE_FLOW_ITEM_TYPE_IPV6:
645 		mask_support = (const char *)&dpaa2_flow_item_ipv6_mask;
646 		size = sizeof(struct rte_flow_item_ipv6);
647 		break;
648 	case RTE_FLOW_ITEM_TYPE_ICMP:
649 		mask_support = (const char *)&dpaa2_flow_item_icmp_mask;
650 		size = sizeof(struct rte_flow_item_icmp);
651 		break;
652 	case RTE_FLOW_ITEM_TYPE_UDP:
653 		mask_support = (const char *)&dpaa2_flow_item_udp_mask;
654 		size = sizeof(struct rte_flow_item_udp);
655 		break;
656 	case RTE_FLOW_ITEM_TYPE_TCP:
657 		mask_support = (const char *)&dpaa2_flow_item_tcp_mask;
658 		size = sizeof(struct rte_flow_item_tcp);
659 		break;
660 	case RTE_FLOW_ITEM_TYPE_SCTP:
661 		mask_support = (const char *)&dpaa2_flow_item_sctp_mask;
662 		size = sizeof(struct rte_flow_item_sctp);
663 		break;
664 	case RTE_FLOW_ITEM_TYPE_GRE:
665 		mask_support = (const char *)&dpaa2_flow_item_gre_mask;
666 		size = sizeof(struct rte_flow_item_gre);
667 		break;
668 	default:
669 		return -1;
670 	}
671 
672 	memcpy(mask, mask_support, size);
673 
674 	for (i = 0; i < size; i++)
675 		mask[i] = (mask[i] | mask_src[i]);
676 
677 	if (memcmp(mask, mask_support, size))
678 		return -1;
679 
680 	return 0;
681 }
682 
/* Translate an ETH pattern item into QoS and FS table extracts and
 * rule data. For each non-zero part of the mask (src MAC, dst MAC,
 * ether type) the matching extract is added to both the QoS key and
 * the per-group FS key when missing (flagging table reconfiguration
 * through *device_configured), IP address extracts are pushed back to
 * the key tail, and the spec/mask bytes are copied into
 * flow->qos_rule and flow->fs_rule.
 *
 * @return 0 on success (including a spec-less item, which is skipped),
 *         -1 on failure.
 */
static int
dpaa2_configure_flow_eth(struct rte_flow *flow,
			 struct rte_eth_dev *dev,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_item *pattern,
			 const struct rte_flow_action actions[] __rte_unused,
			 struct rte_flow_error *error __rte_unused,
			 int *device_configured)
{
	int index, ret;
	int local_cfg = 0;
	uint32_t group;
	const struct rte_flow_item_eth *spec, *mask;

	/* TODO: Currently upper bound of range parameter is not implemented */
	const struct rte_flow_item_eth *last __rte_unused;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0};

	group = attr->group;

	/* Parse pattern list to get the matching parameters */
	spec    = (const struct rte_flow_item_eth *)pattern->spec;
	last    = (const struct rte_flow_item_eth *)pattern->last;
	mask    = (const struct rte_flow_item_eth *)
		(pattern->mask ? pattern->mask : &dpaa2_flow_item_eth_mask);
	if (!spec) {
		/* Don't care any field of eth header,
		 * only care eth protocol.
		 */
		DPAA2_PMD_WARN("No pattern spec for Eth flow, just skip");
		return 0;
	}

	/* Get traffic class index and flow id to be configured */
	flow->tc_id = group;
	flow->tc_index = attr->priority;

	if (dpaa2_flow_extract_support((const uint8_t *)mask,
		RTE_FLOW_ITEM_TYPE_ETH)) {
		DPAA2_PMD_WARN("Extract field(s) of ethernet not support.");

		return -1;
	}

	/* Source MAC requested by the mask: ensure the ETH_SA extract
	 * exists in both keys, then write the rule data.
	 */
	if (memcmp((const char *)&mask->src, zero_cmp, RTE_ETHER_ADDR_LEN)) {
		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_ETH, NH_FLD_ETH_SA);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.qos_key_extract,
					NET_PROT_ETH, NH_FLD_ETH_SA,
					RTE_ETHER_ADDR_LEN);
			if (ret) {
				DPAA2_PMD_ERR("QoS Extract add ETH_SA failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}
		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_ETH, NH_FLD_ETH_SA);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.tc_key_extract[group],
					NET_PROT_ETH, NH_FLD_ETH_SA,
					RTE_ETHER_ADDR_LEN);
			if (ret) {
				DPAA2_PMD_ERR("FS Extract add ETH_SA failed.");
				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		/* Adding the extract may have displaced the IP address
		 * fields; move them back to the key tail first.
		 */
		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move ipaddr before ETH_SA rule set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.qos_key_extract,
				&flow->qos_rule,
				NET_PROT_ETH,
				NH_FLD_ETH_SA,
				&spec->src.addr_bytes,
				&mask->src.addr_bytes,
				sizeof(struct rte_ether_addr));
		if (ret) {
			DPAA2_PMD_ERR("QoS NH_FLD_ETH_SA rule data set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.tc_key_extract[group],
				&flow->fs_rule,
				NET_PROT_ETH,
				NH_FLD_ETH_SA,
				&spec->src.addr_bytes,
				&mask->src.addr_bytes,
				sizeof(struct rte_ether_addr));
		if (ret) {
			DPAA2_PMD_ERR("FS NH_FLD_ETH_SA rule data set failed");
			return -1;
		}
	}

	/* Destination MAC: same sequence with the ETH_DA field. */
	if (memcmp((const char *)&mask->dst, zero_cmp, RTE_ETHER_ADDR_LEN)) {
		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_ETH, NH_FLD_ETH_DA);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.qos_key_extract,
					NET_PROT_ETH, NH_FLD_ETH_DA,
					RTE_ETHER_ADDR_LEN);
			if (ret) {
				DPAA2_PMD_ERR("QoS Extract add ETH_DA failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_ETH, NH_FLD_ETH_DA);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.tc_key_extract[group],
					NET_PROT_ETH, NH_FLD_ETH_DA,
					RTE_ETHER_ADDR_LEN);
			if (ret) {
				DPAA2_PMD_ERR("FS Extract add ETH_DA failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move ipaddr before ETH DA rule set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.qos_key_extract,
				&flow->qos_rule,
				NET_PROT_ETH,
				NH_FLD_ETH_DA,
				&spec->dst.addr_bytes,
				&mask->dst.addr_bytes,
				sizeof(struct rte_ether_addr));
		if (ret) {
			DPAA2_PMD_ERR("QoS NH_FLD_ETH_DA rule data set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.tc_key_extract[group],
				&flow->fs_rule,
				NET_PROT_ETH,
				NH_FLD_ETH_DA,
				&spec->dst.addr_bytes,
				&mask->dst.addr_bytes,
				sizeof(struct rte_ether_addr));
		if (ret) {
			DPAA2_PMD_ERR("FS NH_FLD_ETH_DA rule data set failed");
			return -1;
		}
	}

	/* Ether type: same sequence with the ETH_TYPE field. */
	if (memcmp((const char *)&mask->type, zero_cmp, sizeof(rte_be16_t))) {
		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_ETH, NH_FLD_ETH_TYPE);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.qos_key_extract,
					NET_PROT_ETH, NH_FLD_ETH_TYPE,
					RTE_ETHER_TYPE_LEN);
			if (ret) {
				DPAA2_PMD_ERR("QoS Extract add ETH_TYPE failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}
		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_ETH, NH_FLD_ETH_TYPE);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.tc_key_extract[group],
					NET_PROT_ETH, NH_FLD_ETH_TYPE,
					RTE_ETHER_TYPE_LEN);
			if (ret) {
				DPAA2_PMD_ERR("FS Extract add ETH_TYPE failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move ipaddr before ETH TYPE rule set failed");
				return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.qos_key_extract,
				&flow->qos_rule,
				NET_PROT_ETH,
				NH_FLD_ETH_TYPE,
				&spec->type,
				&mask->type,
				sizeof(rte_be16_t));
		if (ret) {
			DPAA2_PMD_ERR("QoS NH_FLD_ETH_TYPE rule data set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.tc_key_extract[group],
				&flow->fs_rule,
				NET_PROT_ETH,
				NH_FLD_ETH_TYPE,
				&spec->type,
				&mask->type,
				sizeof(rte_be16_t));
		if (ret) {
			DPAA2_PMD_ERR("FS NH_FLD_ETH_TYPE rule data set failed");
			return -1;
		}
	}

	(*device_configured) |= local_cfg;

	return 0;
}
930 
/* Translate a VLAN pattern item into QoS and FS extracts and rules.
 * A spec-less VLAN item only discriminates the protocol: the ETH_TYPE
 * extract is matched against the 802.1Q ether type. Otherwise the TCI
 * field is extracted and matched. Reconfiguration needs are reported
 * through *device_configured.
 *
 * @return 0 on success, -1 on failure.
 */
static int
dpaa2_configure_flow_vlan(struct rte_flow *flow,
			  struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item *pattern,
			  const struct rte_flow_action actions[] __rte_unused,
			  struct rte_flow_error *error __rte_unused,
			  int *device_configured)
{
	int index, ret;
	int local_cfg = 0;
	uint32_t group;
	const struct rte_flow_item_vlan *spec, *mask;

	const struct rte_flow_item_vlan *last __rte_unused;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	group = attr->group;

	/* Parse pattern list to get the matching parameters */
	spec    = (const struct rte_flow_item_vlan *)pattern->spec;
	last    = (const struct rte_flow_item_vlan *)pattern->last;
	mask    = (const struct rte_flow_item_vlan *)
		(pattern->mask ? pattern->mask : &dpaa2_flow_item_vlan_mask);

	/* Get traffic class index and flow id to be configured */
	flow->tc_id = group;
	flow->tc_index = attr->priority;

	if (!spec) {
		/* Don't care any field of vlan header,
		 * only care vlan protocol.
		 */
		/* Eth type is actually used for vLan classification.
		 */
		struct proto_discrimination proto;

		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_ETH, NH_FLD_ETH_TYPE);
		if (index < 0) {
			ret = dpaa2_flow_proto_discrimination_extract(
						&priv->extract.qos_key_extract,
						RTE_FLOW_ITEM_TYPE_ETH);
			if (ret) {
				DPAA2_PMD_ERR(
				"QoS Ext ETH_TYPE to discriminate vLan failed");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_ETH, NH_FLD_ETH_TYPE);
		if (index < 0) {
			ret = dpaa2_flow_proto_discrimination_extract(
					&priv->extract.tc_key_extract[group],
					RTE_FLOW_ITEM_TYPE_ETH);
			if (ret) {
				DPAA2_PMD_ERR(
				"FS Ext ETH_TYPE to discriminate vLan failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		/* New extracts may displace IP address fields; restore
		 * them to the key tail before writing rule data.
		 */
		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
			"Move ipaddr before vLan discrimination set failed");
			return -1;
		}

		proto.type = RTE_FLOW_ITEM_TYPE_ETH;
		proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
							proto, group);
		if (ret) {
			DPAA2_PMD_ERR("vLan discrimination rule set failed");
			return -1;
		}

		(*device_configured) |= local_cfg;

		return 0;
	}

	if (dpaa2_flow_extract_support((const uint8_t *)mask,
		RTE_FLOW_ITEM_TYPE_VLAN)) {
		DPAA2_PMD_WARN("Extract field(s) of vlan not support.");

		return -1;
	}

	/* Zero TCI mask: nothing to match beyond the item's presence. */
	if (!mask->tci)
		return 0;

	/* Ensure the VLAN_TCI extract exists in both keys. */
	index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_VLAN, NH_FLD_VLAN_TCI);
	if (index < 0) {
		ret = dpaa2_flow_extract_add(
						&priv->extract.qos_key_extract,
						NET_PROT_VLAN,
						NH_FLD_VLAN_TCI,
						sizeof(rte_be16_t));
		if (ret) {
			DPAA2_PMD_ERR("QoS Extract add VLAN_TCI failed.");

			return -1;
		}
		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
	}

	index = dpaa2_flow_extract_search(
			&priv->extract.tc_key_extract[group].dpkg,
			NET_PROT_VLAN, NH_FLD_VLAN_TCI);
	if (index < 0) {
		ret = dpaa2_flow_extract_add(
				&priv->extract.tc_key_extract[group],
				NET_PROT_VLAN,
				NH_FLD_VLAN_TCI,
				sizeof(rte_be16_t));
		if (ret) {
			DPAA2_PMD_ERR("FS Extract add VLAN_TCI failed.");

			return -1;
		}
		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
	}

	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
	if (ret) {
		DPAA2_PMD_ERR(
			"Move ipaddr before VLAN TCI rule set failed");
		return -1;
	}

	ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
				&flow->qos_rule,
				NET_PROT_VLAN,
				NH_FLD_VLAN_TCI,
				&spec->tci,
				&mask->tci,
				sizeof(rte_be16_t));
	if (ret) {
		DPAA2_PMD_ERR("QoS NH_FLD_VLAN_TCI rule data set failed");
		return -1;
	}

	ret = dpaa2_flow_rule_data_set(
			&priv->extract.tc_key_extract[group],
			&flow->fs_rule,
			NET_PROT_VLAN,
			NH_FLD_VLAN_TCI,
			&spec->tci,
			&mask->tci,
			sizeof(rte_be16_t));
	if (ret) {
		DPAA2_PMD_ERR("FS NH_FLD_VLAN_TCI rule data set failed");
		return -1;
	}

	(*device_configured) |= local_cfg;

	return 0;
}
1101 
1102 static int
1103 dpaa2_configure_flow_generic_ip(
1104 	struct rte_flow *flow,
1105 	struct rte_eth_dev *dev,
1106 	const struct rte_flow_attr *attr,
1107 	const struct rte_flow_item *pattern,
1108 	const struct rte_flow_action actions[] __rte_unused,
1109 	struct rte_flow_error *error __rte_unused,
1110 	int *device_configured)
1111 {
1112 	int index, ret;
1113 	int local_cfg = 0;
1114 	uint32_t group;
1115 	const struct rte_flow_item_ipv4 *spec_ipv4 = 0,
1116 		*mask_ipv4 = 0;
1117 	const struct rte_flow_item_ipv6 *spec_ipv6 = 0,
1118 		*mask_ipv6 = 0;
1119 	const void *key, *mask;
1120 	enum net_prot prot;
1121 
1122 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1123 	const char zero_cmp[NH_FLD_IPV6_ADDR_SIZE] = {0};
1124 	int size;
1125 
1126 	group = attr->group;
1127 
1128 	/* Parse pattern list to get the matching parameters */
1129 	if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1130 		spec_ipv4 = (const struct rte_flow_item_ipv4 *)pattern->spec;
1131 		mask_ipv4 = (const struct rte_flow_item_ipv4 *)
1132 			(pattern->mask ? pattern->mask :
1133 					&dpaa2_flow_item_ipv4_mask);
1134 	} else {
1135 		spec_ipv6 = (const struct rte_flow_item_ipv6 *)pattern->spec;
1136 		mask_ipv6 = (const struct rte_flow_item_ipv6 *)
1137 			(pattern->mask ? pattern->mask :
1138 					&dpaa2_flow_item_ipv6_mask);
1139 	}
1140 
1141 	/* Get traffic class index and flow id to be configured */
1142 	flow->tc_id = group;
1143 	flow->tc_index = attr->priority;
1144 
1145 	if (!spec_ipv4 && !spec_ipv6) {
1146 		/* Don't care any field of IP header,
1147 		 * only care IP protocol.
1148 		 * Example: flow create 0 ingress pattern ipv6 /
1149 		 */
1150 		/* Eth type is actually used for IP identification.
1151 		 */
1152 		/* TODO: Current design only supports Eth + IP,
1153 		 *  Eth + vLan + IP needs to add.
1154 		 */
1155 		struct proto_discrimination proto;
1156 
1157 		index = dpaa2_flow_extract_search(
1158 				&priv->extract.qos_key_extract.dpkg,
1159 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1160 		if (index < 0) {
1161 			ret = dpaa2_flow_proto_discrimination_extract(
1162 					&priv->extract.qos_key_extract,
1163 					RTE_FLOW_ITEM_TYPE_ETH);
1164 			if (ret) {
1165 				DPAA2_PMD_ERR(
1166 				"QoS Ext ETH_TYPE to discriminate IP failed.");
1167 
1168 				return -1;
1169 			}
1170 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1171 		}
1172 
1173 		index = dpaa2_flow_extract_search(
1174 				&priv->extract.tc_key_extract[group].dpkg,
1175 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1176 		if (index < 0) {
1177 			ret = dpaa2_flow_proto_discrimination_extract(
1178 					&priv->extract.tc_key_extract[group],
1179 					RTE_FLOW_ITEM_TYPE_ETH);
1180 			if (ret) {
1181 				DPAA2_PMD_ERR(
1182 				"FS Ext ETH_TYPE to discriminate IP failed");
1183 
1184 				return -1;
1185 			}
1186 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1187 		}
1188 
1189 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1190 		if (ret) {
1191 			DPAA2_PMD_ERR(
1192 			"Move ipaddr before IP discrimination set failed");
1193 			return -1;
1194 		}
1195 
1196 		proto.type = RTE_FLOW_ITEM_TYPE_ETH;
1197 		if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4)
1198 			proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
1199 		else
1200 			proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
1201 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1202 							proto, group);
1203 		if (ret) {
1204 			DPAA2_PMD_ERR("IP discrimination rule set failed");
1205 			return -1;
1206 		}
1207 
1208 		(*device_configured) |= local_cfg;
1209 
1210 		return 0;
1211 	}
1212 
1213 	if (mask_ipv4) {
1214 		if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv4,
1215 			RTE_FLOW_ITEM_TYPE_IPV4)) {
1216 			DPAA2_PMD_WARN("Extract field(s) of IPv4 not support.");
1217 
1218 			return -1;
1219 		}
1220 	}
1221 
1222 	if (mask_ipv6) {
1223 		if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv6,
1224 			RTE_FLOW_ITEM_TYPE_IPV6)) {
1225 			DPAA2_PMD_WARN("Extract field(s) of IPv6 not support.");
1226 
1227 			return -1;
1228 		}
1229 	}
1230 
1231 	if (mask_ipv4 && (mask_ipv4->hdr.src_addr ||
1232 		mask_ipv4->hdr.dst_addr)) {
1233 		flow->ipaddr_rule.ipaddr_type = FLOW_IPV4_ADDR;
1234 	} else if (mask_ipv6 &&
1235 		(memcmp((const char *)mask_ipv6->hdr.src_addr,
1236 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE) ||
1237 		memcmp((const char *)mask_ipv6->hdr.dst_addr,
1238 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1239 		flow->ipaddr_rule.ipaddr_type = FLOW_IPV6_ADDR;
1240 	}
1241 
1242 	if ((mask_ipv4 && mask_ipv4->hdr.src_addr) ||
1243 		(mask_ipv6 &&
1244 			memcmp((const char *)mask_ipv6->hdr.src_addr,
1245 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1246 		index = dpaa2_flow_extract_search(
1247 				&priv->extract.qos_key_extract.dpkg,
1248 				NET_PROT_IP, NH_FLD_IP_SRC);
1249 		if (index < 0) {
1250 			ret = dpaa2_flow_extract_add(
1251 						&priv->extract.qos_key_extract,
1252 						NET_PROT_IP,
1253 						NH_FLD_IP_SRC,
1254 						0);
1255 			if (ret) {
1256 				DPAA2_PMD_ERR("QoS Extract add IP_SRC failed.");
1257 
1258 				return -1;
1259 			}
1260 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1261 		}
1262 
1263 		index = dpaa2_flow_extract_search(
1264 				&priv->extract.tc_key_extract[group].dpkg,
1265 				NET_PROT_IP, NH_FLD_IP_SRC);
1266 		if (index < 0) {
1267 			ret = dpaa2_flow_extract_add(
1268 					&priv->extract.tc_key_extract[group],
1269 					NET_PROT_IP,
1270 					NH_FLD_IP_SRC,
1271 					0);
1272 			if (ret) {
1273 				DPAA2_PMD_ERR("FS Extract add IP_SRC failed.");
1274 
1275 				return -1;
1276 			}
1277 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1278 		}
1279 
1280 		if (spec_ipv4)
1281 			key = &spec_ipv4->hdr.src_addr;
1282 		else
1283 			key = &spec_ipv6->hdr.src_addr[0];
1284 		if (mask_ipv4) {
1285 			mask = &mask_ipv4->hdr.src_addr;
1286 			size = NH_FLD_IPV4_ADDR_SIZE;
1287 			prot = NET_PROT_IPV4;
1288 		} else {
1289 			mask = &mask_ipv6->hdr.src_addr[0];
1290 			size = NH_FLD_IPV6_ADDR_SIZE;
1291 			prot = NET_PROT_IPV6;
1292 		}
1293 
1294 		ret = dpaa2_flow_rule_data_set(
1295 				&priv->extract.qos_key_extract,
1296 				&flow->qos_rule,
1297 				prot, NH_FLD_IP_SRC,
1298 				key,	mask, size);
1299 		if (ret) {
1300 			DPAA2_PMD_ERR("QoS NH_FLD_IP_SRC rule data set failed");
1301 			return -1;
1302 		}
1303 
1304 		ret = dpaa2_flow_rule_data_set(
1305 				&priv->extract.tc_key_extract[group],
1306 				&flow->fs_rule,
1307 				prot, NH_FLD_IP_SRC,
1308 				key,	mask, size);
1309 		if (ret) {
1310 			DPAA2_PMD_ERR("FS NH_FLD_IP_SRC rule data set failed");
1311 			return -1;
1312 		}
1313 
1314 		flow->ipaddr_rule.qos_ipsrc_offset =
1315 			dpaa2_flow_extract_key_offset(
1316 				&priv->extract.qos_key_extract,
1317 				prot, NH_FLD_IP_SRC);
1318 		flow->ipaddr_rule.fs_ipsrc_offset =
1319 			dpaa2_flow_extract_key_offset(
1320 				&priv->extract.tc_key_extract[group],
1321 				prot, NH_FLD_IP_SRC);
1322 	}
1323 
1324 	if ((mask_ipv4 && mask_ipv4->hdr.dst_addr) ||
1325 		(mask_ipv6 &&
1326 			memcmp((const char *)mask_ipv6->hdr.dst_addr,
1327 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1328 		index = dpaa2_flow_extract_search(
1329 				&priv->extract.qos_key_extract.dpkg,
1330 				NET_PROT_IP, NH_FLD_IP_DST);
1331 		if (index < 0) {
1332 			if (mask_ipv4)
1333 				size = NH_FLD_IPV4_ADDR_SIZE;
1334 			else
1335 				size = NH_FLD_IPV6_ADDR_SIZE;
1336 			ret = dpaa2_flow_extract_add(
1337 						&priv->extract.qos_key_extract,
1338 						NET_PROT_IP,
1339 						NH_FLD_IP_DST,
1340 						size);
1341 			if (ret) {
1342 				DPAA2_PMD_ERR("QoS Extract add IP_DST failed.");
1343 
1344 				return -1;
1345 			}
1346 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1347 		}
1348 
1349 		index = dpaa2_flow_extract_search(
1350 				&priv->extract.tc_key_extract[group].dpkg,
1351 				NET_PROT_IP, NH_FLD_IP_DST);
1352 		if (index < 0) {
1353 			if (mask_ipv4)
1354 				size = NH_FLD_IPV4_ADDR_SIZE;
1355 			else
1356 				size = NH_FLD_IPV6_ADDR_SIZE;
1357 			ret = dpaa2_flow_extract_add(
1358 					&priv->extract.tc_key_extract[group],
1359 					NET_PROT_IP,
1360 					NH_FLD_IP_DST,
1361 					size);
1362 			if (ret) {
1363 				DPAA2_PMD_ERR("FS Extract add IP_DST failed.");
1364 
1365 				return -1;
1366 			}
1367 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1368 		}
1369 
1370 		if (spec_ipv4)
1371 			key = &spec_ipv4->hdr.dst_addr;
1372 		else
1373 			key = spec_ipv6->hdr.dst_addr;
1374 		if (mask_ipv4) {
1375 			mask = &mask_ipv4->hdr.dst_addr;
1376 			size = NH_FLD_IPV4_ADDR_SIZE;
1377 			prot = NET_PROT_IPV4;
1378 		} else {
1379 			mask = &mask_ipv6->hdr.dst_addr[0];
1380 			size = NH_FLD_IPV6_ADDR_SIZE;
1381 			prot = NET_PROT_IPV6;
1382 		}
1383 
1384 		ret = dpaa2_flow_rule_data_set(
1385 				&priv->extract.qos_key_extract,
1386 				&flow->qos_rule,
1387 				prot, NH_FLD_IP_DST,
1388 				key,	mask, size);
1389 		if (ret) {
1390 			DPAA2_PMD_ERR("QoS NH_FLD_IP_DST rule data set failed");
1391 			return -1;
1392 		}
1393 
1394 		ret = dpaa2_flow_rule_data_set(
1395 				&priv->extract.tc_key_extract[group],
1396 				&flow->fs_rule,
1397 				prot, NH_FLD_IP_DST,
1398 				key,	mask, size);
1399 		if (ret) {
1400 			DPAA2_PMD_ERR("FS NH_FLD_IP_DST rule data set failed");
1401 			return -1;
1402 		}
1403 		flow->ipaddr_rule.qos_ipdst_offset =
1404 			dpaa2_flow_extract_key_offset(
1405 				&priv->extract.qos_key_extract,
1406 				prot, NH_FLD_IP_DST);
1407 		flow->ipaddr_rule.fs_ipdst_offset =
1408 			dpaa2_flow_extract_key_offset(
1409 				&priv->extract.tc_key_extract[group],
1410 				prot, NH_FLD_IP_DST);
1411 	}
1412 
1413 	if ((mask_ipv4 && mask_ipv4->hdr.next_proto_id) ||
1414 		(mask_ipv6 && mask_ipv6->hdr.proto)) {
1415 		index = dpaa2_flow_extract_search(
1416 				&priv->extract.qos_key_extract.dpkg,
1417 				NET_PROT_IP, NH_FLD_IP_PROTO);
1418 		if (index < 0) {
1419 			ret = dpaa2_flow_extract_add(
1420 				&priv->extract.qos_key_extract,
1421 				NET_PROT_IP,
1422 				NH_FLD_IP_PROTO,
1423 				NH_FLD_IP_PROTO_SIZE);
1424 			if (ret) {
1425 				DPAA2_PMD_ERR("QoS Extract add IP_DST failed.");
1426 
1427 				return -1;
1428 			}
1429 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1430 		}
1431 
1432 		index = dpaa2_flow_extract_search(
1433 				&priv->extract.tc_key_extract[group].dpkg,
1434 				NET_PROT_IP, NH_FLD_IP_PROTO);
1435 		if (index < 0) {
1436 			ret = dpaa2_flow_extract_add(
1437 					&priv->extract.tc_key_extract[group],
1438 					NET_PROT_IP,
1439 					NH_FLD_IP_PROTO,
1440 					NH_FLD_IP_PROTO_SIZE);
1441 			if (ret) {
1442 				DPAA2_PMD_ERR("FS Extract add IP_DST failed.");
1443 
1444 				return -1;
1445 			}
1446 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1447 		}
1448 
1449 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1450 		if (ret) {
1451 			DPAA2_PMD_ERR(
1452 				"Move ipaddr after NH_FLD_IP_PROTO rule set failed");
1453 			return -1;
1454 		}
1455 
1456 		if (spec_ipv4)
1457 			key = &spec_ipv4->hdr.next_proto_id;
1458 		else
1459 			key = &spec_ipv6->hdr.proto;
1460 		if (mask_ipv4)
1461 			mask = &mask_ipv4->hdr.next_proto_id;
1462 		else
1463 			mask = &mask_ipv6->hdr.proto;
1464 
1465 		ret = dpaa2_flow_rule_data_set(
1466 				&priv->extract.qos_key_extract,
1467 				&flow->qos_rule,
1468 				NET_PROT_IP,
1469 				NH_FLD_IP_PROTO,
1470 				key,	mask, NH_FLD_IP_PROTO_SIZE);
1471 		if (ret) {
1472 			DPAA2_PMD_ERR("QoS NH_FLD_IP_PROTO rule data set failed");
1473 			return -1;
1474 		}
1475 
1476 		ret = dpaa2_flow_rule_data_set(
1477 				&priv->extract.tc_key_extract[group],
1478 				&flow->fs_rule,
1479 				NET_PROT_IP,
1480 				NH_FLD_IP_PROTO,
1481 				key,	mask, NH_FLD_IP_PROTO_SIZE);
1482 		if (ret) {
1483 			DPAA2_PMD_ERR("FS NH_FLD_IP_PROTO rule data set failed");
1484 			return -1;
1485 		}
1486 	}
1487 
1488 	(*device_configured) |= local_cfg;
1489 
1490 	return 0;
1491 }
1492 
1493 static int
1494 dpaa2_configure_flow_icmp(struct rte_flow *flow,
1495 			  struct rte_eth_dev *dev,
1496 			  const struct rte_flow_attr *attr,
1497 			  const struct rte_flow_item *pattern,
1498 			  const struct rte_flow_action actions[] __rte_unused,
1499 			  struct rte_flow_error *error __rte_unused,
1500 			  int *device_configured)
1501 {
1502 	int index, ret;
1503 	int local_cfg = 0;
1504 	uint32_t group;
1505 	const struct rte_flow_item_icmp *spec, *mask;
1506 
1507 	const struct rte_flow_item_icmp *last __rte_unused;
1508 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1509 
1510 	group = attr->group;
1511 
1512 	/* Parse pattern list to get the matching parameters */
1513 	spec    = (const struct rte_flow_item_icmp *)pattern->spec;
1514 	last    = (const struct rte_flow_item_icmp *)pattern->last;
1515 	mask    = (const struct rte_flow_item_icmp *)
1516 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_icmp_mask);
1517 
1518 	/* Get traffic class index and flow id to be configured */
1519 	flow->tc_id = group;
1520 	flow->tc_index = attr->priority;
1521 
1522 	if (!spec) {
1523 		/* Don't care any field of ICMP header,
1524 		 * only care ICMP protocol.
1525 		 * Example: flow create 0 ingress pattern icmp /
1526 		 */
1527 		/* Next proto of Generical IP is actually used
1528 		 * for ICMP identification.
1529 		 */
1530 		struct proto_discrimination proto;
1531 
1532 		index = dpaa2_flow_extract_search(
1533 				&priv->extract.qos_key_extract.dpkg,
1534 				NET_PROT_IP, NH_FLD_IP_PROTO);
1535 		if (index < 0) {
1536 			ret = dpaa2_flow_proto_discrimination_extract(
1537 					&priv->extract.qos_key_extract,
1538 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1539 			if (ret) {
1540 				DPAA2_PMD_ERR(
1541 					"QoS Extract IP protocol to discriminate ICMP failed.");
1542 
1543 				return -1;
1544 			}
1545 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1546 		}
1547 
1548 		index = dpaa2_flow_extract_search(
1549 				&priv->extract.tc_key_extract[group].dpkg,
1550 				NET_PROT_IP, NH_FLD_IP_PROTO);
1551 		if (index < 0) {
1552 			ret = dpaa2_flow_proto_discrimination_extract(
1553 					&priv->extract.tc_key_extract[group],
1554 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1555 			if (ret) {
1556 				DPAA2_PMD_ERR(
1557 					"FS Extract IP protocol to discriminate ICMP failed.");
1558 
1559 				return -1;
1560 			}
1561 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1562 		}
1563 
1564 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1565 		if (ret) {
1566 			DPAA2_PMD_ERR(
1567 				"Move IP addr before ICMP discrimination set failed");
1568 			return -1;
1569 		}
1570 
1571 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
1572 		proto.ip_proto = IPPROTO_ICMP;
1573 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1574 							proto, group);
1575 		if (ret) {
1576 			DPAA2_PMD_ERR("ICMP discrimination rule set failed");
1577 			return -1;
1578 		}
1579 
1580 		(*device_configured) |= local_cfg;
1581 
1582 		return 0;
1583 	}
1584 
1585 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
1586 		RTE_FLOW_ITEM_TYPE_ICMP)) {
1587 		DPAA2_PMD_WARN("Extract field(s) of ICMP not support.");
1588 
1589 		return -1;
1590 	}
1591 
1592 	if (mask->hdr.icmp_type) {
1593 		index = dpaa2_flow_extract_search(
1594 				&priv->extract.qos_key_extract.dpkg,
1595 				NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
1596 		if (index < 0) {
1597 			ret = dpaa2_flow_extract_add(
1598 					&priv->extract.qos_key_extract,
1599 					NET_PROT_ICMP,
1600 					NH_FLD_ICMP_TYPE,
1601 					NH_FLD_ICMP_TYPE_SIZE);
1602 			if (ret) {
1603 				DPAA2_PMD_ERR("QoS Extract add ICMP_TYPE failed.");
1604 
1605 				return -1;
1606 			}
1607 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1608 		}
1609 
1610 		index = dpaa2_flow_extract_search(
1611 				&priv->extract.tc_key_extract[group].dpkg,
1612 				NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
1613 		if (index < 0) {
1614 			ret = dpaa2_flow_extract_add(
1615 					&priv->extract.tc_key_extract[group],
1616 					NET_PROT_ICMP,
1617 					NH_FLD_ICMP_TYPE,
1618 					NH_FLD_ICMP_TYPE_SIZE);
1619 			if (ret) {
1620 				DPAA2_PMD_ERR("FS Extract add ICMP_TYPE failed.");
1621 
1622 				return -1;
1623 			}
1624 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1625 		}
1626 
1627 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1628 		if (ret) {
1629 			DPAA2_PMD_ERR(
1630 				"Move ipaddr before ICMP TYPE set failed");
1631 			return -1;
1632 		}
1633 
1634 		ret = dpaa2_flow_rule_data_set(
1635 				&priv->extract.qos_key_extract,
1636 				&flow->qos_rule,
1637 				NET_PROT_ICMP,
1638 				NH_FLD_ICMP_TYPE,
1639 				&spec->hdr.icmp_type,
1640 				&mask->hdr.icmp_type,
1641 				NH_FLD_ICMP_TYPE_SIZE);
1642 		if (ret) {
1643 			DPAA2_PMD_ERR("QoS NH_FLD_ICMP_TYPE rule data set failed");
1644 			return -1;
1645 		}
1646 
1647 		ret = dpaa2_flow_rule_data_set(
1648 				&priv->extract.tc_key_extract[group],
1649 				&flow->fs_rule,
1650 				NET_PROT_ICMP,
1651 				NH_FLD_ICMP_TYPE,
1652 				&spec->hdr.icmp_type,
1653 				&mask->hdr.icmp_type,
1654 				NH_FLD_ICMP_TYPE_SIZE);
1655 		if (ret) {
1656 			DPAA2_PMD_ERR("FS NH_FLD_ICMP_TYPE rule data set failed");
1657 			return -1;
1658 		}
1659 	}
1660 
1661 	if (mask->hdr.icmp_code) {
1662 		index = dpaa2_flow_extract_search(
1663 				&priv->extract.qos_key_extract.dpkg,
1664 				NET_PROT_ICMP, NH_FLD_ICMP_CODE);
1665 		if (index < 0) {
1666 			ret = dpaa2_flow_extract_add(
1667 					&priv->extract.qos_key_extract,
1668 					NET_PROT_ICMP,
1669 					NH_FLD_ICMP_CODE,
1670 					NH_FLD_ICMP_CODE_SIZE);
1671 			if (ret) {
1672 				DPAA2_PMD_ERR("QoS Extract add ICMP_CODE failed.");
1673 
1674 				return -1;
1675 			}
1676 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1677 		}
1678 
1679 		index = dpaa2_flow_extract_search(
1680 				&priv->extract.tc_key_extract[group].dpkg,
1681 				NET_PROT_ICMP, NH_FLD_ICMP_CODE);
1682 		if (index < 0) {
1683 			ret = dpaa2_flow_extract_add(
1684 					&priv->extract.tc_key_extract[group],
1685 					NET_PROT_ICMP,
1686 					NH_FLD_ICMP_CODE,
1687 					NH_FLD_ICMP_CODE_SIZE);
1688 			if (ret) {
1689 				DPAA2_PMD_ERR("FS Extract add ICMP_CODE failed.");
1690 
1691 				return -1;
1692 			}
1693 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1694 		}
1695 
1696 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1697 		if (ret) {
1698 			DPAA2_PMD_ERR(
1699 				"Move ipaddr after ICMP CODE set failed");
1700 			return -1;
1701 		}
1702 
1703 		ret = dpaa2_flow_rule_data_set(
1704 				&priv->extract.qos_key_extract,
1705 				&flow->qos_rule,
1706 				NET_PROT_ICMP,
1707 				NH_FLD_ICMP_CODE,
1708 				&spec->hdr.icmp_code,
1709 				&mask->hdr.icmp_code,
1710 				NH_FLD_ICMP_CODE_SIZE);
1711 		if (ret) {
1712 			DPAA2_PMD_ERR("QoS NH_FLD_ICMP_CODE rule data set failed");
1713 			return -1;
1714 		}
1715 
1716 		ret = dpaa2_flow_rule_data_set(
1717 				&priv->extract.tc_key_extract[group],
1718 				&flow->fs_rule,
1719 				NET_PROT_ICMP,
1720 				NH_FLD_ICMP_CODE,
1721 				&spec->hdr.icmp_code,
1722 				&mask->hdr.icmp_code,
1723 				NH_FLD_ICMP_CODE_SIZE);
1724 		if (ret) {
1725 			DPAA2_PMD_ERR("FS NH_FLD_ICMP_CODE rule data set failed");
1726 			return -1;
1727 		}
1728 	}
1729 
1730 	(*device_configured) |= local_cfg;
1731 
1732 	return 0;
1733 }
1734 
/* Configure QoS/FS table rules for a UDP pattern item.
 *
 * When no spec is given, or when MC/WRIOP cannot identify the L4
 * protocol from ports (!mc_l4_port_identification), the IP
 * next-protocol field is used to discriminate UDP traffic. With a
 * spec, exact matches on the UDP source and/or destination port are
 * installed into both the QoS and the per-TC FS tables.
 *
 * Returns 0 on success, -1 on any failure.
 */
static int
dpaa2_configure_flow_udp(struct rte_flow *flow,
			 struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item *pattern,
			  const struct rte_flow_action actions[] __rte_unused,
			  struct rte_flow_error *error __rte_unused,
			  int *device_configured)
{
	int index, ret;
	int local_cfg = 0;
	uint32_t group;
	const struct rte_flow_item_udp *spec, *mask;

	const struct rte_flow_item_udp *last __rte_unused;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	group = attr->group;

	/* Parse pattern list to get the matching parameters.
	 * A missing mask falls back to the default full mask of the item.
	 */
	spec    = (const struct rte_flow_item_udp *)pattern->spec;
	last    = (const struct rte_flow_item_udp *)pattern->last;
	mask    = (const struct rte_flow_item_udp *)
		(pattern->mask ? pattern->mask : &dpaa2_flow_item_udp_mask);

	/* Get traffic class index and flow id to be configured */
	flow->tc_id = group;
	flow->tc_index = attr->priority;

	if (!spec || !mc_l4_port_identification) {
		/* Discriminate UDP by the IP next-protocol field
		 * (workaround when MC/WRIOP cannot identify L4 by ports).
		 */
		struct proto_discrimination proto;

		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_IP, NH_FLD_IP_PROTO);
		if (index < 0) {
			ret = dpaa2_flow_proto_discrimination_extract(
					&priv->extract.qos_key_extract,
					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
			if (ret) {
				DPAA2_PMD_ERR(
					"QoS Extract IP protocol to discriminate UDP failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_IP, NH_FLD_IP_PROTO);
		if (index < 0) {
			ret = dpaa2_flow_proto_discrimination_extract(
				&priv->extract.tc_key_extract[group],
				DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
			if (ret) {
				DPAA2_PMD_ERR(
					"FS Extract IP protocol to discriminate UDP failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		/* IP addresses are variable-size and must stay at the key
		 * tail; move them before appending the discrimination rule.
		 */
		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move IP addr before UDP discrimination set failed");
			return -1;
		}

		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
		proto.ip_proto = IPPROTO_UDP;
		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
							proto, group);
		if (ret) {
			DPAA2_PMD_ERR("UDP discrimination rule set failed");
			return -1;
		}

		(*device_configured) |= local_cfg;

		/* No spec: protocol discrimination is all we can match. */
		if (!spec)
			return 0;
	}

	/* Reject masks touching fields the hardware cannot extract. */
	if (dpaa2_flow_extract_support((const uint8_t *)mask,
		RTE_FLOW_ITEM_TYPE_UDP)) {
		DPAA2_PMD_WARN("Extract field(s) of UDP not support.");

		return -1;
	}

	if (mask->hdr.src_port) {
		/* Ensure the UDP source port is part of both key profiles,
		 * then set the match data in the QoS and FS rules.
		 */
		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.qos_key_extract,
				NET_PROT_UDP,
				NH_FLD_UDP_PORT_SRC,
				NH_FLD_UDP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("QoS Extract add UDP_SRC failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.tc_key_extract[group],
					NET_PROT_UDP,
					NH_FLD_UDP_PORT_SRC,
					NH_FLD_UDP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("FS Extract add UDP_SRC failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move ipaddr before UDP_PORT_SRC set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
				&flow->qos_rule,
				NET_PROT_UDP,
				NH_FLD_UDP_PORT_SRC,
				&spec->hdr.src_port,
				&mask->hdr.src_port,
				NH_FLD_UDP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"QoS NH_FLD_UDP_PORT_SRC rule data set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.tc_key_extract[group],
				&flow->fs_rule,
				NET_PROT_UDP,
				NH_FLD_UDP_PORT_SRC,
				&spec->hdr.src_port,
				&mask->hdr.src_port,
				NH_FLD_UDP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"FS NH_FLD_UDP_PORT_SRC rule data set failed");
			return -1;
		}
	}

	if (mask->hdr.dst_port) {
		/* Same sequence for the UDP destination port. */
		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.qos_key_extract,
					NET_PROT_UDP,
					NH_FLD_UDP_PORT_DST,
					NH_FLD_UDP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("QoS Extract add UDP_DST failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.tc_key_extract[group],
					NET_PROT_UDP,
					NH_FLD_UDP_PORT_DST,
					NH_FLD_UDP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("FS Extract add UDP_DST failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move ipaddr before UDP_PORT_DST set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.qos_key_extract,
				&flow->qos_rule,
				NET_PROT_UDP,
				NH_FLD_UDP_PORT_DST,
				&spec->hdr.dst_port,
				&mask->hdr.dst_port,
				NH_FLD_UDP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"QoS NH_FLD_UDP_PORT_DST rule data set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.tc_key_extract[group],
				&flow->fs_rule,
				NET_PROT_UDP,
				NH_FLD_UDP_PORT_DST,
				&spec->hdr.dst_port,
				&mask->hdr.dst_port,
				NH_FLD_UDP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"FS NH_FLD_UDP_PORT_DST rule data set failed");
			return -1;
		}
	}

	(*device_configured) |= local_cfg;

	return 0;
}
1973 
/* Configure QoS/FS table rules for a TCP pattern item.
 *
 * When no spec is given, or when MC/WRIOP cannot identify the L4
 * protocol from ports (!mc_l4_port_identification), the IP
 * next-protocol field is used to discriminate TCP traffic. With a
 * spec, exact matches on the TCP source and/or destination port are
 * installed into both the QoS and the per-TC FS tables.
 *
 * Returns 0 on success, -1 on any failure.
 */
static int
dpaa2_configure_flow_tcp(struct rte_flow *flow,
			 struct rte_eth_dev *dev,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_item *pattern,
			 const struct rte_flow_action actions[] __rte_unused,
			 struct rte_flow_error *error __rte_unused,
			 int *device_configured)
{
	int index, ret;
	int local_cfg = 0;
	uint32_t group;
	const struct rte_flow_item_tcp *spec, *mask;

	const struct rte_flow_item_tcp *last __rte_unused;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	group = attr->group;

	/* Parse pattern list to get the matching parameters.
	 * A missing mask falls back to the default full mask of the item.
	 */
	spec    = (const struct rte_flow_item_tcp *)pattern->spec;
	last    = (const struct rte_flow_item_tcp *)pattern->last;
	mask    = (const struct rte_flow_item_tcp *)
		(pattern->mask ? pattern->mask : &dpaa2_flow_item_tcp_mask);

	/* Get traffic class index and flow id to be configured */
	flow->tc_id = group;
	flow->tc_index = attr->priority;

	if (!spec || !mc_l4_port_identification) {
		/* Discriminate TCP by the IP next-protocol field
		 * (workaround when MC/WRIOP cannot identify L4 by ports).
		 */
		struct proto_discrimination proto;

		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_IP, NH_FLD_IP_PROTO);
		if (index < 0) {
			ret = dpaa2_flow_proto_discrimination_extract(
					&priv->extract.qos_key_extract,
					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
			if (ret) {
				DPAA2_PMD_ERR(
					"QoS Extract IP protocol to discriminate TCP failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_IP, NH_FLD_IP_PROTO);
		if (index < 0) {
			ret = dpaa2_flow_proto_discrimination_extract(
				&priv->extract.tc_key_extract[group],
				DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
			if (ret) {
				DPAA2_PMD_ERR(
					"FS Extract IP protocol to discriminate TCP failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		/* IP addresses are variable-size and must stay at the key
		 * tail; move them before appending the discrimination rule.
		 */
		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move IP addr before TCP discrimination set failed");
			return -1;
		}

		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
		proto.ip_proto = IPPROTO_TCP;
		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
							proto, group);
		if (ret) {
			DPAA2_PMD_ERR("TCP discrimination rule set failed");
			return -1;
		}

		(*device_configured) |= local_cfg;

		/* No spec: protocol discrimination is all we can match. */
		if (!spec)
			return 0;
	}

	/* Reject masks touching fields the hardware cannot extract. */
	if (dpaa2_flow_extract_support((const uint8_t *)mask,
		RTE_FLOW_ITEM_TYPE_TCP)) {
		DPAA2_PMD_WARN("Extract field(s) of TCP not support.");

		return -1;
	}

	if (mask->hdr.src_port) {
		/* Ensure the TCP source port is part of both key profiles,
		 * then set the match data in the QoS and FS rules.
		 */
		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.qos_key_extract,
					NET_PROT_TCP,
					NH_FLD_TCP_PORT_SRC,
					NH_FLD_TCP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("QoS Extract add TCP_SRC failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.tc_key_extract[group],
					NET_PROT_TCP,
					NH_FLD_TCP_PORT_SRC,
					NH_FLD_TCP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("FS Extract add TCP_SRC failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move ipaddr before TCP_PORT_SRC set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.qos_key_extract,
				&flow->qos_rule,
				NET_PROT_TCP,
				NH_FLD_TCP_PORT_SRC,
				&spec->hdr.src_port,
				&mask->hdr.src_port,
				NH_FLD_TCP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"QoS NH_FLD_TCP_PORT_SRC rule data set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.tc_key_extract[group],
				&flow->fs_rule,
				NET_PROT_TCP,
				NH_FLD_TCP_PORT_SRC,
				&spec->hdr.src_port,
				&mask->hdr.src_port,
				NH_FLD_TCP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"FS NH_FLD_TCP_PORT_SRC rule data set failed");
			return -1;
		}
	}

	if (mask->hdr.dst_port) {
		/* Same sequence for the TCP destination port. */
		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.qos_key_extract,
					NET_PROT_TCP,
					NH_FLD_TCP_PORT_DST,
					NH_FLD_TCP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("QoS Extract add TCP_DST failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.tc_key_extract[group],
					NET_PROT_TCP,
					NH_FLD_TCP_PORT_DST,
					NH_FLD_TCP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("FS Extract add TCP_DST failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move ipaddr before TCP_PORT_DST set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.qos_key_extract,
				&flow->qos_rule,
				NET_PROT_TCP,
				NH_FLD_TCP_PORT_DST,
				&spec->hdr.dst_port,
				&mask->hdr.dst_port,
				NH_FLD_TCP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"QoS NH_FLD_TCP_PORT_DST rule data set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.tc_key_extract[group],
				&flow->fs_rule,
				NET_PROT_TCP,
				NH_FLD_TCP_PORT_DST,
				&spec->hdr.dst_port,
				&mask->hdr.dst_port,
				NH_FLD_TCP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"FS NH_FLD_TCP_PORT_DST rule data set failed");
			return -1;
		}
	}

	(*device_configured) |= local_cfg;

	return 0;
}
2213 
/* Translate an RTE_FLOW_ITEM_TYPE_SCTP pattern item into DPAA2 QoS
 * (per-port) and FS (per-traffic-class) classification state.
 *
 * Key extracts are added on demand; when an extract is added, the
 * corresponding DPAA2_QOS/FS_TABLE_RECONFIGURE bit is accumulated in
 * local_cfg and OR-ed into *device_configured so the caller knows the
 * HW tables must be re-applied.
 *
 * Returns 0 on success, -1 on any failure.
 */
static int
dpaa2_configure_flow_sctp(struct rte_flow *flow,
			  struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item *pattern,
			  const struct rte_flow_action actions[] __rte_unused,
			  struct rte_flow_error *error __rte_unused,
			  int *device_configured)
{
	int index, ret;
	int local_cfg = 0;
	uint32_t group;
	const struct rte_flow_item_sctp *spec, *mask;

	const struct rte_flow_item_sctp *last __rte_unused;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	group = attr->group;

	/* Parse pattern list to get the matching parameters;
	 * a NULL mask falls back to the driver's default SCTP mask.
	 */
	spec    = (const struct rte_flow_item_sctp *)pattern->spec;
	last    = (const struct rte_flow_item_sctp *)pattern->last;
	mask    = (const struct rte_flow_item_sctp *)
			(pattern->mask ? pattern->mask :
				&dpaa2_flow_item_sctp_mask);

	/* Get traffic class index and flow id to be configured */
	flow->tc_id = group;
	flow->tc_index = attr->priority;

	/* No SCTP spec given, or MC/WRIOP cannot identify L4 protocol by
	 * ports (see mc_l4_port_identification workaround): discriminate
	 * SCTP via the IP header's next-protocol field instead.
	 */
	if (!spec || !mc_l4_port_identification) {
		struct proto_discrimination proto;

		/* Ensure the IP protocol extract exists in the QoS key. */
		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_IP, NH_FLD_IP_PROTO);
		if (index < 0) {
			ret = dpaa2_flow_proto_discrimination_extract(
					&priv->extract.qos_key_extract,
					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
			if (ret) {
				DPAA2_PMD_ERR(
					"QoS Extract IP protocol to discriminate SCTP failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		/* Ensure the IP protocol extract exists in this TC's FS key. */
		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_IP, NH_FLD_IP_PROTO);
		if (index < 0) {
			ret = dpaa2_flow_proto_discrimination_extract(
					&priv->extract.tc_key_extract[group],
					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
			if (ret) {
				DPAA2_PMD_ERR(
					"FS Extract IP protocol to discriminate SCTP failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		/* IP address extracts must stay at the tail of the key;
		 * shift them past the newly inserted extract(s).
		 */
		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move ipaddr before SCTP discrimination set failed");
			return -1;
		}

		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
		proto.ip_proto = IPPROTO_SCTP;
		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
							proto, group);
		if (ret) {
			DPAA2_PMD_ERR("SCTP discrimination rule set failed");
			return -1;
		}

		(*device_configured) |= local_cfg;

		/* Without a spec there are no port fields to match on. */
		if (!spec)
			return 0;
	}

	/* Reject masks that request SCTP fields the HW cannot extract. */
	if (dpaa2_flow_extract_support((const uint8_t *)mask,
		RTE_FLOW_ITEM_TYPE_SCTP)) {
		DPAA2_PMD_WARN("Extract field(s) of SCTP not support.");

		return -1;
	}

	/* Match on the SCTP source port. */
	if (mask->hdr.src_port) {
		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.qos_key_extract,
					NET_PROT_SCTP,
					NH_FLD_SCTP_PORT_SRC,
					NH_FLD_SCTP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("QoS Extract add SCTP_SRC failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.tc_key_extract[group],
					NET_PROT_SCTP,
					NH_FLD_SCTP_PORT_SRC,
					NH_FLD_SCTP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("FS Extract add SCTP_SRC failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		/* Keep IP address extracts at the tail after any insert. */
		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move ipaddr before SCTP_PORT_SRC set failed");
			return -1;
		}

		/* Write spec/mask bytes into the QoS rule key. */
		ret = dpaa2_flow_rule_data_set(
				&priv->extract.qos_key_extract,
				&flow->qos_rule,
				NET_PROT_SCTP,
				NH_FLD_SCTP_PORT_SRC,
				&spec->hdr.src_port,
				&mask->hdr.src_port,
				NH_FLD_SCTP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"QoS NH_FLD_SCTP_PORT_SRC rule data set failed");
			return -1;
		}

		/* And into the FS rule key of this traffic class. */
		ret = dpaa2_flow_rule_data_set(
				&priv->extract.tc_key_extract[group],
				&flow->fs_rule,
				NET_PROT_SCTP,
				NH_FLD_SCTP_PORT_SRC,
				&spec->hdr.src_port,
				&mask->hdr.src_port,
				NH_FLD_SCTP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"FS NH_FLD_SCTP_PORT_SRC rule data set failed");
			return -1;
		}
	}

	/* Match on the SCTP destination port (mirror of the source path). */
	if (mask->hdr.dst_port) {
		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.qos_key_extract,
					NET_PROT_SCTP,
					NH_FLD_SCTP_PORT_DST,
					NH_FLD_SCTP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("QoS Extract add SCTP_DST failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
		if (index < 0) {
			ret = dpaa2_flow_extract_add(
					&priv->extract.tc_key_extract[group],
					NET_PROT_SCTP,
					NH_FLD_SCTP_PORT_DST,
					NH_FLD_SCTP_PORT_SIZE);
			if (ret) {
				DPAA2_PMD_ERR("FS Extract add SCTP_DST failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move ipaddr before SCTP_PORT_DST set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.qos_key_extract,
				&flow->qos_rule,
				NET_PROT_SCTP,
				NH_FLD_SCTP_PORT_DST,
				&spec->hdr.dst_port,
				&mask->hdr.dst_port,
				NH_FLD_SCTP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"QoS NH_FLD_SCTP_PORT_DST rule data set failed");
			return -1;
		}

		ret = dpaa2_flow_rule_data_set(
				&priv->extract.tc_key_extract[group],
				&flow->fs_rule,
				NET_PROT_SCTP,
				NH_FLD_SCTP_PORT_DST,
				&spec->hdr.dst_port,
				&mask->hdr.dst_port,
				NH_FLD_SCTP_PORT_SIZE);
		if (ret) {
			DPAA2_PMD_ERR(
				"FS NH_FLD_SCTP_PORT_DST rule data set failed");
			return -1;
		}
	}

	(*device_configured) |= local_cfg;

	return 0;
}
2454 
/* Translate an RTE_FLOW_ITEM_TYPE_GRE pattern item into DPAA2 QoS and
 * FS classification state.
 *
 * With no spec, GRE traffic is discriminated purely by the IP header's
 * next-protocol field. With a spec, only the GRE protocol/type field is
 * matched (the only GRE field supported here). Sets reconfigure bits in
 * *device_configured when key extracts are added.
 *
 * Returns 0 on success, -1 on any failure.
 */
static int
dpaa2_configure_flow_gre(struct rte_flow *flow,
			 struct rte_eth_dev *dev,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_item *pattern,
			 const struct rte_flow_action actions[] __rte_unused,
			 struct rte_flow_error *error __rte_unused,
			 int *device_configured)
{
	int index, ret;
	int local_cfg = 0;
	uint32_t group;
	const struct rte_flow_item_gre *spec, *mask;

	const struct rte_flow_item_gre *last __rte_unused;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	group = attr->group;

	/* Parse pattern list to get the matching parameters;
	 * a NULL mask falls back to the driver's default GRE mask.
	 */
	spec    = (const struct rte_flow_item_gre *)pattern->spec;
	last    = (const struct rte_flow_item_gre *)pattern->last;
	mask    = (const struct rte_flow_item_gre *)
		(pattern->mask ? pattern->mask : &dpaa2_flow_item_gre_mask);

	/* Get traffic class index and flow id to be configured */
	flow->tc_id = group;
	flow->tc_index = attr->priority;

	/* No GRE spec: match any GRE packet via IP next-protocol == GRE. */
	if (!spec) {
		struct proto_discrimination proto;

		/* Ensure the IP protocol extract exists in the QoS key. */
		index = dpaa2_flow_extract_search(
				&priv->extract.qos_key_extract.dpkg,
				NET_PROT_IP, NH_FLD_IP_PROTO);
		if (index < 0) {
			ret = dpaa2_flow_proto_discrimination_extract(
					&priv->extract.qos_key_extract,
					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
			if (ret) {
				DPAA2_PMD_ERR(
					"QoS Extract IP protocol to discriminate GRE failed.");

				return -1;
			}
			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
		}

		/* Ensure the IP protocol extract exists in this TC's FS key. */
		index = dpaa2_flow_extract_search(
				&priv->extract.tc_key_extract[group].dpkg,
				NET_PROT_IP, NH_FLD_IP_PROTO);
		if (index < 0) {
			ret = dpaa2_flow_proto_discrimination_extract(
					&priv->extract.tc_key_extract[group],
					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
			if (ret) {
				DPAA2_PMD_ERR(
					"FS Extract IP protocol to discriminate GRE failed.");

				return -1;
			}
			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
		}

		/* Keep IP address extracts at the tail of the key. */
		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
		if (ret) {
			DPAA2_PMD_ERR(
				"Move IP addr before GRE discrimination set failed");
			return -1;
		}

		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
		proto.ip_proto = IPPROTO_GRE;
		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
							proto, group);
		if (ret) {
			DPAA2_PMD_ERR("GRE discrimination rule set failed");
			return -1;
		}

		(*device_configured) |= local_cfg;

		return 0;
	}

	/* Reject masks that request GRE fields the HW cannot extract. */
	if (dpaa2_flow_extract_support((const uint8_t *)mask,
		RTE_FLOW_ITEM_TYPE_GRE)) {
		DPAA2_PMD_WARN("Extract field(s) of GRE not support.");

		return -1;
	}

	/* Protocol/type is the only supported GRE match field. */
	if (!mask->protocol)
		return 0;

	index = dpaa2_flow_extract_search(
			&priv->extract.qos_key_extract.dpkg,
			NET_PROT_GRE, NH_FLD_GRE_TYPE);
	if (index < 0) {
		ret = dpaa2_flow_extract_add(
				&priv->extract.qos_key_extract,
				NET_PROT_GRE,
				NH_FLD_GRE_TYPE,
				sizeof(rte_be16_t));
		if (ret) {
			DPAA2_PMD_ERR("QoS Extract add GRE_TYPE failed.");

			return -1;
		}
		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
	}

	index = dpaa2_flow_extract_search(
			&priv->extract.tc_key_extract[group].dpkg,
			NET_PROT_GRE, NH_FLD_GRE_TYPE);
	if (index < 0) {
		ret = dpaa2_flow_extract_add(
				&priv->extract.tc_key_extract[group],
				NET_PROT_GRE,
				NH_FLD_GRE_TYPE,
				sizeof(rte_be16_t));
		if (ret) {
			DPAA2_PMD_ERR("FS Extract add GRE_TYPE failed.");

			return -1;
		}
		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
	}

	/* Keep IP address extracts at the tail after any insert. */
	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
	if (ret) {
		DPAA2_PMD_ERR(
			"Move ipaddr before GRE_TYPE set failed");
		return -1;
	}

	/* Write the GRE type spec/mask into the QoS rule key. */
	ret = dpaa2_flow_rule_data_set(
				&priv->extract.qos_key_extract,
				&flow->qos_rule,
				NET_PROT_GRE,
				NH_FLD_GRE_TYPE,
				&spec->protocol,
				&mask->protocol,
				sizeof(rte_be16_t));
	if (ret) {
		DPAA2_PMD_ERR(
			"QoS NH_FLD_GRE_TYPE rule data set failed");
		return -1;
	}

	/* And into the FS rule key of this traffic class. */
	ret = dpaa2_flow_rule_data_set(
			&priv->extract.tc_key_extract[group],
			&flow->fs_rule,
			NET_PROT_GRE,
			NH_FLD_GRE_TYPE,
			&spec->protocol,
			&mask->protocol,
			sizeof(rte_be16_t));
	if (ret) {
		DPAA2_PMD_ERR(
			"FS NH_FLD_GRE_TYPE rule data set failed");
		return -1;
	}

	(*device_configured) |= local_cfg;

	return 0;
}
2623 
/* When new extract(s) are inserted ahead of the IP address
 * extract(s), the IP address bytes move to higher offsets in
 * the key, so every existing QoS/FS entry that matches on IP
 * address(es) must be re-installed with the updated layout.
 */
/* Re-install all existing flows of the device after the key layout of
 * the QoS table (and of FS table tc_id) has grown.
 *
 * For each installed flow that matches on IP address(es): remove the HW
 * entry, relocate the saved IP key/mask bytes from their old offsets to
 * the new (larger) offsets, grow the recorded real key size by the shift
 * amount, then add the entry back. QoS entries are always refreshed; FS
 * entries only for QUEUE-action flows, and their offsets only when the
 * flow belongs to the given tc_id.
 *
 * Returns 0 on success, -1 on any MC command failure.
 */
static int
dpaa2_flow_entry_update(
	struct dpaa2_dev_priv *priv, uint8_t tc_id)
{
	struct rte_flow *curr = LIST_FIRST(&priv->flows);
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	int qos_ipsrc_offset = -1, qos_ipdst_offset = -1;
	int fs_ipsrc_offset = -1, fs_ipdst_offset = -1;
	struct dpaa2_key_extract *qos_key_extract =
		&priv->extract.qos_key_extract;
	struct dpaa2_key_extract *tc_key_extract =
		&priv->extract.tc_key_extract[tc_id];
	/* Scratch buffers for relocating IP key/mask bytes;
	 * sized for the larger IPv6 case.
	 */
	char ipsrc_key[NH_FLD_IPV6_ADDR_SIZE];
	char ipdst_key[NH_FLD_IPV6_ADDR_SIZE];
	char ipsrc_mask[NH_FLD_IPV6_ADDR_SIZE];
	char ipdst_mask[NH_FLD_IPV6_ADDR_SIZE];
	/* extend: how many bytes the IP extracts shifted; -1 = unknown yet. */
	int extend = -1, extend1, size = -1;
	uint16_t qos_index;

	while (curr) {
		/* Flows without IP address match are unaffected by the shift. */
		if (curr->ipaddr_rule.ipaddr_type ==
			FLOW_NONE_IPADDR) {
			curr = LIST_NEXT(curr, next);
			continue;
		}

		/* Pick up the new (post-insert) IP offsets and address size. */
		if (curr->ipaddr_rule.ipaddr_type ==
			FLOW_IPV4_ADDR) {
			qos_ipsrc_offset =
				qos_key_extract->key_info.ipv4_src_offset;
			qos_ipdst_offset =
				qos_key_extract->key_info.ipv4_dst_offset;
			fs_ipsrc_offset =
				tc_key_extract->key_info.ipv4_src_offset;
			fs_ipdst_offset =
				tc_key_extract->key_info.ipv4_dst_offset;
			size = NH_FLD_IPV4_ADDR_SIZE;
		} else {
			qos_ipsrc_offset =
				qos_key_extract->key_info.ipv6_src_offset;
			qos_ipdst_offset =
				qos_key_extract->key_info.ipv6_dst_offset;
			fs_ipsrc_offset =
				tc_key_extract->key_info.ipv6_src_offset;
			fs_ipdst_offset =
				tc_key_extract->key_info.ipv6_dst_offset;
			size = NH_FLD_IPV6_ADDR_SIZE;
		}

		qos_index = curr->tc_id * priv->fs_entries +
			curr->tc_index;

		/* The HW entry must be removed before its key is rewritten. */
		ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW,
				priv->token, &curr->qos_rule);
		if (ret) {
			DPAA2_PMD_ERR("Qos entry remove failed.");
			return -1;
		}

		extend = -1;

		/* Move QoS IP-src bytes: save them, clear the old slot,
		 * and record the new offset.
		 */
		if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
			RTE_ASSERT(qos_ipsrc_offset >=
				curr->ipaddr_rule.qos_ipsrc_offset);
			extend1 = qos_ipsrc_offset -
				curr->ipaddr_rule.qos_ipsrc_offset;
			/* src and dst must have shifted by the same amount. */
			if (extend >= 0)
				RTE_ASSERT(extend == extend1);
			else
				extend = extend1;

			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
				(size == NH_FLD_IPV6_ADDR_SIZE));

			memcpy(ipsrc_key,
				(char *)(size_t)curr->qos_rule.key_iova +
				curr->ipaddr_rule.qos_ipsrc_offset,
				size);
			memset((char *)(size_t)curr->qos_rule.key_iova +
				curr->ipaddr_rule.qos_ipsrc_offset,
				0, size);

			memcpy(ipsrc_mask,
				(char *)(size_t)curr->qos_rule.mask_iova +
				curr->ipaddr_rule.qos_ipsrc_offset,
				size);
			memset((char *)(size_t)curr->qos_rule.mask_iova +
				curr->ipaddr_rule.qos_ipsrc_offset,
				0, size);

			curr->ipaddr_rule.qos_ipsrc_offset = qos_ipsrc_offset;
		}

		/* Same relocation for the QoS IP-dst bytes. */
		if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
			RTE_ASSERT(qos_ipdst_offset >=
				curr->ipaddr_rule.qos_ipdst_offset);
			extend1 = qos_ipdst_offset -
				curr->ipaddr_rule.qos_ipdst_offset;
			if (extend >= 0)
				RTE_ASSERT(extend == extend1);
			else
				extend = extend1;

			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
				(size == NH_FLD_IPV6_ADDR_SIZE));

			memcpy(ipdst_key,
				(char *)(size_t)curr->qos_rule.key_iova +
				curr->ipaddr_rule.qos_ipdst_offset,
				size);
			memset((char *)(size_t)curr->qos_rule.key_iova +
				curr->ipaddr_rule.qos_ipdst_offset,
				0, size);

			memcpy(ipdst_mask,
				(char *)(size_t)curr->qos_rule.mask_iova +
				curr->ipaddr_rule.qos_ipdst_offset,
				size);
			memset((char *)(size_t)curr->qos_rule.mask_iova +
				curr->ipaddr_rule.qos_ipdst_offset,
				0, size);

			curr->ipaddr_rule.qos_ipdst_offset = qos_ipdst_offset;
		}

		/* Write the saved bytes back at the (now updated) offsets. */
		if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
				(size == NH_FLD_IPV6_ADDR_SIZE));
			memcpy((char *)(size_t)curr->qos_rule.key_iova +
				curr->ipaddr_rule.qos_ipsrc_offset,
				ipsrc_key,
				size);
			memcpy((char *)(size_t)curr->qos_rule.mask_iova +
				curr->ipaddr_rule.qos_ipsrc_offset,
				ipsrc_mask,
				size);
		}
		if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
			RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
				(size == NH_FLD_IPV6_ADDR_SIZE));
			memcpy((char *)(size_t)curr->qos_rule.key_iova +
				curr->ipaddr_rule.qos_ipdst_offset,
				ipdst_key,
				size);
			memcpy((char *)(size_t)curr->qos_rule.mask_iova +
				curr->ipaddr_rule.qos_ipdst_offset,
				ipdst_mask,
				size);
		}

		/* Grow the recorded key size by the shift amount. */
		if (extend >= 0)
			curr->qos_real_key_size += extend;

		curr->qos_rule.key_size = FIXED_ENTRY_SIZE;

		ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
				priv->token, &curr->qos_rule,
				curr->tc_id, qos_index,
				0, 0);
		if (ret) {
			DPAA2_PMD_ERR("Qos entry update failed.");
			return -1;
		}

		/* Only QUEUE-action flows have an FS entry to refresh. */
		if (curr->action != RTE_FLOW_ACTION_TYPE_QUEUE) {
			curr = LIST_NEXT(curr, next);
			continue;
		}

		extend = -1;

		ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW,
				priv->token, curr->tc_id, &curr->fs_rule);
		if (ret) {
			DPAA2_PMD_ERR("FS entry remove failed.");
			return -1;
		}

		/* FS offsets only changed for the TC whose key grew. */
		if (curr->ipaddr_rule.fs_ipsrc_offset >= 0 &&
			tc_id == curr->tc_id) {
			RTE_ASSERT(fs_ipsrc_offset >=
				curr->ipaddr_rule.fs_ipsrc_offset);
			extend1 = fs_ipsrc_offset -
				curr->ipaddr_rule.fs_ipsrc_offset;
			if (extend >= 0)
				RTE_ASSERT(extend == extend1);
			else
				extend = extend1;

			memcpy(ipsrc_key,
				(char *)(size_t)curr->fs_rule.key_iova +
				curr->ipaddr_rule.fs_ipsrc_offset,
				size);
			memset((char *)(size_t)curr->fs_rule.key_iova +
				curr->ipaddr_rule.fs_ipsrc_offset,
				0, size);

			memcpy(ipsrc_mask,
				(char *)(size_t)curr->fs_rule.mask_iova +
				curr->ipaddr_rule.fs_ipsrc_offset,
				size);
			memset((char *)(size_t)curr->fs_rule.mask_iova +
				curr->ipaddr_rule.fs_ipsrc_offset,
				0, size);

			curr->ipaddr_rule.fs_ipsrc_offset = fs_ipsrc_offset;
		}

		if (curr->ipaddr_rule.fs_ipdst_offset >= 0 &&
			tc_id == curr->tc_id) {
			RTE_ASSERT(fs_ipdst_offset >=
				curr->ipaddr_rule.fs_ipdst_offset);
			extend1 = fs_ipdst_offset -
				curr->ipaddr_rule.fs_ipdst_offset;
			if (extend >= 0)
				RTE_ASSERT(extend == extend1);
			else
				extend = extend1;

			memcpy(ipdst_key,
				(char *)(size_t)curr->fs_rule.key_iova +
				curr->ipaddr_rule.fs_ipdst_offset,
				size);
			memset((char *)(size_t)curr->fs_rule.key_iova +
				curr->ipaddr_rule.fs_ipdst_offset,
				0, size);

			memcpy(ipdst_mask,
				(char *)(size_t)curr->fs_rule.mask_iova +
				curr->ipaddr_rule.fs_ipdst_offset,
				size);
			memset((char *)(size_t)curr->fs_rule.mask_iova +
				curr->ipaddr_rule.fs_ipdst_offset,
				0, size);

			curr->ipaddr_rule.fs_ipdst_offset = fs_ipdst_offset;
		}

		/* Write the FS IP bytes back at the updated offsets. */
		if (curr->ipaddr_rule.fs_ipsrc_offset >= 0) {
			memcpy((char *)(size_t)curr->fs_rule.key_iova +
				curr->ipaddr_rule.fs_ipsrc_offset,
				ipsrc_key,
				size);
			memcpy((char *)(size_t)curr->fs_rule.mask_iova +
				curr->ipaddr_rule.fs_ipsrc_offset,
				ipsrc_mask,
				size);
		}
		if (curr->ipaddr_rule.fs_ipdst_offset >= 0) {
			memcpy((char *)(size_t)curr->fs_rule.key_iova +
				curr->ipaddr_rule.fs_ipdst_offset,
				ipdst_key,
				size);
			memcpy((char *)(size_t)curr->fs_rule.mask_iova +
				curr->ipaddr_rule.fs_ipdst_offset,
				ipdst_mask,
				size);
		}

		if (extend >= 0)
			curr->fs_real_key_size += extend;
		curr->fs_rule.key_size = FIXED_ENTRY_SIZE;

		ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW,
				priv->token, curr->tc_id, curr->tc_index,
				&curr->fs_rule, &curr->action_cfg);
		if (ret) {
			DPAA2_PMD_ERR("FS entry update failed.");
			return -1;
		}

		curr = LIST_NEXT(curr, next);
	}

	return 0;
}
2906 
2907 static inline int
2908 dpaa2_flow_verify_attr(
2909 	struct dpaa2_dev_priv *priv,
2910 	const struct rte_flow_attr *attr)
2911 {
2912 	struct rte_flow *curr = LIST_FIRST(&priv->flows);
2913 
2914 	while (curr) {
2915 		if (curr->tc_id == attr->group &&
2916 			curr->tc_index == attr->priority) {
2917 			DPAA2_PMD_ERR(
2918 				"Flow with group %d and priority %d already exists.",
2919 				attr->group, attr->priority);
2920 
2921 			return -1;
2922 		}
2923 		curr = LIST_NEXT(curr, next);
2924 	}
2925 
2926 	return 0;
2927 }
2928 
2929 static int
2930 dpaa2_generic_flow_set(struct rte_flow *flow,
2931 		       struct rte_eth_dev *dev,
2932 		       const struct rte_flow_attr *attr,
2933 		       const struct rte_flow_item pattern[],
2934 		       const struct rte_flow_action actions[],
2935 		       struct rte_flow_error *error)
2936 {
2937 	const struct rte_flow_action_queue *dest_queue;
2938 	const struct rte_flow_action_rss *rss_conf;
2939 	int is_keycfg_configured = 0, end_of_list = 0;
2940 	int ret = 0, i = 0, j = 0;
2941 	struct dpni_rx_tc_dist_cfg tc_cfg;
2942 	struct dpni_qos_tbl_cfg qos_cfg;
2943 	struct dpni_fs_action_cfg action;
2944 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2945 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
2946 	size_t param;
2947 	struct rte_flow *curr = LIST_FIRST(&priv->flows);
2948 	uint16_t qos_index;
2949 
2950 	ret = dpaa2_flow_verify_attr(priv, attr);
2951 	if (ret)
2952 		return ret;
2953 
2954 	/* Parse pattern list to get the matching parameters */
2955 	while (!end_of_list) {
2956 		switch (pattern[i].type) {
2957 		case RTE_FLOW_ITEM_TYPE_ETH:
2958 			ret = dpaa2_configure_flow_eth(flow,
2959 					dev, attr, &pattern[i], actions, error,
2960 					&is_keycfg_configured);
2961 			if (ret) {
2962 				DPAA2_PMD_ERR("ETH flow configuration failed!");
2963 				return ret;
2964 			}
2965 			break;
2966 		case RTE_FLOW_ITEM_TYPE_VLAN:
2967 			ret = dpaa2_configure_flow_vlan(flow,
2968 					dev, attr, &pattern[i], actions, error,
2969 					&is_keycfg_configured);
2970 			if (ret) {
2971 				DPAA2_PMD_ERR("vLan flow configuration failed!");
2972 				return ret;
2973 			}
2974 			break;
2975 		case RTE_FLOW_ITEM_TYPE_IPV4:
2976 		case RTE_FLOW_ITEM_TYPE_IPV6:
2977 			ret = dpaa2_configure_flow_generic_ip(flow,
2978 					dev, attr, &pattern[i], actions, error,
2979 					&is_keycfg_configured);
2980 			if (ret) {
2981 				DPAA2_PMD_ERR("IP flow configuration failed!");
2982 				return ret;
2983 			}
2984 			break;
2985 		case RTE_FLOW_ITEM_TYPE_ICMP:
2986 			ret = dpaa2_configure_flow_icmp(flow,
2987 					dev, attr, &pattern[i], actions, error,
2988 					&is_keycfg_configured);
2989 			if (ret) {
2990 				DPAA2_PMD_ERR("ICMP flow configuration failed!");
2991 				return ret;
2992 			}
2993 			break;
2994 		case RTE_FLOW_ITEM_TYPE_UDP:
2995 			ret = dpaa2_configure_flow_udp(flow,
2996 					dev, attr, &pattern[i], actions, error,
2997 					&is_keycfg_configured);
2998 			if (ret) {
2999 				DPAA2_PMD_ERR("UDP flow configuration failed!");
3000 				return ret;
3001 			}
3002 			break;
3003 		case RTE_FLOW_ITEM_TYPE_TCP:
3004 			ret = dpaa2_configure_flow_tcp(flow,
3005 					dev, attr, &pattern[i], actions, error,
3006 					&is_keycfg_configured);
3007 			if (ret) {
3008 				DPAA2_PMD_ERR("TCP flow configuration failed!");
3009 				return ret;
3010 			}
3011 			break;
3012 		case RTE_FLOW_ITEM_TYPE_SCTP:
3013 			ret = dpaa2_configure_flow_sctp(flow,
3014 					dev, attr, &pattern[i], actions, error,
3015 					&is_keycfg_configured);
3016 			if (ret) {
3017 				DPAA2_PMD_ERR("SCTP flow configuration failed!");
3018 				return ret;
3019 			}
3020 			break;
3021 		case RTE_FLOW_ITEM_TYPE_GRE:
3022 			ret = dpaa2_configure_flow_gre(flow,
3023 					dev, attr, &pattern[i], actions, error,
3024 					&is_keycfg_configured);
3025 			if (ret) {
3026 				DPAA2_PMD_ERR("GRE flow configuration failed!");
3027 				return ret;
3028 			}
3029 			break;
3030 		case RTE_FLOW_ITEM_TYPE_END:
3031 			end_of_list = 1;
3032 			break; /*End of List*/
3033 		default:
3034 			DPAA2_PMD_ERR("Invalid action type");
3035 			ret = -ENOTSUP;
3036 			break;
3037 		}
3038 		i++;
3039 	}
3040 
3041 	/* Let's parse action on matching traffic */
3042 	end_of_list = 0;
3043 	while (!end_of_list) {
3044 		switch (actions[j].type) {
3045 		case RTE_FLOW_ACTION_TYPE_QUEUE:
3046 			dest_queue = (const struct rte_flow_action_queue *)(actions[j].conf);
3047 			flow->flow_id = dest_queue->index;
3048 			flow->action = RTE_FLOW_ACTION_TYPE_QUEUE;
3049 			memset(&action, 0, sizeof(struct dpni_fs_action_cfg));
3050 			action.flow_id = flow->flow_id;
3051 			if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
3052 				if (dpkg_prepare_key_cfg(&priv->extract.qos_key_extract.dpkg,
3053 					(uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
3054 					DPAA2_PMD_ERR(
3055 					"Unable to prepare extract parameters");
3056 					return -1;
3057 				}
3058 
3059 				memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
3060 				qos_cfg.discard_on_miss = true;
3061 				qos_cfg.keep_entries = true;
3062 				qos_cfg.key_cfg_iova = (size_t)priv->extract.qos_extract_param;
3063 				ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
3064 						priv->token, &qos_cfg);
3065 				if (ret < 0) {
3066 					DPAA2_PMD_ERR(
3067 					"Distribution cannot be configured.(%d)"
3068 					, ret);
3069 					return -1;
3070 				}
3071 			}
3072 			if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
3073 				if (dpkg_prepare_key_cfg(
3074 				&priv->extract.tc_key_extract[flow->tc_id].dpkg,
3075 				(uint8_t *)(size_t)priv->extract
3076 				.tc_extract_param[flow->tc_id]) < 0) {
3077 					DPAA2_PMD_ERR(
3078 					"Unable to prepare extract parameters");
3079 					return -1;
3080 				}
3081 
3082 				memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
3083 				tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc;
3084 				tc_cfg.dist_mode = DPNI_DIST_MODE_FS;
3085 				tc_cfg.key_cfg_iova =
3086 					(uint64_t)priv->extract.tc_extract_param[flow->tc_id];
3087 				tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;
3088 				tc_cfg.fs_cfg.keep_entries = true;
3089 				ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
3090 							 priv->token,
3091 							 flow->tc_id, &tc_cfg);
3092 				if (ret < 0) {
3093 					DPAA2_PMD_ERR(
3094 					"Distribution cannot be configured.(%d)"
3095 					, ret);
3096 					return -1;
3097 				}
3098 			}
3099 			/* Configure QoS table first */
3100 
3101 			action.flow_id = action.flow_id % priv->num_rx_tc;
3102 
3103 			qos_index = flow->tc_id * priv->fs_entries +
3104 				flow->tc_index;
3105 
3106 			if (qos_index >= priv->qos_entries) {
3107 				DPAA2_PMD_ERR("QoS table with %d entries full",
3108 					priv->qos_entries);
3109 				return -1;
3110 			}
3111 			flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
3112 			if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR) {
3113 				if (flow->ipaddr_rule.qos_ipdst_offset >=
3114 					flow->ipaddr_rule.qos_ipsrc_offset) {
3115 					flow->qos_real_key_size =
3116 						flow->ipaddr_rule.qos_ipdst_offset +
3117 						NH_FLD_IPV4_ADDR_SIZE;
3118 				} else {
3119 					flow->qos_real_key_size =
3120 						flow->ipaddr_rule.qos_ipsrc_offset +
3121 						NH_FLD_IPV4_ADDR_SIZE;
3122 				}
3123 			} else if (flow->ipaddr_rule.ipaddr_type ==
3124 				FLOW_IPV6_ADDR) {
3125 				if (flow->ipaddr_rule.qos_ipdst_offset >=
3126 					flow->ipaddr_rule.qos_ipsrc_offset) {
3127 					flow->qos_real_key_size =
3128 						flow->ipaddr_rule.qos_ipdst_offset +
3129 						NH_FLD_IPV6_ADDR_SIZE;
3130 				} else {
3131 					flow->qos_real_key_size =
3132 						flow->ipaddr_rule.qos_ipsrc_offset +
3133 						NH_FLD_IPV6_ADDR_SIZE;
3134 				}
3135 			}
3136 
3137 			flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
3138 
3139 			ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
3140 						priv->token, &flow->qos_rule,
3141 						flow->tc_id, qos_index,
3142 						0, 0);
3143 			if (ret < 0) {
3144 				DPAA2_PMD_ERR(
3145 				"Error in addnig entry to QoS table(%d)", ret);
3146 				return ret;
3147 			}
3148 
3149 			/* Then Configure FS table */
3150 			if (flow->tc_index >= priv->fs_entries) {
3151 				DPAA2_PMD_ERR("FS table with %d entries full",
3152 					priv->fs_entries);
3153 				return -1;
3154 			}
3155 
3156 			flow->fs_real_key_size =
3157 				priv->extract.tc_key_extract[flow->tc_id]
3158 				.key_info.key_total_size;
3159 
3160 			if (flow->ipaddr_rule.ipaddr_type ==
3161 				FLOW_IPV4_ADDR) {
3162 				if (flow->ipaddr_rule.fs_ipdst_offset >=
3163 					flow->ipaddr_rule.fs_ipsrc_offset) {
3164 					flow->fs_real_key_size =
3165 						flow->ipaddr_rule.fs_ipdst_offset +
3166 						NH_FLD_IPV4_ADDR_SIZE;
3167 				} else {
3168 					flow->fs_real_key_size =
3169 						flow->ipaddr_rule.fs_ipsrc_offset +
3170 						NH_FLD_IPV4_ADDR_SIZE;
3171 				}
3172 			} else if (flow->ipaddr_rule.ipaddr_type ==
3173 				FLOW_IPV6_ADDR) {
3174 				if (flow->ipaddr_rule.fs_ipdst_offset >=
3175 					flow->ipaddr_rule.fs_ipsrc_offset) {
3176 					flow->fs_real_key_size =
3177 						flow->ipaddr_rule.fs_ipdst_offset +
3178 						NH_FLD_IPV6_ADDR_SIZE;
3179 				} else {
3180 					flow->fs_real_key_size =
3181 						flow->ipaddr_rule.fs_ipsrc_offset +
3182 						NH_FLD_IPV6_ADDR_SIZE;
3183 				}
3184 			}
3185 
3186 			flow->fs_rule.key_size = FIXED_ENTRY_SIZE;
3187 
3188 			ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token,
3189 						flow->tc_id, flow->tc_index,
3190 						&flow->fs_rule, &action);
3191 			if (ret < 0) {
3192 				DPAA2_PMD_ERR(
3193 				"Error in adding entry to FS table(%d)", ret);
3194 				return ret;
3195 			}
3196 			memcpy(&flow->action_cfg, &action,
3197 				sizeof(struct dpni_fs_action_cfg));
3198 			break;
3199 		case RTE_FLOW_ACTION_TYPE_RSS:
3200 			rss_conf = (const struct rte_flow_action_rss *)(actions[j].conf);
3201 			for (i = 0; i < (int)rss_conf->queue_num; i++) {
3202 				if (rss_conf->queue[i] <
3203 					(attr->group * priv->dist_queues) ||
3204 					rss_conf->queue[i] >=
3205 					((attr->group + 1) * priv->dist_queues)) {
3206 					DPAA2_PMD_ERR(
3207 					"Queue/Group combination are not supported\n");
3208 					return -ENOTSUP;
3209 				}
3210 			}
3211 
3212 			flow->action = RTE_FLOW_ACTION_TYPE_RSS;
3213 			ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types,
3214 					&priv->extract.tc_key_extract[flow->tc_id].dpkg);
3215 			if (ret < 0) {
3216 				DPAA2_PMD_ERR(
3217 				"unable to set flow distribution.please check queue config\n");
3218 				return ret;
3219 			}
3220 
3221 			/* Allocate DMA'ble memory to write the rules */
3222 			param = (size_t)rte_malloc(NULL, 256, 64);
3223 			if (!param) {
3224 				DPAA2_PMD_ERR("Memory allocation failure\n");
3225 				return -1;
3226 			}
3227 
3228 			if (dpkg_prepare_key_cfg(
3229 				&priv->extract.tc_key_extract[flow->tc_id].dpkg,
3230 				(uint8_t *)param) < 0) {
3231 				DPAA2_PMD_ERR(
3232 				"Unable to prepare extract parameters");
3233 				rte_free((void *)param);
3234 				return -1;
3235 			}
3236 
3237 			memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
3238 			tc_cfg.dist_size = rss_conf->queue_num;
3239 			tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
3240 			tc_cfg.key_cfg_iova = (size_t)param;
3241 			tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;
3242 
3243 			ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
3244 						 priv->token, flow->tc_id,
3245 						 &tc_cfg);
3246 			if (ret < 0) {
3247 				DPAA2_PMD_ERR(
3248 				"Distribution cannot be configured: %d\n", ret);
3249 				rte_free((void *)param);
3250 				return -1;
3251 			}
3252 
3253 			rte_free((void *)param);
3254 			if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
3255 				if (dpkg_prepare_key_cfg(
3256 					&priv->extract.qos_key_extract.dpkg,
3257 					(uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
3258 					DPAA2_PMD_ERR(
3259 					"Unable to prepare extract parameters");
3260 					return -1;
3261 				}
3262 				memset(&qos_cfg, 0,
3263 					sizeof(struct dpni_qos_tbl_cfg));
3264 				qos_cfg.discard_on_miss = true;
3265 				qos_cfg.keep_entries = true;
3266 				qos_cfg.key_cfg_iova =
3267 					(size_t)priv->extract.qos_extract_param;
3268 				ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
3269 							 priv->token, &qos_cfg);
3270 				if (ret < 0) {
3271 					DPAA2_PMD_ERR(
3272 					"Distribution can't be configured %d\n",
3273 					ret);
3274 					return -1;
3275 				}
3276 			}
3277 
3278 			/* Add Rule into QoS table */
3279 			qos_index = flow->tc_id * priv->fs_entries +
3280 				flow->tc_index;
3281 			if (qos_index >= priv->qos_entries) {
3282 				DPAA2_PMD_ERR("QoS table with %d entries full",
3283 					priv->qos_entries);
3284 				return -1;
3285 			}
3286 
3287 			flow->qos_real_key_size =
3288 			  priv->extract.qos_key_extract.key_info.key_total_size;
3289 			flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
3290 			ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token,
3291 						&flow->qos_rule, flow->tc_id,
3292 						qos_index, 0, 0);
3293 			if (ret < 0) {
3294 				DPAA2_PMD_ERR(
3295 				"Error in entry addition in QoS table(%d)",
3296 				ret);
3297 				return ret;
3298 			}
3299 			break;
3300 		case RTE_FLOW_ACTION_TYPE_END:
3301 			end_of_list = 1;
3302 			break;
3303 		default:
3304 			DPAA2_PMD_ERR("Invalid action type");
3305 			ret = -ENOTSUP;
3306 			break;
3307 		}
3308 		j++;
3309 	}
3310 
3311 	if (!ret) {
3312 		if (is_keycfg_configured &
3313 			(DPAA2_QOS_TABLE_RECONFIGURE |
3314 			DPAA2_FS_TABLE_RECONFIGURE)) {
3315 			ret = dpaa2_flow_entry_update(priv, flow->tc_id);
3316 			if (ret) {
3317 				DPAA2_PMD_ERR("Flow entry update failed.");
3318 
3319 				return -1;
3320 			}
3321 		}
3322 		/* New rules are inserted. */
3323 		if (!curr) {
3324 			LIST_INSERT_HEAD(&priv->flows, flow, next);
3325 		} else {
3326 			while (LIST_NEXT(curr, next))
3327 				curr = LIST_NEXT(curr, next);
3328 			LIST_INSERT_AFTER(curr, flow, next);
3329 		}
3330 	}
3331 	return ret;
3332 }
3333 
3334 static inline int
3335 dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr,
3336 		      const struct rte_flow_attr *attr)
3337 {
3338 	int ret = 0;
3339 
3340 	if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) {
3341 		DPAA2_PMD_ERR("Priority group is out of range\n");
3342 		ret = -ENOTSUP;
3343 	}
3344 	if (unlikely(attr->priority >= dpni_attr->fs_entries)) {
3345 		DPAA2_PMD_ERR("Priority within the group is out of range\n");
3346 		ret = -ENOTSUP;
3347 	}
3348 	if (unlikely(attr->egress)) {
3349 		DPAA2_PMD_ERR(
3350 			"Flow configuration is not supported on egress side\n");
3351 		ret = -ENOTSUP;
3352 	}
3353 	if (unlikely(!attr->ingress)) {
3354 		DPAA2_PMD_ERR("Ingress flag must be configured\n");
3355 		ret = -EINVAL;
3356 	}
3357 	return ret;
3358 }
3359 
3360 static inline int
3361 dpaa2_dev_verify_patterns(const struct rte_flow_item pattern[])
3362 {
3363 	unsigned int i, j, is_found = 0;
3364 	int ret = 0;
3365 
3366 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3367 		for (i = 0; i < RTE_DIM(dpaa2_supported_pattern_type); i++) {
3368 			if (dpaa2_supported_pattern_type[i]
3369 					== pattern[j].type) {
3370 				is_found = 1;
3371 				break;
3372 			}
3373 		}
3374 		if (!is_found) {
3375 			ret = -ENOTSUP;
3376 			break;
3377 		}
3378 	}
3379 	/* Lets verify other combinations of given pattern rules */
3380 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3381 		if (!pattern[j].spec) {
3382 			ret = -EINVAL;
3383 			break;
3384 		}
3385 	}
3386 
3387 	return ret;
3388 }
3389 
3390 static inline int
3391 dpaa2_dev_verify_actions(const struct rte_flow_action actions[])
3392 {
3393 	unsigned int i, j, is_found = 0;
3394 	int ret = 0;
3395 
3396 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3397 		for (i = 0; i < RTE_DIM(dpaa2_supported_action_type); i++) {
3398 			if (dpaa2_supported_action_type[i] == actions[j].type) {
3399 				is_found = 1;
3400 				break;
3401 			}
3402 		}
3403 		if (!is_found) {
3404 			ret = -ENOTSUP;
3405 			break;
3406 		}
3407 	}
3408 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3409 		if (actions[j].type != RTE_FLOW_ACTION_TYPE_DROP &&
3410 				!actions[j].conf)
3411 			ret = -EINVAL;
3412 	}
3413 	return ret;
3414 }
3415 
3416 static
3417 int dpaa2_flow_validate(struct rte_eth_dev *dev,
3418 			const struct rte_flow_attr *flow_attr,
3419 			const struct rte_flow_item pattern[],
3420 			const struct rte_flow_action actions[],
3421 			struct rte_flow_error *error)
3422 {
3423 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3424 	struct dpni_attr dpni_attr;
3425 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3426 	uint16_t token = priv->token;
3427 	int ret = 0;
3428 
3429 	memset(&dpni_attr, 0, sizeof(struct dpni_attr));
3430 	ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr);
3431 	if (ret < 0) {
3432 		DPAA2_PMD_ERR(
3433 			"Failure to get dpni@%p attribute, err code  %d\n",
3434 			dpni, ret);
3435 		rte_flow_error_set(error, EPERM,
3436 			   RTE_FLOW_ERROR_TYPE_ATTR,
3437 			   flow_attr, "invalid");
3438 		return ret;
3439 	}
3440 
3441 	/* Verify input attributes */
3442 	ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr);
3443 	if (ret < 0) {
3444 		DPAA2_PMD_ERR(
3445 			"Invalid attributes are given\n");
3446 		rte_flow_error_set(error, EPERM,
3447 			   RTE_FLOW_ERROR_TYPE_ATTR,
3448 			   flow_attr, "invalid");
3449 		goto not_valid_params;
3450 	}
3451 	/* Verify input pattern list */
3452 	ret = dpaa2_dev_verify_patterns(pattern);
3453 	if (ret < 0) {
3454 		DPAA2_PMD_ERR(
3455 			"Invalid pattern list is given\n");
3456 		rte_flow_error_set(error, EPERM,
3457 			   RTE_FLOW_ERROR_TYPE_ITEM,
3458 			   pattern, "invalid");
3459 		goto not_valid_params;
3460 	}
3461 	/* Verify input action list */
3462 	ret = dpaa2_dev_verify_actions(actions);
3463 	if (ret < 0) {
3464 		DPAA2_PMD_ERR(
3465 			"Invalid action list is given\n");
3466 		rte_flow_error_set(error, EPERM,
3467 			   RTE_FLOW_ERROR_TYPE_ACTION,
3468 			   actions, "invalid");
3469 		goto not_valid_params;
3470 	}
3471 not_valid_params:
3472 	return ret;
3473 }
3474 
3475 static
3476 struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev,
3477 				   const struct rte_flow_attr *attr,
3478 				   const struct rte_flow_item pattern[],
3479 				   const struct rte_flow_action actions[],
3480 				   struct rte_flow_error *error)
3481 {
3482 	struct rte_flow *flow = NULL;
3483 	size_t key_iova = 0, mask_iova = 0;
3484 	int ret;
3485 
3486 	flow = rte_zmalloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE);
3487 	if (!flow) {
3488 		DPAA2_PMD_ERR("Failure to allocate memory for flow");
3489 		goto mem_failure;
3490 	}
3491 	/* Allocate DMA'ble memory to write the rules */
3492 	key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3493 	if (!key_iova) {
3494 		DPAA2_PMD_ERR(
3495 			"Memory allocation failure for rule configuration\n");
3496 		goto mem_failure;
3497 	}
3498 	mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3499 	if (!mask_iova) {
3500 		DPAA2_PMD_ERR(
3501 			"Memory allocation failure for rule configuration\n");
3502 		goto mem_failure;
3503 	}
3504 
3505 	flow->qos_rule.key_iova = key_iova;
3506 	flow->qos_rule.mask_iova = mask_iova;
3507 
3508 	/* Allocate DMA'ble memory to write the rules */
3509 	key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3510 	if (!key_iova) {
3511 		DPAA2_PMD_ERR(
3512 			"Memory allocation failure for rule configuration\n");
3513 		goto mem_failure;
3514 	}
3515 	mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3516 	if (!mask_iova) {
3517 		DPAA2_PMD_ERR(
3518 			"Memory allocation failure for rule configuration\n");
3519 		goto mem_failure;
3520 	}
3521 
3522 	flow->fs_rule.key_iova = key_iova;
3523 	flow->fs_rule.mask_iova = mask_iova;
3524 
3525 	flow->ipaddr_rule.ipaddr_type = FLOW_NONE_IPADDR;
3526 	flow->ipaddr_rule.qos_ipsrc_offset =
3527 		IP_ADDRESS_OFFSET_INVALID;
3528 	flow->ipaddr_rule.qos_ipdst_offset =
3529 		IP_ADDRESS_OFFSET_INVALID;
3530 	flow->ipaddr_rule.fs_ipsrc_offset =
3531 		IP_ADDRESS_OFFSET_INVALID;
3532 	flow->ipaddr_rule.fs_ipdst_offset =
3533 		IP_ADDRESS_OFFSET_INVALID;
3534 
3535 	switch (dpaa2_filter_type) {
3536 	case RTE_ETH_FILTER_GENERIC:
3537 		ret = dpaa2_generic_flow_set(flow, dev, attr, pattern,
3538 					     actions, error);
3539 		if (ret < 0) {
3540 			if (error->type > RTE_FLOW_ERROR_TYPE_ACTION)
3541 				rte_flow_error_set(error, EPERM,
3542 						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3543 						attr, "unknown");
3544 			DPAA2_PMD_ERR(
3545 			"Failure to create flow, return code (%d)", ret);
3546 			goto creation_error;
3547 		}
3548 		break;
3549 	default:
3550 		DPAA2_PMD_ERR("Filter type (%d) not supported",
3551 		dpaa2_filter_type);
3552 		break;
3553 	}
3554 
3555 	return flow;
3556 mem_failure:
3557 	rte_flow_error_set(error, EPERM,
3558 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3559 			   NULL, "memory alloc");
3560 creation_error:
3561 	rte_free((void *)flow);
3562 	rte_free((void *)key_iova);
3563 	rte_free((void *)mask_iova);
3564 
3565 	return NULL;
3566 }
3567 
3568 static
3569 int dpaa2_flow_destroy(struct rte_eth_dev *dev,
3570 		       struct rte_flow *flow,
3571 		       struct rte_flow_error *error)
3572 {
3573 	int ret = 0;
3574 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3575 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3576 
3577 	switch (flow->action) {
3578 	case RTE_FLOW_ACTION_TYPE_QUEUE:
3579 		/* Remove entry from QoS table first */
3580 		ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
3581 					   &flow->qos_rule);
3582 		if (ret < 0) {
3583 			DPAA2_PMD_ERR(
3584 				"Error in adding entry to QoS table(%d)", ret);
3585 			goto error;
3586 		}
3587 
3588 		/* Then remove entry from FS table */
3589 		ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token,
3590 					   flow->tc_id, &flow->fs_rule);
3591 		if (ret < 0) {
3592 			DPAA2_PMD_ERR(
3593 				"Error in entry addition in FS table(%d)", ret);
3594 			goto error;
3595 		}
3596 		break;
3597 	case RTE_FLOW_ACTION_TYPE_RSS:
3598 		ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
3599 					   &flow->qos_rule);
3600 		if (ret < 0) {
3601 			DPAA2_PMD_ERR(
3602 			"Error in entry addition in QoS table(%d)", ret);
3603 			goto error;
3604 		}
3605 		break;
3606 	default:
3607 		DPAA2_PMD_ERR(
3608 		"Action type (%d) is not supported", flow->action);
3609 		ret = -ENOTSUP;
3610 		break;
3611 	}
3612 
3613 	LIST_REMOVE(flow, next);
3614 	rte_free((void *)(size_t)flow->qos_rule.key_iova);
3615 	rte_free((void *)(size_t)flow->qos_rule.mask_iova);
3616 	rte_free((void *)(size_t)flow->fs_rule.key_iova);
3617 	rte_free((void *)(size_t)flow->fs_rule.mask_iova);
3618 	/* Now free the flow */
3619 	rte_free(flow);
3620 
3621 error:
3622 	if (ret)
3623 		rte_flow_error_set(error, EPERM,
3624 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3625 				   NULL, "unknown");
3626 	return ret;
3627 }
3628 
3629 /**
3630  * Destroy user-configured flow rules.
3631  *
3632  * This function skips internal flows rules.
3633  *
3634  * @see rte_flow_flush()
3635  * @see rte_flow_ops
3636  */
3637 static int
3638 dpaa2_flow_flush(struct rte_eth_dev *dev,
3639 		struct rte_flow_error *error)
3640 {
3641 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3642 	struct rte_flow *flow = LIST_FIRST(&priv->flows);
3643 
3644 	while (flow) {
3645 		struct rte_flow *next = LIST_NEXT(flow, next);
3646 
3647 		dpaa2_flow_destroy(dev, flow, error);
3648 		flow = next;
3649 	}
3650 	return 0;
3651 }
3652 
/* rte_flow query callback: not implemented for DPAA2; always reports
 * success without returning any data.
 */
static int
dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused,
		struct rte_flow *flow __rte_unused,
		const struct rte_flow_action *actions __rte_unused,
		void *data __rte_unused,
		struct rte_flow_error *error __rte_unused)
{
	return 0;
}
3662 
3663 /**
3664  * Clean up all flow rules.
3665  *
3666  * Unlike dpaa2_flow_flush(), this function takes care of all remaining flow
3667  * rules regardless of whether they are internal or user-configured.
3668  *
3669  * @param priv
3670  *   Pointer to private structure.
3671  */
3672 void
3673 dpaa2_flow_clean(struct rte_eth_dev *dev)
3674 {
3675 	struct rte_flow *flow;
3676 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3677 
3678 	while ((flow = LIST_FIRST(&priv->flows)))
3679 		dpaa2_flow_destroy(dev, flow, NULL);
3680 }
3681 
/* rte_flow operations implemented by the dpaa2 PMD. */
const struct rte_flow_ops dpaa2_flow_ops = {
	.create	= dpaa2_flow_create,
	.validate = dpaa2_flow_validate,
	.destroy = dpaa2_flow_destroy,
	.flush	= dpaa2_flow_flush,
	.query	= dpaa2_flow_query,
};
3689