xref: /dpdk/drivers/net/dpaa2/dpaa2_flow.c (revision 627fb636dba8b571b0b8093792f8d4d70f3e2cb2)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2020 NXP
3  */
4 
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 
13 #include <rte_ethdev.h>
14 #include <rte_log.h>
15 #include <rte_malloc.h>
16 #include <rte_flow_driver.h>
17 #include <rte_tailq.h>
18 
19 #include <fsl_dpni.h>
20 #include <fsl_dpkg.h>
21 
22 #include <dpaa2_ethdev.h>
23 #include <dpaa2_pmd_logs.h>
24 
25 /* Workaround to discriminate UDP/TCP/SCTP
26  * by the next-protocol field of L3, since
27  * MC/WRIOP cannot identify the L4 protocol
28  * from the L4 ports alone.
29  */
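/*
 * Illustration (derived from the handlers below): while this flag stays 0,
 * the L4 handlers such as dpaa2_configure_flow_udp() install an extra rule
 * on NH_FLD_IP_PROTO (e.g. IPPROTO_UDP) in addition to any L4 port match,
 * so classification does not depend on MC/WRIOP recognizing L4 ports.
 */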
30 int mc_l4_port_identification;
31 
32 enum flow_rule_ipaddr_type {
33 	FLOW_NONE_IPADDR,
34 	FLOW_IPV4_ADDR,
35 	FLOW_IPV6_ADDR
36 };
37 
38 struct flow_rule_ipaddr {
39 	enum flow_rule_ipaddr_type ipaddr_type;
40 	int qos_ipsrc_offset;
41 	int qos_ipdst_offset;
42 	int fs_ipsrc_offset;
43 	int fs_ipdst_offset;
44 };
45 
46 struct rte_flow {
47 	LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
48 	struct dpni_rule_cfg qos_rule;
49 	struct dpni_rule_cfg fs_rule;
50 	uint16_t qos_index;
51 	uint16_t fs_index;
52 	uint8_t key_size;
53 	uint8_t tc_id; /**< Traffic Class ID. */
54 	uint8_t flow_type;
55 	uint8_t tc_index; /**< Index within this Traffic Class. */
56 	enum rte_flow_action_type action;
57 	uint16_t flow_id;
58 	/* Special for IP address to specify the offset
59 	 * in key/mask.
60 	 */
61 	struct flow_rule_ipaddr ipaddr_rule;
62 	struct dpni_fs_action_cfg action_cfg;
63 };
64 
65 static const
66 enum rte_flow_item_type dpaa2_supported_pattern_type[] = {
67 	RTE_FLOW_ITEM_TYPE_END,
68 	RTE_FLOW_ITEM_TYPE_ETH,
69 	RTE_FLOW_ITEM_TYPE_VLAN,
70 	RTE_FLOW_ITEM_TYPE_IPV4,
71 	RTE_FLOW_ITEM_TYPE_IPV6,
72 	RTE_FLOW_ITEM_TYPE_ICMP,
73 	RTE_FLOW_ITEM_TYPE_UDP,
74 	RTE_FLOW_ITEM_TYPE_TCP,
75 	RTE_FLOW_ITEM_TYPE_SCTP,
76 	RTE_FLOW_ITEM_TYPE_GRE,
77 };
78 
79 static const
80 enum rte_flow_action_type dpaa2_supported_action_type[] = {
81 	RTE_FLOW_ACTION_TYPE_END,
82 	RTE_FLOW_ACTION_TYPE_QUEUE,
83 	RTE_FLOW_ACTION_TYPE_RSS
84 };
85 
86 /* Internal pseudo item type (RTE_FLOW_ITEM_TYPE_META + 1) covering both IPv4 and IPv6 */
87 #define DPAA2_FLOW_ITEM_TYPE_GENERIC_IP (RTE_FLOW_ITEM_TYPE_META + 1)
88 
89 enum rte_filter_type dpaa2_filter_type = RTE_ETH_FILTER_NONE;
90 
91 #ifndef __cplusplus
92 static const struct rte_flow_item_eth dpaa2_flow_item_eth_mask = {
93 	.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
94 	.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
95 	.type = RTE_BE16(0xffff),
96 };
97 
98 static const struct rte_flow_item_vlan dpaa2_flow_item_vlan_mask = {
99 	.tci = RTE_BE16(0xffff),
100 };
101 
102 static const struct rte_flow_item_ipv4 dpaa2_flow_item_ipv4_mask = {
103 	.hdr.src_addr = RTE_BE32(0xffffffff),
104 	.hdr.dst_addr = RTE_BE32(0xffffffff),
105 	.hdr.next_proto_id = 0xff,
106 };
107 
108 static const struct rte_flow_item_ipv6 dpaa2_flow_item_ipv6_mask = {
109 	.hdr = {
110 		.src_addr =
111 			"\xff\xff\xff\xff\xff\xff\xff\xff"
112 			"\xff\xff\xff\xff\xff\xff\xff\xff",
113 		.dst_addr =
114 			"\xff\xff\xff\xff\xff\xff\xff\xff"
115 			"\xff\xff\xff\xff\xff\xff\xff\xff",
116 		.proto = 0xff
117 	},
118 };
119 
120 static const struct rte_flow_item_icmp dpaa2_flow_item_icmp_mask = {
121 	.hdr.icmp_type = 0xff,
122 	.hdr.icmp_code = 0xff,
123 };
124 
125 static const struct rte_flow_item_udp dpaa2_flow_item_udp_mask = {
126 	.hdr = {
127 		.src_port = RTE_BE16(0xffff),
128 		.dst_port = RTE_BE16(0xffff),
129 	},
130 };
131 
132 static const struct rte_flow_item_tcp dpaa2_flow_item_tcp_mask = {
133 	.hdr = {
134 		.src_port = RTE_BE16(0xffff),
135 		.dst_port = RTE_BE16(0xffff),
136 	},
137 };
138 
139 static const struct rte_flow_item_sctp dpaa2_flow_item_sctp_mask = {
140 	.hdr = {
141 		.src_port = RTE_BE16(0xffff),
142 		.dst_port = RTE_BE16(0xffff),
143 	},
144 };
145 
146 static const struct rte_flow_item_gre dpaa2_flow_item_gre_mask = {
147 	.protocol = RTE_BE16(0xffff),
148 };
149 
150 #endif
151 
152 
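/*
 * Record the size of one key field and its byte offset within the packed
 * key/mask buffer (fields are laid out back to back), and grow the running
 * total key size.
 */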
153 static inline void dpaa2_flow_extract_key_set(
154 	struct dpaa2_key_info *key_info, int index, uint8_t size)
155 {
156 	key_info->key_size[index] = size;
157 	if (index > 0) {
158 		key_info->key_offset[index] =
159 			key_info->key_offset[index - 1] +
160 			key_info->key_size[index - 1];
161 	} else {
162 		key_info->key_offset[index] = 0;
163 	}
164 	key_info->key_total_size += size;
165 }
166 
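/*
 * Add one header-field extract to the key generation profile.
 *
 * IP SRC/DST extracts are special: they are always kept as the last
 * extract(s) and are registered with key size 0, because their real size
 * (IPv4 or IPv6 sized) is only known once the rule's address family is
 * known.  Any other field is therefore inserted just before them, and the
 * IP extract(s), together with the recorded ipv4_/ipv6_ offsets, are
 * shifted towards the tail.
 */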
167 static int dpaa2_flow_extract_add(
168 	struct dpaa2_key_extract *key_extract,
169 	enum net_prot prot,
170 	uint32_t field, uint8_t field_size)
171 {
172 	int index, ip_src = -1, ip_dst = -1;
173 	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
174 	struct dpaa2_key_info *key_info = &key_extract->key_info;
175 
176 	if (dpkg->num_extracts >=
177 		DPKG_MAX_NUM_OF_EXTRACTS) {
178 		DPAA2_PMD_WARN("Number of extracts exceeds the maximum");
179 		return -1;
180 	}
181 	/* If present, the IP SRC and IP DST extracts are already
182 	 * the last extract(s) before any reordering.
183 	 */
184 	for (index = 0; index < dpkg->num_extracts; index++) {
185 		if (dpkg->extracts[index].extract.from_hdr.prot ==
186 			NET_PROT_IP) {
187 			if (dpkg->extracts[index].extract.from_hdr.field ==
188 				NH_FLD_IP_SRC) {
189 				ip_src = index;
190 			}
191 			if (dpkg->extracts[index].extract.from_hdr.field ==
192 				NH_FLD_IP_DST) {
193 				ip_dst = index;
194 			}
195 		}
196 	}
197 
198 	if (ip_src >= 0)
199 		RTE_ASSERT((ip_src + 2) >= dpkg->num_extracts);
200 
201 	if (ip_dst >= 0)
202 		RTE_ASSERT((ip_dst + 2) >= dpkg->num_extracts);
203 
204 	if (prot == NET_PROT_IP &&
205 		(field == NH_FLD_IP_SRC ||
206 		field == NH_FLD_IP_DST)) {
207 		index = dpkg->num_extracts;
208 	} else {
209 		if (ip_src >= 0 && ip_dst >= 0)
210 			index = dpkg->num_extracts - 2;
211 		else if (ip_src >= 0 || ip_dst >= 0)
212 			index = dpkg->num_extracts - 1;
213 		else
214 			index = dpkg->num_extracts;
215 	}
216 
217 	dpkg->extracts[index].type =	DPKG_EXTRACT_FROM_HDR;
218 	dpkg->extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
219 	dpkg->extracts[index].extract.from_hdr.prot = prot;
220 	dpkg->extracts[index].extract.from_hdr.field = field;
221 	if (prot == NET_PROT_IP &&
222 		(field == NH_FLD_IP_SRC ||
223 		field == NH_FLD_IP_DST)) {
224 		dpaa2_flow_extract_key_set(key_info, index, 0);
225 	} else {
226 		dpaa2_flow_extract_key_set(key_info, index, field_size);
227 	}
228 
229 	if (prot == NET_PROT_IP) {
230 		if (field == NH_FLD_IP_SRC) {
231 			if (key_info->ipv4_dst_offset >= 0) {
232 				key_info->ipv4_src_offset =
233 					key_info->ipv4_dst_offset +
234 					NH_FLD_IPV4_ADDR_SIZE;
235 			} else {
236 				key_info->ipv4_src_offset =
237 					key_info->key_offset[index - 1] +
238 						key_info->key_size[index - 1];
239 			}
240 			if (key_info->ipv6_dst_offset >= 0) {
241 				key_info->ipv6_src_offset =
242 					key_info->ipv6_dst_offset +
243 					NH_FLD_IPV6_ADDR_SIZE;
244 			} else {
245 				key_info->ipv6_src_offset =
246 					key_info->key_offset[index - 1] +
247 						key_info->key_size[index - 1];
248 			}
249 		} else if (field == NH_FLD_IP_DST) {
250 			if (key_info->ipv4_src_offset >= 0) {
251 				key_info->ipv4_dst_offset =
252 					key_info->ipv4_src_offset +
253 					NH_FLD_IPV4_ADDR_SIZE;
254 			} else {
255 				key_info->ipv4_dst_offset =
256 					key_info->key_offset[index - 1] +
257 						key_info->key_size[index - 1];
258 			}
259 			if (key_info->ipv6_src_offset >= 0) {
260 				key_info->ipv6_dst_offset =
261 					key_info->ipv6_src_offset +
262 					NH_FLD_IPV6_ADDR_SIZE;
263 			} else {
264 				key_info->ipv6_dst_offset =
265 					key_info->key_offset[index - 1] +
266 						key_info->key_size[index - 1];
267 			}
268 		}
269 	}
270 
271 	if (index == dpkg->num_extracts) {
272 		dpkg->num_extracts++;
273 		return 0;
274 	}
275 
276 	if (ip_src >= 0) {
277 		ip_src++;
278 		dpkg->extracts[ip_src].type =
279 			DPKG_EXTRACT_FROM_HDR;
280 		dpkg->extracts[ip_src].extract.from_hdr.type =
281 			DPKG_FULL_FIELD;
282 		dpkg->extracts[ip_src].extract.from_hdr.prot =
283 			NET_PROT_IP;
284 		dpkg->extracts[ip_src].extract.from_hdr.field =
285 			NH_FLD_IP_SRC;
286 		dpaa2_flow_extract_key_set(key_info, ip_src, 0);
287 		key_info->ipv4_src_offset += field_size;
288 		key_info->ipv6_src_offset += field_size;
289 	}
290 	if (ip_dst >= 0) {
291 		ip_dst++;
292 		dpkg->extracts[ip_dst].type =
293 			DPKG_EXTRACT_FROM_HDR;
294 		dpkg->extracts[ip_dst].extract.from_hdr.type =
295 			DPKG_FULL_FIELD;
296 		dpkg->extracts[ip_dst].extract.from_hdr.prot =
297 			NET_PROT_IP;
298 		dpkg->extracts[ip_dst].extract.from_hdr.field =
299 			NH_FLD_IP_DST;
300 		dpaa2_flow_extract_key_set(key_info, ip_dst, 0);
301 		key_info->ipv4_dst_offset += field_size;
302 		key_info->ipv6_dst_offset += field_size;
303 	}
304 
305 	dpkg->num_extracts++;
306 
307 	return 0;
308 }
309 
310 /* Protocol discrimination.
311  * Discriminate IPv4/IPv6/vLan by Eth type.
312  * Discriminate UDP/TCP/ICMP by next proto of IP.
313  */
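/*
 * For example (illustrative only): a VLAN item with no spec is classified
 * by ETH_TYPE == RTE_ETHER_TYPE_VLAN, and a bare "pattern icmp /" is
 * classified by IP next protocol == IPPROTO_ICMP.
 */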
314 static inline int
315 dpaa2_flow_proto_discrimination_extract(
316 	struct dpaa2_key_extract *key_extract,
317 	enum rte_flow_item_type type)
318 {
319 	if (type == RTE_FLOW_ITEM_TYPE_ETH) {
320 		return dpaa2_flow_extract_add(
321 				key_extract, NET_PROT_ETH,
322 				NH_FLD_ETH_TYPE,
323 				sizeof(rte_be16_t));
324 	} else if (type == (enum rte_flow_item_type)
325 		DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
326 		return dpaa2_flow_extract_add(
327 				key_extract, NET_PROT_IP,
328 				NH_FLD_IP_PROTO,
329 				NH_FLD_IP_PROTO_SIZE);
330 	}
331 
332 	return -1;
333 }
334 
335 static inline int dpaa2_flow_extract_search(
336 	struct dpkg_profile_cfg *dpkg,
337 	enum net_prot prot, uint32_t field)
338 {
339 	int i;
340 
341 	for (i = 0; i < dpkg->num_extracts; i++) {
342 		if (dpkg->extracts[i].extract.from_hdr.prot == prot &&
343 			dpkg->extracts[i].extract.from_hdr.field == field) {
344 			return i;
345 		}
346 	}
347 
348 	return -1;
349 }
350 
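/*
 * Return the byte offset of (prot, field) within the packed rule key.
 * IPv4/IPv6 addresses are resolved through the dedicated ipv4_/ipv6_
 * offsets in key_info, since their extracts are registered with size 0
 * and always sit at the tail of the key.
 */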
351 static inline int dpaa2_flow_extract_key_offset(
352 	struct dpaa2_key_extract *key_extract,
353 	enum net_prot prot, uint32_t field)
354 {
355 	int i;
356 	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
357 	struct dpaa2_key_info *key_info = &key_extract->key_info;
358 
359 	if (prot == NET_PROT_IPV4 ||
360 		prot == NET_PROT_IPV6)
361 		i = dpaa2_flow_extract_search(dpkg, NET_PROT_IP, field);
362 	else
363 		i = dpaa2_flow_extract_search(dpkg, prot, field);
364 
365 	if (i >= 0) {
366 		if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_SRC)
367 			return key_info->ipv4_src_offset;
368 		else if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_DST)
369 			return key_info->ipv4_dst_offset;
370 		else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_SRC)
371 			return key_info->ipv6_src_offset;
372 		else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_DST)
373 			return key_info->ipv6_dst_offset;
374 		else
375 			return key_info->key_offset[i];
376 	} else {
377 		return -1;
378 	}
379 }
380 
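/*
 * Next-protocol discrimination value: either an Ethernet type (to tell
 * VLAN/IPv4/IPv6 apart) or an IP protocol number (to tell ICMP/UDP/TCP/SCTP
 * apart).  dpaa2_flow_proto_discrimination_rule() writes it with an
 * all-ones mask into both the QoS and the FS rule.
 */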
381 struct proto_discrimination {
382 	enum rte_flow_item_type type;
383 	union {
384 		rte_be16_t eth_type;
385 		uint8_t ip_proto;
386 	};
387 };
388 
389 static int
390 dpaa2_flow_proto_discrimination_rule(
391 	struct dpaa2_dev_priv *priv, struct rte_flow *flow,
392 	struct proto_discrimination proto, int group)
393 {
394 	enum net_prot prot;
395 	uint32_t field;
396 	int offset;
397 	size_t key_iova;
398 	size_t mask_iova;
399 	rte_be16_t eth_type;
400 	uint8_t ip_proto;
401 
402 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
403 		prot = NET_PROT_ETH;
404 		field = NH_FLD_ETH_TYPE;
405 	} else if (proto.type == DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
406 		prot = NET_PROT_IP;
407 		field = NH_FLD_IP_PROTO;
408 	} else {
409 		DPAA2_PMD_ERR(
410 			"Only Eth and IP support next-proto discrimination.");
411 		return -1;
412 	}
413 
414 	offset = dpaa2_flow_extract_key_offset(&priv->extract.qos_key_extract,
415 			prot, field);
416 	if (offset < 0) {
417 		DPAA2_PMD_ERR("QoS prot %d field %d extract failed",
418 				prot, field);
419 		return -1;
420 	}
421 	key_iova = flow->qos_rule.key_iova + offset;
422 	mask_iova = flow->qos_rule.mask_iova + offset;
423 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
424 		eth_type = proto.eth_type;
425 		memcpy((void *)key_iova, (const void *)(&eth_type),
426 			sizeof(rte_be16_t));
427 		eth_type = 0xffff;
428 		memcpy((void *)mask_iova, (const void *)(&eth_type),
429 			sizeof(rte_be16_t));
430 	} else {
431 		ip_proto = proto.ip_proto;
432 		memcpy((void *)key_iova, (const void *)(&ip_proto),
433 			sizeof(uint8_t));
434 		ip_proto = 0xff;
435 		memcpy((void *)mask_iova, (const void *)(&ip_proto),
436 			sizeof(uint8_t));
437 	}
438 
439 	offset = dpaa2_flow_extract_key_offset(
440 			&priv->extract.tc_key_extract[group],
441 			prot, field);
442 	if (offset < 0) {
443 		DPAA2_PMD_ERR("FS prot %d field %d extract failed",
444 				prot, field);
445 		return -1;
446 	}
447 	key_iova = flow->fs_rule.key_iova + offset;
448 	mask_iova = flow->fs_rule.mask_iova + offset;
449 
450 	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
451 		eth_type = proto.eth_type;
452 		memcpy((void *)key_iova, (const void *)(&eth_type),
453 			sizeof(rte_be16_t));
454 		eth_type = 0xffff;
455 		memcpy((void *)mask_iova, (const void *)(&eth_type),
456 			sizeof(rte_be16_t));
457 	} else {
458 		ip_proto = proto.ip_proto;
459 		memcpy((void *)key_iova, (const void *)(&ip_proto),
460 			sizeof(uint8_t));
461 		ip_proto = 0xff;
462 		memcpy((void *)mask_iova, (const void *)(&ip_proto),
463 			sizeof(uint8_t));
464 	}
465 
466 	return 0;
467 }
468 
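/*
 * Copy the spec and mask bytes of one field into the rule's key and mask
 * buffers at the offset that field occupies within the extract key.
 */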
469 static inline int
470 dpaa2_flow_rule_data_set(
471 	struct dpaa2_key_extract *key_extract,
472 	struct dpni_rule_cfg *rule,
473 	enum net_prot prot, uint32_t field,
474 	const void *key, const void *mask, int size)
475 {
476 	int offset = dpaa2_flow_extract_key_offset(key_extract,
477 				prot, field);
478 
479 	if (offset < 0) {
480 		DPAA2_PMD_ERR("prot %d, field %d extract failed",
481 			prot, field);
482 		return -1;
483 	}
484 	memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
485 	memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
486 
487 	return 0;
488 }
489 
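/*
 * Move the IP SRC or DST key/mask bytes already written at src_offset to
 * their final position at the tail of the key, now that the address family
 * (and therefore the real offset) is known.
 */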
490 static inline int
491 _dpaa2_flow_rule_move_ipaddr_tail(
492 	struct dpaa2_key_extract *key_extract,
493 	struct dpni_rule_cfg *rule, int src_offset,
494 	uint32_t field, bool ipv4)
495 {
496 	size_t key_src;
497 	size_t mask_src;
498 	size_t key_dst;
499 	size_t mask_dst;
500 	int dst_offset, len;
501 	enum net_prot prot;
502 	char tmp[NH_FLD_IPV6_ADDR_SIZE];
503 
504 	if (field != NH_FLD_IP_SRC &&
505 		field != NH_FLD_IP_DST) {
506 		DPAA2_PMD_ERR("Field of IP addr reorder must be IP SRC/DST");
507 		return -1;
508 	}
509 	if (ipv4)
510 		prot = NET_PROT_IPV4;
511 	else
512 		prot = NET_PROT_IPV6;
513 	dst_offset = dpaa2_flow_extract_key_offset(key_extract,
514 				prot, field);
515 	if (dst_offset < 0) {
516 		DPAA2_PMD_ERR("Field %d reorder extract failed", field);
517 		return -1;
518 	}
519 	key_src = rule->key_iova + src_offset;
520 	mask_src = rule->mask_iova + src_offset;
521 	key_dst = rule->key_iova + dst_offset;
522 	mask_dst = rule->mask_iova + dst_offset;
523 	if (ipv4)
524 		len = sizeof(rte_be32_t);
525 	else
526 		len = NH_FLD_IPV6_ADDR_SIZE;
527 
528 	memcpy(tmp, (char *)key_src, len);
529 	memcpy((char *)key_dst, tmp, len);
530 
531 	memcpy(tmp, (char *)mask_src, len);
532 	memcpy((char *)mask_dst, tmp, len);
533 
534 	return 0;
535 }
536 
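/*
 * Re-run the IP address relocation for both the QoS rule and the per-TC FS
 * rule of the flow wherever an IP SRC/DST offset has been recorded, then
 * refresh the recorded offsets.
 */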
537 static inline int
538 dpaa2_flow_rule_move_ipaddr_tail(
539 	struct rte_flow *flow, struct dpaa2_dev_priv *priv,
540 	int fs_group)
541 {
542 	int ret;
543 	enum net_prot prot;
544 
545 	if (flow->ipaddr_rule.ipaddr_type == FLOW_NONE_IPADDR)
546 		return 0;
547 
548 	if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR)
549 		prot = NET_PROT_IPV4;
550 	else
551 		prot = NET_PROT_IPV6;
552 
553 	if (flow->ipaddr_rule.qos_ipsrc_offset >= 0) {
554 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
555 				&priv->extract.qos_key_extract,
556 				&flow->qos_rule,
557 				flow->ipaddr_rule.qos_ipsrc_offset,
558 				NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
559 		if (ret) {
560 			DPAA2_PMD_ERR("QoS src address reorder failed");
561 			return -1;
562 		}
563 		flow->ipaddr_rule.qos_ipsrc_offset =
564 			dpaa2_flow_extract_key_offset(
565 				&priv->extract.qos_key_extract,
566 				prot, NH_FLD_IP_SRC);
567 	}
568 
569 	if (flow->ipaddr_rule.qos_ipdst_offset >= 0) {
570 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
571 				&priv->extract.qos_key_extract,
572 				&flow->qos_rule,
573 				flow->ipaddr_rule.qos_ipdst_offset,
574 				NH_FLD_IP_DST, prot == NET_PROT_IPV4);
575 		if (ret) {
576 			DPAA2_PMD_ERR("QoS dst address reorder failed");
577 			return -1;
578 		}
579 		flow->ipaddr_rule.qos_ipdst_offset =
580 			dpaa2_flow_extract_key_offset(
581 				&priv->extract.qos_key_extract,
582 				prot, NH_FLD_IP_DST);
583 	}
584 
585 	if (flow->ipaddr_rule.fs_ipsrc_offset >= 0) {
586 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
587 				&priv->extract.tc_key_extract[fs_group],
588 				&flow->fs_rule,
589 				flow->ipaddr_rule.fs_ipsrc_offset,
590 				NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
591 		if (ret) {
592 			DPAA2_PMD_ERR("FS src address reorder failed");
593 			return -1;
594 		}
595 		flow->ipaddr_rule.fs_ipsrc_offset =
596 			dpaa2_flow_extract_key_offset(
597 				&priv->extract.tc_key_extract[fs_group],
598 				prot, NH_FLD_IP_SRC);
599 	}
600 	if (flow->ipaddr_rule.fs_ipdst_offset >= 0) {
601 		ret = _dpaa2_flow_rule_move_ipaddr_tail(
602 				&priv->extract.tc_key_extract[fs_group],
603 				&flow->fs_rule,
604 				flow->ipaddr_rule.fs_ipdst_offset,
605 				NH_FLD_IP_DST, prot == NET_PROT_IPV4);
606 		if (ret) {
607 			DPAA2_PMD_ERR("FS dst address reorder failed");
608 			return -1;
609 		}
610 		flow->ipaddr_rule.fs_ipdst_offset =
611 			dpaa2_flow_extract_key_offset(
612 				&priv->extract.tc_key_extract[fs_group],
613 				prot, NH_FLD_IP_DST);
614 	}
615 
616 	return 0;
617 }
618 
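/*
 * Check that the user supplied mask only covers fields this PMD can extract
 * for the given item type: OR-ing it into the supported mask must leave the
 * supported mask unchanged.
 */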
619 static int
620 dpaa2_flow_extract_support(
621 	const uint8_t *mask_src,
622 	enum rte_flow_item_type type)
623 {
624 	char mask[64];
625 	int i, size = 0;
626 	const char *mask_support = 0;
627 
628 	switch (type) {
629 	case RTE_FLOW_ITEM_TYPE_ETH:
630 		mask_support = (const char *)&dpaa2_flow_item_eth_mask;
631 		size = sizeof(struct rte_flow_item_eth);
632 		break;
633 	case RTE_FLOW_ITEM_TYPE_VLAN:
634 		mask_support = (const char *)&dpaa2_flow_item_vlan_mask;
635 		size = sizeof(struct rte_flow_item_vlan);
636 		break;
637 	case RTE_FLOW_ITEM_TYPE_IPV4:
638 		mask_support = (const char *)&dpaa2_flow_item_ipv4_mask;
639 		size = sizeof(struct rte_flow_item_ipv4);
640 		break;
641 	case RTE_FLOW_ITEM_TYPE_IPV6:
642 		mask_support = (const char *)&dpaa2_flow_item_ipv6_mask;
643 		size = sizeof(struct rte_flow_item_ipv6);
644 		break;
645 	case RTE_FLOW_ITEM_TYPE_ICMP:
646 		mask_support = (const char *)&dpaa2_flow_item_icmp_mask;
647 		size = sizeof(struct rte_flow_item_icmp);
648 		break;
649 	case RTE_FLOW_ITEM_TYPE_UDP:
650 		mask_support = (const char *)&dpaa2_flow_item_udp_mask;
651 		size = sizeof(struct rte_flow_item_udp);
652 		break;
653 	case RTE_FLOW_ITEM_TYPE_TCP:
654 		mask_support = (const char *)&dpaa2_flow_item_tcp_mask;
655 		size = sizeof(struct rte_flow_item_tcp);
656 		break;
657 	case RTE_FLOW_ITEM_TYPE_SCTP:
658 		mask_support = (const char *)&dpaa2_flow_item_sctp_mask;
659 		size = sizeof(struct rte_flow_item_sctp);
660 		break;
661 	case RTE_FLOW_ITEM_TYPE_GRE:
662 		mask_support = (const char *)&dpaa2_flow_item_gre_mask;
663 		size = sizeof(struct rte_flow_item_gre);
664 		break;
665 	default:
666 		return -1;
667 	}
668 
669 	memcpy(mask, mask_support, size);
670 
671 	for (i = 0; i < size; i++)
672 		mask[i] = (mask[i] | mask_src[i]);
673 
674 	if (memcmp(mask, mask_support, size))
675 		return -1;
676 
677 	return 0;
678 }
679 
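/*
 * The dpaa2_configure_flow_*() handlers below all follow the same pattern
 * for every field with a non-zero mask: make sure the field is present in
 * both the QoS key extract and the per-TC FS key extract (adding it and
 * flagging DPAA2_QOS/FS_TABLE_RECONFIGURE if it is not), move any IP
 * address fields already in the rule back to the key tail, then write the
 * spec/mask bytes into both the QoS and the FS rule.
 */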
680 static int
681 dpaa2_configure_flow_eth(struct rte_flow *flow,
682 			 struct rte_eth_dev *dev,
683 			 const struct rte_flow_attr *attr,
684 			 const struct rte_flow_item *pattern,
685 			 const struct rte_flow_action actions[] __rte_unused,
686 			 struct rte_flow_error *error __rte_unused,
687 			 int *device_configured)
688 {
689 	int index, ret;
690 	int local_cfg = 0;
691 	uint32_t group;
692 	const struct rte_flow_item_eth *spec, *mask;
693 
694 	/* TODO: Currently upper bound of range parameter is not implemented */
695 	const struct rte_flow_item_eth *last __rte_unused;
696 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
697 	const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0};
698 
699 	group = attr->group;
700 
701 	/* Parse pattern list to get the matching parameters */
702 	spec    = (const struct rte_flow_item_eth *)pattern->spec;
703 	last    = (const struct rte_flow_item_eth *)pattern->last;
704 	mask    = (const struct rte_flow_item_eth *)
705 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_eth_mask);
706 	if (!spec) {
707 		/* No Ethernet header field to match;
708 		 * only the Ethernet protocol matters.
709 		 */
710 		DPAA2_PMD_WARN("No pattern spec for Eth flow, just skip");
711 		return 0;
712 	}
713 
714 	/* Get traffic class index and flow id to be configured */
715 	flow->tc_id = group;
716 	flow->tc_index = attr->priority;
717 
718 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
719 		RTE_FLOW_ITEM_TYPE_ETH)) {
720 		DPAA2_PMD_WARN("Extract field(s) of Ethernet not supported.");
721 
722 		return -1;
723 	}
724 
725 	if (memcmp((const char *)&mask->src, zero_cmp, RTE_ETHER_ADDR_LEN)) {
726 		index = dpaa2_flow_extract_search(
727 				&priv->extract.qos_key_extract.dpkg,
728 				NET_PROT_ETH, NH_FLD_ETH_SA);
729 		if (index < 0) {
730 			ret = dpaa2_flow_extract_add(
731 					&priv->extract.qos_key_extract,
732 					NET_PROT_ETH, NH_FLD_ETH_SA,
733 					RTE_ETHER_ADDR_LEN);
734 			if (ret) {
735 				DPAA2_PMD_ERR("QoS Extract add ETH_SA failed.");
736 
737 				return -1;
738 			}
739 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
740 		}
741 		index = dpaa2_flow_extract_search(
742 				&priv->extract.tc_key_extract[group].dpkg,
743 				NET_PROT_ETH, NH_FLD_ETH_SA);
744 		if (index < 0) {
745 			ret = dpaa2_flow_extract_add(
746 					&priv->extract.tc_key_extract[group],
747 					NET_PROT_ETH, NH_FLD_ETH_SA,
748 					RTE_ETHER_ADDR_LEN);
749 			if (ret) {
750 				DPAA2_PMD_ERR("FS Extract add ETH_SA failed.");
751 				return -1;
752 			}
753 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
754 		}
755 
756 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
757 		if (ret) {
758 			DPAA2_PMD_ERR(
759 				"Move ipaddr before ETH_SA rule set failed");
760 			return -1;
761 		}
762 
763 		ret = dpaa2_flow_rule_data_set(
764 				&priv->extract.qos_key_extract,
765 				&flow->qos_rule,
766 				NET_PROT_ETH,
767 				NH_FLD_ETH_SA,
768 				&spec->src.addr_bytes,
769 				&mask->src.addr_bytes,
770 				sizeof(struct rte_ether_addr));
771 		if (ret) {
772 			DPAA2_PMD_ERR("QoS NH_FLD_ETH_SA rule data set failed");
773 			return -1;
774 		}
775 
776 		ret = dpaa2_flow_rule_data_set(
777 				&priv->extract.tc_key_extract[group],
778 				&flow->fs_rule,
779 				NET_PROT_ETH,
780 				NH_FLD_ETH_SA,
781 				&spec->src.addr_bytes,
782 				&mask->src.addr_bytes,
783 				sizeof(struct rte_ether_addr));
784 		if (ret) {
785 			DPAA2_PMD_ERR("FS NH_FLD_ETH_SA rule data set failed");
786 			return -1;
787 		}
788 	}
789 
790 	if (memcmp((const char *)&mask->dst, zero_cmp, RTE_ETHER_ADDR_LEN)) {
791 		index = dpaa2_flow_extract_search(
792 				&priv->extract.qos_key_extract.dpkg,
793 				NET_PROT_ETH, NH_FLD_ETH_DA);
794 		if (index < 0) {
795 			ret = dpaa2_flow_extract_add(
796 					&priv->extract.qos_key_extract,
797 					NET_PROT_ETH, NH_FLD_ETH_DA,
798 					RTE_ETHER_ADDR_LEN);
799 			if (ret) {
800 				DPAA2_PMD_ERR("QoS Extract add ETH_DA failed.");
801 
802 				return -1;
803 			}
804 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
805 		}
806 
807 		index = dpaa2_flow_extract_search(
808 				&priv->extract.tc_key_extract[group].dpkg,
809 				NET_PROT_ETH, NH_FLD_ETH_DA);
810 		if (index < 0) {
811 			ret = dpaa2_flow_extract_add(
812 					&priv->extract.tc_key_extract[group],
813 					NET_PROT_ETH, NH_FLD_ETH_DA,
814 					RTE_ETHER_ADDR_LEN);
815 			if (ret) {
816 				DPAA2_PMD_ERR("FS Extract add ETH_DA failed.");
817 
818 				return -1;
819 			}
820 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
821 		}
822 
823 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
824 		if (ret) {
825 			DPAA2_PMD_ERR(
826 				"Move ipaddr before ETH DA rule set failed");
827 			return -1;
828 		}
829 
830 		ret = dpaa2_flow_rule_data_set(
831 				&priv->extract.qos_key_extract,
832 				&flow->qos_rule,
833 				NET_PROT_ETH,
834 				NH_FLD_ETH_DA,
835 				&spec->dst.addr_bytes,
836 				&mask->dst.addr_bytes,
837 				sizeof(struct rte_ether_addr));
838 		if (ret) {
839 			DPAA2_PMD_ERR("QoS NH_FLD_ETH_DA rule data set failed");
840 			return -1;
841 		}
842 
843 		ret = dpaa2_flow_rule_data_set(
844 				&priv->extract.tc_key_extract[group],
845 				&flow->fs_rule,
846 				NET_PROT_ETH,
847 				NH_FLD_ETH_DA,
848 				&spec->dst.addr_bytes,
849 				&mask->dst.addr_bytes,
850 				sizeof(struct rte_ether_addr));
851 		if (ret) {
852 			DPAA2_PMD_ERR("FS NH_FLD_ETH_DA rule data set failed");
853 			return -1;
854 		}
855 	}
856 
857 	if (memcmp((const char *)&mask->type, zero_cmp, sizeof(rte_be16_t))) {
858 		index = dpaa2_flow_extract_search(
859 				&priv->extract.qos_key_extract.dpkg,
860 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
861 		if (index < 0) {
862 			ret = dpaa2_flow_extract_add(
863 					&priv->extract.qos_key_extract,
864 					NET_PROT_ETH, NH_FLD_ETH_TYPE,
865 					RTE_ETHER_TYPE_LEN);
866 			if (ret) {
867 				DPAA2_PMD_ERR("QoS Extract add ETH_TYPE failed.");
868 
869 				return -1;
870 			}
871 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
872 		}
873 		index = dpaa2_flow_extract_search(
874 				&priv->extract.tc_key_extract[group].dpkg,
875 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
876 		if (index < 0) {
877 			ret = dpaa2_flow_extract_add(
878 					&priv->extract.tc_key_extract[group],
879 					NET_PROT_ETH, NH_FLD_ETH_TYPE,
880 					RTE_ETHER_TYPE_LEN);
881 			if (ret) {
882 				DPAA2_PMD_ERR("FS Extract add ETH_TYPE failed.");
883 
884 				return -1;
885 			}
886 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
887 		}
888 
889 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
890 		if (ret) {
891 			DPAA2_PMD_ERR(
892 				"Move ipaddr before ETH TYPE rule set failed");
893 			return -1;
894 		}
895 
896 		ret = dpaa2_flow_rule_data_set(
897 				&priv->extract.qos_key_extract,
898 				&flow->qos_rule,
899 				NET_PROT_ETH,
900 				NH_FLD_ETH_TYPE,
901 				&spec->type,
902 				&mask->type,
903 				sizeof(rte_be16_t));
904 		if (ret) {
905 			DPAA2_PMD_ERR("QoS NH_FLD_ETH_TYPE rule data set failed");
906 			return -1;
907 		}
908 
909 		ret = dpaa2_flow_rule_data_set(
910 				&priv->extract.tc_key_extract[group],
911 				&flow->fs_rule,
912 				NET_PROT_ETH,
913 				NH_FLD_ETH_TYPE,
914 				&spec->type,
915 				&mask->type,
916 				sizeof(rte_be16_t));
917 		if (ret) {
918 			DPAA2_PMD_ERR("FS NH_FLD_ETH_TYPE rule data set failed");
919 			return -1;
920 		}
921 	}
922 
923 	(*device_configured) |= local_cfg;
924 
925 	return 0;
926 }
927 
928 static int
929 dpaa2_configure_flow_vlan(struct rte_flow *flow,
930 			  struct rte_eth_dev *dev,
931 			  const struct rte_flow_attr *attr,
932 			  const struct rte_flow_item *pattern,
933 			  const struct rte_flow_action actions[] __rte_unused,
934 			  struct rte_flow_error *error __rte_unused,
935 			  int *device_configured)
936 {
937 	int index, ret;
938 	int local_cfg = 0;
939 	uint32_t group;
940 	const struct rte_flow_item_vlan *spec, *mask;
941 
942 	const struct rte_flow_item_vlan *last __rte_unused;
943 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
944 
945 	group = attr->group;
946 
947 	/* Parse pattern list to get the matching parameters */
948 	spec    = (const struct rte_flow_item_vlan *)pattern->spec;
949 	last    = (const struct rte_flow_item_vlan *)pattern->last;
950 	mask    = (const struct rte_flow_item_vlan *)
951 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_vlan_mask);
952 
953 	/* Get traffic class index and flow id to be configured */
954 	flow->tc_id = group;
955 	flow->tc_index = attr->priority;
956 
957 	if (!spec) {
958 		/* No VLAN header field to match; only the
959 		 * VLAN protocol matters.
960 		 * The Ethernet type is what is actually used
961 		 * for VLAN classification.
962 		 */
963 		struct proto_discrimination proto;
964 
965 		index = dpaa2_flow_extract_search(
966 				&priv->extract.qos_key_extract.dpkg,
967 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
968 		if (index < 0) {
969 			ret = dpaa2_flow_proto_discrimination_extract(
970 						&priv->extract.qos_key_extract,
971 						RTE_FLOW_ITEM_TYPE_ETH);
972 			if (ret) {
973 				DPAA2_PMD_ERR(
974 				"QoS Ext ETH_TYPE to discriminate vLan failed");
975 
976 				return -1;
977 			}
978 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
979 		}
980 
981 		index = dpaa2_flow_extract_search(
982 				&priv->extract.tc_key_extract[group].dpkg,
983 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
984 		if (index < 0) {
985 			ret = dpaa2_flow_proto_discrimination_extract(
986 					&priv->extract.tc_key_extract[group],
987 					RTE_FLOW_ITEM_TYPE_ETH);
988 			if (ret) {
989 				DPAA2_PMD_ERR(
990 				"FS Ext ETH_TYPE to discriminate vLan failed.");
991 
992 				return -1;
993 			}
994 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
995 		}
996 
997 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
998 		if (ret) {
999 			DPAA2_PMD_ERR(
1000 			"Move ipaddr before vLan discrimination set failed");
1001 			return -1;
1002 		}
1003 
1004 		proto.type = RTE_FLOW_ITEM_TYPE_ETH;
1005 		proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
1006 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1007 							proto, group);
1008 		if (ret) {
1009 			DPAA2_PMD_ERR("vLan discrimination rule set failed");
1010 			return -1;
1011 		}
1012 
1013 		(*device_configured) |= local_cfg;
1014 
1015 		return 0;
1016 	}
1017 
1018 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
1019 		RTE_FLOW_ITEM_TYPE_VLAN)) {
1020 		DPAA2_PMD_WARN("Extract field(s) of VLAN not supported.");
1021 
1022 		return -1;
1023 	}
1024 
1025 	if (!mask->tci)
1026 		return 0;
1027 
1028 	index = dpaa2_flow_extract_search(
1029 				&priv->extract.qos_key_extract.dpkg,
1030 				NET_PROT_VLAN, NH_FLD_VLAN_TCI);
1031 	if (index < 0) {
1032 		ret = dpaa2_flow_extract_add(
1033 						&priv->extract.qos_key_extract,
1034 						NET_PROT_VLAN,
1035 						NH_FLD_VLAN_TCI,
1036 						sizeof(rte_be16_t));
1037 		if (ret) {
1038 			DPAA2_PMD_ERR("QoS Extract add VLAN_TCI failed.");
1039 
1040 			return -1;
1041 		}
1042 		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1043 	}
1044 
1045 	index = dpaa2_flow_extract_search(
1046 			&priv->extract.tc_key_extract[group].dpkg,
1047 			NET_PROT_VLAN, NH_FLD_VLAN_TCI);
1048 	if (index < 0) {
1049 		ret = dpaa2_flow_extract_add(
1050 				&priv->extract.tc_key_extract[group],
1051 				NET_PROT_VLAN,
1052 				NH_FLD_VLAN_TCI,
1053 				sizeof(rte_be16_t));
1054 		if (ret) {
1055 			DPAA2_PMD_ERR("FS Extract add VLAN_TCI failed.");
1056 
1057 			return -1;
1058 		}
1059 		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1060 	}
1061 
1062 	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1063 	if (ret) {
1064 		DPAA2_PMD_ERR(
1065 			"Move ipaddr before VLAN TCI rule set failed");
1066 		return -1;
1067 	}
1068 
1069 	ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
1070 				&flow->qos_rule,
1071 				NET_PROT_VLAN,
1072 				NH_FLD_VLAN_TCI,
1073 				&spec->tci,
1074 				&mask->tci,
1075 				sizeof(rte_be16_t));
1076 	if (ret) {
1077 		DPAA2_PMD_ERR("QoS NH_FLD_VLAN_TCI rule data set failed");
1078 		return -1;
1079 	}
1080 
1081 	ret = dpaa2_flow_rule_data_set(
1082 			&priv->extract.tc_key_extract[group],
1083 			&flow->fs_rule,
1084 			NET_PROT_VLAN,
1085 			NH_FLD_VLAN_TCI,
1086 			&spec->tci,
1087 			&mask->tci,
1088 			sizeof(rte_be16_t));
1089 	if (ret) {
1090 		DPAA2_PMD_ERR("FS NH_FLD_VLAN_TCI rule data set failed");
1091 		return -1;
1092 	}
1093 
1094 	(*device_configured) |= local_cfg;
1095 
1096 	return 0;
1097 }
1098 
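/*
 * Shared handler for the IPv4 and IPv6 pattern items.  IP SRC/DST fields
 * are added to the extracts with size 0 (see dpaa2_flow_extract_add()) and
 * the offsets actually used are remembered in flow->ipaddr_rule so that
 * later items can relocate the address bytes to the key tail.
 */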
1099 static int
1100 dpaa2_configure_flow_generic_ip(
1101 	struct rte_flow *flow,
1102 	struct rte_eth_dev *dev,
1103 	const struct rte_flow_attr *attr,
1104 	const struct rte_flow_item *pattern,
1105 	const struct rte_flow_action actions[] __rte_unused,
1106 	struct rte_flow_error *error __rte_unused,
1107 	int *device_configured)
1108 {
1109 	int index, ret;
1110 	int local_cfg = 0;
1111 	uint32_t group;
1112 	const struct rte_flow_item_ipv4 *spec_ipv4 = 0,
1113 		*mask_ipv4 = 0;
1114 	const struct rte_flow_item_ipv6 *spec_ipv6 = 0,
1115 		*mask_ipv6 = 0;
1116 	const void *key, *mask;
1117 	enum net_prot prot;
1118 
1119 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1120 	const char zero_cmp[NH_FLD_IPV6_ADDR_SIZE] = {0};
1121 	int size;
1122 
1123 	group = attr->group;
1124 
1125 	/* Parse pattern list to get the matching parameters */
1126 	if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1127 		spec_ipv4 = (const struct rte_flow_item_ipv4 *)pattern->spec;
1128 		mask_ipv4 = (const struct rte_flow_item_ipv4 *)
1129 			(pattern->mask ? pattern->mask :
1130 					&dpaa2_flow_item_ipv4_mask);
1131 	} else {
1132 		spec_ipv6 = (const struct rte_flow_item_ipv6 *)pattern->spec;
1133 		mask_ipv6 = (const struct rte_flow_item_ipv6 *)
1134 			(pattern->mask ? pattern->mask :
1135 					&dpaa2_flow_item_ipv6_mask);
1136 	}
1137 
1138 	/* Get traffic class index and flow id to be configured */
1139 	flow->tc_id = group;
1140 	flow->tc_index = attr->priority;
1141 
1142 	if (!spec_ipv4 && !spec_ipv6) {
1143 		/* No IP header field to match; only the IP protocol matters.
1144 		 * Example: flow create 0 ingress pattern ipv6 /
1145 		 */
1146 		/* The Ethernet type is what is actually used
1147 		 * for IP identification.
1148 		 */
1149 		/* TODO: The current design only supports Eth + IP;
1150 		 *  Eth + VLAN + IP still needs to be added.
1151 		 */
1152 		struct proto_discrimination proto;
1153 
1154 		index = dpaa2_flow_extract_search(
1155 				&priv->extract.qos_key_extract.dpkg,
1156 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1157 		if (index < 0) {
1158 			ret = dpaa2_flow_proto_discrimination_extract(
1159 					&priv->extract.qos_key_extract,
1160 					RTE_FLOW_ITEM_TYPE_ETH);
1161 			if (ret) {
1162 				DPAA2_PMD_ERR(
1163 				"QoS Ext ETH_TYPE to discriminate IP failed.");
1164 
1165 				return -1;
1166 			}
1167 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1168 		}
1169 
1170 		index = dpaa2_flow_extract_search(
1171 				&priv->extract.tc_key_extract[group].dpkg,
1172 				NET_PROT_ETH, NH_FLD_ETH_TYPE);
1173 		if (index < 0) {
1174 			ret = dpaa2_flow_proto_discrimination_extract(
1175 					&priv->extract.tc_key_extract[group],
1176 					RTE_FLOW_ITEM_TYPE_ETH);
1177 			if (ret) {
1178 				DPAA2_PMD_ERR(
1179 				"FS Ext ETH_TYPE to discriminate IP failed");
1180 
1181 				return -1;
1182 			}
1183 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1184 		}
1185 
1186 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1187 		if (ret) {
1188 			DPAA2_PMD_ERR(
1189 			"Move ipaddr before IP discrimination set failed");
1190 			return -1;
1191 		}
1192 
1193 		proto.type = RTE_FLOW_ITEM_TYPE_ETH;
1194 		if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4)
1195 			proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
1196 		else
1197 			proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
1198 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1199 							proto, group);
1200 		if (ret) {
1201 			DPAA2_PMD_ERR("IP discrimination rule set failed");
1202 			return -1;
1203 		}
1204 
1205 		(*device_configured) |= local_cfg;
1206 
1207 		return 0;
1208 	}
1209 
1210 	if (mask_ipv4) {
1211 		if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv4,
1212 			RTE_FLOW_ITEM_TYPE_IPV4)) {
1213 			DPAA2_PMD_WARN("Extract field(s) of IPv4 not supported.");
1214 
1215 			return -1;
1216 		}
1217 	}
1218 
1219 	if (mask_ipv6) {
1220 		if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv6,
1221 			RTE_FLOW_ITEM_TYPE_IPV6)) {
1222 			DPAA2_PMD_WARN("Extract field(s) of IPv6 not supported.");
1223 
1224 			return -1;
1225 		}
1226 	}
1227 
1228 	if (mask_ipv4 && (mask_ipv4->hdr.src_addr ||
1229 		mask_ipv4->hdr.dst_addr)) {
1230 		flow->ipaddr_rule.ipaddr_type = FLOW_IPV4_ADDR;
1231 	} else if (mask_ipv6 &&
1232 		(memcmp((const char *)mask_ipv6->hdr.src_addr,
1233 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE) ||
1234 		memcmp((const char *)mask_ipv6->hdr.dst_addr,
1235 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1236 		flow->ipaddr_rule.ipaddr_type = FLOW_IPV6_ADDR;
1237 	}
1238 
1239 	if ((mask_ipv4 && mask_ipv4->hdr.src_addr) ||
1240 		(mask_ipv6 &&
1241 			memcmp((const char *)mask_ipv6->hdr.src_addr,
1242 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1243 		index = dpaa2_flow_extract_search(
1244 				&priv->extract.qos_key_extract.dpkg,
1245 				NET_PROT_IP, NH_FLD_IP_SRC);
1246 		if (index < 0) {
1247 			ret = dpaa2_flow_extract_add(
1248 						&priv->extract.qos_key_extract,
1249 						NET_PROT_IP,
1250 						NH_FLD_IP_SRC,
1251 						0);
1252 			if (ret) {
1253 				DPAA2_PMD_ERR("QoS Extract add IP_SRC failed.");
1254 
1255 				return -1;
1256 			}
1257 			local_cfg |= (DPAA2_QOS_TABLE_RECONFIGURE |
1258 				DPAA2_QOS_TABLE_IPADDR_EXTRACT);
1259 		}
1260 
1261 		index = dpaa2_flow_extract_search(
1262 				&priv->extract.tc_key_extract[group].dpkg,
1263 				NET_PROT_IP, NH_FLD_IP_SRC);
1264 		if (index < 0) {
1265 			ret = dpaa2_flow_extract_add(
1266 					&priv->extract.tc_key_extract[group],
1267 					NET_PROT_IP,
1268 					NH_FLD_IP_SRC,
1269 					0);
1270 			if (ret) {
1271 				DPAA2_PMD_ERR("FS Extract add IP_SRC failed.");
1272 
1273 				return -1;
1274 			}
1275 			local_cfg |= (DPAA2_FS_TABLE_RECONFIGURE |
1276 				DPAA2_FS_TABLE_IPADDR_EXTRACT);
1277 		}
1278 
1279 		if (spec_ipv4)
1280 			key = &spec_ipv4->hdr.src_addr;
1281 		else
1282 			key = &spec_ipv6->hdr.src_addr[0];
1283 		if (mask_ipv4) {
1284 			mask = &mask_ipv4->hdr.src_addr;
1285 			size = NH_FLD_IPV4_ADDR_SIZE;
1286 			prot = NET_PROT_IPV4;
1287 		} else {
1288 			mask = &mask_ipv6->hdr.src_addr[0];
1289 			size = NH_FLD_IPV6_ADDR_SIZE;
1290 			prot = NET_PROT_IPV6;
1291 		}
1292 
1293 		ret = dpaa2_flow_rule_data_set(
1294 				&priv->extract.qos_key_extract,
1295 				&flow->qos_rule,
1296 				prot, NH_FLD_IP_SRC,
1297 				key,	mask, size);
1298 		if (ret) {
1299 			DPAA2_PMD_ERR("QoS NH_FLD_IP_SRC rule data set failed");
1300 			return -1;
1301 		}
1302 
1303 		ret = dpaa2_flow_rule_data_set(
1304 				&priv->extract.tc_key_extract[group],
1305 				&flow->fs_rule,
1306 				prot, NH_FLD_IP_SRC,
1307 				key,	mask, size);
1308 		if (ret) {
1309 			DPAA2_PMD_ERR("FS NH_FLD_IP_SRC rule data set failed");
1310 			return -1;
1311 		}
1312 
1313 		flow->ipaddr_rule.qos_ipsrc_offset =
1314 			dpaa2_flow_extract_key_offset(
1315 				&priv->extract.qos_key_extract,
1316 				prot, NH_FLD_IP_SRC);
1317 		flow->ipaddr_rule.fs_ipsrc_offset =
1318 			dpaa2_flow_extract_key_offset(
1319 				&priv->extract.tc_key_extract[group],
1320 				prot, NH_FLD_IP_SRC);
1321 	}
1322 
1323 	if ((mask_ipv4 && mask_ipv4->hdr.dst_addr) ||
1324 		(mask_ipv6 &&
1325 			memcmp((const char *)mask_ipv6->hdr.dst_addr,
1326 				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1327 		index = dpaa2_flow_extract_search(
1328 				&priv->extract.qos_key_extract.dpkg,
1329 				NET_PROT_IP, NH_FLD_IP_DST);
1330 		if (index < 0) {
1331 			if (mask_ipv4)
1332 				size = NH_FLD_IPV4_ADDR_SIZE;
1333 			else
1334 				size = NH_FLD_IPV6_ADDR_SIZE;
1335 			ret = dpaa2_flow_extract_add(
1336 						&priv->extract.qos_key_extract,
1337 						NET_PROT_IP,
1338 						NH_FLD_IP_DST,
1339 						size);
1340 			if (ret) {
1341 				DPAA2_PMD_ERR("QoS Extract add IP_DST failed.");
1342 
1343 				return -1;
1344 			}
1345 			local_cfg |= (DPAA2_QOS_TABLE_RECONFIGURE |
1346 				DPAA2_QOS_TABLE_IPADDR_EXTRACT);
1347 		}
1348 
1349 		index = dpaa2_flow_extract_search(
1350 				&priv->extract.tc_key_extract[group].dpkg,
1351 				NET_PROT_IP, NH_FLD_IP_DST);
1352 		if (index < 0) {
1353 			if (mask_ipv4)
1354 				size = NH_FLD_IPV4_ADDR_SIZE;
1355 			else
1356 				size = NH_FLD_IPV6_ADDR_SIZE;
1357 			ret = dpaa2_flow_extract_add(
1358 					&priv->extract.tc_key_extract[group],
1359 					NET_PROT_IP,
1360 					NH_FLD_IP_DST,
1361 					size);
1362 			if (ret) {
1363 				DPAA2_PMD_ERR("FS Extract add IP_DST failed.");
1364 
1365 				return -1;
1366 			}
1367 			local_cfg |= (DPAA2_FS_TABLE_RECONFIGURE |
1368 				DPAA2_FS_TABLE_IPADDR_EXTRACT);
1369 		}
1370 
1371 		if (spec_ipv4)
1372 			key = &spec_ipv4->hdr.dst_addr;
1373 		else
1374 			key = &spec_ipv6->hdr.dst_addr[0];
1375 		if (mask_ipv4) {
1376 			mask = &mask_ipv4->hdr.dst_addr;
1377 			size = NH_FLD_IPV4_ADDR_SIZE;
1378 			prot = NET_PROT_IPV4;
1379 		} else {
1380 			mask = &mask_ipv6->hdr.dst_addr[0];
1381 			size = NH_FLD_IPV6_ADDR_SIZE;
1382 			prot = NET_PROT_IPV6;
1383 		}
1384 
1385 		ret = dpaa2_flow_rule_data_set(
1386 				&priv->extract.qos_key_extract,
1387 				&flow->qos_rule,
1388 				prot, NH_FLD_IP_DST,
1389 				key,	mask, size);
1390 		if (ret) {
1391 			DPAA2_PMD_ERR("QoS NH_FLD_IP_DST rule data set failed");
1392 			return -1;
1393 		}
1394 
1395 		ret = dpaa2_flow_rule_data_set(
1396 				&priv->extract.tc_key_extract[group],
1397 				&flow->fs_rule,
1398 				prot, NH_FLD_IP_DST,
1399 				key,	mask, size);
1400 		if (ret) {
1401 			DPAA2_PMD_ERR("FS NH_FLD_IP_DST rule data set failed");
1402 			return -1;
1403 		}
1404 		flow->ipaddr_rule.qos_ipdst_offset =
1405 			dpaa2_flow_extract_key_offset(
1406 				&priv->extract.qos_key_extract,
1407 				prot, NH_FLD_IP_DST);
1408 		flow->ipaddr_rule.fs_ipdst_offset =
1409 			dpaa2_flow_extract_key_offset(
1410 				&priv->extract.tc_key_extract[group],
1411 				prot, NH_FLD_IP_DST);
1412 	}
1413 
1414 	if ((mask_ipv4 && mask_ipv4->hdr.next_proto_id) ||
1415 		(mask_ipv6 && mask_ipv6->hdr.proto)) {
1416 		index = dpaa2_flow_extract_search(
1417 				&priv->extract.qos_key_extract.dpkg,
1418 				NET_PROT_IP, NH_FLD_IP_PROTO);
1419 		if (index < 0) {
1420 			ret = dpaa2_flow_extract_add(
1421 				&priv->extract.qos_key_extract,
1422 				NET_PROT_IP,
1423 				NH_FLD_IP_PROTO,
1424 				NH_FLD_IP_PROTO_SIZE);
1425 			if (ret) {
1426 				DPAA2_PMD_ERR("QoS Extract add IP_PROTO failed.");
1427 
1428 				return -1;
1429 			}
1430 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1431 		}
1432 
1433 		index = dpaa2_flow_extract_search(
1434 				&priv->extract.tc_key_extract[group].dpkg,
1435 				NET_PROT_IP, NH_FLD_IP_PROTO);
1436 		if (index < 0) {
1437 			ret = dpaa2_flow_extract_add(
1438 					&priv->extract.tc_key_extract[group],
1439 					NET_PROT_IP,
1440 					NH_FLD_IP_PROTO,
1441 					NH_FLD_IP_PROTO_SIZE);
1442 			if (ret) {
1443 				DPAA2_PMD_ERR("FS Extract add IP_PROTO failed.");
1444 
1445 				return -1;
1446 			}
1447 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1448 		}
1449 
1450 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1451 		if (ret) {
1452 			DPAA2_PMD_ERR(
1453 				"Move ipaddr before NH_FLD_IP_PROTO rule set failed");
1454 			return -1;
1455 		}
1456 
1457 		if (spec_ipv4)
1458 			key = &spec_ipv4->hdr.next_proto_id;
1459 		else
1460 			key = &spec_ipv6->hdr.proto;
1461 		if (mask_ipv4)
1462 			mask = &mask_ipv4->hdr.next_proto_id;
1463 		else
1464 			mask = &mask_ipv6->hdr.proto;
1465 
1466 		ret = dpaa2_flow_rule_data_set(
1467 				&priv->extract.qos_key_extract,
1468 				&flow->qos_rule,
1469 				NET_PROT_IP,
1470 				NH_FLD_IP_PROTO,
1471 				key,	mask, NH_FLD_IP_PROTO_SIZE);
1472 		if (ret) {
1473 			DPAA2_PMD_ERR("QoS NH_FLD_IP_PROTO rule data set failed");
1474 			return -1;
1475 		}
1476 
1477 		ret = dpaa2_flow_rule_data_set(
1478 				&priv->extract.tc_key_extract[group],
1479 				&flow->fs_rule,
1480 				NET_PROT_IP,
1481 				NH_FLD_IP_PROTO,
1482 				key,	mask, NH_FLD_IP_PROTO_SIZE);
1483 		if (ret) {
1484 			DPAA2_PMD_ERR("FS NH_FLD_IP_PROTO rule data set failed");
1485 			return -1;
1486 		}
1487 	}
1488 
1489 	(*device_configured) |= local_cfg;
1490 
1491 	return 0;
1492 }
1493 
1494 static int
1495 dpaa2_configure_flow_icmp(struct rte_flow *flow,
1496 			  struct rte_eth_dev *dev,
1497 			  const struct rte_flow_attr *attr,
1498 			  const struct rte_flow_item *pattern,
1499 			  const struct rte_flow_action actions[] __rte_unused,
1500 			  struct rte_flow_error *error __rte_unused,
1501 			  int *device_configured)
1502 {
1503 	int index, ret;
1504 	int local_cfg = 0;
1505 	uint32_t group;
1506 	const struct rte_flow_item_icmp *spec, *mask;
1507 
1508 	const struct rte_flow_item_icmp *last __rte_unused;
1509 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1510 
1511 	group = attr->group;
1512 
1513 	/* Parse pattern list to get the matching parameters */
1514 	spec    = (const struct rte_flow_item_icmp *)pattern->spec;
1515 	last    = (const struct rte_flow_item_icmp *)pattern->last;
1516 	mask    = (const struct rte_flow_item_icmp *)
1517 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_icmp_mask);
1518 
1519 	/* Get traffic class index and flow id to be configured */
1520 	flow->tc_id = group;
1521 	flow->tc_index = attr->priority;
1522 
1523 	if (!spec) {
1524 		/* No ICMP header field to match; only the ICMP
1525 		 * protocol matters.
1526 		 * Example: flow create 0 ingress pattern icmp /
1527 		 */
1528 		/* The next protocol of the generic IP header is what
1529 		 * is actually used for ICMP identification.
1530 		 */
1531 		struct proto_discrimination proto;
1532 
1533 		index = dpaa2_flow_extract_search(
1534 				&priv->extract.qos_key_extract.dpkg,
1535 				NET_PROT_IP, NH_FLD_IP_PROTO);
1536 		if (index < 0) {
1537 			ret = dpaa2_flow_proto_discrimination_extract(
1538 					&priv->extract.qos_key_extract,
1539 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1540 			if (ret) {
1541 				DPAA2_PMD_ERR(
1542 					"QoS Extract IP protocol to discriminate ICMP failed.");
1543 
1544 				return -1;
1545 			}
1546 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1547 		}
1548 
1549 		index = dpaa2_flow_extract_search(
1550 				&priv->extract.tc_key_extract[group].dpkg,
1551 				NET_PROT_IP, NH_FLD_IP_PROTO);
1552 		if (index < 0) {
1553 			ret = dpaa2_flow_proto_discrimination_extract(
1554 					&priv->extract.tc_key_extract[group],
1555 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1556 			if (ret) {
1557 				DPAA2_PMD_ERR(
1558 					"FS Extract IP protocol to discriminate ICMP failed.");
1559 
1560 				return -1;
1561 			}
1562 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1563 		}
1564 
1565 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1566 		if (ret) {
1567 			DPAA2_PMD_ERR(
1568 				"Move IP addr before ICMP discrimination set failed");
1569 			return -1;
1570 		}
1571 
1572 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
1573 		proto.ip_proto = IPPROTO_ICMP;
1574 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1575 							proto, group);
1576 		if (ret) {
1577 			DPAA2_PMD_ERR("ICMP discrimination rule set failed");
1578 			return -1;
1579 		}
1580 
1581 		(*device_configured) |= local_cfg;
1582 
1583 		return 0;
1584 	}
1585 
1586 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
1587 		RTE_FLOW_ITEM_TYPE_ICMP)) {
1588 		DPAA2_PMD_WARN("Extract field(s) of ICMP not supported.");
1589 
1590 		return -1;
1591 	}
1592 
1593 	if (mask->hdr.icmp_type) {
1594 		index = dpaa2_flow_extract_search(
1595 				&priv->extract.qos_key_extract.dpkg,
1596 				NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
1597 		if (index < 0) {
1598 			ret = dpaa2_flow_extract_add(
1599 					&priv->extract.qos_key_extract,
1600 					NET_PROT_ICMP,
1601 					NH_FLD_ICMP_TYPE,
1602 					NH_FLD_ICMP_TYPE_SIZE);
1603 			if (ret) {
1604 				DPAA2_PMD_ERR("QoS Extract add ICMP_TYPE failed.");
1605 
1606 				return -1;
1607 			}
1608 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1609 		}
1610 
1611 		index = dpaa2_flow_extract_search(
1612 				&priv->extract.tc_key_extract[group].dpkg,
1613 				NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
1614 		if (index < 0) {
1615 			ret = dpaa2_flow_extract_add(
1616 					&priv->extract.tc_key_extract[group],
1617 					NET_PROT_ICMP,
1618 					NH_FLD_ICMP_TYPE,
1619 					NH_FLD_ICMP_TYPE_SIZE);
1620 			if (ret) {
1621 				DPAA2_PMD_ERR("FS Extract add ICMP_TYPE failed.");
1622 
1623 				return -1;
1624 			}
1625 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1626 		}
1627 
1628 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1629 		if (ret) {
1630 			DPAA2_PMD_ERR(
1631 				"Move ipaddr before ICMP TYPE set failed");
1632 			return -1;
1633 		}
1634 
1635 		ret = dpaa2_flow_rule_data_set(
1636 				&priv->extract.qos_key_extract,
1637 				&flow->qos_rule,
1638 				NET_PROT_ICMP,
1639 				NH_FLD_ICMP_TYPE,
1640 				&spec->hdr.icmp_type,
1641 				&mask->hdr.icmp_type,
1642 				NH_FLD_ICMP_TYPE_SIZE);
1643 		if (ret) {
1644 			DPAA2_PMD_ERR("QoS NH_FLD_ICMP_TYPE rule data set failed");
1645 			return -1;
1646 		}
1647 
1648 		ret = dpaa2_flow_rule_data_set(
1649 				&priv->extract.tc_key_extract[group],
1650 				&flow->fs_rule,
1651 				NET_PROT_ICMP,
1652 				NH_FLD_ICMP_TYPE,
1653 				&spec->hdr.icmp_type,
1654 				&mask->hdr.icmp_type,
1655 				NH_FLD_ICMP_TYPE_SIZE);
1656 		if (ret) {
1657 			DPAA2_PMD_ERR("FS NH_FLD_ICMP_TYPE rule data set failed");
1658 			return -1;
1659 		}
1660 	}
1661 
1662 	if (mask->hdr.icmp_code) {
1663 		index = dpaa2_flow_extract_search(
1664 				&priv->extract.qos_key_extract.dpkg,
1665 				NET_PROT_ICMP, NH_FLD_ICMP_CODE);
1666 		if (index < 0) {
1667 			ret = dpaa2_flow_extract_add(
1668 					&priv->extract.qos_key_extract,
1669 					NET_PROT_ICMP,
1670 					NH_FLD_ICMP_CODE,
1671 					NH_FLD_ICMP_CODE_SIZE);
1672 			if (ret) {
1673 				DPAA2_PMD_ERR("QoS Extract add ICMP_CODE failed.");
1674 
1675 				return -1;
1676 			}
1677 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1678 		}
1679 
1680 		index = dpaa2_flow_extract_search(
1681 				&priv->extract.tc_key_extract[group].dpkg,
1682 				NET_PROT_ICMP, NH_FLD_ICMP_CODE);
1683 		if (index < 0) {
1684 			ret = dpaa2_flow_extract_add(
1685 					&priv->extract.tc_key_extract[group],
1686 					NET_PROT_ICMP,
1687 					NH_FLD_ICMP_CODE,
1688 					NH_FLD_ICMP_CODE_SIZE);
1689 			if (ret) {
1690 				DPAA2_PMD_ERR("FS Extract add ICMP_CODE failed.");
1691 
1692 				return -1;
1693 			}
1694 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1695 		}
1696 
1697 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1698 		if (ret) {
1699 			DPAA2_PMD_ERR(
1700 				"Move ipaddr before ICMP CODE set failed");
1701 			return -1;
1702 		}
1703 
1704 		ret = dpaa2_flow_rule_data_set(
1705 				&priv->extract.qos_key_extract,
1706 				&flow->qos_rule,
1707 				NET_PROT_ICMP,
1708 				NH_FLD_ICMP_CODE,
1709 				&spec->hdr.icmp_code,
1710 				&mask->hdr.icmp_code,
1711 				NH_FLD_ICMP_CODE_SIZE);
1712 		if (ret) {
1713 			DPAA2_PMD_ERR("QoS NH_FLD_ICMP_CODE rule data set failed");
1714 			return -1;
1715 		}
1716 
1717 		ret = dpaa2_flow_rule_data_set(
1718 				&priv->extract.tc_key_extract[group],
1719 				&flow->fs_rule,
1720 				NET_PROT_ICMP,
1721 				NH_FLD_ICMP_CODE,
1722 				&spec->hdr.icmp_code,
1723 				&mask->hdr.icmp_code,
1724 				NH_FLD_ICMP_CODE_SIZE);
1725 		if (ret) {
1726 			DPAA2_PMD_ERR("FS NH_FLD_ICMP_CODE rule data set failed");
1727 			return -1;
1728 		}
1729 	}
1730 
1731 	(*device_configured) |= local_cfg;
1732 
1733 	return 0;
1734 }
1735 
1736 static int
1737 dpaa2_configure_flow_udp(struct rte_flow *flow,
1738 			 struct rte_eth_dev *dev,
1739 			  const struct rte_flow_attr *attr,
1740 			  const struct rte_flow_item *pattern,
1741 			  const struct rte_flow_action actions[] __rte_unused,
1742 			  struct rte_flow_error *error __rte_unused,
1743 			  int *device_configured)
1744 {
1745 	int index, ret;
1746 	int local_cfg = 0;
1747 	uint32_t group;
1748 	const struct rte_flow_item_udp *spec, *mask;
1749 
1750 	const struct rte_flow_item_udp *last __rte_unused;
1751 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1752 
1753 	group = attr->group;
1754 
1755 	/* Parse pattern list to get the matching parameters */
1756 	spec    = (const struct rte_flow_item_udp *)pattern->spec;
1757 	last    = (const struct rte_flow_item_udp *)pattern->last;
1758 	mask    = (const struct rte_flow_item_udp *)
1759 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_udp_mask);
1760 
1761 	/* Get traffic class index and flow id to be configured */
1762 	flow->tc_id = group;
1763 	flow->tc_index = attr->priority;
1764 
1765 	if (!spec || !mc_l4_port_identification) {
1766 		struct proto_discrimination proto;
1767 
1768 		index = dpaa2_flow_extract_search(
1769 				&priv->extract.qos_key_extract.dpkg,
1770 				NET_PROT_IP, NH_FLD_IP_PROTO);
1771 		if (index < 0) {
1772 			ret = dpaa2_flow_proto_discrimination_extract(
1773 					&priv->extract.qos_key_extract,
1774 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1775 			if (ret) {
1776 				DPAA2_PMD_ERR(
1777 					"QoS Extract IP protocol to discriminate UDP failed.");
1778 
1779 				return -1;
1780 			}
1781 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1782 		}
1783 
1784 		index = dpaa2_flow_extract_search(
1785 				&priv->extract.tc_key_extract[group].dpkg,
1786 				NET_PROT_IP, NH_FLD_IP_PROTO);
1787 		if (index < 0) {
1788 			ret = dpaa2_flow_proto_discrimination_extract(
1789 				&priv->extract.tc_key_extract[group],
1790 				DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1791 			if (ret) {
1792 				DPAA2_PMD_ERR(
1793 					"FS Extract IP protocol to discriminate UDP failed.");
1794 
1795 				return -1;
1796 			}
1797 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1798 		}
1799 
1800 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1801 		if (ret) {
1802 			DPAA2_PMD_ERR(
1803 				"Move IP addr before UDP discrimination set failed");
1804 			return -1;
1805 		}
1806 
1807 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
1808 		proto.ip_proto = IPPROTO_UDP;
1809 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1810 							proto, group);
1811 		if (ret) {
1812 			DPAA2_PMD_ERR("UDP discrimination rule set failed");
1813 			return -1;
1814 		}
1815 
1816 		(*device_configured) |= local_cfg;
1817 
1818 		if (!spec)
1819 			return 0;
1820 	}
1821 
1822 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
1823 		RTE_FLOW_ITEM_TYPE_UDP)) {
1824 		DPAA2_PMD_WARN("Extract field(s) of UDP not supported.");
1825 
1826 		return -1;
1827 	}
1828 
1829 	if (mask->hdr.src_port) {
1830 		index = dpaa2_flow_extract_search(
1831 				&priv->extract.qos_key_extract.dpkg,
1832 				NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
1833 		if (index < 0) {
1834 			ret = dpaa2_flow_extract_add(
1835 					&priv->extract.qos_key_extract,
1836 				NET_PROT_UDP,
1837 				NH_FLD_UDP_PORT_SRC,
1838 				NH_FLD_UDP_PORT_SIZE);
1839 			if (ret) {
1840 				DPAA2_PMD_ERR("QoS Extract add UDP_SRC failed.");
1841 
1842 				return -1;
1843 			}
1844 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1845 		}
1846 
1847 		index = dpaa2_flow_extract_search(
1848 				&priv->extract.tc_key_extract[group].dpkg,
1849 				NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
1850 		if (index < 0) {
1851 			ret = dpaa2_flow_extract_add(
1852 					&priv->extract.tc_key_extract[group],
1853 					NET_PROT_UDP,
1854 					NH_FLD_UDP_PORT_SRC,
1855 					NH_FLD_UDP_PORT_SIZE);
1856 			if (ret) {
1857 				DPAA2_PMD_ERR("FS Extract add UDP_SRC failed.");
1858 
1859 				return -1;
1860 			}
1861 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1862 		}
1863 
1864 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1865 		if (ret) {
1866 			DPAA2_PMD_ERR(
1867 				"Move ipaddr before UDP_PORT_SRC set failed");
1868 			return -1;
1869 		}
1870 
1871 		ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
1872 				&flow->qos_rule,
1873 				NET_PROT_UDP,
1874 				NH_FLD_UDP_PORT_SRC,
1875 				&spec->hdr.src_port,
1876 				&mask->hdr.src_port,
1877 				NH_FLD_UDP_PORT_SIZE);
1878 		if (ret) {
1879 			DPAA2_PMD_ERR(
1880 				"QoS NH_FLD_UDP_PORT_SRC rule data set failed");
1881 			return -1;
1882 		}
1883 
1884 		ret = dpaa2_flow_rule_data_set(
1885 				&priv->extract.tc_key_extract[group],
1886 				&flow->fs_rule,
1887 				NET_PROT_UDP,
1888 				NH_FLD_UDP_PORT_SRC,
1889 				&spec->hdr.src_port,
1890 				&mask->hdr.src_port,
1891 				NH_FLD_UDP_PORT_SIZE);
1892 		if (ret) {
1893 			DPAA2_PMD_ERR(
1894 				"FS NH_FLD_UDP_PORT_SRC rule data set failed");
1895 			return -1;
1896 		}
1897 	}
1898 
1899 	if (mask->hdr.dst_port) {
1900 		index = dpaa2_flow_extract_search(
1901 				&priv->extract.qos_key_extract.dpkg,
1902 				NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
1903 		if (index < 0) {
1904 			ret = dpaa2_flow_extract_add(
1905 					&priv->extract.qos_key_extract,
1906 					NET_PROT_UDP,
1907 					NH_FLD_UDP_PORT_DST,
1908 					NH_FLD_UDP_PORT_SIZE);
1909 			if (ret) {
1910 				DPAA2_PMD_ERR("QoS Extract add UDP_DST failed.");
1911 
1912 				return -1;
1913 			}
1914 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1915 		}
1916 
1917 		index = dpaa2_flow_extract_search(
1918 				&priv->extract.tc_key_extract[group].dpkg,
1919 				NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
1920 		if (index < 0) {
1921 			ret = dpaa2_flow_extract_add(
1922 					&priv->extract.tc_key_extract[group],
1923 					NET_PROT_UDP,
1924 					NH_FLD_UDP_PORT_DST,
1925 					NH_FLD_UDP_PORT_SIZE);
1926 			if (ret) {
1927 				DPAA2_PMD_ERR("FS Extract add UDP_DST failed.");
1928 
1929 				return -1;
1930 			}
1931 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1932 		}
1933 
1934 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1935 		if (ret) {
1936 			DPAA2_PMD_ERR(
1937 				"Move ipaddr before UDP_PORT_DST set failed");
1938 			return -1;
1939 		}
1940 
1941 		ret = dpaa2_flow_rule_data_set(
1942 				&priv->extract.qos_key_extract,
1943 				&flow->qos_rule,
1944 				NET_PROT_UDP,
1945 				NH_FLD_UDP_PORT_DST,
1946 				&spec->hdr.dst_port,
1947 				&mask->hdr.dst_port,
1948 				NH_FLD_UDP_PORT_SIZE);
1949 		if (ret) {
1950 			DPAA2_PMD_ERR(
1951 				"QoS NH_FLD_UDP_PORT_DST rule data set failed");
1952 			return -1;
1953 		}
1954 
1955 		ret = dpaa2_flow_rule_data_set(
1956 				&priv->extract.tc_key_extract[group],
1957 				&flow->fs_rule,
1958 				NET_PROT_UDP,
1959 				NH_FLD_UDP_PORT_DST,
1960 				&spec->hdr.dst_port,
1961 				&mask->hdr.dst_port,
1962 				NH_FLD_UDP_PORT_SIZE);
1963 		if (ret) {
1964 			DPAA2_PMD_ERR(
1965 				"FS NH_FLD_UDP_PORT_DST rule data set failed");
1966 			return -1;
1967 		}
1968 	}
1969 
1970 	(*device_configured) |= local_cfg;
1971 
1972 	return 0;
1973 }
1974 
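/* Build the QoS/FS extracts and rule data for a TCP pattern item.
 * If no spec is given, or if MC/WRIOP cannot identify the L4 protocol
 * from the ports, an IP-protocol discrimination rule (IPPROTO_TCP) is
 * installed first; when a spec is present, the masked source and
 * destination ports are then added to both the QoS and the per-TC FS
 * keys.
 */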
1975 static int
1976 dpaa2_configure_flow_tcp(struct rte_flow *flow,
1977 			 struct rte_eth_dev *dev,
1978 			 const struct rte_flow_attr *attr,
1979 			 const struct rte_flow_item *pattern,
1980 			 const struct rte_flow_action actions[] __rte_unused,
1981 			 struct rte_flow_error *error __rte_unused,
1982 			 int *device_configured)
1983 {
1984 	int index, ret;
1985 	int local_cfg = 0;
1986 	uint32_t group;
1987 	const struct rte_flow_item_tcp *spec, *mask;
1988 
1989 	const struct rte_flow_item_tcp *last __rte_unused;
1990 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1991 
1992 	group = attr->group;
1993 
1994 	/* Parse pattern list to get the matching parameters */
1995 	spec    = (const struct rte_flow_item_tcp *)pattern->spec;
1996 	last    = (const struct rte_flow_item_tcp *)pattern->last;
1997 	mask    = (const struct rte_flow_item_tcp *)
1998 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_tcp_mask);
1999 
2000 	/* Get traffic class index and flow id to be configured */
2001 	flow->tc_id = group;
2002 	flow->tc_index = attr->priority;
2003 
2004 	if (!spec || !mc_l4_port_identification) {
2005 		struct proto_discrimination proto;
2006 
2007 		index = dpaa2_flow_extract_search(
2008 				&priv->extract.qos_key_extract.dpkg,
2009 				NET_PROT_IP, NH_FLD_IP_PROTO);
2010 		if (index < 0) {
2011 			ret = dpaa2_flow_proto_discrimination_extract(
2012 					&priv->extract.qos_key_extract,
2013 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2014 			if (ret) {
2015 				DPAA2_PMD_ERR(
2016 					"QoS Extract IP protocol to discriminate TCP failed.");
2017 
2018 				return -1;
2019 			}
2020 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2021 		}
2022 
2023 		index = dpaa2_flow_extract_search(
2024 				&priv->extract.tc_key_extract[group].dpkg,
2025 				NET_PROT_IP, NH_FLD_IP_PROTO);
2026 		if (index < 0) {
2027 			ret = dpaa2_flow_proto_discrimination_extract(
2028 				&priv->extract.tc_key_extract[group],
2029 				DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2030 			if (ret) {
2031 				DPAA2_PMD_ERR(
2032 					"FS Extract IP protocol to discriminate TCP failed.");
2033 
2034 				return -1;
2035 			}
2036 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2037 		}
2038 
2039 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2040 		if (ret) {
2041 			DPAA2_PMD_ERR(
2042 				"Move IP addr before TCP discrimination set failed");
2043 			return -1;
2044 		}
2045 
2046 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2047 		proto.ip_proto = IPPROTO_TCP;
2048 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2049 							proto, group);
2050 		if (ret) {
2051 			DPAA2_PMD_ERR("TCP discrimination rule set failed");
2052 			return -1;
2053 		}
2054 
2055 		(*device_configured) |= local_cfg;
2056 
2057 		if (!spec)
2058 			return 0;
2059 	}
2060 
2061 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2062 		RTE_FLOW_ITEM_TYPE_TCP)) {
2063 		DPAA2_PMD_WARN("Extract field(s) of TCP not supported.");
2064 
2065 		return -1;
2066 	}
2067 
2068 	if (mask->hdr.src_port) {
2069 		index = dpaa2_flow_extract_search(
2070 				&priv->extract.qos_key_extract.dpkg,
2071 				NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
2072 		if (index < 0) {
2073 			ret = dpaa2_flow_extract_add(
2074 					&priv->extract.qos_key_extract,
2075 					NET_PROT_TCP,
2076 					NH_FLD_TCP_PORT_SRC,
2077 					NH_FLD_TCP_PORT_SIZE);
2078 			if (ret) {
2079 				DPAA2_PMD_ERR("QoS Extract add TCP_SRC failed.");
2080 
2081 				return -1;
2082 			}
2083 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2084 		}
2085 
2086 		index = dpaa2_flow_extract_search(
2087 				&priv->extract.tc_key_extract[group].dpkg,
2088 				NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
2089 		if (index < 0) {
2090 			ret = dpaa2_flow_extract_add(
2091 					&priv->extract.tc_key_extract[group],
2092 					NET_PROT_TCP,
2093 					NH_FLD_TCP_PORT_SRC,
2094 					NH_FLD_TCP_PORT_SIZE);
2095 			if (ret) {
2096 				DPAA2_PMD_ERR("FS Extract add TCP_SRC failed.");
2097 
2098 				return -1;
2099 			}
2100 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2101 		}
2102 
2103 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2104 		if (ret) {
2105 			DPAA2_PMD_ERR(
2106 				"Move ipaddr before TCP_PORT_SRC set failed");
2107 			return -1;
2108 		}
2109 
2110 		ret = dpaa2_flow_rule_data_set(
2111 				&priv->extract.qos_key_extract,
2112 				&flow->qos_rule,
2113 				NET_PROT_TCP,
2114 				NH_FLD_TCP_PORT_SRC,
2115 				&spec->hdr.src_port,
2116 				&mask->hdr.src_port,
2117 				NH_FLD_TCP_PORT_SIZE);
2118 		if (ret) {
2119 			DPAA2_PMD_ERR(
2120 				"QoS NH_FLD_TCP_PORT_SRC rule data set failed");
2121 			return -1;
2122 		}
2123 
2124 		ret = dpaa2_flow_rule_data_set(
2125 				&priv->extract.tc_key_extract[group],
2126 				&flow->fs_rule,
2127 				NET_PROT_TCP,
2128 				NH_FLD_TCP_PORT_SRC,
2129 				&spec->hdr.src_port,
2130 				&mask->hdr.src_port,
2131 				NH_FLD_TCP_PORT_SIZE);
2132 		if (ret) {
2133 			DPAA2_PMD_ERR(
2134 				"FS NH_FLD_TCP_PORT_SRC rule data set failed");
2135 			return -1;
2136 		}
2137 	}
2138 
2139 	if (mask->hdr.dst_port) {
2140 		index = dpaa2_flow_extract_search(
2141 				&priv->extract.qos_key_extract.dpkg,
2142 				NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
2143 		if (index < 0) {
2144 			ret = dpaa2_flow_extract_add(
2145 					&priv->extract.qos_key_extract,
2146 					NET_PROT_TCP,
2147 					NH_FLD_TCP_PORT_DST,
2148 					NH_FLD_TCP_PORT_SIZE);
2149 			if (ret) {
2150 				DPAA2_PMD_ERR("QoS Extract add TCP_DST failed.");
2151 
2152 				return -1;
2153 			}
2154 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2155 		}
2156 
2157 		index = dpaa2_flow_extract_search(
2158 				&priv->extract.tc_key_extract[group].dpkg,
2159 				NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
2160 		if (index < 0) {
2161 			ret = dpaa2_flow_extract_add(
2162 					&priv->extract.tc_key_extract[group],
2163 					NET_PROT_TCP,
2164 					NH_FLD_TCP_PORT_DST,
2165 					NH_FLD_TCP_PORT_SIZE);
2166 			if (ret) {
2167 				DPAA2_PMD_ERR("FS Extract add TCP_DST failed.");
2168 
2169 				return -1;
2170 			}
2171 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2172 		}
2173 
2174 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2175 		if (ret) {
2176 			DPAA2_PMD_ERR(
2177 				"Move ipaddr before TCP_PORT_DST set failed");
2178 			return -1;
2179 		}
2180 
2181 		ret = dpaa2_flow_rule_data_set(
2182 				&priv->extract.qos_key_extract,
2183 				&flow->qos_rule,
2184 				NET_PROT_TCP,
2185 				NH_FLD_TCP_PORT_DST,
2186 				&spec->hdr.dst_port,
2187 				&mask->hdr.dst_port,
2188 				NH_FLD_TCP_PORT_SIZE);
2189 		if (ret) {
2190 			DPAA2_PMD_ERR(
2191 				"QoS NH_FLD_TCP_PORT_DST rule data set failed");
2192 			return -1;
2193 		}
2194 
2195 		ret = dpaa2_flow_rule_data_set(
2196 				&priv->extract.tc_key_extract[group],
2197 				&flow->fs_rule,
2198 				NET_PROT_TCP,
2199 				NH_FLD_TCP_PORT_DST,
2200 				&spec->hdr.dst_port,
2201 				&mask->hdr.dst_port,
2202 				NH_FLD_TCP_PORT_SIZE);
2203 		if (ret) {
2204 			DPAA2_PMD_ERR(
2205 				"FS NH_FLD_TCP_PORT_DST rule data set failed");
2206 			return -1;
2207 		}
2208 	}
2209 
2210 	(*device_configured) |= local_cfg;
2211 
2212 	return 0;
2213 }
2214 
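/* Build the QoS/FS extracts and rule data for an SCTP pattern item.
 * If no spec is given, or if MC/WRIOP cannot identify the L4 protocol
 * from the ports, an IP-protocol discrimination rule (IPPROTO_SCTP) is
 * installed first; when a spec is present, the masked source and
 * destination ports are then added to both the QoS and the per-TC FS
 * keys.
 */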
2215 static int
2216 dpaa2_configure_flow_sctp(struct rte_flow *flow,
2217 			  struct rte_eth_dev *dev,
2218 			  const struct rte_flow_attr *attr,
2219 			  const struct rte_flow_item *pattern,
2220 			  const struct rte_flow_action actions[] __rte_unused,
2221 			  struct rte_flow_error *error __rte_unused,
2222 			  int *device_configured)
2223 {
2224 	int index, ret;
2225 	int local_cfg = 0;
2226 	uint32_t group;
2227 	const struct rte_flow_item_sctp *spec, *mask;
2228 
2229 	const struct rte_flow_item_sctp *last __rte_unused;
2230 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2231 
2232 	group = attr->group;
2233 
2234 	/* Parse pattern list to get the matching parameters */
2235 	spec    = (const struct rte_flow_item_sctp *)pattern->spec;
2236 	last    = (const struct rte_flow_item_sctp *)pattern->last;
2237 	mask    = (const struct rte_flow_item_sctp *)
2238 			(pattern->mask ? pattern->mask :
2239 				&dpaa2_flow_item_sctp_mask);
2240 
2241 	/* Get traffic class index and flow id to be configured */
2242 	flow->tc_id = group;
2243 	flow->tc_index = attr->priority;
2244 
2245 	if (!spec || !mc_l4_port_identification) {
2246 		struct proto_discrimination proto;
2247 
2248 		index = dpaa2_flow_extract_search(
2249 				&priv->extract.qos_key_extract.dpkg,
2250 				NET_PROT_IP, NH_FLD_IP_PROTO);
2251 		if (index < 0) {
2252 			ret = dpaa2_flow_proto_discrimination_extract(
2253 					&priv->extract.qos_key_extract,
2254 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2255 			if (ret) {
2256 				DPAA2_PMD_ERR(
2257 					"QoS Extract IP protocol to discriminate SCTP failed.");
2258 
2259 				return -1;
2260 			}
2261 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2262 		}
2263 
2264 		index = dpaa2_flow_extract_search(
2265 				&priv->extract.tc_key_extract[group].dpkg,
2266 				NET_PROT_IP, NH_FLD_IP_PROTO);
2267 		if (index < 0) {
2268 			ret = dpaa2_flow_proto_discrimination_extract(
2269 					&priv->extract.tc_key_extract[group],
2270 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2271 			if (ret) {
2272 				DPAA2_PMD_ERR(
2273 					"FS Extract IP protocol to discriminate SCTP failed.");
2274 
2275 				return -1;
2276 			}
2277 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2278 		}
2279 
2280 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2281 		if (ret) {
2282 			DPAA2_PMD_ERR(
2283 				"Move ipaddr before SCTP discrimination set failed");
2284 			return -1;
2285 		}
2286 
2287 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2288 		proto.ip_proto = IPPROTO_SCTP;
2289 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2290 							proto, group);
2291 		if (ret) {
2292 			DPAA2_PMD_ERR("SCTP discrimination rule set failed");
2293 			return -1;
2294 		}
2295 
2296 		(*device_configured) |= local_cfg;
2297 
2298 		if (!spec)
2299 			return 0;
2300 	}
2301 
2302 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2303 		RTE_FLOW_ITEM_TYPE_SCTP)) {
2304 		DPAA2_PMD_WARN("Extract field(s) of SCTP not supported.");
2305 
2306 		return -1;
2307 	}
2308 
2309 	if (mask->hdr.src_port) {
2310 		index = dpaa2_flow_extract_search(
2311 				&priv->extract.qos_key_extract.dpkg,
2312 				NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
2313 		if (index < 0) {
2314 			ret = dpaa2_flow_extract_add(
2315 					&priv->extract.qos_key_extract,
2316 					NET_PROT_SCTP,
2317 					NH_FLD_SCTP_PORT_SRC,
2318 					NH_FLD_SCTP_PORT_SIZE);
2319 			if (ret) {
2320 				DPAA2_PMD_ERR("QoS Extract add SCTP_SRC failed.");
2321 
2322 				return -1;
2323 			}
2324 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2325 		}
2326 
2327 		index = dpaa2_flow_extract_search(
2328 				&priv->extract.tc_key_extract[group].dpkg,
2329 				NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
2330 		if (index < 0) {
2331 			ret = dpaa2_flow_extract_add(
2332 					&priv->extract.tc_key_extract[group],
2333 					NET_PROT_SCTP,
2334 					NH_FLD_SCTP_PORT_SRC,
2335 					NH_FLD_SCTP_PORT_SIZE);
2336 			if (ret) {
2337 				DPAA2_PMD_ERR("FS Extract add SCTP_SRC failed.");
2338 
2339 				return -1;
2340 			}
2341 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2342 		}
2343 
2344 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2345 		if (ret) {
2346 			DPAA2_PMD_ERR(
2347 				"Move ipaddr before SCTP_PORT_SRC set failed");
2348 			return -1;
2349 		}
2350 
2351 		ret = dpaa2_flow_rule_data_set(
2352 				&priv->extract.qos_key_extract,
2353 				&flow->qos_rule,
2354 				NET_PROT_SCTP,
2355 				NH_FLD_SCTP_PORT_SRC,
2356 				&spec->hdr.src_port,
2357 				&mask->hdr.src_port,
2358 				NH_FLD_SCTP_PORT_SIZE);
2359 		if (ret) {
2360 			DPAA2_PMD_ERR(
2361 				"QoS NH_FLD_SCTP_PORT_SRC rule data set failed");
2362 			return -1;
2363 		}
2364 
2365 		ret = dpaa2_flow_rule_data_set(
2366 				&priv->extract.tc_key_extract[group],
2367 				&flow->fs_rule,
2368 				NET_PROT_SCTP,
2369 				NH_FLD_SCTP_PORT_SRC,
2370 				&spec->hdr.src_port,
2371 				&mask->hdr.src_port,
2372 				NH_FLD_SCTP_PORT_SIZE);
2373 		if (ret) {
2374 			DPAA2_PMD_ERR(
2375 				"FS NH_FLD_SCTP_PORT_SRC rule data set failed");
2376 			return -1;
2377 		}
2378 	}
2379 
2380 	if (mask->hdr.dst_port) {
2381 		index = dpaa2_flow_extract_search(
2382 				&priv->extract.qos_key_extract.dpkg,
2383 				NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
2384 		if (index < 0) {
2385 			ret = dpaa2_flow_extract_add(
2386 					&priv->extract.qos_key_extract,
2387 					NET_PROT_SCTP,
2388 					NH_FLD_SCTP_PORT_DST,
2389 					NH_FLD_SCTP_PORT_SIZE);
2390 			if (ret) {
2391 				DPAA2_PMD_ERR("QoS Extract add SCTP_DST failed.");
2392 
2393 				return -1;
2394 			}
2395 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2396 		}
2397 
2398 		index = dpaa2_flow_extract_search(
2399 				&priv->extract.tc_key_extract[group].dpkg,
2400 				NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
2401 		if (index < 0) {
2402 			ret = dpaa2_flow_extract_add(
2403 					&priv->extract.tc_key_extract[group],
2404 					NET_PROT_SCTP,
2405 					NH_FLD_SCTP_PORT_DST,
2406 					NH_FLD_SCTP_PORT_SIZE);
2407 			if (ret) {
2408 				DPAA2_PMD_ERR("FS Extract add SCTP_DST failed.");
2409 
2410 				return -1;
2411 			}
2412 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2413 		}
2414 
2415 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2416 		if (ret) {
2417 			DPAA2_PMD_ERR(
2418 				"Move ipaddr before SCTP_PORT_DST set failed");
2419 			return -1;
2420 		}
2421 
2422 		ret = dpaa2_flow_rule_data_set(
2423 				&priv->extract.qos_key_extract,
2424 				&flow->qos_rule,
2425 				NET_PROT_SCTP,
2426 				NH_FLD_SCTP_PORT_DST,
2427 				&spec->hdr.dst_port,
2428 				&mask->hdr.dst_port,
2429 				NH_FLD_SCTP_PORT_SIZE);
2430 		if (ret) {
2431 			DPAA2_PMD_ERR(
2432 				"QoS NH_FLD_SCTP_PORT_DST rule data set failed");
2433 			return -1;
2434 		}
2435 
2436 		ret = dpaa2_flow_rule_data_set(
2437 				&priv->extract.tc_key_extract[group],
2438 				&flow->fs_rule,
2439 				NET_PROT_SCTP,
2440 				NH_FLD_SCTP_PORT_DST,
2441 				&spec->hdr.dst_port,
2442 				&mask->hdr.dst_port,
2443 				NH_FLD_SCTP_PORT_SIZE);
2444 		if (ret) {
2445 			DPAA2_PMD_ERR(
2446 				"FS NH_FLD_SCTP_PORT_DST rule data set failed");
2447 			return -1;
2448 		}
2449 	}
2450 
2451 	(*device_configured) |= local_cfg;
2452 
2453 	return 0;
2454 }
2455 
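/* Build the QoS/FS extracts and rule data for a GRE pattern item.
 * Without a spec only an IP-protocol discrimination rule (IPPROTO_GRE)
 * is installed; otherwise the masked GRE protocol type is added to
 * both the QoS and the per-TC FS keys.
 */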
2456 static int
2457 dpaa2_configure_flow_gre(struct rte_flow *flow,
2458 			 struct rte_eth_dev *dev,
2459 			 const struct rte_flow_attr *attr,
2460 			 const struct rte_flow_item *pattern,
2461 			 const struct rte_flow_action actions[] __rte_unused,
2462 			 struct rte_flow_error *error __rte_unused,
2463 			 int *device_configured)
2464 {
2465 	int index, ret;
2466 	int local_cfg = 0;
2467 	uint32_t group;
2468 	const struct rte_flow_item_gre *spec, *mask;
2469 
2470 	const struct rte_flow_item_gre *last __rte_unused;
2471 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2472 
2473 	group = attr->group;
2474 
2475 	/* Parse pattern list to get the matching parameters */
2476 	spec    = (const struct rte_flow_item_gre *)pattern->spec;
2477 	last    = (const struct rte_flow_item_gre *)pattern->last;
2478 	mask    = (const struct rte_flow_item_gre *)
2479 		(pattern->mask ? pattern->mask : &dpaa2_flow_item_gre_mask);
2480 
2481 	/* Get traffic class index and flow id to be configured */
2482 	flow->tc_id = group;
2483 	flow->tc_index = attr->priority;
2484 
2485 	if (!spec) {
2486 		struct proto_discrimination proto;
2487 
2488 		index = dpaa2_flow_extract_search(
2489 				&priv->extract.qos_key_extract.dpkg,
2490 				NET_PROT_IP, NH_FLD_IP_PROTO);
2491 		if (index < 0) {
2492 			ret = dpaa2_flow_proto_discrimination_extract(
2493 					&priv->extract.qos_key_extract,
2494 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2495 			if (ret) {
2496 				DPAA2_PMD_ERR(
2497 					"QoS Extract IP protocol to discriminate GRE failed.");
2498 
2499 				return -1;
2500 			}
2501 			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2502 		}
2503 
2504 		index = dpaa2_flow_extract_search(
2505 				&priv->extract.tc_key_extract[group].dpkg,
2506 				NET_PROT_IP, NH_FLD_IP_PROTO);
2507 		if (index < 0) {
2508 			ret = dpaa2_flow_proto_discrimination_extract(
2509 					&priv->extract.tc_key_extract[group],
2510 					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2511 			if (ret) {
2512 				DPAA2_PMD_ERR(
2513 					"FS Extract IP protocol to discriminate GRE failed.");
2514 
2515 				return -1;
2516 			}
2517 			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2518 		}
2519 
2520 		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2521 		if (ret) {
2522 			DPAA2_PMD_ERR(
2523 				"Move IP addr before GRE discrimination set failed");
2524 			return -1;
2525 		}
2526 
2527 		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2528 		proto.ip_proto = IPPROTO_GRE;
2529 		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2530 							proto, group);
2531 		if (ret) {
2532 			DPAA2_PMD_ERR("GRE discrimination rule set failed");
2533 			return -1;
2534 		}
2535 
2536 		(*device_configured) |= local_cfg;
2537 
2538 		return 0;
2539 	}
2540 
2541 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2542 		RTE_FLOW_ITEM_TYPE_GRE)) {
2543 		DPAA2_PMD_WARN("Extract field(s) of GRE not supported.");
2544 
2545 		return -1;
2546 	}
2547 
2548 	if (!mask->protocol)
2549 		return 0;
2550 
2551 	index = dpaa2_flow_extract_search(
2552 			&priv->extract.qos_key_extract.dpkg,
2553 			NET_PROT_GRE, NH_FLD_GRE_TYPE);
2554 	if (index < 0) {
2555 		ret = dpaa2_flow_extract_add(
2556 				&priv->extract.qos_key_extract,
2557 				NET_PROT_GRE,
2558 				NH_FLD_GRE_TYPE,
2559 				sizeof(rte_be16_t));
2560 		if (ret) {
2561 			DPAA2_PMD_ERR("QoS Extract add GRE_TYPE failed.");
2562 
2563 			return -1;
2564 		}
2565 		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2566 	}
2567 
2568 	index = dpaa2_flow_extract_search(
2569 			&priv->extract.tc_key_extract[group].dpkg,
2570 			NET_PROT_GRE, NH_FLD_GRE_TYPE);
2571 	if (index < 0) {
2572 		ret = dpaa2_flow_extract_add(
2573 				&priv->extract.tc_key_extract[group],
2574 				NET_PROT_GRE,
2575 				NH_FLD_GRE_TYPE,
2576 				sizeof(rte_be16_t));
2577 		if (ret) {
2578 			DPAA2_PMD_ERR("FS Extract add GRE_TYPE failed.");
2579 
2580 			return -1;
2581 		}
2582 		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2583 	}
2584 
2585 	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2586 	if (ret) {
2587 		DPAA2_PMD_ERR(
2588 			"Move ipaddr before GRE_TYPE set failed");
2589 		return -1;
2590 	}
2591 
2592 	ret = dpaa2_flow_rule_data_set(
2593 				&priv->extract.qos_key_extract,
2594 				&flow->qos_rule,
2595 				NET_PROT_GRE,
2596 				NH_FLD_GRE_TYPE,
2597 				&spec->protocol,
2598 				&mask->protocol,
2599 				sizeof(rte_be16_t));
2600 	if (ret) {
2601 		DPAA2_PMD_ERR(
2602 			"QoS NH_FLD_GRE_TYPE rule data set failed");
2603 		return -1;
2604 	}
2605 
2606 	ret = dpaa2_flow_rule_data_set(
2607 			&priv->extract.tc_key_extract[group],
2608 			&flow->fs_rule,
2609 			NET_PROT_GRE,
2610 			NH_FLD_GRE_TYPE,
2611 			&spec->protocol,
2612 			&mask->protocol,
2613 			sizeof(rte_be16_t));
2614 	if (ret) {
2615 		DPAA2_PMD_ERR(
2616 			"FS NH_FLD_GRE_TYPE rule data set failed");
2617 		return -1;
2618 	}
2619 
2620 	(*device_configured) |= local_cfg;
2621 
2622 	return 0;
2623 }
2624 
2625 /* Existing QoS/FS entries that match on IP address(es) need to be
2626  * updated whenever new extract(s) are inserted ahead of the IP
2627  * address extract(s), because the address offsets inside the
2628  * key/mask shift accordingly.
2629  */
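/* Illustrative example (hypothetical offsets): a key matching only an
 * IPv4 source address may start at offset 0; once a 2-byte ETH_TYPE
 * extract is inserted ahead of it, the address moves to offset 2, so
 * the stored key/mask bytes must be shifted by that amount and the
 * entry re-added with the enlarged key size, which is what
 * dpaa2_flow_entry_update() below does for every affected flow.
 */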
2630 static int
2631 dpaa2_flow_entry_update(
2632 	struct dpaa2_dev_priv *priv, uint8_t tc_id)
2633 {
2634 	struct rte_flow *curr = LIST_FIRST(&priv->flows);
2635 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
2636 	int ret;
2637 	int qos_ipsrc_offset = -1, qos_ipdst_offset = -1;
2638 	int fs_ipsrc_offset = -1, fs_ipdst_offset = -1;
2639 	struct dpaa2_key_extract *qos_key_extract =
2640 		&priv->extract.qos_key_extract;
2641 	struct dpaa2_key_extract *tc_key_extract =
2642 		&priv->extract.tc_key_extract[tc_id];
2643 	char ipsrc_key[NH_FLD_IPV6_ADDR_SIZE];
2644 	char ipdst_key[NH_FLD_IPV6_ADDR_SIZE];
2645 	char ipsrc_mask[NH_FLD_IPV6_ADDR_SIZE];
2646 	char ipdst_mask[NH_FLD_IPV6_ADDR_SIZE];
2647 	int extend = -1, extend1, size;
2648 
2649 	while (curr) {
2650 		if (curr->ipaddr_rule.ipaddr_type ==
2651 			FLOW_NONE_IPADDR) {
2652 			curr = LIST_NEXT(curr, next);
2653 			continue;
2654 		}
2655 
2656 		if (curr->ipaddr_rule.ipaddr_type ==
2657 			FLOW_IPV4_ADDR) {
2658 			qos_ipsrc_offset =
2659 				qos_key_extract->key_info.ipv4_src_offset;
2660 			qos_ipdst_offset =
2661 				qos_key_extract->key_info.ipv4_dst_offset;
2662 			fs_ipsrc_offset =
2663 				tc_key_extract->key_info.ipv4_src_offset;
2664 			fs_ipdst_offset =
2665 				tc_key_extract->key_info.ipv4_dst_offset;
2666 			size = NH_FLD_IPV4_ADDR_SIZE;
2667 		} else {
2668 			qos_ipsrc_offset =
2669 				qos_key_extract->key_info.ipv6_src_offset;
2670 			qos_ipdst_offset =
2671 				qos_key_extract->key_info.ipv6_dst_offset;
2672 			fs_ipsrc_offset =
2673 				tc_key_extract->key_info.ipv6_src_offset;
2674 			fs_ipdst_offset =
2675 				tc_key_extract->key_info.ipv6_dst_offset;
2676 			size = NH_FLD_IPV6_ADDR_SIZE;
2677 		}
2678 
2679 		ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW,
2680 				priv->token, &curr->qos_rule);
2681 		if (ret) {
2682 			DPAA2_PMD_ERR("QoS entry remove failed.");
2683 			return -1;
2684 		}
2685 
2686 		extend = -1;
2687 
2688 		if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
2689 			RTE_ASSERT(qos_ipsrc_offset >=
2690 				curr->ipaddr_rule.qos_ipsrc_offset);
2691 			extend1 = qos_ipsrc_offset -
2692 				curr->ipaddr_rule.qos_ipsrc_offset;
2693 			if (extend >= 0)
2694 				RTE_ASSERT(extend == extend1);
2695 			else
2696 				extend = extend1;
2697 
2698 			memcpy(ipsrc_key,
2699 				(char *)(size_t)curr->qos_rule.key_iova +
2700 				curr->ipaddr_rule.qos_ipsrc_offset,
2701 				size);
2702 			memset((char *)(size_t)curr->qos_rule.key_iova +
2703 				curr->ipaddr_rule.qos_ipsrc_offset,
2704 				0, size);
2705 
2706 			memcpy(ipsrc_mask,
2707 				(char *)(size_t)curr->qos_rule.mask_iova +
2708 				curr->ipaddr_rule.qos_ipsrc_offset,
2709 				size);
2710 			memset((char *)(size_t)curr->qos_rule.mask_iova +
2711 				curr->ipaddr_rule.qos_ipsrc_offset,
2712 				0, size);
2713 
2714 			curr->ipaddr_rule.qos_ipsrc_offset = qos_ipsrc_offset;
2715 		}
2716 
2717 		if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
2718 			RTE_ASSERT(qos_ipdst_offset >=
2719 				curr->ipaddr_rule.qos_ipdst_offset);
2720 			extend1 = qos_ipdst_offset -
2721 				curr->ipaddr_rule.qos_ipdst_offset;
2722 			if (extend >= 0)
2723 				RTE_ASSERT(extend == extend1);
2724 			else
2725 				extend = extend1;
2726 
2727 			memcpy(ipdst_key,
2728 				(char *)(size_t)curr->qos_rule.key_iova +
2729 				curr->ipaddr_rule.qos_ipdst_offset,
2730 				size);
2731 			memset((char *)(size_t)curr->qos_rule.key_iova +
2732 				curr->ipaddr_rule.qos_ipdst_offset,
2733 				0, size);
2734 
2735 			memcpy(ipdst_mask,
2736 				(char *)(size_t)curr->qos_rule.mask_iova +
2737 				curr->ipaddr_rule.qos_ipdst_offset,
2738 				size);
2739 			memset((char *)(size_t)curr->qos_rule.mask_iova +
2740 				curr->ipaddr_rule.qos_ipdst_offset,
2741 				0, size);
2742 
2743 			curr->ipaddr_rule.qos_ipdst_offset = qos_ipdst_offset;
2744 		}
2745 
2746 		if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
2747 			memcpy((char *)(size_t)curr->qos_rule.key_iova +
2748 				curr->ipaddr_rule.qos_ipsrc_offset,
2749 				ipsrc_key,
2750 				size);
2751 			memcpy((char *)(size_t)curr->qos_rule.mask_iova +
2752 				curr->ipaddr_rule.qos_ipsrc_offset,
2753 				ipsrc_mask,
2754 				size);
2755 		}
2756 		if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
2757 			memcpy((char *)(size_t)curr->qos_rule.key_iova +
2758 				curr->ipaddr_rule.qos_ipdst_offset,
2759 				ipdst_key,
2760 				size);
2761 			memcpy((char *)(size_t)curr->qos_rule.mask_iova +
2762 				curr->ipaddr_rule.qos_ipdst_offset,
2763 				ipdst_mask,
2764 				size);
2765 		}
2766 
2767 		if (extend >= 0)
2768 			curr->qos_rule.key_size += extend;
2769 
2770 		ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
2771 				priv->token, &curr->qos_rule,
2772 				curr->tc_id, curr->qos_index,
2773 				0, 0);
2774 		if (ret) {
2775 			DPAA2_PMD_ERR("QoS entry update failed.");
2776 			return -1;
2777 		}
2778 
2779 		if (curr->action != RTE_FLOW_ACTION_TYPE_QUEUE) {
2780 			curr = LIST_NEXT(curr, next);
2781 			continue;
2782 		}
2783 
2784 		extend = -1;
2785 
2786 		ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW,
2787 				priv->token, curr->tc_id, &curr->fs_rule);
2788 		if (ret) {
2789 			DPAA2_PMD_ERR("FS entry remove failed.");
2790 			return -1;
2791 		}
2792 
2793 		if (curr->ipaddr_rule.fs_ipsrc_offset >= 0 &&
2794 			tc_id == curr->tc_id) {
2795 			RTE_ASSERT(fs_ipsrc_offset >=
2796 				curr->ipaddr_rule.fs_ipsrc_offset);
2797 			extend1 = fs_ipsrc_offset -
2798 				curr->ipaddr_rule.fs_ipsrc_offset;
2799 			if (extend >= 0)
2800 				RTE_ASSERT(extend == extend1);
2801 			else
2802 				extend = extend1;
2803 
2804 			memcpy(ipsrc_key,
2805 				(char *)(size_t)curr->fs_rule.key_iova +
2806 				curr->ipaddr_rule.fs_ipsrc_offset,
2807 				size);
2808 			memset((char *)(size_t)curr->fs_rule.key_iova +
2809 				curr->ipaddr_rule.fs_ipsrc_offset,
2810 				0, size);
2811 
2812 			memcpy(ipsrc_mask,
2813 				(char *)(size_t)curr->fs_rule.mask_iova +
2814 				curr->ipaddr_rule.fs_ipsrc_offset,
2815 				size);
2816 			memset((char *)(size_t)curr->fs_rule.mask_iova +
2817 				curr->ipaddr_rule.fs_ipsrc_offset,
2818 				0, size);
2819 
2820 			curr->ipaddr_rule.fs_ipsrc_offset = fs_ipsrc_offset;
2821 		}
2822 
2823 		if (curr->ipaddr_rule.fs_ipdst_offset >= 0 &&
2824 			tc_id == curr->tc_id) {
2825 			RTE_ASSERT(fs_ipdst_offset >=
2826 				curr->ipaddr_rule.fs_ipdst_offset);
2827 			extend1 = fs_ipdst_offset -
2828 				curr->ipaddr_rule.fs_ipdst_offset;
2829 			if (extend >= 0)
2830 				RTE_ASSERT(extend == extend1);
2831 			else
2832 				extend = extend1;
2833 
2834 			memcpy(ipdst_key,
2835 				(char *)(size_t)curr->fs_rule.key_iova +
2836 				curr->ipaddr_rule.fs_ipdst_offset,
2837 				size);
2838 			memset((char *)(size_t)curr->fs_rule.key_iova +
2839 				curr->ipaddr_rule.fs_ipdst_offset,
2840 				0, size);
2841 
2842 			memcpy(ipdst_mask,
2843 				(char *)(size_t)curr->fs_rule.mask_iova +
2844 				curr->ipaddr_rule.fs_ipdst_offset,
2845 				size);
2846 			memset((char *)(size_t)curr->fs_rule.mask_iova +
2847 				curr->ipaddr_rule.fs_ipdst_offset,
2848 				0, size);
2849 
2850 			curr->ipaddr_rule.fs_ipdst_offset = fs_ipdst_offset;
2851 		}
2852 
2853 		if (curr->ipaddr_rule.fs_ipsrc_offset >= 0) {
2854 			memcpy((char *)(size_t)curr->fs_rule.key_iova +
2855 				curr->ipaddr_rule.fs_ipsrc_offset,
2856 				ipsrc_key,
2857 				size);
2858 			memcpy((char *)(size_t)curr->fs_rule.mask_iova +
2859 				curr->ipaddr_rule.fs_ipsrc_offset,
2860 				ipsrc_mask,
2861 				size);
2862 		}
2863 		if (curr->ipaddr_rule.fs_ipdst_offset >= 0) {
2864 			memcpy((char *)(size_t)curr->fs_rule.key_iova +
2865 				curr->ipaddr_rule.fs_ipdst_offset,
2866 				ipdst_key,
2867 				size);
2868 			memcpy((char *)(size_t)curr->fs_rule.mask_iova +
2869 				curr->ipaddr_rule.fs_ipdst_offset,
2870 				ipdst_mask,
2871 				size);
2872 		}
2873 
2874 		if (extend >= 0)
2875 			curr->fs_rule.key_size += extend;
2876 
2877 		ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW,
2878 				priv->token, curr->tc_id, curr->fs_index,
2879 				&curr->fs_rule, &curr->action_cfg);
2880 		if (ret) {
2881 			DPAA2_PMD_ERR("FS entry update failed.");
2882 			return -1;
2883 		}
2884 
2885 		curr = LIST_NEXT(curr, next);
2886 	}
2887 
2888 	return 0;
2889 }
2890 
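/* Translate a complete rte_flow pattern/action list into DPNI
 * configuration: each pattern item programs key extracts and rule
 * data, the QUEUE action installs QoS and FS entries, and the RSS
 * action reconfigures the traffic-class distribution.  Existing
 * entries are then updated in case the key layout changed, and the
 * flow is appended to the per-port flow list.
 */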
2891 static int
2892 dpaa2_generic_flow_set(struct rte_flow *flow,
2893 		       struct rte_eth_dev *dev,
2894 		       const struct rte_flow_attr *attr,
2895 		       const struct rte_flow_item pattern[],
2896 		       const struct rte_flow_action actions[],
2897 		       struct rte_flow_error *error)
2898 {
2899 	const struct rte_flow_action_queue *dest_queue;
2900 	const struct rte_flow_action_rss *rss_conf;
2901 	uint16_t index;
2902 	int is_keycfg_configured = 0, end_of_list = 0;
2903 	int ret = 0, i = 0, j = 0;
2904 	struct dpni_attr nic_attr;
2905 	struct dpni_rx_tc_dist_cfg tc_cfg;
2906 	struct dpni_qos_tbl_cfg qos_cfg;
2907 	struct dpni_fs_action_cfg action;
2908 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2909 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
2910 	size_t param;
2911 	struct rte_flow *curr = LIST_FIRST(&priv->flows);
2912 
2913 	/* Parse pattern list to get the matching parameters */
2914 	while (!end_of_list) {
2915 		switch (pattern[i].type) {
2916 		case RTE_FLOW_ITEM_TYPE_ETH:
2917 			ret = dpaa2_configure_flow_eth(flow,
2918 					dev, attr, &pattern[i], actions, error,
2919 					&is_keycfg_configured);
2920 			if (ret) {
2921 				DPAA2_PMD_ERR("ETH flow configuration failed!");
2922 				return ret;
2923 			}
2924 			break;
2925 		case RTE_FLOW_ITEM_TYPE_VLAN:
2926 			ret = dpaa2_configure_flow_vlan(flow,
2927 					dev, attr, &pattern[i], actions, error,
2928 					&is_keycfg_configured);
2929 			if (ret) {
2930 				DPAA2_PMD_ERR("VLAN flow configuration failed!");
2931 				return ret;
2932 			}
2933 			break;
2934 		case RTE_FLOW_ITEM_TYPE_IPV4:
2935 		case RTE_FLOW_ITEM_TYPE_IPV6:
2936 			ret = dpaa2_configure_flow_generic_ip(flow,
2937 					dev, attr, &pattern[i], actions, error,
2938 					&is_keycfg_configured);
2939 			if (ret) {
2940 				DPAA2_PMD_ERR("IP flow configuration failed!");
2941 				return ret;
2942 			}
2943 			break;
2944 		case RTE_FLOW_ITEM_TYPE_ICMP:
2945 			ret = dpaa2_configure_flow_icmp(flow,
2946 					dev, attr, &pattern[i], actions, error,
2947 					&is_keycfg_configured);
2948 			if (ret) {
2949 				DPAA2_PMD_ERR("ICMP flow configuration failed!");
2950 				return ret;
2951 			}
2952 			break;
2953 		case RTE_FLOW_ITEM_TYPE_UDP:
2954 			ret = dpaa2_configure_flow_udp(flow,
2955 					dev, attr, &pattern[i], actions, error,
2956 					&is_keycfg_configured);
2957 			if (ret) {
2958 				DPAA2_PMD_ERR("UDP flow configuration failed!");
2959 				return ret;
2960 			}
2961 			break;
2962 		case RTE_FLOW_ITEM_TYPE_TCP:
2963 			ret = dpaa2_configure_flow_tcp(flow,
2964 					dev, attr, &pattern[i], actions, error,
2965 					&is_keycfg_configured);
2966 			if (ret) {
2967 				DPAA2_PMD_ERR("TCP flow configuration failed!");
2968 				return ret;
2969 			}
2970 			break;
2971 		case RTE_FLOW_ITEM_TYPE_SCTP:
2972 			ret = dpaa2_configure_flow_sctp(flow,
2973 					dev, attr, &pattern[i], actions, error,
2974 					&is_keycfg_configured);
2975 			if (ret) {
2976 				DPAA2_PMD_ERR("SCTP flow configuration failed!");
2977 				return ret;
2978 			}
2979 			break;
2980 		case RTE_FLOW_ITEM_TYPE_GRE:
2981 			ret = dpaa2_configure_flow_gre(flow,
2982 					dev, attr, &pattern[i], actions, error,
2983 					&is_keycfg_configured);
2984 			if (ret) {
2985 				DPAA2_PMD_ERR("GRE flow configuration failed!");
2986 				return ret;
2987 			}
2988 			break;
2989 		case RTE_FLOW_ITEM_TYPE_END:
2990 			end_of_list = 1;
2991 			break; /*End of List*/
2992 		default:
2993 			DPAA2_PMD_ERR("Invalid pattern item type");
2994 			ret = -ENOTSUP;
2995 			break;
2996 		}
2997 		i++;
2998 	}
2999 
3000 	/* Parse the actions to be applied on matching traffic */
3001 	end_of_list = 0;
3002 	while (!end_of_list) {
3003 		switch (actions[j].type) {
3004 		case RTE_FLOW_ACTION_TYPE_QUEUE:
3005 			dest_queue = (const struct rte_flow_action_queue *)(actions[j].conf);
3006 			flow->flow_id = dest_queue->index;
3007 			flow->action = RTE_FLOW_ACTION_TYPE_QUEUE;
3008 			memset(&action, 0, sizeof(struct dpni_fs_action_cfg));
3009 			action.flow_id = flow->flow_id;
3010 			if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
3011 				if (dpkg_prepare_key_cfg(&priv->extract.qos_key_extract.dpkg,
3012 					(uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
3013 					DPAA2_PMD_ERR(
3014 					"Unable to prepare extract parameters");
3015 					return -1;
3016 				}
3017 
3018 				memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
3019 				qos_cfg.discard_on_miss = true;
3020 				qos_cfg.keep_entries = true;
3021 				qos_cfg.key_cfg_iova = (size_t)priv->extract.qos_extract_param;
3022 				ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
3023 						priv->token, &qos_cfg);
3024 				if (ret < 0) {
3025 					DPAA2_PMD_ERR(
3026 					"Distribution cannot be configured.(%d)"
3027 					, ret);
3028 					return -1;
3029 				}
3030 			}
3031 			if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
3032 				if (dpkg_prepare_key_cfg(
3033 				&priv->extract.tc_key_extract[flow->tc_id].dpkg,
3034 				(uint8_t *)(size_t)priv->extract
3035 				.tc_extract_param[flow->tc_id]) < 0) {
3036 					DPAA2_PMD_ERR(
3037 					"Unable to prepare extract parameters");
3038 					return -1;
3039 				}
3040 
3041 				memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
3042 				tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc;
3043 				tc_cfg.dist_mode = DPNI_DIST_MODE_FS;
3044 				tc_cfg.key_cfg_iova =
3045 					(uint64_t)priv->extract.tc_extract_param[flow->tc_id];
3046 				tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;
3047 				tc_cfg.fs_cfg.keep_entries = true;
3048 				ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
3049 							 priv->token,
3050 							 flow->tc_id, &tc_cfg);
3051 				if (ret < 0) {
3052 					DPAA2_PMD_ERR(
3053 					"Distribution cannot be configured.(%d)"
3054 					, ret);
3055 					return -1;
3056 				}
3057 			}
3058 			/* Configure QoS table first */
3059 			memset(&nic_attr, 0, sizeof(struct dpni_attr));
3060 			ret = dpni_get_attributes(dpni, CMD_PRI_LOW,
3061 						 priv->token, &nic_attr);
3062 			if (ret < 0) {
3063 				DPAA2_PMD_ERR(
3064 				"Failure to get attribute. dpni@%p err code(%d)\n",
3065 				dpni, ret);
3066 				return ret;
3067 			}
3068 
3069 			action.flow_id = action.flow_id % nic_attr.num_rx_tcs;
3070 
3071 			if (!priv->qos_index) {
3072 				priv->qos_index = rte_zmalloc(0,
3073 						nic_attr.qos_entries, 64);
3074 			}
3075 			for (index = 0; index < nic_attr.qos_entries; index++) {
3076 				if (!priv->qos_index[index]) {
3077 					priv->qos_index[index] = 1;
3078 					break;
3079 				}
3080 			}
3081 			if (index >= nic_attr.qos_entries) {
3082 				DPAA2_PMD_ERR("QoS table with %d entries full",
3083 					nic_attr.qos_entries);
3084 				return -1;
3085 			}
3086 			flow->qos_rule.key_size = priv->extract
3087 				.qos_key_extract.key_info.key_total_size;
3088 			if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR) {
3089 				if (flow->ipaddr_rule.qos_ipdst_offset >=
3090 					flow->ipaddr_rule.qos_ipsrc_offset) {
3091 					flow->qos_rule.key_size =
3092 						flow->ipaddr_rule.qos_ipdst_offset +
3093 						NH_FLD_IPV4_ADDR_SIZE;
3094 				} else {
3095 					flow->qos_rule.key_size =
3096 						flow->ipaddr_rule.qos_ipsrc_offset +
3097 						NH_FLD_IPV4_ADDR_SIZE;
3098 				}
3099 			} else if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV6_ADDR) {
3100 				if (flow->ipaddr_rule.qos_ipdst_offset >=
3101 					flow->ipaddr_rule.qos_ipsrc_offset) {
3102 					flow->qos_rule.key_size =
3103 						flow->ipaddr_rule.qos_ipdst_offset +
3104 						NH_FLD_IPV6_ADDR_SIZE;
3105 				} else {
3106 					flow->qos_rule.key_size =
3107 						flow->ipaddr_rule.qos_ipsrc_offset +
3108 						NH_FLD_IPV6_ADDR_SIZE;
3109 				}
3110 			}
3111 			ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
3112 						priv->token, &flow->qos_rule,
3113 						flow->tc_id, index,
3114 						0, 0);
3115 			if (ret < 0) {
3116 				DPAA2_PMD_ERR(
3117 				"Error in addnig entry to QoS table(%d)", ret);
3118 				priv->qos_index[index] = 0;
3119 				return ret;
3120 			}
3121 			flow->qos_index = index;
3122 
3123 			/* Then Configure FS table */
3124 			if (!priv->fs_index) {
3125 				priv->fs_index = rte_zmalloc(0,
3126 								nic_attr.fs_entries, 64);
3127 			}
3128 			for (index = 0; index < nic_attr.fs_entries; index++) {
3129 				if (!priv->fs_index[index]) {
3130 					priv->fs_index[index] = 1;
3131 					break;
3132 				}
3133 			}
3134 			if (index >= nic_attr.fs_entries) {
3135 				DPAA2_PMD_ERR("FS table with %d entries full",
3136 					nic_attr.fs_entries);
3137 				return -1;
3138 			}
3139 			flow->fs_rule.key_size = priv->extract
3140 					.tc_key_extract[attr->group].key_info.key_total_size;
3141 			if (flow->ipaddr_rule.ipaddr_type ==
3142 				FLOW_IPV4_ADDR) {
3143 				if (flow->ipaddr_rule.fs_ipdst_offset >=
3144 					flow->ipaddr_rule.fs_ipsrc_offset) {
3145 					flow->fs_rule.key_size =
3146 						flow->ipaddr_rule.fs_ipdst_offset +
3147 						NH_FLD_IPV4_ADDR_SIZE;
3148 				} else {
3149 					flow->fs_rule.key_size =
3150 						flow->ipaddr_rule.fs_ipsrc_offset +
3151 						NH_FLD_IPV4_ADDR_SIZE;
3152 				}
3153 			} else if (flow->ipaddr_rule.ipaddr_type ==
3154 				FLOW_IPV6_ADDR) {
3155 				if (flow->ipaddr_rule.fs_ipdst_offset >=
3156 					flow->ipaddr_rule.fs_ipsrc_offset) {
3157 					flow->fs_rule.key_size =
3158 						flow->ipaddr_rule.fs_ipdst_offset +
3159 						NH_FLD_IPV6_ADDR_SIZE;
3160 				} else {
3161 					flow->fs_rule.key_size =
3162 						flow->ipaddr_rule.fs_ipsrc_offset +
3163 						NH_FLD_IPV6_ADDR_SIZE;
3164 				}
3165 			}
3166 			ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token,
3167 						flow->tc_id, index,
3168 						&flow->fs_rule, &action);
3169 			if (ret < 0) {
3170 				DPAA2_PMD_ERR(
3171 				"Error in adding entry to FS table(%d)", ret);
3172 				priv->fs_index[index] = 0;
3173 				return ret;
3174 			}
3175 			flow->fs_index = index;
3176 			memcpy(&flow->action_cfg, &action,
3177 				sizeof(struct dpni_fs_action_cfg));
3178 			break;
3179 		case RTE_FLOW_ACTION_TYPE_RSS:
3180 			ret = dpni_get_attributes(dpni, CMD_PRI_LOW,
3181 						 priv->token, &nic_attr);
3182 			if (ret < 0) {
3183 				DPAA2_PMD_ERR(
3184 				"Failure to get attribute. dpni@%p err code(%d)\n",
3185 				dpni, ret);
3186 				return ret;
3187 			}
3188 			rss_conf = (const struct rte_flow_action_rss *)(actions[j].conf);
3189 			for (i = 0; i < (int)rss_conf->queue_num; i++) {
3190 				if (rss_conf->queue[i] < (attr->group * nic_attr.num_queues) ||
3191 				    rss_conf->queue[i] >= ((attr->group + 1) * nic_attr.num_queues)) {
3192 					DPAA2_PMD_ERR(
3193 					"Queue/Group combination are not supported\n");
3194 					return -ENOTSUP;
3195 				}
3196 			}
3197 
3198 			flow->action = RTE_FLOW_ACTION_TYPE_RSS;
3199 			ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types,
3200 					&priv->extract.tc_key_extract[flow->tc_id].dpkg);
3201 			if (ret < 0) {
3202 				DPAA2_PMD_ERR(
3203 				"unable to set flow distribution.please check queue config\n");
3204 				return ret;
3205 			}
3206 
3207 			/* Allocate DMA'ble memory to write the rules */
3208 			param = (size_t)rte_malloc(NULL, 256, 64);
3209 			if (!param) {
3210 				DPAA2_PMD_ERR("Memory allocation failure\n");
3211 				return -1;
3212 			}
3213 
3214 			if (dpkg_prepare_key_cfg(
3215 				&priv->extract.tc_key_extract[flow->tc_id].dpkg,
3216 				(uint8_t *)param) < 0) {
3217 				DPAA2_PMD_ERR(
3218 				"Unable to prepare extract parameters");
3219 				rte_free((void *)param);
3220 				return -1;
3221 			}
3222 
3223 			memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
3224 			tc_cfg.dist_size = rss_conf->queue_num;
3225 			tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
3226 			tc_cfg.key_cfg_iova = (size_t)param;
3227 			tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;
3228 
3229 			ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
3230 						 priv->token, flow->tc_id,
3231 						 &tc_cfg);
3232 			if (ret < 0) {
3233 				DPAA2_PMD_ERR(
3234 				"Distribution cannot be configured: %d\n", ret);
3235 				rte_free((void *)param);
3236 				return -1;
3237 			}
3238 
3239 			rte_free((void *)param);
3240 			if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
3241 				if (dpkg_prepare_key_cfg(
3242 					&priv->extract.qos_key_extract.dpkg,
3243 					(uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
3244 					DPAA2_PMD_ERR(
3245 					"Unable to prepare extract parameters");
3246 					return -1;
3247 				}
3248 				memset(&qos_cfg, 0,
3249 					sizeof(struct dpni_qos_tbl_cfg));
3250 				qos_cfg.discard_on_miss = true;
3251 				qos_cfg.keep_entries = true;
3252 				qos_cfg.key_cfg_iova =
3253 					(size_t)priv->extract.qos_extract_param;
3254 				ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
3255 							 priv->token, &qos_cfg);
3256 				if (ret < 0) {
3257 					DPAA2_PMD_ERR(
3258 					"Distribution can't be configured %d\n",
3259 					ret);
3260 					return -1;
3261 				}
3262 			}
3263 
3264 			/* Add Rule into QoS table */
3265 			if (!priv->qos_index) {
3266 				priv->qos_index = rte_zmalloc(0,
3267 						nic_attr.qos_entries, 64);
3268 			}
3269 			for (index = 0; index < nic_attr.qos_entries; index++) {
3270 				if (!priv->qos_index[index]) {
3271 					priv->qos_index[index] = 1;
3272 					break;
3273 				}
3274 			}
3275 			if (index >= nic_attr.qos_entries) {
3276 				DPAA2_PMD_ERR("QoS table with %d entries full",
3277 					nic_attr.qos_entries);
3278 				return -1;
3279 			}
3280 			flow->qos_rule.key_size =
3281 			  priv->extract.qos_key_extract.key_info.key_total_size;
3282 			ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token,
3283 						&flow->qos_rule, flow->tc_id,
3284 						index, 0, 0);
3285 			if (ret < 0) {
3286 				DPAA2_PMD_ERR(
3287 				"Error in entry addition in QoS table(%d)",
3288 				ret);
3289 				priv->qos_index[index] = 0;
3290 				return ret;
3291 			}
3292 			flow->qos_index = index;
3293 			break;
3294 		case RTE_FLOW_ACTION_TYPE_END:
3295 			end_of_list = 1;
3296 			break;
3297 		default:
3298 			DPAA2_PMD_ERR("Invalid action type");
3299 			ret = -ENOTSUP;
3300 			break;
3301 		}
3302 		j++;
3303 	}
3304 
3305 	if (!ret) {
3306 		ret = dpaa2_flow_entry_update(priv, flow->tc_id);
3307 		if (ret) {
3308 			DPAA2_PMD_ERR("Flow entry update failed.");
3309 
3310 			return -1;
3311 		}
3312 		/* Insert the new rule at the tail of the flow list. */
3313 		if (!curr) {
3314 			LIST_INSERT_HEAD(&priv->flows, flow, next);
3315 		} else {
3316 			while (LIST_NEXT(curr, next))
3317 				curr = LIST_NEXT(curr, next);
3318 			LIST_INSERT_AFTER(curr, flow, next);
3319 		}
3320 	}
3321 	return ret;
3322 }
3323 
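/* Validate flow attributes against the DPNI capabilities: the group
 * must be a valid Rx traffic class, the priority must fit within the
 * FS table, and only ingress rules are supported.
 */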
3324 static inline int
3325 dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr,
3326 		      const struct rte_flow_attr *attr)
3327 {
3328 	int ret = 0;
3329 
3330 	if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) {
3331 		DPAA2_PMD_ERR("Priority group is out of range\n");
3332 		ret = -ENOTSUP;
3333 	}
3334 	if (unlikely(attr->priority >= dpni_attr->fs_entries)) {
3335 		DPAA2_PMD_ERR("Priority within the group is out of range\n");
3336 		ret = -ENOTSUP;
3337 	}
3338 	if (unlikely(attr->egress)) {
3339 		DPAA2_PMD_ERR(
3340 			"Flow configuration is not supported on egress side\n");
3341 		ret = -ENOTSUP;
3342 	}
3343 	if (unlikely(!attr->ingress)) {
3344 		DPAA2_PMD_ERR("Ingress flag must be configured\n");
3345 		ret = -EINVAL;
3346 	}
3347 	return ret;
3348 }
3349 
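/* Ensure every pattern item is of a supported type and carries a spec. */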
3350 static inline int
3351 dpaa2_dev_verify_patterns(const struct rte_flow_item pattern[])
3352 {
3353 	unsigned int i, j, is_found = 0;
3354 	int ret = 0;
3355 
3356 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3357 		for (i = 0; i < RTE_DIM(dpaa2_supported_pattern_type); i++) {
3358 			if (dpaa2_supported_pattern_type[i]
3359 					== pattern[j].type) {
3360 				is_found = 1;
3361 				break;
3362 			}
3363 		}
3364 		if (!is_found) {
3365 			ret = -ENOTSUP;
3366 			break;
3367 		}
3368 	}
3369 	/* Also verify that each pattern item provides a spec */
3370 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3371 		if (!pattern[j].spec) {
3372 			ret = -EINVAL;
3373 			break;
3374 		}
3375 	}
3376 
3377 	return ret;
3378 }
3379 
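/* Ensure every action is of a supported type and, except for DROP,
 * carries a configuration.
 */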
3380 static inline int
3381 dpaa2_dev_verify_actions(const struct rte_flow_action actions[])
3382 {
3383 	unsigned int i, j, is_found = 0;
3384 	int ret = 0;
3385 
3386 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3387 		for (i = 0; i < RTE_DIM(dpaa2_supported_action_type); i++) {
3388 			if (dpaa2_supported_action_type[i] == actions[j].type) {
3389 				is_found = 1;
3390 				break;
3391 			}
3392 		}
3393 		if (!is_found) {
3394 			ret = -ENOTSUP;
3395 			break;
3396 		}
3397 	}
3398 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3399 		if (actions[j].type != RTE_FLOW_ACTION_TYPE_DROP &&
3400 				!actions[j].conf)
3401 			ret = -EINVAL;
3402 	}
3403 	return ret;
3404 }
3405 
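/* Validate attributes, pattern and actions against the DPNI
 * capabilities without touching any hardware table.
 */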
3406 static
3407 int dpaa2_flow_validate(struct rte_eth_dev *dev,
3408 			const struct rte_flow_attr *flow_attr,
3409 			const struct rte_flow_item pattern[],
3410 			const struct rte_flow_action actions[],
3411 			struct rte_flow_error *error)
3412 {
3413 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3414 	struct dpni_attr dpni_attr;
3415 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3416 	uint16_t token = priv->token;
3417 	int ret = 0;
3418 
3419 	memset(&dpni_attr, 0, sizeof(struct dpni_attr));
3420 	ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr);
3421 	if (ret < 0) {
3422 		DPAA2_PMD_ERR(
3423 			"Failure to get dpni@%p attribute, err code  %d\n",
3424 			dpni, ret);
3425 		rte_flow_error_set(error, EPERM,
3426 			   RTE_FLOW_ERROR_TYPE_ATTR,
3427 			   flow_attr, "invalid");
3428 		return ret;
3429 	}
3430 
3431 	/* Verify input attributes */
3432 	ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr);
3433 	if (ret < 0) {
3434 		DPAA2_PMD_ERR(
3435 			"Invalid attributes are given\n");
3436 		rte_flow_error_set(error, EPERM,
3437 			   RTE_FLOW_ERROR_TYPE_ATTR,
3438 			   flow_attr, "invalid");
3439 		goto not_valid_params;
3440 	}
3441 	/* Verify input pattern list */
3442 	ret = dpaa2_dev_verify_patterns(pattern);
3443 	if (ret < 0) {
3444 		DPAA2_PMD_ERR(
3445 			"Invalid pattern list is given\n");
3446 		rte_flow_error_set(error, EPERM,
3447 			   RTE_FLOW_ERROR_TYPE_ITEM,
3448 			   pattern, "invalid");
3449 		goto not_valid_params;
3450 	}
3451 	/* Verify input action list */
3452 	ret = dpaa2_dev_verify_actions(actions);
3453 	if (ret < 0) {
3454 		DPAA2_PMD_ERR(
3455 			"Invalid action list is given\n");
3456 		rte_flow_error_set(error, EPERM,
3457 			   RTE_FLOW_ERROR_TYPE_ACTION,
3458 			   actions, "invalid");
3459 		goto not_valid_params;
3460 	}
3461 not_valid_params:
3462 	return ret;
3463 }
3464 
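/* Allocate the flow object plus DMA-able key/mask buffers for its QoS
 * and FS rules, then program the rule via dpaa2_generic_flow_set().
 */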
3465 static
3466 struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev,
3467 				   const struct rte_flow_attr *attr,
3468 				   const struct rte_flow_item pattern[],
3469 				   const struct rte_flow_action actions[],
3470 				   struct rte_flow_error *error)
3471 {
3472 	struct rte_flow *flow = NULL;
3473 	size_t key_iova = 0, mask_iova = 0;
3474 	int ret;
3475 
3476 	flow = rte_zmalloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE);
3477 	if (!flow) {
3478 		DPAA2_PMD_ERR("Failure to allocate memory for flow");
3479 		goto mem_failure;
3480 	}
3481 	/* Allocate DMA'ble memory to write the rules */
3482 	key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3483 	if (!key_iova) {
3484 		DPAA2_PMD_ERR(
3485 			"Memory allocation failure for rule configuration\n");
3486 		goto mem_failure;
3487 	}
3488 	mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3489 	if (!mask_iova) {
3490 		DPAA2_PMD_ERR(
3491 			"Memory allocation failure for rule configuration\n");
3492 		goto mem_failure;
3493 	}
3494 
3495 	flow->qos_rule.key_iova = key_iova;
3496 	flow->qos_rule.mask_iova = mask_iova;
3497 
3498 	/* Allocate DMA'ble memory to write the rules */
3499 	key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3500 	if (!key_iova) {
3501 		DPAA2_PMD_ERR(
3502 			"Memory allocation failure for rule configuration\n");
3503 		goto mem_failure;
3504 	}
3505 	mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3506 	if (!mask_iova) {
3507 		DPAA2_PMD_ERR(
3508 			"Memory allocation failure for rule configuration\n");
3509 		goto mem_failure;
3510 	}
3511 
3512 	flow->fs_rule.key_iova = key_iova;
3513 	flow->fs_rule.mask_iova = mask_iova;
3514 
3515 	flow->ipaddr_rule.ipaddr_type = FLOW_NONE_IPADDR;
3516 	flow->ipaddr_rule.qos_ipsrc_offset =
3517 		IP_ADDRESS_OFFSET_INVALID;
3518 	flow->ipaddr_rule.qos_ipdst_offset =
3519 		IP_ADDRESS_OFFSET_INVALID;
3520 	flow->ipaddr_rule.fs_ipsrc_offset =
3521 		IP_ADDRESS_OFFSET_INVALID;
3522 	flow->ipaddr_rule.fs_ipdst_offset =
3523 		IP_ADDRESS_OFFSET_INVALID;
3524 
3525 	switch (dpaa2_filter_type) {
3526 	case RTE_ETH_FILTER_GENERIC:
3527 		ret = dpaa2_generic_flow_set(flow, dev, attr, pattern,
3528 					     actions, error);
3529 		if (ret < 0) {
3530 			if (error && error->type > RTE_FLOW_ERROR_TYPE_ACTION)
3531 				rte_flow_error_set(error, EPERM,
3532 						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3533 						attr, "unknown");
3534 			DPAA2_PMD_ERR(
3535 			"Failure to create flow, return code (%d)", ret);
3536 			goto creation_error;
3537 		}
3538 		break;
3539 	default:
3540 		DPAA2_PMD_ERR("Filter type (%d) not supported",
3541 		dpaa2_filter_type);
3542 		break;
3543 	}
3544 
3545 	return flow;
3546 mem_failure:
3547 	rte_flow_error_set(error, EPERM,
3548 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3549 			   NULL, "memory alloc");
3550 creation_error:
3551 	rte_free((void *)flow);
3552 	rte_free((void *)key_iova);
3553 	rte_free((void *)mask_iova);
3554 
3555 	return NULL;
3556 }
3557 
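/* Remove the flow's QoS entry (and, for QUEUE actions, its FS entry)
 * from hardware, release the table indexes and free the flow.
 */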
3558 static
3559 int dpaa2_flow_destroy(struct rte_eth_dev *dev,
3560 		       struct rte_flow *flow,
3561 		       struct rte_flow_error *error)
3562 {
3563 	int ret = 0;
3564 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3565 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3566 
3567 	switch (flow->action) {
3568 	case RTE_FLOW_ACTION_TYPE_QUEUE:
3569 		/* Remove entry from QoS table first */
3570 		ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
3571 					   &flow->qos_rule);
3572 		if (ret < 0) {
3573 			DPAA2_PMD_ERR(
3574 				"Error in adding entry to QoS table(%d)", ret);
3575 			goto error;
3576 		}
3577 		priv->qos_index[flow->qos_index] = 0;
3578 
3579 		/* Then remove entry from FS table */
3580 		ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token,
3581 					   flow->tc_id, &flow->fs_rule);
3582 		if (ret < 0) {
3583 			DPAA2_PMD_ERR(
3584 				"Error in entry addition in FS table(%d)", ret);
3585 			goto error;
3586 		}
3587 		priv->fs_index[flow->fs_index] = 0;
3588 		break;
3589 	case RTE_FLOW_ACTION_TYPE_RSS:
3590 		ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
3591 					   &flow->qos_rule);
3592 		if (ret < 0) {
3593 			DPAA2_PMD_ERR(
3594 			"Error in entry addition in QoS table(%d)", ret);
3595 			goto error;
3596 		}
3597 		priv->qos_index[flow->qos_index] = 0;
3598 		break;
3599 	default:
3600 		DPAA2_PMD_ERR(
3601 		"Action type (%d) is not supported", flow->action);
3602 		ret = -ENOTSUP;
3603 		break;
3604 	}
3605 
3606 	LIST_REMOVE(flow, next);
3607 	rte_free((void *)(size_t)flow->qos_rule.key_iova);
3608 	rte_free((void *)(size_t)flow->qos_rule.mask_iova);
3609 	rte_free((void *)(size_t)flow->fs_rule.key_iova);
3610 	rte_free((void *)(size_t)flow->fs_rule.mask_iova);
3611 	/* Now free the flow */
3612 	rte_free(flow);
3613 
3614 error:
3615 	if (ret)
3616 		rte_flow_error_set(error, EPERM,
3617 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3618 				   NULL, "unknown");
3619 	return ret;
3620 }
3621 
3622 /**
3623  * Destroy user-configured flow rules.
3624  *
3625  * This function skips internal flow rules.
3626  *
3627  * @see rte_flow_flush()
3628  * @see rte_flow_ops
3629  */
3630 static int
3631 dpaa2_flow_flush(struct rte_eth_dev *dev,
3632 		struct rte_flow_error *error)
3633 {
3634 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3635 	struct rte_flow *flow = LIST_FIRST(&priv->flows);
3636 
3637 	while (flow) {
3638 		struct rte_flow *next = LIST_NEXT(flow, next);
3639 
3640 		dpaa2_flow_destroy(dev, flow, error);
3641 		flow = next;
3642 	}
3643 	return 0;
3644 }
3645 
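/* Flow query is not implemented for DPAA2; this handler is a stub. */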
3646 static int
3647 dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused,
3648 		struct rte_flow *flow __rte_unused,
3649 		const struct rte_flow_action *actions __rte_unused,
3650 		void *data __rte_unused,
3651 		struct rte_flow_error *error __rte_unused)
3652 {
3653 	return 0;
3654 }
3655 
3656 /**
3657  * Clean up all flow rules.
3658  *
3659  * Unlike dpaa2_flow_flush(), this function takes care of all remaining flow
3660  * rules regardless of whether they are internal or user-configured.
3661  *
3662  * @param priv
3663  *   Pointer to private structure.
3664  */
3665 void
3666 dpaa2_flow_clean(struct rte_eth_dev *dev)
3667 {
3668 	struct rte_flow *flow;
3669 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3670 
3671 	while ((flow = LIST_FIRST(&priv->flows)))
3672 		dpaa2_flow_destroy(dev, flow, NULL);
3673 }
3674 
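/* Illustrative application-level usage (sketch only, not part of the
 * driver): steer IPv4/UDP traffic with destination port 4789 to Rx
 * queue 1 through the generic rte_flow API, which ends up in
 * dpaa2_flow_create() below.  port_id and the queue index are
 * placeholders for a configured DPAA2 port.
 *
 *	struct rte_flow_attr attr = {
 *		.group = 0, .priority = 0, .ingress = 1,
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.dst_port = RTE_BE16(4789),
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr.dst_port = RTE_BE16(0xffff),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &err);
 */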
3675 const struct rte_flow_ops dpaa2_flow_ops = {
3676 	.create	= dpaa2_flow_create,
3677 	.validate = dpaa2_flow_validate,
3678 	.destroy = dpaa2_flow_destroy,
3679 	.flush	= dpaa2_flow_flush,
3680 	.query	= dpaa2_flow_query,
3681 };
3682