1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2022 NXP
3  */
4 
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 
13 #include <rte_ethdev.h>
14 #include <rte_log.h>
15 #include <rte_malloc.h>
16 #include <rte_flow_driver.h>
17 #include <rte_tailq.h>
18 
19 #include <fsl_dpni.h>
20 #include <fsl_dpkg.h>
21 
22 #include <dpaa2_ethdev.h>
23 #include <dpaa2_pmd_logs.h>
24 
25 static char *dpaa2_flow_control_log;
26 static uint16_t dpaa2_flow_miss_flow_id; /* Default miss flow id is 0. */
27 
28 enum dpaa2_flow_entry_size {
29 	DPAA2_FLOW_ENTRY_MIN_SIZE = (DPNI_MAX_KEY_SIZE / 2),
30 	DPAA2_FLOW_ENTRY_MAX_SIZE = DPNI_MAX_KEY_SIZE
31 };
32 
33 enum dpaa2_flow_dist_type {
34 	DPAA2_FLOW_QOS_TYPE = 1 << 0,
35 	DPAA2_FLOW_FS_TYPE = 1 << 1
36 };
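/* dist_type is used as a bit-mask by the rule-data and insert-hole
 * helpers below (dist_type & DPAA2_FLOW_QOS_TYPE / DPAA2_FLOW_FS_TYPE),
 * so one call can update both the QoS and the FS rule, e.g. by passing
 * (DPAA2_FLOW_QOS_TYPE | DPAA2_FLOW_FS_TYPE).
 */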
37 
38 #define DPAA2_FLOW_RAW_OFFSET_FIELD_SHIFT	16
39 #define DPAA2_FLOW_MAX_KEY_SIZE			16
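/* For raw (payload) extracts, the lookup key field packs the byte
 * offset and the extract size into one 32-bit value:
 *   field = (offset << DPAA2_FLOW_RAW_OFFSET_FIELD_SHIFT) | size
 * e.g. offset 64, size 16 -> 0x00400010.
 */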
40 
41 #define VXLAN_HF_VNI 0x08
42 
43 struct dpaa2_dev_flow {
44 	LIST_ENTRY(dpaa2_dev_flow) next;
45 	struct dpni_rule_cfg qos_rule;
46 	uint8_t *qos_key_addr;
47 	uint8_t *qos_mask_addr;
48 	uint16_t qos_rule_size;
49 	struct dpni_rule_cfg fs_rule;
50 	uint8_t qos_real_key_size;
51 	uint8_t fs_real_key_size;
52 	uint8_t *fs_key_addr;
53 	uint8_t *fs_mask_addr;
54 	uint16_t fs_rule_size;
55 	uint8_t tc_id; /**< Traffic Class ID. */
56 	uint8_t tc_index; /**< Index within this Traffic Class. */
57 	enum rte_flow_action_type action_type;
58 	struct dpni_fs_action_cfg fs_action_cfg;
59 };
60 
61 struct rte_dpaa2_flow_item {
62 	struct rte_flow_item generic_item;
63 	int in_tunnel;
64 };
65 
66 static const
67 enum rte_flow_item_type dpaa2_supported_pattern_type[] = {
68 	RTE_FLOW_ITEM_TYPE_END,
69 	RTE_FLOW_ITEM_TYPE_ETH,
70 	RTE_FLOW_ITEM_TYPE_VLAN,
71 	RTE_FLOW_ITEM_TYPE_IPV4,
72 	RTE_FLOW_ITEM_TYPE_IPV6,
73 	RTE_FLOW_ITEM_TYPE_ICMP,
74 	RTE_FLOW_ITEM_TYPE_UDP,
75 	RTE_FLOW_ITEM_TYPE_TCP,
76 	RTE_FLOW_ITEM_TYPE_SCTP,
77 	RTE_FLOW_ITEM_TYPE_GRE,
78 };
79 
80 static const
81 enum rte_flow_action_type dpaa2_supported_action_type[] = {
82 	RTE_FLOW_ACTION_TYPE_END,
83 	RTE_FLOW_ACTION_TYPE_QUEUE,
84 	RTE_FLOW_ACTION_TYPE_PORT_ID,
85 	RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
86 	RTE_FLOW_ACTION_TYPE_RSS
87 };
88 
89 #ifndef __cplusplus
90 static const struct rte_flow_item_eth dpaa2_flow_item_eth_mask = {
91 	.hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
92 	.hdr.src_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
93 	.hdr.ether_type = RTE_BE16(0xffff),
94 };
95 
96 static const struct rte_flow_item_vlan dpaa2_flow_item_vlan_mask = {
97 	.hdr.vlan_tci = RTE_BE16(0xffff),
98 };
99 
100 static const struct rte_flow_item_ipv4 dpaa2_flow_item_ipv4_mask = {
101 	.hdr.src_addr = RTE_BE32(0xffffffff),
102 	.hdr.dst_addr = RTE_BE32(0xffffffff),
103 	.hdr.next_proto_id = 0xff,
104 };
105 
106 static const struct rte_flow_item_ipv6 dpaa2_flow_item_ipv6_mask = {
107 	.hdr = {
108 		.src_addr = RTE_IPV6_MASK_FULL,
109 		.dst_addr = RTE_IPV6_MASK_FULL,
110 		.proto = 0xff
111 	},
112 };
113 
114 static const struct rte_flow_item_icmp dpaa2_flow_item_icmp_mask = {
115 	.hdr.icmp_type = 0xff,
116 	.hdr.icmp_code = 0xff,
117 };
118 
119 static const struct rte_flow_item_udp dpaa2_flow_item_udp_mask = {
120 	.hdr = {
121 		.src_port = RTE_BE16(0xffff),
122 		.dst_port = RTE_BE16(0xffff),
123 	},
124 };
125 
126 static const struct rte_flow_item_tcp dpaa2_flow_item_tcp_mask = {
127 	.hdr = {
128 		.src_port = RTE_BE16(0xffff),
129 		.dst_port = RTE_BE16(0xffff),
130 	},
131 };
132 
133 static const struct rte_flow_item_sctp dpaa2_flow_item_sctp_mask = {
134 	.hdr = {
135 		.src_port = RTE_BE16(0xffff),
136 		.dst_port = RTE_BE16(0xffff),
137 	},
138 };
139 
140 static const struct rte_flow_item_gre dpaa2_flow_item_gre_mask = {
141 	.protocol = RTE_BE16(0xffff),
142 };
143 
144 static const struct rte_flow_item_vxlan dpaa2_flow_item_vxlan_mask = {
145 	.flags = 0xff,
146 	.vni = "\xff\xff\xff",
147 };
148 
149 static const struct rte_flow_item_ecpri dpaa2_flow_item_ecpri_mask = {
150 	.hdr.common.type = 0xff,
151 	.hdr.dummy[0] = RTE_BE32(0xffffffff),
152 	.hdr.dummy[1] = RTE_BE32(0xffffffff),
153 	.hdr.dummy[2] = RTE_BE32(0xffffffff),
154 };
155 #endif
156 
157 #define DPAA2_FLOW_DUMP printf
158 
159 static inline void
160 dpaa2_prot_field_string(uint32_t prot, uint32_t field,
161 	char *string)
162 {
163 	if (!dpaa2_flow_control_log)
164 		return;
165 
166 	if (prot == NET_PROT_ETH) {
167 		strcpy(string, "eth");
168 		if (field == NH_FLD_ETH_DA)
169 			strcat(string, ".dst");
170 		else if (field == NH_FLD_ETH_SA)
171 			strcat(string, ".src");
172 		else if (field == NH_FLD_ETH_TYPE)
173 			strcat(string, ".type");
174 		else
175 			strcat(string, ".unknown field");
176 	} else if (prot == NET_PROT_VLAN) {
177 		strcpy(string, "vlan");
178 		if (field == NH_FLD_VLAN_TCI)
179 			strcat(string, ".tci");
180 		else
181 			strcat(string, ".unknown field");
182 	} else if (prot == NET_PROT_IP) {
183 		strcpy(string, "ip");
184 		if (field == NH_FLD_IP_SRC)
185 			strcat(string, ".src");
186 		else if (field == NH_FLD_IP_DST)
187 			strcat(string, ".dst");
188 		else if (field == NH_FLD_IP_PROTO)
189 			strcat(string, ".proto");
190 		else
191 			strcat(string, ".unknown field");
192 	} else if (prot == NET_PROT_TCP) {
193 		strcpy(string, "tcp");
194 		if (field == NH_FLD_TCP_PORT_SRC)
195 			strcat(string, ".src");
196 		else if (field == NH_FLD_TCP_PORT_DST)
197 			strcat(string, ".dst");
198 		else
199 			strcat(string, ".unknown field");
200 	} else if (prot == NET_PROT_UDP) {
201 		strcpy(string, "udp");
202 		if (field == NH_FLD_UDP_PORT_SRC)
203 			strcat(string, ".src");
204 		else if (field == NH_FLD_UDP_PORT_DST)
205 			strcat(string, ".dst");
206 		else
207 			strcat(string, ".unknown field");
208 	} else if (prot == NET_PROT_ICMP) {
209 		strcpy(string, "icmp");
210 		if (field == NH_FLD_ICMP_TYPE)
211 			strcat(string, ".type");
212 		else if (field == NH_FLD_ICMP_CODE)
213 			strcat(string, ".code");
214 		else
215 			strcat(string, ".unknown field");
216 	} else if (prot == NET_PROT_SCTP) {
217 		strcpy(string, "sctp");
218 		if (field == NH_FLD_SCTP_PORT_SRC)
219 			strcat(string, ".src");
220 		else if (field == NH_FLD_SCTP_PORT_DST)
221 			strcat(string, ".dst");
222 		else
223 			strcat(string, ".unknown field");
224 	} else if (prot == NET_PROT_GRE) {
225 		strcpy(string, "gre");
226 		if (field == NH_FLD_GRE_TYPE)
227 			strcat(string, ".type");
228 		else
229 			strcat(string, ".unknown field");
230 	} else {
231 		strcpy(string, "unknown protocol");
232 	}
233 }
234 
235 static inline void
236 dpaa2_flow_qos_extracts_log(const struct dpaa2_dev_priv *priv)
237 {
238 	int idx;
239 	char string[32] = {0};
240 	const struct dpkg_profile_cfg *dpkg =
241 		&priv->extract.qos_key_extract.dpkg;
242 	const struct dpkg_extract *extract;
243 	enum dpkg_extract_type type;
244 	enum net_prot prot;
245 	uint32_t field;
246 
247 	if (!dpaa2_flow_control_log)
248 		return;
249 
250 	DPAA2_FLOW_DUMP("QoS table: %d extracts\r\n",
251 		dpkg->num_extracts);
252 	for (idx = 0; idx < dpkg->num_extracts; idx++) {
253 		extract = &dpkg->extracts[idx];
254 		type = extract->type;
255 		if (type == DPKG_EXTRACT_FROM_HDR) {
256 			prot = extract->extract.from_hdr.prot;
257 			field = extract->extract.from_hdr.field;
258 			dpaa2_prot_field_string(prot, field,
259 				string);
260 		} else if (type == DPKG_EXTRACT_FROM_DATA) {
261 			sprintf(string, "raw offset/len: %d/%d",
262 				extract->extract.from_data.offset,
263 				extract->extract.from_data.size);
264 		} else if (type == DPKG_EXTRACT_FROM_PARSE) {
265 			sprintf(string, "parse offset/len: %d/%d",
266 				extract->extract.from_parse.offset,
267 				extract->extract.from_parse.size);
268 		}
269 		DPAA2_FLOW_DUMP("%s", string);
270 		if ((idx + 1) < dpkg->num_extracts)
271 			DPAA2_FLOW_DUMP(" / ");
272 	}
273 	DPAA2_FLOW_DUMP("\r\n");
274 }
275 
276 static inline void
277 dpaa2_flow_fs_extracts_log(const struct dpaa2_dev_priv *priv,
278 	int tc_id)
279 {
280 	int idx;
281 	char string[32] = {0};
282 	const struct dpkg_profile_cfg *dpkg =
283 		&priv->extract.tc_key_extract[tc_id].dpkg;
284 	const struct dpkg_extract *extract;
285 	enum dpkg_extract_type type;
286 	enum net_prot prot;
287 	uint32_t field;
288 
289 	if (!dpaa2_flow_control_log)
290 		return;
291 
292 	DPAA2_FLOW_DUMP("FS table: %d extracts in TC[%d]\r\n",
293 		dpkg->num_extracts, tc_id);
294 	for (idx = 0; idx < dpkg->num_extracts; idx++) {
295 		extract = &dpkg->extracts[idx];
296 		type = extract->type;
297 		if (type == DPKG_EXTRACT_FROM_HDR) {
298 			prot = extract->extract.from_hdr.prot;
299 			field = extract->extract.from_hdr.field;
300 			dpaa2_prot_field_string(prot, field,
301 				string);
302 		} else if (type == DPKG_EXTRACT_FROM_DATA) {
303 			sprintf(string, "raw offset/len: %d/%d",
304 				extract->extract.from_data.offset,
305 				extract->extract.from_data.size);
306 		} else if (type == DPKG_EXTRACT_FROM_PARSE) {
307 			sprintf(string, "parse offset/len: %d/%d",
308 				extract->extract.from_parse.offset,
309 				extract->extract.from_parse.size);
310 		}
311 		DPAA2_FLOW_DUMP("%s", string);
312 		if ((idx + 1) < dpkg->num_extracts)
313 			DPAA2_FLOW_DUMP(" / ");
314 	}
315 	DPAA2_FLOW_DUMP("\r\n");
316 }
317 
318 static inline void
319 dpaa2_flow_qos_entry_log(const char *log_info,
320 	const struct dpaa2_dev_flow *flow, int qos_index)
321 {
322 	int idx;
323 	uint8_t *key, *mask;
324 
325 	if (!dpaa2_flow_control_log)
326 		return;
327 
328 	if (qos_index >= 0) {
329 		DPAA2_FLOW_DUMP("%s QoS entry[%d](size %d/%d) for TC[%d]\r\n",
330 			log_info, qos_index, flow->qos_rule_size,
331 			flow->qos_rule.key_size,
332 			flow->tc_id);
333 	} else {
334 		DPAA2_FLOW_DUMP("%s QoS entry(size %d/%d) for TC[%d]\r\n",
335 			log_info, flow->qos_rule_size,
336 			flow->qos_rule.key_size,
337 			flow->tc_id);
338 	}
339 
340 	key = flow->qos_key_addr;
341 	mask = flow->qos_mask_addr;
342 
343 	DPAA2_FLOW_DUMP("key:\r\n");
344 	for (idx = 0; idx < flow->qos_rule_size; idx++)
345 		DPAA2_FLOW_DUMP("%02x ", key[idx]);
346 
347 	DPAA2_FLOW_DUMP("\r\nmask:\r\n");
348 	for (idx = 0; idx < flow->qos_rule_size; idx++)
349 		DPAA2_FLOW_DUMP("%02x ", mask[idx]);
350 	DPAA2_FLOW_DUMP("\r\n");
351 }
352 
353 static inline void
354 dpaa2_flow_fs_entry_log(const char *log_info,
355 	const struct dpaa2_dev_flow *flow)
356 {
357 	int idx;
358 	uint8_t *key, *mask;
359 
360 	if (!dpaa2_flow_control_log)
361 		return;
362 
363 	DPAA2_FLOW_DUMP("%s FS/TC entry[%d](size %d/%d) of TC[%d]\r\n",
364 		log_info, flow->tc_index,
365 		flow->fs_rule_size, flow->fs_rule.key_size,
366 		flow->tc_id);
367 
368 	key = flow->fs_key_addr;
369 	mask = flow->fs_mask_addr;
370 
371 	DPAA2_FLOW_DUMP("key:\r\n");
372 	for (idx = 0; idx < flow->fs_rule_size; idx++)
373 		DPAA2_FLOW_DUMP("%02x ", key[idx]);
374 
375 	DPAA2_FLOW_DUMP("\r\nmask:\r\n");
376 	for (idx = 0; idx < flow->fs_rule_size; idx++)
377 		DPAA2_FLOW_DUMP("%02x ", mask[idx]);
378 	DPAA2_FLOW_DUMP("\r\n");
379 }
380 
381 static int
382 dpaa2_flow_ip_address_extract(enum net_prot prot,
383 	uint32_t field)
384 {
385 	if (prot == NET_PROT_IPV4 &&
386 		(field == NH_FLD_IPV4_SRC_IP ||
387 		field == NH_FLD_IPV4_DST_IP))
388 		return true;
389 	else if (prot == NET_PROT_IPV6 &&
390 		(field == NH_FLD_IPV6_SRC_IP ||
391 		field == NH_FLD_IPV6_DST_IP))
392 		return true;
393 	else if (prot == NET_PROT_IP &&
394 		(field == NH_FLD_IP_SRC ||
395 		field == NH_FLD_IP_DST))
396 		return true;
397 
398 	return false;
399 }
400 
401 static int
402 dpaa2_flow_l4_src_port_extract(enum net_prot prot,
403 	uint32_t field)
404 {
405 	if (prot == NET_PROT_TCP &&
406 		field == NH_FLD_TCP_PORT_SRC)
407 		return true;
408 	else if (prot == NET_PROT_UDP &&
409 		field == NH_FLD_UDP_PORT_SRC)
410 		return true;
411 	else if (prot == NET_PROT_SCTP &&
412 		field == NH_FLD_SCTP_PORT_SRC)
413 		return true;
414 
415 	return false;
416 }
417 
418 static int
419 dpaa2_flow_l4_dst_port_extract(enum net_prot prot,
420 	uint32_t field)
421 {
422 	if (prot == NET_PROT_TCP &&
423 		field == NH_FLD_TCP_PORT_DST)
424 		return true;
425 	else if (prot == NET_PROT_UDP &&
426 		field == NH_FLD_UDP_PORT_DST)
427 		return true;
428 	else if (prot == NET_PROT_SCTP &&
429 		field == NH_FLD_SCTP_PORT_DST)
430 		return true;
431 
432 	return false;
433 }
434 
435 static int
436 dpaa2_flow_add_qos_rule(struct dpaa2_dev_priv *priv,
437 	struct dpaa2_dev_flow *flow)
438 {
439 	uint16_t qos_index;
440 	int ret;
441 	struct fsl_mc_io *dpni = priv->hw;
442 
443 	if (priv->num_rx_tc <= 1 &&
444 		flow->action_type != RTE_FLOW_ACTION_TYPE_RSS) {
445 		DPAA2_PMD_WARN("No QoS Table for FS");
446 		return -EINVAL;
447 	}
448 
449 	/* A QoS entry is only effective when multiple TCs are in use. */
450 	qos_index = flow->tc_id * priv->fs_entries + flow->tc_index;
451 	if (qos_index >= priv->qos_entries) {
452 		DPAA2_PMD_ERR("QoS table full(%d >= %d)",
453 			qos_index, priv->qos_entries);
454 		return -EINVAL;
455 	}
456 
457 	dpaa2_flow_qos_entry_log("Start add", flow, qos_index);
458 
459 	ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
460 			priv->token, &flow->qos_rule,
461 			flow->tc_id, qos_index,
462 			0, 0);
463 	if (ret < 0) {
464 		DPAA2_PMD_ERR("Add entry(%d) to table(%d) failed",
465 			qos_index, flow->tc_id);
466 		return ret;
467 	}
468 
469 	return 0;
470 }
471 
472 static int
473 dpaa2_flow_add_fs_rule(struct dpaa2_dev_priv *priv,
474 	struct dpaa2_dev_flow *flow)
475 {
476 	int ret;
477 	struct fsl_mc_io *dpni = priv->hw;
478 
479 	if (flow->tc_index >= priv->fs_entries) {
480 		DPAA2_PMD_ERR("FS table full(%d >= %d)",
481 			flow->tc_index, priv->fs_entries);
482 		return -EINVAL;
483 	}
484 
485 	dpaa2_flow_fs_entry_log("Start add", flow);
486 
487 	ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW,
488 			priv->token, flow->tc_id,
489 			flow->tc_index, &flow->fs_rule,
490 			&flow->fs_action_cfg);
491 	if (ret < 0) {
492 		DPAA2_PMD_ERR("Add rule(%d) to FS table(%d) failed",
493 			flow->tc_index, flow->tc_id);
494 		return ret;
495 	}
496 
497 	return 0;
498 }
499 
500 static int
501 dpaa2_flow_rule_insert_hole(struct dpaa2_dev_flow *flow,
502 	int offset, int size,
503 	enum dpaa2_flow_dist_type dist_type)
504 {
505 	int end;
506 
507 	if (dist_type & DPAA2_FLOW_QOS_TYPE) {
508 		end = flow->qos_rule_size;
509 		if (end > offset) {
510 			memmove(flow->qos_key_addr + offset + size,
511 					flow->qos_key_addr + offset,
512 					end - offset);
513 			memset(flow->qos_key_addr + offset,
514 					0, size);
515 
516 			memmove(flow->qos_mask_addr + offset + size,
517 					flow->qos_mask_addr + offset,
518 					end - offset);
519 			memset(flow->qos_mask_addr + offset,
520 					0, size);
521 		}
522 		flow->qos_rule_size += size;
523 	}
524 
525 	if (dist_type & DPAA2_FLOW_FS_TYPE) {
526 		end = flow->fs_rule_size;
527 		if (end > offset) {
528 			memmove(flow->fs_key_addr + offset + size,
529 					flow->fs_key_addr + offset,
530 					end - offset);
531 			memset(flow->fs_key_addr + offset,
532 					0, size);
533 
534 			memmove(flow->fs_mask_addr + offset + size,
535 					flow->fs_mask_addr + offset,
536 					end - offset);
537 			memset(flow->fs_mask_addr + offset,
538 					0, size);
539 		}
540 		flow->fs_rule_size += size;
541 	}
542 
543 	return 0;
544 }
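/* Illustration of the hole insertion above: with a 4-byte rule
 * "aa bb cc dd", inserting a 2-byte hole at offset 1 shifts the tail
 * and zeroes the gap, giving "aa 00 00 bb cc dd" in both key and mask,
 * and grows the rule size by 2.
 */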
545 
546 static int
547 dpaa2_flow_rule_add_all(struct dpaa2_dev_priv *priv,
548 	enum dpaa2_flow_dist_type dist_type,
549 	uint16_t entry_size, uint8_t tc_id)
550 {
551 	struct dpaa2_dev_flow *curr = LIST_FIRST(&priv->flows);
552 	int ret;
553 
554 	while (curr) {
555 		if (dist_type & DPAA2_FLOW_QOS_TYPE) {
556 			if (priv->num_rx_tc > 1 ||
557 				curr->action_type ==
558 				RTE_FLOW_ACTION_TYPE_RSS) {
559 				curr->qos_rule.key_size = entry_size;
560 				ret = dpaa2_flow_add_qos_rule(priv, curr);
561 				if (ret)
562 					return ret;
563 			}
564 		}
565 		if (dist_type & DPAA2_FLOW_FS_TYPE &&
566 			curr->tc_id == tc_id) {
567 			curr->fs_rule.key_size = entry_size;
568 			ret = dpaa2_flow_add_fs_rule(priv, curr);
569 			if (ret)
570 				return ret;
571 		}
572 		curr = LIST_NEXT(curr, next);
573 	}
574 
575 	return 0;
576 }
577 
578 static int
579 dpaa2_flow_qos_rule_insert_hole(struct dpaa2_dev_priv *priv,
580 	int offset, int size)
581 {
582 	struct dpaa2_dev_flow *curr;
583 	int ret;
584 
585 	curr = priv->curr;
586 	if (!curr) {
587 		DPAA2_PMD_ERR("Current QoS flow insert hole failed.");
588 		return -EINVAL;
589 	} else {
590 		ret = dpaa2_flow_rule_insert_hole(curr, offset, size,
591 				DPAA2_FLOW_QOS_TYPE);
592 		if (ret)
593 			return ret;
594 	}
595 
596 	curr = LIST_FIRST(&priv->flows);
597 	while (curr) {
598 		ret = dpaa2_flow_rule_insert_hole(curr, offset, size,
599 				DPAA2_FLOW_QOS_TYPE);
600 		if (ret)
601 			return ret;
602 		curr = LIST_NEXT(curr, next);
603 	}
604 
605 	return 0;
606 }
607 
608 static int
609 dpaa2_flow_fs_rule_insert_hole(struct dpaa2_dev_priv *priv,
610 	int offset, int size, int tc_id)
611 {
612 	struct dpaa2_dev_flow *curr;
613 	int ret;
614 
615 	curr = priv->curr;
616 	if (!curr || curr->tc_id != tc_id) {
617 		DPAA2_PMD_ERR("Current flow insert hole failed.");
618 		return -EINVAL;
619 	} else {
620 		ret = dpaa2_flow_rule_insert_hole(curr, offset, size,
621 				DPAA2_FLOW_FS_TYPE);
622 		if (ret)
623 			return ret;
624 	}
625 
626 	curr = LIST_FIRST(&priv->flows);
627 
628 	while (curr) {
629 		if (curr->tc_id != tc_id) {
630 			curr = LIST_NEXT(curr, next);
631 			continue;
632 		}
633 		ret = dpaa2_flow_rule_insert_hole(curr, offset, size,
634 				DPAA2_FLOW_FS_TYPE);
635 		if (ret)
636 			return ret;
637 		curr = LIST_NEXT(curr, next);
638 	}
639 
640 	return 0;
641 }
642 
643 static int
644 dpaa2_flow_faf_advance(struct dpaa2_dev_priv *priv,
645 	int faf_byte, enum dpaa2_flow_dist_type dist_type, int tc_id,
646 	int *insert_offset)
647 {
648 	int offset, ret;
649 	struct dpaa2_key_profile *key_profile;
650 	int num, pos;
651 
652 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
653 		key_profile = &priv->extract.qos_key_extract.key_profile;
654 	else
655 		key_profile = &priv->extract.tc_key_extract[tc_id].key_profile;
656 
657 	num = key_profile->num;
658 
659 	if (num >= DPKG_MAX_NUM_OF_EXTRACTS) {
660 		DPAA2_PMD_ERR("Number of extracts overflows");
661 		return -EINVAL;
662 	}
663 
664 	if (key_profile->ip_addr_type != IP_NONE_ADDR_EXTRACT) {
665 		offset = key_profile->ip_addr_extract_off;
666 		pos = key_profile->ip_addr_extract_pos;
667 		key_profile->ip_addr_extract_pos++;
668 		key_profile->ip_addr_extract_off++;
669 		if (dist_type == DPAA2_FLOW_QOS_TYPE) {
670 			ret = dpaa2_flow_qos_rule_insert_hole(priv,
671 					offset, 1);
672 		} else {
673 			ret = dpaa2_flow_fs_rule_insert_hole(priv,
674 				offset, 1, tc_id);
675 		}
676 		if (ret)
677 			return ret;
678 	} else {
679 		pos = num;
680 	}
681 
682 	if (pos > 0) {
683 		key_profile->key_offset[pos] =
684 			key_profile->key_offset[pos - 1] +
685 			key_profile->key_size[pos - 1];
686 	} else {
687 		key_profile->key_offset[pos] = 0;
688 	}
689 
690 	key_profile->key_size[pos] = 1;
691 	key_profile->prot_field[pos].type = DPAA2_FAF_KEY;
692 	key_profile->prot_field[pos].key_field = faf_byte;
693 	key_profile->num++;
694 
695 	if (insert_offset)
696 		*insert_offset = key_profile->key_offset[pos];
697 
698 	key_profile->key_max_size++;
699 
700 	return pos;
701 }
702 
703 static int
704 dpaa2_flow_pr_advance(struct dpaa2_dev_priv *priv,
705 	uint32_t pr_offset, uint32_t pr_size,
706 	enum dpaa2_flow_dist_type dist_type, int tc_id,
707 	int *insert_offset)
708 {
709 	int offset, ret;
710 	struct dpaa2_key_profile *key_profile;
711 	int num, pos;
712 
713 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
714 		key_profile = &priv->extract.qos_key_extract.key_profile;
715 	else
716 		key_profile = &priv->extract.tc_key_extract[tc_id].key_profile;
717 
718 	num = key_profile->num;
719 
720 	if (num >= DPKG_MAX_NUM_OF_EXTRACTS) {
721 		DPAA2_PMD_ERR("Number of extracts overflows");
722 		return -EINVAL;
723 	}
724 
725 	if (key_profile->ip_addr_type != IP_NONE_ADDR_EXTRACT) {
726 		offset = key_profile->ip_addr_extract_off;
727 		pos = key_profile->ip_addr_extract_pos;
728 		key_profile->ip_addr_extract_pos++;
729 		key_profile->ip_addr_extract_off += pr_size;
730 		if (dist_type == DPAA2_FLOW_QOS_TYPE) {
731 			ret = dpaa2_flow_qos_rule_insert_hole(priv,
732 					offset, pr_size);
733 		} else {
734 			ret = dpaa2_flow_fs_rule_insert_hole(priv,
735 				offset, pr_size, tc_id);
736 		}
737 		if (ret)
738 			return ret;
739 	} else {
740 		pos = num;
741 	}
742 
743 	if (pos > 0) {
744 		key_profile->key_offset[pos] =
745 			key_profile->key_offset[pos - 1] +
746 			key_profile->key_size[pos - 1];
747 	} else {
748 		key_profile->key_offset[pos] = 0;
749 	}
750 
751 	key_profile->key_size[pos] = pr_size;
752 	key_profile->prot_field[pos].type = DPAA2_PR_KEY;
753 	key_profile->prot_field[pos].key_field =
754 		(pr_offset << 16) | pr_size;
755 	key_profile->num++;
756 
757 	if (insert_offset)
758 		*insert_offset = key_profile->key_offset[pos];
759 
760 	key_profile->key_max_size += pr_size;
761 
762 	return pos;
763 }
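/* The parse-result (PR) key field mirrors the raw encoding:
 * (pr_offset << 16) | pr_size, e.g. offset 8, size 2 -> 0x00080002.
 * dpaa2_flow_pr_rule_data_set() recomputes the same value to locate
 * the extract when filling rule data.
 */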
764 
765 /* Move IPv4/IPv6 address extracts to make room for a new extract
766  * inserted before them. Current MC/WRIOP only supports a generic IP
767  * extract of non-fixed size, so IP addresses stay at the end of the
768  * key; otherwise the positions of extracts after them are unknown.
769  */
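/* Example: with extracts {eth.type (2B), ipv4.src (4B)}, adding
 * udp.dst (2B) must land before the IP address, turning the key layout
 * into {eth.type, udp.dst, ipv4.src} and shifting existing rule data
 * via the insert-hole helpers above.
 */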
770 static int
771 dpaa2_flow_key_profile_advance(enum net_prot prot,
772 	uint32_t field, uint8_t field_size,
773 	struct dpaa2_dev_priv *priv,
774 	enum dpaa2_flow_dist_type dist_type, int tc_id,
775 	int *insert_offset)
776 {
777 	int offset, ret;
778 	struct dpaa2_key_profile *key_profile;
779 	int num, pos;
780 
781 	if (dpaa2_flow_ip_address_extract(prot, field)) {
782 		DPAA2_PMD_ERR("%s is only for non-IP-address extracts",
783 			__func__);
784 		return -EINVAL;
785 	}
786 
787 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
788 		key_profile = &priv->extract.qos_key_extract.key_profile;
789 	else
790 		key_profile = &priv->extract.tc_key_extract[tc_id].key_profile;
791 
792 	num = key_profile->num;
793 
794 	if (num >= DPKG_MAX_NUM_OF_EXTRACTS) {
795 		DPAA2_PMD_ERR("Number of extracts overflows");
796 		return -EINVAL;
797 	}
798 
799 	if (key_profile->ip_addr_type != IP_NONE_ADDR_EXTRACT) {
800 		offset = key_profile->ip_addr_extract_off;
801 		pos = key_profile->ip_addr_extract_pos;
802 		key_profile->ip_addr_extract_pos++;
803 		key_profile->ip_addr_extract_off += field_size;
804 		if (dist_type == DPAA2_FLOW_QOS_TYPE) {
805 			ret = dpaa2_flow_qos_rule_insert_hole(priv,
806 					offset, field_size);
807 		} else {
808 			ret = dpaa2_flow_fs_rule_insert_hole(priv,
809 				offset, field_size, tc_id);
810 		}
811 		if (ret)
812 			return ret;
813 	} else {
814 		pos = num;
815 	}
816 
817 	if (pos > 0) {
818 		key_profile->key_offset[pos] =
819 			key_profile->key_offset[pos - 1] +
820 			key_profile->key_size[pos - 1];
821 	} else {
822 		key_profile->key_offset[pos] = 0;
823 	}
824 
825 	key_profile->key_size[pos] = field_size;
826 	key_profile->prot_field[pos].type = DPAA2_NET_PROT_KEY;
827 	key_profile->prot_field[pos].prot = prot;
828 	key_profile->prot_field[pos].key_field = field;
829 	key_profile->num++;
830 
831 	if (insert_offset)
832 		*insert_offset = key_profile->key_offset[pos];
833 
834 	if (dpaa2_flow_l4_src_port_extract(prot, field)) {
835 		key_profile->l4_src_port_present = 1;
836 		key_profile->l4_src_port_pos = pos;
837 		key_profile->l4_src_port_offset =
838 			key_profile->key_offset[pos];
839 	} else if (dpaa2_flow_l4_dst_port_extract(prot, field)) {
840 		key_profile->l4_dst_port_present = 1;
841 		key_profile->l4_dst_port_pos = pos;
842 		key_profile->l4_dst_port_offset =
843 			key_profile->key_offset[pos];
844 	}
845 	key_profile->key_max_size += field_size;
846 
847 	return pos;
848 }
849 
850 static int
851 dpaa2_flow_faf_add_hdr(int faf_byte,
852 	struct dpaa2_dev_priv *priv,
853 	enum dpaa2_flow_dist_type dist_type, int tc_id,
854 	int *insert_offset)
855 {
856 	int pos, i, offset;
857 	struct dpaa2_key_extract *key_extract;
858 	struct dpkg_profile_cfg *dpkg;
859 	struct dpkg_extract *extracts;
860 
861 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
862 		key_extract = &priv->extract.qos_key_extract;
863 	else
864 		key_extract = &priv->extract.tc_key_extract[tc_id];
865 
866 	dpkg = &key_extract->dpkg;
867 	extracts = dpkg->extracts;
868 
869 	if (dpkg->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
870 		DPAA2_PMD_ERR("Number of extracts overflows");
871 		return -EINVAL;
872 	}
873 
874 	pos = dpaa2_flow_faf_advance(priv,
875 			faf_byte, dist_type, tc_id,
876 			insert_offset);
877 	if (pos < 0)
878 		return pos;
879 
880 	if (pos != dpkg->num_extracts) {
881 		/* Not the last position, so an IP address extract must follow. */
882 		for (i = dpkg->num_extracts - 1; i >= pos; i--) {
883 			memcpy(&extracts[i + 1],
884 				&extracts[i], sizeof(struct dpkg_extract));
885 		}
886 	}
887 
888 	offset = DPAA2_FAFE_PSR_OFFSET + faf_byte;
889 
890 	extracts[pos].type = DPKG_EXTRACT_FROM_PARSE;
891 	extracts[pos].extract.from_parse.offset = offset;
892 	extracts[pos].extract.from_parse.size = 1;
893 
894 	dpkg->num_extracts++;
895 
896 	return 0;
897 }
898 
899 static int
900 dpaa2_flow_pr_add_hdr(uint32_t pr_offset,
901 	uint32_t pr_size, struct dpaa2_dev_priv *priv,
902 	enum dpaa2_flow_dist_type dist_type, int tc_id,
903 	int *insert_offset)
904 {
905 	int pos, i;
906 	struct dpaa2_key_extract *key_extract;
907 	struct dpkg_profile_cfg *dpkg;
908 	struct dpkg_extract *extracts;
909 
910 	if ((pr_offset + pr_size) > DPAA2_FAPR_SIZE) {
911 		DPAA2_PMD_ERR("PR extracts(%d:%d) overflow",
912 			pr_offset, pr_size);
913 		return -EINVAL;
914 	}
915 
916 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
917 		key_extract = &priv->extract.qos_key_extract;
918 	else
919 		key_extract = &priv->extract.tc_key_extract[tc_id];
920 
921 	dpkg = &key_extract->dpkg;
922 	extracts = dpkg->extracts;
923 
924 	if (dpkg->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
925 		DPAA2_PMD_ERR("Number of extracts overflows");
926 		return -EINVAL;
927 	}
928 
929 	pos = dpaa2_flow_pr_advance(priv,
930 			pr_offset, pr_size, dist_type, tc_id,
931 			insert_offset);
932 	if (pos < 0)
933 		return pos;
934 
935 	if (pos != dpkg->num_extracts) {
936 		/* Not the last position, so an IP address extract must follow. */
937 		for (i = dpkg->num_extracts - 1; i >= pos; i--) {
938 			memcpy(&extracts[i + 1],
939 				&extracts[i], sizeof(struct dpkg_extract));
940 		}
941 	}
942 
943 	extracts[pos].type = DPKG_EXTRACT_FROM_PARSE;
944 	extracts[pos].extract.from_parse.offset = pr_offset;
945 	extracts[pos].extract.from_parse.size = pr_size;
946 
947 	dpkg->num_extracts++;
948 
949 	return 0;
950 }
951 
952 static int
953 dpaa2_flow_extract_add_hdr(enum net_prot prot,
954 	uint32_t field, uint8_t field_size,
955 	struct dpaa2_dev_priv *priv,
956 	enum dpaa2_flow_dist_type dist_type, int tc_id,
957 	int *insert_offset)
958 {
959 	int pos, i;
960 	struct dpaa2_key_extract *key_extract;
961 	struct dpkg_profile_cfg *dpkg;
962 	struct dpkg_extract *extracts;
963 
964 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
965 		key_extract = &priv->extract.qos_key_extract;
966 	else
967 		key_extract = &priv->extract.tc_key_extract[tc_id];
968 
969 	dpkg = &key_extract->dpkg;
970 	extracts = dpkg->extracts;
971 
972 	if (dpaa2_flow_ip_address_extract(prot, field)) {
973 		DPAA2_PMD_ERR("%s is only for non-IP-address extracts",
974 			__func__);
975 		return -EINVAL;
976 	}
977 
978 	if (dpkg->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
979 		DPAA2_PMD_ERR("Number of extracts overflows");
980 		return -EINVAL;
981 	}
982 
983 	pos = dpaa2_flow_key_profile_advance(prot,
984 			field, field_size, priv,
985 			dist_type, tc_id,
986 			insert_offset);
987 	if (pos < 0)
988 		return pos;
989 
990 	if (pos != dpkg->num_extracts) {
991 		/* Not the last position, so an IP address extract must follow. */
992 		for (i = dpkg->num_extracts - 1; i >= pos; i--) {
993 			memcpy(&extracts[i + 1],
994 				&extracts[i], sizeof(struct dpkg_extract));
995 		}
996 	}
997 
998 	extracts[pos].type = DPKG_EXTRACT_FROM_HDR;
999 	extracts[pos].extract.from_hdr.prot = prot;
1000 	extracts[pos].extract.from_hdr.type = DPKG_FULL_FIELD;
1001 	extracts[pos].extract.from_hdr.field = field;
1002 
1003 	dpkg->num_extracts++;
1004 
1005 	return 0;
1006 }
1007 
1008 static int
1009 dpaa2_flow_extract_new_raw(struct dpaa2_dev_priv *priv,
1010 	int offset, int size,
1011 	enum dpaa2_flow_dist_type dist_type, int tc_id)
1012 {
1013 	struct dpaa2_key_extract *key_extract;
1014 	struct dpkg_profile_cfg *dpkg;
1015 	struct dpaa2_key_profile *key_profile;
1016 	int last_extract_size, index, pos, item_size;
1017 	uint8_t num_extracts;
1018 	uint32_t field;
1019 
1020 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
1021 		key_extract = &priv->extract.qos_key_extract;
1022 	else
1023 		key_extract = &priv->extract.tc_key_extract[tc_id];
1024 
1025 	dpkg = &key_extract->dpkg;
1026 	key_profile = &key_extract->key_profile;
1027 
1028 	key_profile->raw_region.raw_start = 0;
1029 	key_profile->raw_region.raw_size = 0;
1030 
1031 	last_extract_size = (size % DPAA2_FLOW_MAX_KEY_SIZE);
1032 	num_extracts = (size / DPAA2_FLOW_MAX_KEY_SIZE);
1033 	if (last_extract_size)
1034 		num_extracts++;
1035 	else
1036 		last_extract_size = DPAA2_FLOW_MAX_KEY_SIZE;
1037 
1038 	for (index = 0; index < num_extracts; index++) {
1039 		if (index == num_extracts - 1)
1040 			item_size = last_extract_size;
1041 		else
1042 			item_size = DPAA2_FLOW_MAX_KEY_SIZE;
1043 		field = offset << DPAA2_FLOW_RAW_OFFSET_FIELD_SHIFT;
1044 		field |= item_size;
1045 
1046 		pos = dpaa2_flow_key_profile_advance(NET_PROT_PAYLOAD,
1047 				field, item_size, priv, dist_type,
1048 				tc_id, NULL);
1049 		if (pos < 0)
1050 			return pos;
1051 
1052 		dpkg->extracts[pos].type = DPKG_EXTRACT_FROM_DATA;
1053 		dpkg->extracts[pos].extract.from_data.size = item_size;
1054 		dpkg->extracts[pos].extract.from_data.offset = offset;
1055 
1056 		if (index == 0) {
1057 			key_profile->raw_extract_pos = pos;
1058 			key_profile->raw_extract_off =
1059 				key_profile->key_offset[pos];
1060 			key_profile->raw_region.raw_start = offset;
1061 		}
1062 		key_profile->raw_extract_num++;
1063 		key_profile->raw_region.raw_size +=
1064 			key_profile->key_size[pos];
1065 
1066 		offset += item_size;
1067 		dpkg->num_extracts++;
1068 	}
1069 
1070 	return 0;
1071 }
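/* Raw regions above are split into at most 16-byte extracts
 * (DPAA2_FLOW_MAX_KEY_SIZE): a 40-byte region at offset 0 becomes
 * three extracts of 16, 16 and 8 bytes at offsets 0, 16 and 32.
 */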
1072 
1073 static int
1074 dpaa2_flow_extract_add_raw(struct dpaa2_dev_priv *priv,
1075 	int offset, int size, enum dpaa2_flow_dist_type dist_type,
1076 	int tc_id, int *recfg)
1077 {
1078 	struct dpaa2_key_profile *key_profile;
1079 	struct dpaa2_raw_region *raw_region;
1080 	int end = offset + size, ret = 0, extract_extended, sz_extend;
1081 	int start_cmp, end_cmp, new_size, index, pos, end_pos;
1082 	int last_extract_size, item_size, num_extracts, bk_num = 0;
1083 	struct dpkg_extract extract_bk[DPKG_MAX_NUM_OF_EXTRACTS];
1084 	uint8_t key_offset_bk[DPKG_MAX_NUM_OF_EXTRACTS];
1085 	uint8_t key_size_bk[DPKG_MAX_NUM_OF_EXTRACTS];
1086 	struct key_prot_field prot_field_bk[DPKG_MAX_NUM_OF_EXTRACTS];
1087 	struct dpaa2_raw_region raw_hole;
1088 	struct dpkg_profile_cfg *dpkg;
1089 	enum net_prot prot;
1090 	uint32_t field;
1091 
1092 	if (dist_type == DPAA2_FLOW_QOS_TYPE) {
1093 		key_profile = &priv->extract.qos_key_extract.key_profile;
1094 		dpkg = &priv->extract.qos_key_extract.dpkg;
1095 	} else {
1096 		key_profile = &priv->extract.tc_key_extract[tc_id].key_profile;
1097 		dpkg = &priv->extract.tc_key_extract[tc_id].dpkg;
1098 	}
1099 
1100 	raw_region = &key_profile->raw_region;
1101 	if (!raw_region->raw_size) {
1102 		/* New RAW region*/
1103 		ret = dpaa2_flow_extract_new_raw(priv, offset, size,
1104 			dist_type, tc_id);
1105 		if (!ret && recfg)
1106 			(*recfg) |= dist_type;
1107 
1108 		return ret;
1109 	}
1110 	start_cmp = raw_region->raw_start;
1111 	end_cmp = raw_region->raw_start + raw_region->raw_size;
1112 
1113 	if (offset >= start_cmp && end <= end_cmp)
1114 		return 0;
1115 
1116 	sz_extend = 0;
1117 	new_size = raw_region->raw_size;
1118 	if (offset < start_cmp) {
1119 		sz_extend += start_cmp - offset;
1120 		new_size += (start_cmp - offset);
1121 	}
1122 	if (end > end_cmp) {
1123 		sz_extend += end - end_cmp;
1124 		new_size += (end - end_cmp);
1125 	}
1126 
1127 	last_extract_size = (new_size % DPAA2_FLOW_MAX_KEY_SIZE);
1128 	num_extracts = (new_size / DPAA2_FLOW_MAX_KEY_SIZE);
1129 	if (last_extract_size)
1130 		num_extracts++;
1131 	else
1132 		last_extract_size = DPAA2_FLOW_MAX_KEY_SIZE;
1133 
1134 	if ((key_profile->num + num_extracts -
1135 		key_profile->raw_extract_num) >=
1136 		DPKG_MAX_NUM_OF_EXTRACTS) {
1137 		DPAA2_PMD_ERR("%s Failed to expand raw extracts",
1138 			__func__);
1139 		return -EINVAL;
1140 	}
1141 
1142 	if (offset < start_cmp) {
1143 		raw_hole.raw_start = key_profile->raw_extract_off;
1144 		raw_hole.raw_size = start_cmp - offset;
1145 		raw_region->raw_start = offset;
1146 		raw_region->raw_size += start_cmp - offset;
1147 
1148 		if (dist_type & DPAA2_FLOW_QOS_TYPE) {
1149 			ret = dpaa2_flow_qos_rule_insert_hole(priv,
1150 					raw_hole.raw_start,
1151 					raw_hole.raw_size);
1152 			if (ret)
1153 				return ret;
1154 		}
1155 		if (dist_type & DPAA2_FLOW_FS_TYPE) {
1156 			ret = dpaa2_flow_fs_rule_insert_hole(priv,
1157 					raw_hole.raw_start,
1158 					raw_hole.raw_size, tc_id);
1159 			if (ret)
1160 				return ret;
1161 		}
1162 	}
1163 
1164 	if (end > end_cmp) {
1165 		raw_hole.raw_start =
1166 			key_profile->raw_extract_off +
1167 			raw_region->raw_size;
1168 		raw_hole.raw_size = end - end_cmp;
1169 		raw_region->raw_size += end - end_cmp;
1170 
1171 		if (dist_type & DPAA2_FLOW_QOS_TYPE) {
1172 			ret = dpaa2_flow_qos_rule_insert_hole(priv,
1173 					raw_hole.raw_start,
1174 					raw_hole.raw_size);
1175 			if (ret)
1176 				return ret;
1177 		}
1178 		if (dist_type & DPAA2_FLOW_FS_TYPE) {
1179 			ret = dpaa2_flow_fs_rule_insert_hole(priv,
1180 					raw_hole.raw_start,
1181 					raw_hole.raw_size, tc_id);
1182 			if (ret)
1183 				return ret;
1184 		}
1185 	}
1186 
1187 	end_pos = key_profile->raw_extract_pos +
1188 		key_profile->raw_extract_num;
1189 	if (key_profile->num > end_pos) {
1190 		bk_num = key_profile->num - end_pos;
1191 		memcpy(extract_bk, &dpkg->extracts[end_pos],
1192 			bk_num * sizeof(struct dpkg_extract));
1193 		memcpy(key_offset_bk, &key_profile->key_offset[end_pos],
1194 			bk_num * sizeof(uint8_t));
1195 		memcpy(key_size_bk, &key_profile->key_size[end_pos],
1196 			bk_num * sizeof(uint8_t));
1197 		memcpy(prot_field_bk, &key_profile->prot_field[end_pos],
1198 			bk_num * sizeof(struct key_prot_field));
1199 
1200 		for (index = 0; index < bk_num; index++) {
1201 			key_offset_bk[index] += sz_extend;
1202 			prot = prot_field_bk[index].prot;
1203 			field = prot_field_bk[index].key_field;
1204 			if (dpaa2_flow_l4_src_port_extract(prot,
1205 				field)) {
1206 				key_profile->l4_src_port_present = 1;
1207 				key_profile->l4_src_port_pos = end_pos + index;
1208 				key_profile->l4_src_port_offset =
1209 					key_offset_bk[index];
1210 			} else if (dpaa2_flow_l4_dst_port_extract(prot,
1211 				field)) {
1212 				key_profile->l4_dst_port_present = 1;
1213 				key_profile->l4_dst_port_pos = end_pos + index;
1214 				key_profile->l4_dst_port_offset =
1215 					key_offset_bk[index];
1216 			}
1217 		}
1218 	}
1219 
1220 	pos = key_profile->raw_extract_pos;
1221 
1222 	for (index = 0; index < num_extracts; index++) {
1223 		if (index == num_extracts - 1)
1224 			item_size = last_extract_size;
1225 		else
1226 			item_size = DPAA2_FLOW_MAX_KEY_SIZE;
1227 		field = offset << DPAA2_FLOW_RAW_OFFSET_FIELD_SHIFT;
1228 		field |= item_size;
1229 
1230 		if (pos > 0) {
1231 			key_profile->key_offset[pos] =
1232 				key_profile->key_offset[pos - 1] +
1233 				key_profile->key_size[pos - 1];
1234 		} else {
1235 			key_profile->key_offset[pos] = 0;
1236 		}
1237 		key_profile->key_size[pos] = item_size;
1238 		key_profile->prot_field[pos].type = DPAA2_NET_PROT_KEY;
1239 		key_profile->prot_field[pos].prot = NET_PROT_PAYLOAD;
1240 		key_profile->prot_field[pos].key_field = field;
1241 
1242 		dpkg->extracts[pos].type = DPKG_EXTRACT_FROM_DATA;
1243 		dpkg->extracts[pos].extract.from_data.size = item_size;
1244 		dpkg->extracts[pos].extract.from_data.offset = offset;
1245 		offset += item_size;
1246 		pos++;
1247 	}
1248 
1249 	if (bk_num) {
1250 		memcpy(&dpkg->extracts[pos], extract_bk,
1251 			bk_num * sizeof(struct dpkg_extract));
1252 		memcpy(&key_profile->key_offset[end_pos],
1253 			key_offset_bk, bk_num * sizeof(uint8_t));
1254 		memcpy(&key_profile->key_size[end_pos],
1255 			key_size_bk, bk_num * sizeof(uint8_t));
1256 		memcpy(&key_profile->prot_field[end_pos],
1257 			prot_field_bk, bk_num * sizeof(struct key_prot_field));
1258 	}
1259 
1260 	extract_extended = num_extracts - key_profile->raw_extract_num;
1261 	if (key_profile->ip_addr_type != IP_NONE_ADDR_EXTRACT) {
1262 		key_profile->ip_addr_extract_pos += extract_extended;
1263 		key_profile->ip_addr_extract_off += sz_extend;
1264 	}
1265 	key_profile->raw_extract_num = num_extracts;
1266 	key_profile->num += extract_extended;
1267 	key_profile->key_max_size += sz_extend;
1268 
1269 	dpkg->num_extracts += extract_extended;
1270 	if (!ret && recfg)
1271 		(*recfg) |= dist_type;
1272 
1273 	return ret;
1274 }
1275 
1276 static inline int
1277 dpaa2_flow_extract_search(struct dpaa2_key_profile *key_profile,
1278 	enum key_prot_type type, enum net_prot prot, uint32_t key_field)
1279 {
1280 	int pos;
1281 	struct key_prot_field *prot_field;
1282 
1283 	if (dpaa2_flow_ip_address_extract(prot, key_field)) {
1284 		DPAA2_PMD_ERR("%s is only for non-IP-address extracts",
1285 			__func__);
1286 		return -EINVAL;
1287 	}
1288 
1289 	prot_field = key_profile->prot_field;
1290 	for (pos = 0; pos < key_profile->num; pos++) {
1291 		if (type == DPAA2_NET_PROT_KEY &&
1292 			prot_field[pos].prot == prot &&
1293 			prot_field[pos].key_field == key_field &&
1294 			prot_field[pos].type == type)
1295 			return pos;
1296 		else if (type == DPAA2_FAF_KEY &&
1297 			prot_field[pos].key_field == key_field &&
1298 			prot_field[pos].type == type)
1299 			return pos;
1300 		else if (type == DPAA2_PR_KEY &&
1301 			prot_field[pos].key_field == key_field &&
1302 			prot_field[pos].type == type)
1303 			return pos;
1304 	}
1305 
1306 	if (type == DPAA2_NET_PROT_KEY &&
1307 		dpaa2_flow_l4_src_port_extract(prot, key_field)) {
1308 		if (key_profile->l4_src_port_present)
1309 			return key_profile->l4_src_port_pos;
1310 	} else if (type == DPAA2_NET_PROT_KEY &&
1311 		dpaa2_flow_l4_dst_port_extract(prot, key_field)) {
1312 		if (key_profile->l4_dst_port_present)
1313 			return key_profile->l4_dst_port_pos;
1314 	}
1315 
1316 	return -ENXIO;
1317 }
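/* Note the L4 port fallback above: TCP, UDP and SCTP source (or
 * destination) ports share one extract position, so e.g. a UDP
 * src-port rule can reuse an extract originally added for TCP.
 */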
1318 
1319 static inline int
1320 dpaa2_flow_extract_key_offset(struct dpaa2_key_profile *key_profile,
1321 	enum key_prot_type type, enum net_prot prot, uint32_t key_field)
1322 {
1323 	int i;
1324 
1325 	i = dpaa2_flow_extract_search(key_profile, type, prot, key_field);
1326 	if (i >= 0)
1327 		return key_profile->key_offset[i];
1328 	else
1329 		return i;
1330 }
1331 
1332 static int
1333 dpaa2_flow_faf_add_rule(struct dpaa2_dev_priv *priv,
1334 	struct dpaa2_dev_flow *flow,
1335 	enum dpaa2_rx_faf_offset faf_bit_off,
1336 	int group,
1337 	enum dpaa2_flow_dist_type dist_type)
1338 {
1339 	int offset;
1340 	uint8_t *key_addr;
1341 	uint8_t *mask_addr;
1342 	struct dpaa2_key_extract *key_extract;
1343 	struct dpaa2_key_profile *key_profile;
1344 	uint8_t faf_byte = faf_bit_off / 8;
1345 	uint8_t faf_bit_in_byte = faf_bit_off % 8;
1346 
1347 	faf_bit_in_byte = 7 - faf_bit_in_byte;
1348 
1349 	if (dist_type & DPAA2_FLOW_QOS_TYPE) {
1350 		key_extract = &priv->extract.qos_key_extract;
1351 		key_profile = &key_extract->key_profile;
1352 
1353 		offset = dpaa2_flow_extract_key_offset(key_profile,
1354 				DPAA2_FAF_KEY, NET_PROT_NONE, faf_byte);
1355 		if (offset < 0) {
1356 			DPAA2_PMD_ERR("%s QoS key extract failed", __func__);
1357 			return -EINVAL;
1358 		}
1359 		key_addr = flow->qos_key_addr + offset;
1360 		mask_addr = flow->qos_mask_addr + offset;
1361 
1362 		if (!(*key_addr) &&
1363 			key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT)
1364 			flow->qos_rule_size++;
1365 
1366 		*key_addr |= (1 << faf_bit_in_byte);
1367 		*mask_addr |= (1 << faf_bit_in_byte);
1368 	}
1369 
1370 	if (dist_type & DPAA2_FLOW_FS_TYPE) {
1371 		key_extract = &priv->extract.tc_key_extract[group];
1372 		key_profile = &key_extract->key_profile;
1373 
1374 		offset = dpaa2_flow_extract_key_offset(key_profile,
1375 				DPAA2_FAF_KEY, NET_PROT_NONE, faf_byte);
1376 		if (offset < 0) {
1377 			DPAA2_PMD_ERR("%s TC[%d] key extract failed",
1378 				__func__, group);
1379 			return -EINVAL;
1380 		}
1381 		key_addr = flow->fs_key_addr + offset;
1382 		mask_addr = flow->fs_mask_addr + offset;
1383 
1384 		if (!(*key_addr) &&
1385 			key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT)
1386 			flow->fs_rule_size++;
1387 
1388 		*key_addr |= (1 << faf_bit_in_byte);
1389 		*mask_addr |= (1 << faf_bit_in_byte);
1390 	}
1391 
1392 	return 0;
1393 }
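/* FAF bit numbering is big-endian within each parse-result byte, hence
 * the (7 - faf_bit_in_byte) flip above: e.g. faf_bit_off 18 lands in
 * faf_byte 2 as mask bit (1 << 5) = 0x20.
 */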
1394 
1395 static inline int
1396 dpaa2_flow_pr_rule_data_set(struct dpaa2_dev_flow *flow,
1397 	struct dpaa2_key_profile *key_profile,
1398 	uint32_t pr_offset, uint32_t pr_size,
1399 	const void *key, const void *mask,
1400 	enum dpaa2_flow_dist_type dist_type)
1401 {
1402 	int offset;
1403 	uint32_t pr_field = pr_offset << 16 | pr_size;
1404 
1405 	offset = dpaa2_flow_extract_key_offset(key_profile,
1406 			DPAA2_PR_KEY, NET_PROT_NONE, pr_field);
1407 	if (offset < 0) {
1408 		DPAA2_PMD_ERR("PR off(%d)/size(%d) does not exist!",
1409 			pr_offset, pr_size);
1410 		return -EINVAL;
1411 	}
1412 
1413 	if (dist_type & DPAA2_FLOW_QOS_TYPE) {
1414 		memcpy((flow->qos_key_addr + offset), key, pr_size);
1415 		memcpy((flow->qos_mask_addr + offset), mask, pr_size);
1416 		if (key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT)
1417 			flow->qos_rule_size = offset + pr_size;
1418 	}
1419 
1420 	if (dist_type & DPAA2_FLOW_FS_TYPE) {
1421 		memcpy((flow->fs_key_addr + offset), key, pr_size);
1422 		memcpy((flow->fs_mask_addr + offset), mask, pr_size);
1423 		if (key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT)
1424 			flow->fs_rule_size = offset + pr_size;
1425 	}
1426 
1427 	return 0;
1428 }
1429 
1430 static inline int
1431 dpaa2_flow_hdr_rule_data_set(struct dpaa2_dev_flow *flow,
1432 	struct dpaa2_key_profile *key_profile,
1433 	enum net_prot prot, uint32_t field, int size,
1434 	const void *key, const void *mask,
1435 	enum dpaa2_flow_dist_type dist_type)
1436 {
1437 	int offset;
1438 
1439 	if (dpaa2_flow_ip_address_extract(prot, field)) {
1440 		DPAA2_PMD_ERR("%s is only for non-IP-address extracts",
1441 			__func__);
1442 		return -EINVAL;
1443 	}
1444 
1445 	offset = dpaa2_flow_extract_key_offset(key_profile,
1446 			DPAA2_NET_PROT_KEY, prot, field);
1447 	if (offset < 0) {
1448 		DPAA2_PMD_ERR("P(%d)/F(%d) does not exist!",
1449 			prot, field);
1450 		return -EINVAL;
1451 	}
1452 
1453 	if (dist_type & DPAA2_FLOW_QOS_TYPE) {
1454 		memcpy((flow->qos_key_addr + offset), key, size);
1455 		memcpy((flow->qos_mask_addr + offset), mask, size);
1456 		if (key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT)
1457 			flow->qos_rule_size = offset + size;
1458 	}
1459 
1460 	if (dist_type & DPAA2_FLOW_FS_TYPE) {
1461 		memcpy((flow->fs_key_addr + offset), key, size);
1462 		memcpy((flow->fs_mask_addr + offset), mask, size);
1463 		if (key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT)
1464 			flow->fs_rule_size = offset + size;
1465 	}
1466 
1467 	return 0;
1468 }
1469 
1470 static inline int
1471 dpaa2_flow_raw_rule_data_set(struct dpaa2_dev_flow *flow,
1472 	struct dpaa2_key_profile *key_profile,
1473 	uint32_t extract_offset, int size,
1474 	const void *key, const void *mask,
1475 	enum dpaa2_flow_dist_type dist_type)
1476 {
1477 	int extract_size = size > DPAA2_FLOW_MAX_KEY_SIZE ?
1478 		DPAA2_FLOW_MAX_KEY_SIZE : size;
1479 	int offset, field;
1480 
1481 	field = extract_offset << DPAA2_FLOW_RAW_OFFSET_FIELD_SHIFT;
1482 	field |= extract_size;
1483 	offset = dpaa2_flow_extract_key_offset(key_profile,
1484 			DPAA2_NET_PROT_KEY, NET_PROT_PAYLOAD, field);
1485 	if (offset < 0) {
1486 		DPAA2_PMD_ERR("offset(%d)/size(%d) raw extract failed",
1487 			extract_offset, size);
1488 		return -EINVAL;
1489 	}
1490 
1491 	if (dist_type & DPAA2_FLOW_QOS_TYPE) {
1492 		memcpy((flow->qos_key_addr + offset), key, size);
1493 		memcpy((flow->qos_mask_addr + offset), mask, size);
1494 		flow->qos_rule_size = offset + size;
1495 	}
1496 
1497 	if (dist_type & DPAA2_FLOW_FS_TYPE) {
1498 		memcpy((flow->fs_key_addr + offset), key, size);
1499 		memcpy((flow->fs_mask_addr + offset), mask, size);
1500 		flow->fs_rule_size = offset + size;
1501 	}
1502 
1503 	return 0;
1504 }
1505 
1506 static int
1507 dpaa2_flow_extract_support(const uint8_t *mask_src,
1508 	enum rte_flow_item_type type)
1509 {
1510 	char mask[64];
1511 	int i, size = 0;
1512 	const char *mask_support = NULL;
1513 
1514 	switch (type) {
1515 	case RTE_FLOW_ITEM_TYPE_ETH:
1516 		mask_support = (const char *)&dpaa2_flow_item_eth_mask;
1517 		size = sizeof(struct rte_flow_item_eth);
1518 		break;
1519 	case RTE_FLOW_ITEM_TYPE_VLAN:
1520 		mask_support = (const char *)&dpaa2_flow_item_vlan_mask;
1521 		size = sizeof(struct rte_flow_item_vlan);
1522 		break;
1523 	case RTE_FLOW_ITEM_TYPE_IPV4:
1524 		mask_support = (const char *)&dpaa2_flow_item_ipv4_mask;
1525 		size = sizeof(struct rte_flow_item_ipv4);
1526 		break;
1527 	case RTE_FLOW_ITEM_TYPE_IPV6:
1528 		mask_support = (const char *)&dpaa2_flow_item_ipv6_mask;
1529 		size = sizeof(struct rte_flow_item_ipv6);
1530 		break;
1531 	case RTE_FLOW_ITEM_TYPE_ICMP:
1532 		mask_support = (const char *)&dpaa2_flow_item_icmp_mask;
1533 		size = sizeof(struct rte_flow_item_icmp);
1534 		break;
1535 	case RTE_FLOW_ITEM_TYPE_UDP:
1536 		mask_support = (const char *)&dpaa2_flow_item_udp_mask;
1537 		size = sizeof(struct rte_flow_item_udp);
1538 		break;
1539 	case RTE_FLOW_ITEM_TYPE_TCP:
1540 		mask_support = (const char *)&dpaa2_flow_item_tcp_mask;
1541 		size = sizeof(struct rte_flow_item_tcp);
1542 		break;
1543 	case RTE_FLOW_ITEM_TYPE_SCTP:
1544 		mask_support = (const char *)&dpaa2_flow_item_sctp_mask;
1545 		size = sizeof(struct rte_flow_item_sctp);
1546 		break;
1547 	case RTE_FLOW_ITEM_TYPE_GRE:
1548 		mask_support = (const char *)&dpaa2_flow_item_gre_mask;
1549 		size = sizeof(struct rte_flow_item_gre);
1550 		break;
1551 	case RTE_FLOW_ITEM_TYPE_VXLAN:
1552 		mask_support = (const char *)&dpaa2_flow_item_vxlan_mask;
1553 		size = sizeof(struct rte_flow_item_vxlan);
1554 		break;
1555 	case RTE_FLOW_ITEM_TYPE_ECPRI:
1556 		mask_support = (const char *)&dpaa2_flow_item_ecpri_mask;
1557 		size = sizeof(struct rte_flow_item_ecpri);
1558 		break;
1559 	default:
1560 		return -EINVAL;
1561 	}
1562 
1563 	memcpy(mask, mask_support, size);
1564 
1565 	for (i = 0; i < size; i++)
1566 		mask[i] = (mask[i] | mask_src[i]);
1567 
1568 	if (memcmp(mask, mask_support, size))
1569 		return -1;
1570 
1571 	return 0;
1572 }
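/* The check above verifies that mask_src is a subset of the supported
 * mask: OR-ing the two must not change the supported mask. Any bit set
 * in mask_src outside the supported fields makes the memcmp() differ
 * and rejects the pattern.
 */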
1573 
1574 static int
1575 dpaa2_flow_identify_by_faf(struct dpaa2_dev_priv *priv,
1576 	struct dpaa2_dev_flow *flow,
1577 	enum dpaa2_rx_faf_offset faf_off,
1578 	enum dpaa2_flow_dist_type dist_type,
1579 	int group, int *recfg)
1580 {
1581 	int ret, index, local_cfg = 0;
1582 	struct dpaa2_key_extract *extract;
1583 	struct dpaa2_key_profile *key_profile;
1584 	uint8_t faf_byte = faf_off / 8;
1585 
1586 	if (dist_type & DPAA2_FLOW_QOS_TYPE) {
1587 		extract = &priv->extract.qos_key_extract;
1588 		key_profile = &extract->key_profile;
1589 
1590 		index = dpaa2_flow_extract_search(key_profile,
1591 				DPAA2_FAF_KEY, NET_PROT_NONE, faf_byte);
1592 		if (index < 0) {
1593 			ret = dpaa2_flow_faf_add_hdr(faf_byte,
1594 					priv, DPAA2_FLOW_QOS_TYPE, group,
1595 					NULL);
1596 			if (ret) {
1597 				DPAA2_PMD_ERR("QOS faf extract add failed");
1598 
1599 				return -EINVAL;
1600 			}
1601 			local_cfg |= DPAA2_FLOW_QOS_TYPE;
1602 		}
1603 
1604 		ret = dpaa2_flow_faf_add_rule(priv, flow, faf_off, group,
1605 				DPAA2_FLOW_QOS_TYPE);
1606 		if (ret) {
1607 			DPAA2_PMD_ERR("QoS faf rule set failed");
1608 			return -EINVAL;
1609 		}
1610 	}
1611 
1612 	if (dist_type & DPAA2_FLOW_FS_TYPE) {
1613 		extract = &priv->extract.tc_key_extract[group];
1614 		key_profile = &extract->key_profile;
1615 
1616 		index = dpaa2_flow_extract_search(key_profile,
1617 				DPAA2_FAF_KEY, NET_PROT_NONE, faf_byte);
1618 		if (index < 0) {
1619 			ret = dpaa2_flow_faf_add_hdr(faf_byte,
1620 					priv, DPAA2_FLOW_FS_TYPE, group,
1621 					NULL);
1622 			if (ret) {
1623 				DPAA2_PMD_ERR("FS[%d] faf extract add failed",
1624 					group);
1625 
1626 				return -EINVAL;
1627 			}
1628 			local_cfg |= DPAA2_FLOW_FS_TYPE;
1629 		}
1630 
1631 		ret = dpaa2_flow_faf_add_rule(priv, flow, faf_off, group,
1632 				DPAA2_FLOW_FS_TYPE);
1633 		if (ret) {
1634 			DPAA2_PMD_ERR("FS[%d] faf rule set failed",
1635 				group);
1636 			return -EINVAL;
1637 		}
1638 	}
1639 
1640 	if (recfg)
1641 		*recfg |= local_cfg;
1642 
1643 	return 0;
1644 }
1645 
1646 static int
1647 dpaa2_flow_add_pr_extract_rule(struct dpaa2_dev_flow *flow,
1648 	uint32_t pr_offset, uint32_t pr_size,
1649 	const void *key, const void *mask,
1650 	struct dpaa2_dev_priv *priv, int tc_id, int *recfg,
1651 	enum dpaa2_flow_dist_type dist_type)
1652 {
1653 	int index, ret, local_cfg = 0;
1654 	struct dpaa2_key_extract *key_extract;
1655 	struct dpaa2_key_profile *key_profile;
1656 	uint32_t pr_field = pr_offset << 16 | pr_size;
1657 
1658 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
1659 		key_extract = &priv->extract.qos_key_extract;
1660 	else
1661 		key_extract = &priv->extract.tc_key_extract[tc_id];
1662 
1663 	key_profile = &key_extract->key_profile;
1664 
1665 	index = dpaa2_flow_extract_search(key_profile,
1666 			DPAA2_PR_KEY, NET_PROT_NONE, pr_field);
1667 	if (index < 0) {
1668 		ret = dpaa2_flow_pr_add_hdr(pr_offset,
1669 				pr_size, priv,
1670 				dist_type, tc_id, NULL);
1671 		if (ret) {
1672 			DPAA2_PMD_ERR("PR add off(%d)/size(%d) failed",
1673 				pr_offset, pr_size);
1674 
1675 			return ret;
1676 		}
1677 		local_cfg |= dist_type;
1678 	}
1679 
1680 	ret = dpaa2_flow_pr_rule_data_set(flow, key_profile,
1681 			pr_offset, pr_size, key, mask, dist_type);
1682 	if (ret) {
1683 		DPAA2_PMD_ERR("PR off(%d)/size(%d) rule data set failed",
1684 			pr_offset, pr_size);
1685 
1686 		return ret;
1687 	}
1688 
1689 	if (recfg)
1690 		*recfg |= local_cfg;
1691 
1692 	return 0;
1693 }
1694 
1695 static int
1696 dpaa2_flow_add_hdr_extract_rule(struct dpaa2_dev_flow *flow,
1697 	enum net_prot prot, uint32_t field,
1698 	const void *key, const void *mask, int size,
1699 	struct dpaa2_dev_priv *priv, int tc_id, int *recfg,
1700 	enum dpaa2_flow_dist_type dist_type)
1701 {
1702 	int index, ret, local_cfg = 0;
1703 	struct dpaa2_key_extract *key_extract;
1704 	struct dpaa2_key_profile *key_profile;
1705 
1706 	if (dpaa2_flow_ip_address_extract(prot, field))
1707 		return -EINVAL;
1708 
1709 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
1710 		key_extract = &priv->extract.qos_key_extract;
1711 	else
1712 		key_extract = &priv->extract.tc_key_extract[tc_id];
1713 
1714 	key_profile = &key_extract->key_profile;
1715 
1716 	index = dpaa2_flow_extract_search(key_profile,
1717 			DPAA2_NET_PROT_KEY, prot, field);
1718 	if (index < 0) {
1719 		ret = dpaa2_flow_extract_add_hdr(prot,
1720 				field, size, priv,
1721 				dist_type, tc_id, NULL);
1722 		if (ret) {
1723 			DPAA2_PMD_ERR("Extract P(%d)/F(%d) failed",
1724 				prot, field);
1725 
1726 			return ret;
1727 		}
1728 		local_cfg |= dist_type;
1729 	}
1730 
1731 	ret = dpaa2_flow_hdr_rule_data_set(flow, key_profile,
1732 			prot, field, size, key, mask, dist_type);
1733 	if (ret) {
1734 		DPAA2_PMD_ERR("P(%d)/F(%d) rule data set failed",
1735 			prot, field);
1736 
1737 		return ret;
1738 	}
1739 
1740 	if (recfg)
1741 		*recfg |= local_cfg;
1742 
1743 	return 0;
1744 }
1745 
1746 static int
1747 dpaa2_flow_add_ipaddr_extract_rule(struct dpaa2_dev_flow *flow,
1748 	enum net_prot prot, uint32_t field,
1749 	const void *key, const void *mask, int size,
1750 	struct dpaa2_dev_priv *priv, int tc_id, int *recfg,
1751 	enum dpaa2_flow_dist_type dist_type)
1752 {
1753 	int local_cfg = 0, num, ipaddr_extract_len = 0;
1754 	struct dpaa2_key_extract *key_extract;
1755 	struct dpaa2_key_profile *key_profile;
1756 	struct dpkg_profile_cfg *dpkg;
1757 	uint8_t *key_addr, *mask_addr;
1758 	union ip_addr_extract_rule *ip_addr_data;
1759 	union ip_addr_extract_rule *ip_addr_mask;
1760 	enum net_prot orig_prot;
1761 	uint32_t orig_field;
1762 
1763 	if (prot != NET_PROT_IPV4 && prot != NET_PROT_IPV6)
1764 		return -EINVAL;
1765 
1766 	if (prot == NET_PROT_IPV4 && field != NH_FLD_IPV4_SRC_IP &&
1767 		field != NH_FLD_IPV4_DST_IP) {
1768 		return -EINVAL;
1769 	}
1770 
1771 	if (prot == NET_PROT_IPV6 && field != NH_FLD_IPV6_SRC_IP &&
1772 		field != NH_FLD_IPV6_DST_IP) {
1773 		return -EINVAL;
1774 	}
1775 
1776 	orig_prot = prot;
1777 	orig_field = field;
1778 
1779 	if (prot == NET_PROT_IPV4 &&
1780 		field == NH_FLD_IPV4_SRC_IP) {
1781 		prot = NET_PROT_IP;
1782 		field = NH_FLD_IP_SRC;
1783 	} else if (prot == NET_PROT_IPV4 &&
1784 		field == NH_FLD_IPV4_DST_IP) {
1785 		prot = NET_PROT_IP;
1786 		field = NH_FLD_IP_DST;
1787 	} else if (prot == NET_PROT_IPV6 &&
1788 		field == NH_FLD_IPV6_SRC_IP) {
1789 		prot = NET_PROT_IP;
1790 		field = NH_FLD_IP_SRC;
1791 	} else if (prot == NET_PROT_IPV6 &&
1792 		field == NH_FLD_IPV6_DST_IP) {
1793 		prot = NET_PROT_IP;
1794 		field = NH_FLD_IP_DST;
1795 	} else {
1796 		DPAA2_PMD_ERR("Invalid P(%d)/F(%d) to extract IP address",
1797 			prot, field);
1798 		return -EINVAL;
1799 	}
1800 
1801 	if (dist_type == DPAA2_FLOW_QOS_TYPE) {
1802 		key_extract = &priv->extract.qos_key_extract;
1803 		key_profile = &key_extract->key_profile;
1804 		dpkg = &key_extract->dpkg;
1805 		num = key_profile->num;
1806 		key_addr = flow->qos_key_addr;
1807 		mask_addr = flow->qos_mask_addr;
1808 	} else {
1809 		key_extract = &priv->extract.tc_key_extract[tc_id];
1810 		key_profile = &key_extract->key_profile;
1811 		dpkg = &key_extract->dpkg;
1812 		num = key_profile->num;
1813 		key_addr = flow->fs_key_addr;
1814 		mask_addr = flow->fs_mask_addr;
1815 	}
1816 
1817 	if (num >= DPKG_MAX_NUM_OF_EXTRACTS) {
1818 		DPAA2_PMD_ERR("Number of extracts overflows");
1819 		return -EINVAL;
1820 	}
1821 
1822 	if (key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT) {
1823 		if (field == NH_FLD_IP_SRC)
1824 			key_profile->ip_addr_type = IP_SRC_EXTRACT;
1825 		else
1826 			key_profile->ip_addr_type = IP_DST_EXTRACT;
1827 		ipaddr_extract_len = size;
1828 
1829 		key_profile->ip_addr_extract_pos = num;
1830 		if (num > 0) {
1831 			key_profile->ip_addr_extract_off =
1832 				key_profile->key_offset[num - 1] +
1833 				key_profile->key_size[num - 1];
1834 		} else {
1835 			key_profile->ip_addr_extract_off = 0;
1836 		}
1837 		key_profile->key_max_size += NH_FLD_IPV6_ADDR_SIZE;
1838 	} else if (key_profile->ip_addr_type == IP_SRC_EXTRACT) {
1839 		if (field == NH_FLD_IP_SRC) {
1840 			ipaddr_extract_len = size;
1841 			goto rule_configure;
1842 		}
1843 		key_profile->ip_addr_type = IP_SRC_DST_EXTRACT;
1844 		ipaddr_extract_len = size * 2;
1845 		key_profile->key_max_size += NH_FLD_IPV6_ADDR_SIZE;
1846 	} else if (key_profile->ip_addr_type == IP_DST_EXTRACT) {
1847 		if (field == NH_FLD_IP_DST) {
1848 			ipaddr_extract_len = size;
1849 			goto rule_configure;
1850 		}
1851 		key_profile->ip_addr_type = IP_DST_SRC_EXTRACT;
1852 		ipaddr_extract_len = size * 2;
1853 		key_profile->key_max_size += NH_FLD_IPV6_ADDR_SIZE;
1854 	}
1855 	key_profile->num++;
1856 	key_profile->prot_field[num].type = DPAA2_NET_PROT_KEY;
1857 
1858 	dpkg->extracts[num].extract.from_hdr.prot = prot;
1859 	dpkg->extracts[num].extract.from_hdr.field = field;
1860 	dpkg->extracts[num].extract.from_hdr.type = DPKG_FULL_FIELD;
1861 	dpkg->num_extracts++;
1862 
1863 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
1864 		local_cfg = DPAA2_FLOW_QOS_TYPE;
1865 	else
1866 		local_cfg = DPAA2_FLOW_FS_TYPE;
1867 
1868 rule_configure:
1869 	key_addr += key_profile->ip_addr_extract_off;
1870 	ip_addr_data = (union ip_addr_extract_rule *)key_addr;
1871 	mask_addr += key_profile->ip_addr_extract_off;
1872 	ip_addr_mask = (union ip_addr_extract_rule *)mask_addr;
1873 
1874 	if (orig_prot == NET_PROT_IPV4 &&
1875 		orig_field == NH_FLD_IPV4_SRC_IP) {
1876 		if (key_profile->ip_addr_type == IP_SRC_EXTRACT ||
1877 			key_profile->ip_addr_type == IP_SRC_DST_EXTRACT) {
1878 			memcpy(&ip_addr_data->ipv4_sd_addr.ipv4_src,
1879 				key, size);
1880 			memcpy(&ip_addr_mask->ipv4_sd_addr.ipv4_src,
1881 				mask, size);
1882 		} else {
1883 			memcpy(&ip_addr_data->ipv4_ds_addr.ipv4_src,
1884 				key, size);
1885 			memcpy(&ip_addr_mask->ipv4_ds_addr.ipv4_src,
1886 				mask, size);
1887 		}
1888 	} else if (orig_prot == NET_PROT_IPV4 &&
1889 		orig_field == NH_FLD_IPV4_DST_IP) {
1890 		if (key_profile->ip_addr_type == IP_DST_EXTRACT ||
1891 			key_profile->ip_addr_type == IP_DST_SRC_EXTRACT) {
1892 			memcpy(&ip_addr_data->ipv4_ds_addr.ipv4_dst,
1893 				key, size);
1894 			memcpy(&ip_addr_mask->ipv4_ds_addr.ipv4_dst,
1895 				mask, size);
1896 		} else {
1897 			memcpy(&ip_addr_data->ipv4_sd_addr.ipv4_dst,
1898 				key, size);
1899 			memcpy(&ip_addr_mask->ipv4_sd_addr.ipv4_dst,
1900 				mask, size);
1901 		}
1902 	} else if (orig_prot == NET_PROT_IPV6 &&
1903 		orig_field == NH_FLD_IPV6_SRC_IP) {
1904 		if (key_profile->ip_addr_type == IP_SRC_EXTRACT ||
1905 			key_profile->ip_addr_type == IP_SRC_DST_EXTRACT) {
1906 			memcpy(ip_addr_data->ipv6_sd_addr.ipv6_src,
1907 				key, size);
1908 			memcpy(ip_addr_mask->ipv6_sd_addr.ipv6_src,
1909 				mask, size);
1910 		} else {
1911 			memcpy(ip_addr_data->ipv6_ds_addr.ipv6_src,
1912 				key, size);
1913 			memcpy(ip_addr_mask->ipv6_ds_addr.ipv6_src,
1914 				mask, size);
1915 		}
1916 	} else if (orig_prot == NET_PROT_IPV6 &&
1917 		orig_field == NH_FLD_IPV6_DST_IP) {
1918 		if (key_profile->ip_addr_type == IP_DST_EXTRACT ||
1919 			key_profile->ip_addr_type == IP_DST_SRC_EXTRACT) {
1920 			memcpy(ip_addr_data->ipv6_ds_addr.ipv6_dst,
1921 				key, size);
1922 			memcpy(ip_addr_mask->ipv6_ds_addr.ipv6_dst,
1923 				mask, size);
1924 		} else {
1925 			memcpy(ip_addr_data->ipv6_sd_addr.ipv6_dst,
1926 				key, size);
1927 			memcpy(ip_addr_mask->ipv6_sd_addr.ipv6_dst,
1928 				mask, size);
1929 		}
1930 	}
1931 
1932 	if (dist_type == DPAA2_FLOW_QOS_TYPE) {
1933 		flow->qos_rule_size =
1934 			key_profile->ip_addr_extract_off + ipaddr_extract_len;
1935 	} else {
1936 		flow->fs_rule_size =
1937 			key_profile->ip_addr_extract_off + ipaddr_extract_len;
1938 	}
1939 
1940 	if (recfg)
1941 		*recfg |= local_cfg;
1942 
1943 	return 0;
1944 }
1945 
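/*
 * Illustrative sketch (not part of the driver): IP source and destination
 * addresses are placed at the tail of the rule key so the pair stays
 * adjacent regardless of the order the items were added. With both IPv4
 * addresses extracted in src-then-dst order, the tail of the key is the
 * ipv4_sd_addr view of union ip_addr_extract_rule, e.g.:
 *
 *	union ip_addr_extract_rule tail;
 *
 *	memset(&tail, 0, sizeof(tail));
 *	tail.ipv4_sd_addr.ipv4_src = rte_cpu_to_be_32(RTE_IPV4(192, 168, 0, 1));
 *	tail.ipv4_sd_addr.ipv4_dst = rte_cpu_to_be_32(RTE_IPV4(10, 0, 0, 1));
 */
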
1946 static int
1947 dpaa2_configure_flow_tunnel_eth(struct dpaa2_dev_flow *flow,
1948 	struct rte_eth_dev *dev,
1949 	const struct rte_flow_attr *attr,
1950 	const struct rte_flow_item *pattern,
1951 	int *device_configured)
1952 {
1953 	int ret, local_cfg = 0;
1954 	uint32_t group;
1955 	const struct rte_flow_item_eth *spec, *mask;
1956 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1957 	const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0};
1958 
1959 	group = attr->group;
1960 
1961 	/* Parse pattern list to get the matching parameters */
1962 	spec = pattern->spec;
1963 	mask = pattern->mask ?
1964 			pattern->mask : &dpaa2_flow_item_eth_mask;
1965 
1966 	/* Get traffic class index and flow id to be configured */
1967 	flow->tc_id = group;
1968 	flow->tc_index = attr->priority;
1969 
1970 	if (!spec)
1971 		return 0;
1972 
1973 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
1974 		RTE_FLOW_ITEM_TYPE_ETH)) {
		DPAA2_PMD_WARN("Extract field(s) of Ethernet not supported.");
1976 
1977 		return -EINVAL;
1978 	}
1979 
1980 	if (memcmp((const char *)&mask->src,
1981 		zero_cmp, RTE_ETHER_ADDR_LEN)) {
1982 		/*SRC[0:1]*/
1983 		ret = dpaa2_flow_add_pr_extract_rule(flow,
1984 			DPAA2_VXLAN_IN_SADDR0_OFFSET,
1985 			1, &spec->src.addr_bytes[0],
1986 			&mask->src.addr_bytes[0],
1987 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
1988 		if (ret)
1989 			return ret;
1990 		/*SRC[1:2]*/
1991 		ret = dpaa2_flow_add_pr_extract_rule(flow,
1992 			DPAA2_VXLAN_IN_SADDR1_OFFSET,
1993 			2, &spec->src.addr_bytes[1],
1994 			&mask->src.addr_bytes[1],
1995 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
1996 		if (ret)
1997 			return ret;
1998 		/*SRC[3:1]*/
1999 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2000 			DPAA2_VXLAN_IN_SADDR3_OFFSET,
2001 			1, &spec->src.addr_bytes[3],
2002 			&mask->src.addr_bytes[3],
2003 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2004 		if (ret)
2005 			return ret;
2006 		/*SRC[4:2]*/
2007 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2008 			DPAA2_VXLAN_IN_SADDR4_OFFSET,
2009 			2, &spec->src.addr_bytes[4],
2010 			&mask->src.addr_bytes[4],
2011 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2012 		if (ret)
2013 			return ret;
2014 
2015 		/*SRC[0:1]*/
2016 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2017 			DPAA2_VXLAN_IN_SADDR0_OFFSET,
2018 			1, &spec->src.addr_bytes[0],
2019 			&mask->src.addr_bytes[0],
2020 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2021 		if (ret)
2022 			return ret;
2023 		/*SRC[1:2]*/
2024 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2025 			DPAA2_VXLAN_IN_SADDR1_OFFSET,
2026 			2, &spec->src.addr_bytes[1],
2027 			&mask->src.addr_bytes[1],
2028 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2029 		if (ret)
2030 			return ret;
2031 		/*SRC[3:1]*/
2032 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2033 			DPAA2_VXLAN_IN_SADDR3_OFFSET,
2034 			1, &spec->src.addr_bytes[3],
2035 			&mask->src.addr_bytes[3],
2036 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2037 		if (ret)
2038 			return ret;
2039 		/*SRC[4:2]*/
2040 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2041 			DPAA2_VXLAN_IN_SADDR4_OFFSET,
2042 			2, &spec->src.addr_bytes[4],
2043 			&mask->src.addr_bytes[4],
2044 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2045 		if (ret)
2046 			return ret;
2047 	}
2048 
2049 	if (memcmp((const char *)&mask->dst,
2050 		zero_cmp, RTE_ETHER_ADDR_LEN)) {
2051 		/*DST[0:1]*/
2052 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2053 			DPAA2_VXLAN_IN_DADDR0_OFFSET,
2054 			1, &spec->dst.addr_bytes[0],
2055 			&mask->dst.addr_bytes[0],
2056 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2057 		if (ret)
2058 			return ret;
2059 		/*DST[1:1]*/
2060 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2061 			DPAA2_VXLAN_IN_DADDR1_OFFSET,
2062 			1, &spec->dst.addr_bytes[1],
2063 			&mask->dst.addr_bytes[1],
2064 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2065 		if (ret)
2066 			return ret;
2067 		/*DST[2:3]*/
2068 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2069 			DPAA2_VXLAN_IN_DADDR2_OFFSET,
2070 			3, &spec->dst.addr_bytes[2],
2071 			&mask->dst.addr_bytes[2],
2072 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2073 		if (ret)
2074 			return ret;
2075 		/*DST[5:1]*/
2076 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2077 			DPAA2_VXLAN_IN_DADDR5_OFFSET,
2078 			1, &spec->dst.addr_bytes[5],
2079 			&mask->dst.addr_bytes[5],
2080 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2081 		if (ret)
2082 			return ret;
2083 
2084 		/*DST[0:1]*/
2085 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2086 			DPAA2_VXLAN_IN_DADDR0_OFFSET,
2087 			1, &spec->dst.addr_bytes[0],
2088 			&mask->dst.addr_bytes[0],
2089 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2090 		if (ret)
2091 			return ret;
2092 		/*DST[1:1]*/
2093 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2094 			DPAA2_VXLAN_IN_DADDR1_OFFSET,
2095 			1, &spec->dst.addr_bytes[1],
2096 			&mask->dst.addr_bytes[1],
2097 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2098 		if (ret)
2099 			return ret;
2100 		/*DST[2:3]*/
2101 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2102 			DPAA2_VXLAN_IN_DADDR2_OFFSET,
2103 			3, &spec->dst.addr_bytes[2],
2104 			&mask->dst.addr_bytes[2],
2105 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2106 		if (ret)
2107 			return ret;
2108 		/*DST[5:1]*/
2109 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2110 			DPAA2_VXLAN_IN_DADDR5_OFFSET,
2111 			1, &spec->dst.addr_bytes[5],
2112 			&mask->dst.addr_bytes[5],
2113 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2114 		if (ret)
2115 			return ret;
2116 	}
2117 
2118 	if (memcmp((const char *)&mask->type,
2119 		zero_cmp, sizeof(rte_be16_t))) {
2120 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2121 			DPAA2_VXLAN_IN_TYPE_OFFSET,
2122 			sizeof(rte_be16_t), &spec->type, &mask->type,
2123 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2124 		if (ret)
2125 			return ret;
2126 		ret = dpaa2_flow_add_pr_extract_rule(flow,
2127 			DPAA2_VXLAN_IN_TYPE_OFFSET,
2128 			sizeof(rte_be16_t), &spec->type, &mask->type,
2129 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2130 		if (ret)
2131 			return ret;
2132 	}
2133 
2134 	(*device_configured) |= local_cfg;
2135 
2136 	return 0;
2137 }
2138 
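/*
 * Illustrative testpmd usage (a sketch, assuming port 0 is a DPAA2 port):
 * matching the inner Ethernet header of a VXLAN-encapsulated frame goes
 * through the parser-result extracts above, e.g.:
 *
 *   flow create 0 ingress pattern vxlan / eth dst is 02:00:00:00:00:01 / end
 *        actions queue index 1 / end
 */
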
2139 static int
2140 dpaa2_configure_flow_eth(struct dpaa2_dev_flow *flow,
2141 	struct rte_eth_dev *dev,
2142 	const struct rte_flow_attr *attr,
2143 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
2144 	const struct rte_flow_action actions[] __rte_unused,
2145 	struct rte_flow_error *error __rte_unused,
2146 	int *device_configured)
2147 {
2148 	int ret, local_cfg = 0;
2149 	uint32_t group;
2150 	const struct rte_flow_item_eth *spec, *mask;
2151 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2152 	const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0};
2153 	const struct rte_flow_item *pattern =
2154 		&dpaa2_pattern->generic_item;
2155 
2156 	if (dpaa2_pattern->in_tunnel) {
2157 		return dpaa2_configure_flow_tunnel_eth(flow,
2158 				dev, attr, pattern, device_configured);
2159 	}
2160 
2161 	group = attr->group;
2162 
2163 	/* Parse pattern list to get the matching parameters */
2164 	spec = pattern->spec;
2165 	mask = pattern->mask ?
2166 			pattern->mask : &dpaa2_flow_item_eth_mask;
2167 
2168 	/* Get traffic class index and flow id to be configured */
2169 	flow->tc_id = group;
2170 	flow->tc_index = attr->priority;
2171 
2172 	if (!spec) {
2173 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2174 				FAF_ETH_FRAM, DPAA2_FLOW_QOS_TYPE,
2175 				group, &local_cfg);
2176 		if (ret)
2177 			return ret;
2178 
2179 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2180 				FAF_ETH_FRAM, DPAA2_FLOW_FS_TYPE,
2181 				group, &local_cfg);
2182 		if (ret)
2183 			return ret;
2184 
2185 		(*device_configured) |= local_cfg;
2186 		return 0;
2187 	}
2188 
2189 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2190 		RTE_FLOW_ITEM_TYPE_ETH)) {
		DPAA2_PMD_WARN("Extract field(s) of Ethernet not supported.");
2192 
2193 		return -EINVAL;
2194 	}
2195 
2196 	if (memcmp((const char *)&mask->src,
2197 		zero_cmp, RTE_ETHER_ADDR_LEN)) {
2198 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH,
2199 			NH_FLD_ETH_SA, &spec->src.addr_bytes,
2200 			&mask->src.addr_bytes, RTE_ETHER_ADDR_LEN,
2201 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2202 		if (ret)
2203 			return ret;
2204 
2205 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH,
2206 			NH_FLD_ETH_SA, &spec->src.addr_bytes,
2207 			&mask->src.addr_bytes, RTE_ETHER_ADDR_LEN,
2208 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2209 		if (ret)
2210 			return ret;
2211 	}
2212 
2213 	if (memcmp((const char *)&mask->dst,
2214 		zero_cmp, RTE_ETHER_ADDR_LEN)) {
2215 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH,
2216 			NH_FLD_ETH_DA, &spec->dst.addr_bytes,
2217 			&mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN,
2218 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2219 		if (ret)
2220 			return ret;
2221 
2222 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH,
2223 			NH_FLD_ETH_DA, &spec->dst.addr_bytes,
2224 			&mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN,
2225 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2226 		if (ret)
2227 			return ret;
2228 	}
2229 
2230 	if (memcmp((const char *)&mask->type,
2231 		zero_cmp, sizeof(rte_be16_t))) {
2232 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH,
2233 			NH_FLD_ETH_TYPE, &spec->type,
2234 			&mask->type, sizeof(rte_be16_t),
2235 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2236 		if (ret)
2237 			return ret;
2238 
2239 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH,
2240 			NH_FLD_ETH_TYPE, &spec->type,
2241 			&mask->type, sizeof(rte_be16_t),
2242 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2243 		if (ret)
2244 			return ret;
2245 	}
2246 
2247 	(*device_configured) |= local_cfg;
2248 
2249 	return 0;
2250 }
2251 
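/*
 * Illustrative testpmd usage (a sketch): a plain Ethernet match built from
 * header extracts, classifying on source MAC and EtherType; an eth item
 * with no spec instead classifies on Ethernet presence alone (FAF_ETH_FRAM):
 *
 *   flow create 0 group 0 priority 0 ingress
 *        pattern eth src is 02:00:00:00:00:01 type is 0x0800 / end
 *        actions queue index 1 / end
 */
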
2252 static int
2253 dpaa2_configure_flow_tunnel_vlan(struct dpaa2_dev_flow *flow,
2254 	struct rte_eth_dev *dev,
2255 	const struct rte_flow_attr *attr,
2256 	const struct rte_flow_item *pattern,
2257 	int *device_configured)
2258 {
2259 	int ret, local_cfg = 0;
2260 	uint32_t group;
2261 	const struct rte_flow_item_vlan *spec, *mask;
2262 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2263 
2264 	group = attr->group;
2265 
2266 	/* Parse pattern list to get the matching parameters */
2267 	spec = pattern->spec;
2268 	mask = pattern->mask ?
2269 		pattern->mask : &dpaa2_flow_item_vlan_mask;
2270 
2271 	/* Get traffic class index and flow id to be configured */
2272 	flow->tc_id = group;
2273 	flow->tc_index = attr->priority;
2274 
2275 	if (!spec) {
2276 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2277 				FAFE_VXLAN_IN_VLAN_FRAM,
2278 				DPAA2_FLOW_QOS_TYPE,
2279 				group, &local_cfg);
2280 		if (ret)
2281 			return ret;
2282 
2283 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2284 				FAFE_VXLAN_IN_VLAN_FRAM,
2285 				DPAA2_FLOW_FS_TYPE,
2286 				group, &local_cfg);
2287 		if (ret)
2288 			return ret;
2289 
2290 		(*device_configured) |= local_cfg;
2291 		return 0;
2292 	}
2293 
2294 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2295 		RTE_FLOW_ITEM_TYPE_VLAN)) {
		DPAA2_PMD_WARN("Extract field(s) of VLAN not supported.");
2297 
2298 		return -EINVAL;
2299 	}
2300 
2301 	if (!mask->tci)
2302 		return 0;
2303 
2304 	ret = dpaa2_flow_add_pr_extract_rule(flow,
2305 			DPAA2_VXLAN_IN_TCI_OFFSET,
2306 			sizeof(rte_be16_t), &spec->tci, &mask->tci,
2307 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2308 	if (ret)
2309 		return ret;
2310 
2311 	ret = dpaa2_flow_add_pr_extract_rule(flow,
2312 			DPAA2_VXLAN_IN_TCI_OFFSET,
2313 			sizeof(rte_be16_t), &spec->tci, &mask->tci,
2314 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2315 	if (ret)
2316 		return ret;
2317 
2318 	(*device_configured) |= local_cfg;
2319 
2320 	return 0;
2321 }
2322 
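/*
 * Illustrative testpmd usage (a sketch): the inner-VLAN TCI of a VXLAN
 * frame is matched via a parser-result extract rather than a header
 * extract, e.g.:
 *
 *   flow create 0 ingress pattern vxlan / vlan tci is 100 / end
 *        actions queue index 1 / end
 */
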
2323 static int
2324 dpaa2_configure_flow_vlan(struct dpaa2_dev_flow *flow,
2325 	struct rte_eth_dev *dev,
2326 	const struct rte_flow_attr *attr,
2327 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
2328 	const struct rte_flow_action actions[] __rte_unused,
2329 	struct rte_flow_error *error __rte_unused,
2330 	int *device_configured)
2331 {
2332 	int ret, local_cfg = 0;
2333 	uint32_t group;
2334 	const struct rte_flow_item_vlan *spec, *mask;
2335 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2336 	const struct rte_flow_item *pattern =
2337 		&dpaa2_pattern->generic_item;
2338 
2339 	if (dpaa2_pattern->in_tunnel) {
2340 		return dpaa2_configure_flow_tunnel_vlan(flow,
2341 				dev, attr, pattern, device_configured);
2342 	}
2343 
2344 	group = attr->group;
2345 
2346 	/* Parse pattern list to get the matching parameters */
2347 	spec = pattern->spec;
2348 	mask = pattern->mask ? pattern->mask : &dpaa2_flow_item_vlan_mask;
2349 
2350 	/* Get traffic class index and flow id to be configured */
2351 	flow->tc_id = group;
2352 	flow->tc_index = attr->priority;
2353 
2354 	if (!spec) {
2355 		ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_VLAN_FRAM,
2356 						 DPAA2_FLOW_QOS_TYPE, group,
2357 						 &local_cfg);
2358 		if (ret)
2359 			return ret;
2360 
2361 		ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_VLAN_FRAM,
2362 						 DPAA2_FLOW_FS_TYPE, group,
2363 						 &local_cfg);
2364 		if (ret)
2365 			return ret;
2366 
2367 		(*device_configured) |= local_cfg;
2368 		return 0;
2369 	}
2370 
2371 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2372 				       RTE_FLOW_ITEM_TYPE_VLAN)) {
		DPAA2_PMD_WARN("Extract field(s) of VLAN not supported.");
2374 		return -EINVAL;
2375 	}
2376 
2377 	if (!mask->tci)
2378 		return 0;
2379 
2380 	ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_VLAN,
2381 					      NH_FLD_VLAN_TCI, &spec->tci,
2382 					      &mask->tci, sizeof(rte_be16_t),
2383 					      priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2384 	if (ret)
2385 		return ret;
2386 
2387 	ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_VLAN,
2388 					      NH_FLD_VLAN_TCI, &spec->tci,
2389 					      &mask->tci, sizeof(rte_be16_t),
2390 					      priv, group, &local_cfg,
2391 					      DPAA2_FLOW_FS_TYPE);
2392 	if (ret)
2393 		return ret;
2394 
2395 	(*device_configured) |= local_cfg;
2396 	return 0;
2397 }
2398 
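/*
 * Illustrative testpmd usage (a sketch): matching the outer VLAN TCI; a
 * vlan item with no spec classifies on VLAN presence alone (FAF_VLAN_FRAM):
 *
 *   flow create 0 ingress pattern eth / vlan tci is 100 / end
 *        actions queue index 2 / end
 */
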
2399 static int
2400 dpaa2_configure_flow_ipv4(struct dpaa2_dev_flow *flow, struct rte_eth_dev *dev,
2401 			  const struct rte_flow_attr *attr,
2402 			  const struct rte_dpaa2_flow_item *dpaa2_pattern,
2403 			  const struct rte_flow_action actions[] __rte_unused,
2404 			  struct rte_flow_error *error __rte_unused,
2405 			  int *device_configured)
2406 {
2407 	int ret, local_cfg = 0;
2408 	uint32_t group;
	const struct rte_flow_item_ipv4 *spec_ipv4 = NULL, *mask_ipv4 = NULL;
2410 	const void *key, *mask;
2411 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2412 	int size;
2413 	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
2414 
2415 	group = attr->group;
2416 
2417 	/* Parse pattern list to get the matching parameters */
2418 	spec_ipv4 = pattern->spec;
2419 	mask_ipv4 = pattern->mask ?
2420 		    pattern->mask : &dpaa2_flow_item_ipv4_mask;
2421 
2422 	if (dpaa2_pattern->in_tunnel) {
2423 		if (spec_ipv4) {
			DPAA2_PMD_ERR("Tunnel-IPv4 distribution not supported");
2425 			return -ENOTSUP;
2426 		}
2427 
2428 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2429 						 FAFE_VXLAN_IN_IPV4_FRAM,
2430 						 DPAA2_FLOW_QOS_TYPE, group,
2431 						 &local_cfg);
2432 		if (ret)
2433 			return ret;
2434 
2435 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2436 						 FAFE_VXLAN_IN_IPV4_FRAM,
2437 						 DPAA2_FLOW_FS_TYPE, group,
2438 						 &local_cfg);
		if (ret)
			return ret;

		(*device_configured) |= local_cfg;
		return 0;
2440 	}
2441 
2442 	/* Get traffic class index and flow id to be configured */
2443 	flow->tc_id = group;
2444 	flow->tc_index = attr->priority;
2445 
2446 	ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_IPV4_FRAM,
2447 					 DPAA2_FLOW_QOS_TYPE, group,
2448 					 &local_cfg);
2449 	if (ret)
2450 		return ret;
2451 
2452 	ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_IPV4_FRAM,
2453 					 DPAA2_FLOW_FS_TYPE, group, &local_cfg);
2454 	if (ret)
2455 		return ret;
2456 
2457 	if (!spec_ipv4) {
2458 		(*device_configured) |= local_cfg;
2459 		return 0;
2460 	}
2461 
2462 	if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv4,
2463 				       RTE_FLOW_ITEM_TYPE_IPV4)) {
		DPAA2_PMD_WARN("Extract field(s) of IPv4 not supported.");
2465 		return -EINVAL;
2466 	}
2467 
2468 	if (mask_ipv4->hdr.src_addr) {
2469 		key = &spec_ipv4->hdr.src_addr;
2470 		mask = &mask_ipv4->hdr.src_addr;
2471 		size = sizeof(rte_be32_t);
2472 
2473 		ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV4,
2474 							 NH_FLD_IPV4_SRC_IP,
2475 							 key, mask, size, priv,
2476 							 group, &local_cfg,
2477 							 DPAA2_FLOW_QOS_TYPE);
2478 		if (ret)
2479 			return ret;
2480 
2481 		ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV4,
2482 							 NH_FLD_IPV4_SRC_IP,
2483 							 key, mask, size, priv,
2484 							 group, &local_cfg,
2485 							 DPAA2_FLOW_FS_TYPE);
2486 		if (ret)
2487 			return ret;
2488 	}
2489 
2490 	if (mask_ipv4->hdr.dst_addr) {
2491 		key = &spec_ipv4->hdr.dst_addr;
2492 		mask = &mask_ipv4->hdr.dst_addr;
2493 		size = sizeof(rte_be32_t);
2494 
2495 		ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV4,
2496 							 NH_FLD_IPV4_DST_IP,
2497 							 key, mask, size, priv,
2498 							 group, &local_cfg,
2499 							 DPAA2_FLOW_QOS_TYPE);
2500 		if (ret)
2501 			return ret;
2502 		ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV4,
2503 							 NH_FLD_IPV4_DST_IP,
2504 							 key, mask, size, priv,
2505 							 group, &local_cfg,
2506 							 DPAA2_FLOW_FS_TYPE);
2507 		if (ret)
2508 			return ret;
2509 	}
2510 
2511 	if (mask_ipv4->hdr.next_proto_id) {
2512 		key = &spec_ipv4->hdr.next_proto_id;
2513 		mask = &mask_ipv4->hdr.next_proto_id;
2514 		size = sizeof(uint8_t);
2515 
2516 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IP,
2517 						      NH_FLD_IP_PROTO, key,
2518 						      mask, size, priv, group,
2519 						      &local_cfg,
2520 						      DPAA2_FLOW_QOS_TYPE);
2521 		if (ret)
2522 			return ret;
2523 
2524 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IP,
2525 						      NH_FLD_IP_PROTO, key,
2526 						      mask, size, priv, group,
2527 						      &local_cfg,
2528 						      DPAA2_FLOW_FS_TYPE);
2529 		if (ret)
2530 			return ret;
2531 	}
2532 
2533 	(*device_configured) |= local_cfg;
2534 	return 0;
2535 }
2536 
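/*
 * Illustrative testpmd usage (a sketch): attr.group selects the traffic
 * class and attr.priority the entry within it, so the following lands in
 * TC[1], entry[0], distributing by IPv4 destination address (the target
 * queue must belong to TC 1, see dpaa2_flow_verify_action() below):
 *
 *   flow create 0 group 1 priority 0 ingress
 *        pattern ipv4 dst is 10.0.0.1 / end
 *        actions queue index 4 / end
 */
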
2537 static int
2538 dpaa2_configure_flow_ipv6(struct dpaa2_dev_flow *flow, struct rte_eth_dev *dev,
2539 			  const struct rte_flow_attr *attr,
2540 			  const struct rte_dpaa2_flow_item *dpaa2_pattern,
2541 			  const struct rte_flow_action actions[] __rte_unused,
2542 			  struct rte_flow_error *error __rte_unused,
2543 			  int *device_configured)
2544 {
2545 	int ret, local_cfg = 0;
2546 	uint32_t group;
	const struct rte_flow_item_ipv6 *spec_ipv6 = NULL, *mask_ipv6 = NULL;
2548 	const void *key, *mask;
2549 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2550 	const char zero_cmp[NH_FLD_IPV6_ADDR_SIZE] = {0};
2551 	int size;
2552 	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
2553 
2554 	group = attr->group;
2555 
2556 	/* Parse pattern list to get the matching parameters */
2557 	spec_ipv6 = pattern->spec;
2558 	mask_ipv6 = pattern->mask ? pattern->mask : &dpaa2_flow_item_ipv6_mask;
2559 
2560 	/* Get traffic class index and flow id to be configured */
2561 	flow->tc_id = group;
2562 	flow->tc_index = attr->priority;
2563 
2564 	if (dpaa2_pattern->in_tunnel) {
2565 		if (spec_ipv6) {
			DPAA2_PMD_ERR("Tunnel-IPv6 distribution not supported");
2567 			return -ENOTSUP;
2568 		}
2569 
2570 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2571 						 FAFE_VXLAN_IN_IPV6_FRAM,
2572 						 DPAA2_FLOW_QOS_TYPE, group,
2573 						 &local_cfg);
2574 		if (ret)
2575 			return ret;
2576 
2577 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2578 						 FAFE_VXLAN_IN_IPV6_FRAM,
2579 						 DPAA2_FLOW_FS_TYPE, group,
2580 						 &local_cfg);
		if (ret)
			return ret;

		(*device_configured) |= local_cfg;
		return 0;
2582 	}
2583 
2584 	ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_IPV6_FRAM,
2585 					 DPAA2_FLOW_QOS_TYPE, group,
2586 					 &local_cfg);
2587 	if (ret)
2588 		return ret;
2589 
2590 	ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_IPV6_FRAM,
2591 					 DPAA2_FLOW_FS_TYPE, group, &local_cfg);
2592 	if (ret)
2593 		return ret;
2594 
2595 	if (!spec_ipv6) {
2596 		(*device_configured) |= local_cfg;
2597 		return 0;
2598 	}
2599 
2600 	if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv6,
2601 				       RTE_FLOW_ITEM_TYPE_IPV6)) {
		DPAA2_PMD_WARN("Extract field(s) of IPv6 not supported.");
2603 		return -EINVAL;
2604 	}
2605 
2606 	if (memcmp((const char *)&mask_ipv6->hdr.src_addr, zero_cmp, NH_FLD_IPV6_ADDR_SIZE)) {
2607 		key = &spec_ipv6->hdr.src_addr;
2608 		mask = &mask_ipv6->hdr.src_addr;
2609 		size = NH_FLD_IPV6_ADDR_SIZE;
2610 
2611 		ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV6,
2612 							 NH_FLD_IPV6_SRC_IP,
2613 							 key, mask, size, priv,
2614 							 group, &local_cfg,
2615 							 DPAA2_FLOW_QOS_TYPE);
2616 		if (ret)
2617 			return ret;
2618 
2619 		ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV6,
2620 							 NH_FLD_IPV6_SRC_IP,
2621 							 key, mask, size, priv,
2622 							 group, &local_cfg,
2623 							 DPAA2_FLOW_FS_TYPE);
2624 		if (ret)
2625 			return ret;
2626 	}
2627 
2628 	if (memcmp((const char *)&mask_ipv6->hdr.dst_addr, zero_cmp, NH_FLD_IPV6_ADDR_SIZE)) {
2629 		key = &spec_ipv6->hdr.dst_addr;
2630 		mask = &mask_ipv6->hdr.dst_addr;
2631 		size = NH_FLD_IPV6_ADDR_SIZE;
2632 
2633 		ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV6,
2634 							 NH_FLD_IPV6_DST_IP,
2635 							 key, mask, size, priv,
2636 							 group, &local_cfg,
2637 							 DPAA2_FLOW_QOS_TYPE);
2638 		if (ret)
2639 			return ret;
2640 
2641 		ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV6,
2642 							 NH_FLD_IPV6_DST_IP,
2643 							 key, mask, size, priv,
2644 							 group, &local_cfg,
2645 							 DPAA2_FLOW_FS_TYPE);
2646 		if (ret)
2647 			return ret;
2648 	}
2649 
2650 	if (mask_ipv6->hdr.proto) {
2651 		key = &spec_ipv6->hdr.proto;
2652 		mask = &mask_ipv6->hdr.proto;
2653 		size = sizeof(uint8_t);
2654 
2655 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IP,
2656 						      NH_FLD_IP_PROTO, key,
2657 						      mask, size, priv, group,
2658 						      &local_cfg,
2659 						      DPAA2_FLOW_QOS_TYPE);
2660 		if (ret)
2661 			return ret;
2662 
2663 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IP,
2664 						      NH_FLD_IP_PROTO, key,
2665 						      mask, size, priv, group,
2666 						      &local_cfg,
2667 						      DPAA2_FLOW_FS_TYPE);
2668 		if (ret)
2669 			return ret;
2670 	}
2671 
2672 	(*device_configured) |= local_cfg;
2673 	return 0;
2674 }
2675 
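/*
 * Illustrative testpmd usage (a sketch): each IPv6 address occupies
 * NH_FLD_IPV6_ADDR_SIZE (16) bytes of the key, so a src+dst match consumes
 * 32 bytes of the rule:
 *
 *   flow create 0 ingress
 *        pattern ipv6 src is 2001:db8::1 dst is 2001:db8::2 / end
 *        actions queue index 1 / end
 */
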
2676 static int
2677 dpaa2_configure_flow_icmp(struct dpaa2_dev_flow *flow,
2678 	struct rte_eth_dev *dev,
2679 	const struct rte_flow_attr *attr,
2680 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
2681 	const struct rte_flow_action actions[] __rte_unused,
2682 	struct rte_flow_error *error __rte_unused,
2683 	int *device_configured)
2684 {
2685 	int ret, local_cfg = 0;
2686 	uint32_t group;
2687 	const struct rte_flow_item_icmp *spec, *mask;
2688 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2689 	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
2690 
2691 	group = attr->group;
2692 
2693 	/* Parse pattern list to get the matching parameters */
2694 	spec = pattern->spec;
2695 	mask = pattern->mask ?
2696 		pattern->mask : &dpaa2_flow_item_icmp_mask;
2697 
2698 	/* Get traffic class index and flow id to be configured */
2699 	flow->tc_id = group;
2700 	flow->tc_index = attr->priority;
2701 
2702 	if (dpaa2_pattern->in_tunnel) {
		DPAA2_PMD_ERR("Tunnel-ICMP distribution not supported");
2704 		return -ENOTSUP;
2705 	}
2706 
2707 	if (!spec) {
2708 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2709 				FAF_ICMP_FRAM, DPAA2_FLOW_QOS_TYPE,
2710 				group, &local_cfg);
2711 		if (ret)
2712 			return ret;
2713 
2714 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2715 				FAF_ICMP_FRAM, DPAA2_FLOW_FS_TYPE,
2716 				group, &local_cfg);
2717 		if (ret)
2718 			return ret;
2719 
2720 		(*device_configured) |= local_cfg;
2721 		return 0;
2722 	}
2723 
2724 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2725 		RTE_FLOW_ITEM_TYPE_ICMP)) {
		DPAA2_PMD_WARN("Extract field(s) of ICMP not supported.");
2727 
2728 		return -EINVAL;
2729 	}
2730 
2731 	if (mask->hdr.icmp_type) {
2732 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ICMP,
2733 			NH_FLD_ICMP_TYPE, &spec->hdr.icmp_type,
2734 			&mask->hdr.icmp_type, sizeof(uint8_t),
2735 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2736 		if (ret)
2737 			return ret;
2738 
2739 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ICMP,
2740 			NH_FLD_ICMP_TYPE, &spec->hdr.icmp_type,
2741 			&mask->hdr.icmp_type, sizeof(uint8_t),
2742 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2743 		if (ret)
2744 			return ret;
2745 	}
2746 
2747 	if (mask->hdr.icmp_code) {
2748 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ICMP,
2749 			NH_FLD_ICMP_CODE, &spec->hdr.icmp_code,
2750 			&mask->hdr.icmp_code, sizeof(uint8_t),
2751 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2752 		if (ret)
2753 			return ret;
2754 
2755 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ICMP,
2756 			NH_FLD_ICMP_CODE, &spec->hdr.icmp_code,
2757 			&mask->hdr.icmp_code, sizeof(uint8_t),
2758 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2759 		if (ret)
2760 			return ret;
2761 	}
2762 
2763 	(*device_configured) |= local_cfg;
2764 
2765 	return 0;
2766 }
2767 
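/*
 * Illustrative testpmd usage (a sketch): ICMP echo-request classification
 * by type and code, e.g.:
 *
 *   flow create 0 ingress
 *        pattern eth / ipv4 / icmp icmp_type is 8 icmp_code is 0 / end
 *        actions queue index 1 / end
 */
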
2768 static int
2769 dpaa2_configure_flow_udp(struct dpaa2_dev_flow *flow,
2770 	struct rte_eth_dev *dev,
2771 	const struct rte_flow_attr *attr,
2772 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
2773 	const struct rte_flow_action actions[] __rte_unused,
2774 	struct rte_flow_error *error __rte_unused,
2775 	int *device_configured)
2776 {
2777 	int ret, local_cfg = 0;
2778 	uint32_t group;
2779 	const struct rte_flow_item_udp *spec, *mask;
2780 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2781 	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
2782 
2783 	group = attr->group;
2784 
2785 	/* Parse pattern list to get the matching parameters */
2786 	spec = pattern->spec;
2787 	mask = pattern->mask ?
2788 		pattern->mask : &dpaa2_flow_item_udp_mask;
2789 
2790 	/* Get traffic class index and flow id to be configured */
2791 	flow->tc_id = group;
2792 	flow->tc_index = attr->priority;
2793 
2794 	if (dpaa2_pattern->in_tunnel) {
2795 		if (spec) {
			DPAA2_PMD_ERR("Tunnel-UDP distribution not supported");
2797 			return -ENOTSUP;
2798 		}
2799 
2800 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2801 						 FAFE_VXLAN_IN_UDP_FRAM,
2802 						 DPAA2_FLOW_QOS_TYPE, group,
2803 						 &local_cfg);
2804 		if (ret)
2805 			return ret;
2806 
2807 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2808 						 FAFE_VXLAN_IN_UDP_FRAM,
2809 						 DPAA2_FLOW_FS_TYPE, group,
2810 						 &local_cfg);
		if (ret)
			return ret;

		(*device_configured) |= local_cfg;
		return 0;
2812 	}
2813 
2814 	ret = dpaa2_flow_identify_by_faf(priv, flow,
2815 			FAF_UDP_FRAM, DPAA2_FLOW_QOS_TYPE,
2816 			group, &local_cfg);
2817 	if (ret)
2818 		return ret;
2819 
2820 	ret = dpaa2_flow_identify_by_faf(priv, flow,
2821 			FAF_UDP_FRAM, DPAA2_FLOW_FS_TYPE,
2822 			group, &local_cfg);
2823 	if (ret)
2824 		return ret;
2825 
2826 	if (!spec) {
2827 		(*device_configured) |= local_cfg;
2828 		return 0;
2829 	}
2830 
2831 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2832 		RTE_FLOW_ITEM_TYPE_UDP)) {
		DPAA2_PMD_WARN("Extract field(s) of UDP not supported.");
2834 
2835 		return -EINVAL;
2836 	}
2837 
2838 	if (mask->hdr.src_port) {
2839 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_UDP,
2840 			NH_FLD_UDP_PORT_SRC, &spec->hdr.src_port,
2841 			&mask->hdr.src_port, sizeof(rte_be16_t),
2842 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2843 		if (ret)
2844 			return ret;
2845 
2846 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_UDP,
2847 			NH_FLD_UDP_PORT_SRC, &spec->hdr.src_port,
2848 			&mask->hdr.src_port, sizeof(rte_be16_t),
2849 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2850 		if (ret)
2851 			return ret;
2852 	}
2853 
2854 	if (mask->hdr.dst_port) {
2855 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_UDP,
2856 			NH_FLD_UDP_PORT_DST, &spec->hdr.dst_port,
2857 			&mask->hdr.dst_port, sizeof(rte_be16_t),
2858 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2859 		if (ret)
2860 			return ret;
2861 
2862 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_UDP,
2863 			NH_FLD_UDP_PORT_DST, &spec->hdr.dst_port,
2864 			&mask->hdr.dst_port, sizeof(rte_be16_t),
2865 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2866 		if (ret)
2867 			return ret;
2868 	}
2869 
2870 	(*device_configured) |= local_cfg;
2871 
2872 	return 0;
2873 }
2874 
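/*
 * Illustrative testpmd usage (a sketch): UDP flows are first identified by
 * the FAF_UDP_FRAM parser bit and then optionally narrowed by port
 * extracts, e.g.:
 *
 *   flow create 0 ingress pattern udp dst is 4789 / end
 *        actions queue index 3 / end
 */
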
2875 static int
2876 dpaa2_configure_flow_tcp(struct dpaa2_dev_flow *flow,
2877 	struct rte_eth_dev *dev,
2878 	const struct rte_flow_attr *attr,
2879 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
2880 	const struct rte_flow_action actions[] __rte_unused,
2881 	struct rte_flow_error *error __rte_unused,
2882 	int *device_configured)
2883 {
2884 	int ret, local_cfg = 0;
2885 	uint32_t group;
2886 	const struct rte_flow_item_tcp *spec, *mask;
2887 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2888 	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
2889 
2890 	group = attr->group;
2891 
2892 	/* Parse pattern list to get the matching parameters */
2893 	spec = pattern->spec;
2894 	mask = pattern->mask ?
2895 		pattern->mask : &dpaa2_flow_item_tcp_mask;
2896 
2897 	/* Get traffic class index and flow id to be configured */
2898 	flow->tc_id = group;
2899 	flow->tc_index = attr->priority;
2900 
2901 	if (dpaa2_pattern->in_tunnel) {
2902 		if (spec) {
			DPAA2_PMD_ERR("Tunnel-TCP distribution not supported");
2904 			return -ENOTSUP;
2905 		}
2906 
2907 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2908 						 FAFE_VXLAN_IN_TCP_FRAM,
2909 						 DPAA2_FLOW_QOS_TYPE, group,
2910 						 &local_cfg);
2911 		if (ret)
2912 			return ret;
2913 
2914 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2915 						 FAFE_VXLAN_IN_TCP_FRAM,
2916 						 DPAA2_FLOW_FS_TYPE, group,
2917 						 &local_cfg);
		if (ret)
			return ret;

		(*device_configured) |= local_cfg;
		return 0;
2919 	}
2920 
2921 	ret = dpaa2_flow_identify_by_faf(priv, flow,
2922 			FAF_TCP_FRAM, DPAA2_FLOW_QOS_TYPE,
2923 			group, &local_cfg);
2924 	if (ret)
2925 		return ret;
2926 
2927 	ret = dpaa2_flow_identify_by_faf(priv, flow,
2928 			FAF_TCP_FRAM, DPAA2_FLOW_FS_TYPE,
2929 			group, &local_cfg);
2930 	if (ret)
2931 		return ret;
2932 
2933 	if (!spec) {
2934 		(*device_configured) |= local_cfg;
2935 		return 0;
2936 	}
2937 
2938 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2939 		RTE_FLOW_ITEM_TYPE_TCP)) {
		DPAA2_PMD_WARN("Extract field(s) of TCP not supported.");
2941 
2942 		return -EINVAL;
2943 	}
2944 
2945 	if (mask->hdr.src_port) {
2946 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_TCP,
2947 			NH_FLD_TCP_PORT_SRC, &spec->hdr.src_port,
2948 			&mask->hdr.src_port, sizeof(rte_be16_t),
2949 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2950 		if (ret)
2951 			return ret;
2952 
2953 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_TCP,
2954 			NH_FLD_TCP_PORT_SRC, &spec->hdr.src_port,
2955 			&mask->hdr.src_port, sizeof(rte_be16_t),
2956 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2957 		if (ret)
2958 			return ret;
2959 	}
2960 
2961 	if (mask->hdr.dst_port) {
2962 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_TCP,
2963 			NH_FLD_TCP_PORT_DST, &spec->hdr.dst_port,
2964 			&mask->hdr.dst_port, sizeof(rte_be16_t),
2965 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2966 		if (ret)
2967 			return ret;
2968 
2969 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_TCP,
2970 			NH_FLD_TCP_PORT_DST, &spec->hdr.dst_port,
2971 			&mask->hdr.dst_port, sizeof(rte_be16_t),
2972 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2973 		if (ret)
2974 			return ret;
2975 	}
2976 
2977 	(*device_configured) |= local_cfg;
2978 
2979 	return 0;
2980 }
2981 
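/*
 * Illustrative testpmd usage (a sketch): inside a tunnel only the presence
 * of inner TCP can be matched (a spec is rejected above), while an outer
 * TCP item supports port extracts, e.g.:
 *
 *   flow create 0 ingress pattern vxlan / tcp / end
 *        actions queue index 1 / end
 */
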
2982 static int
2983 dpaa2_configure_flow_sctp(struct dpaa2_dev_flow *flow,
2984 	struct rte_eth_dev *dev,
2985 	const struct rte_flow_attr *attr,
2986 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
2987 	const struct rte_flow_action actions[] __rte_unused,
2988 	struct rte_flow_error *error __rte_unused,
2989 	int *device_configured)
2990 {
2991 	int ret, local_cfg = 0;
2992 	uint32_t group;
2993 	const struct rte_flow_item_sctp *spec, *mask;
2994 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2995 	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
2996 
2997 	group = attr->group;
2998 
2999 	/* Parse pattern list to get the matching parameters */
3000 	spec = pattern->spec;
3001 	mask = pattern->mask ?
3002 		pattern->mask : &dpaa2_flow_item_sctp_mask;
3003 
3004 	/* Get traffic class index and flow id to be configured */
3005 	flow->tc_id = group;
3006 	flow->tc_index = attr->priority;
3007 
3008 	if (dpaa2_pattern->in_tunnel) {
		DPAA2_PMD_ERR("Tunnel-SCTP distribution not supported");
3010 		return -ENOTSUP;
3011 	}
3012 
3013 	ret = dpaa2_flow_identify_by_faf(priv, flow,
3014 			FAF_SCTP_FRAM, DPAA2_FLOW_QOS_TYPE,
3015 			group, &local_cfg);
3016 	if (ret)
3017 		return ret;
3018 
3019 	ret = dpaa2_flow_identify_by_faf(priv, flow,
3020 			FAF_SCTP_FRAM, DPAA2_FLOW_FS_TYPE,
3021 			group, &local_cfg);
3022 	if (ret)
3023 		return ret;
3024 
3025 	if (!spec) {
3026 		(*device_configured) |= local_cfg;
3027 		return 0;
3028 	}
3029 
3030 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
3031 		RTE_FLOW_ITEM_TYPE_SCTP)) {
		DPAA2_PMD_WARN("Extract field(s) of SCTP not supported.");

		return -EINVAL;
3035 	}
3036 
3037 	if (mask->hdr.src_port) {
3038 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_SCTP,
3039 			NH_FLD_SCTP_PORT_SRC, &spec->hdr.src_port,
3040 			&mask->hdr.src_port, sizeof(rte_be16_t),
3041 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
3042 		if (ret)
3043 			return ret;
3044 
3045 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_SCTP,
3046 			NH_FLD_SCTP_PORT_SRC, &spec->hdr.src_port,
3047 			&mask->hdr.src_port, sizeof(rte_be16_t),
3048 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
3049 		if (ret)
3050 			return ret;
3051 	}
3052 
3053 	if (mask->hdr.dst_port) {
3054 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_SCTP,
3055 			NH_FLD_SCTP_PORT_DST, &spec->hdr.dst_port,
3056 			&mask->hdr.dst_port, sizeof(rte_be16_t),
3057 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
3058 		if (ret)
3059 			return ret;
3060 
3061 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_SCTP,
3062 			NH_FLD_SCTP_PORT_DST, &spec->hdr.dst_port,
3063 			&mask->hdr.dst_port, sizeof(rte_be16_t),
3064 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
3065 		if (ret)
3066 			return ret;
3067 	}
3068 
3069 	(*device_configured) |= local_cfg;
3070 
3071 	return 0;
3072 }
3073 
3074 static int
3075 dpaa2_configure_flow_gre(struct dpaa2_dev_flow *flow,
3076 	struct rte_eth_dev *dev,
3077 	const struct rte_flow_attr *attr,
3078 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
3079 	const struct rte_flow_action actions[] __rte_unused,
3080 	struct rte_flow_error *error __rte_unused,
3081 	int *device_configured)
3082 {
3083 	int ret, local_cfg = 0;
3084 	uint32_t group;
3085 	const struct rte_flow_item_gre *spec, *mask;
3086 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3087 	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
3088 
3089 	group = attr->group;
3090 
3091 	/* Parse pattern list to get the matching parameters */
3092 	spec = pattern->spec;
3093 	mask = pattern->mask ?
3094 		pattern->mask : &dpaa2_flow_item_gre_mask;
3095 
3096 	/* Get traffic class index and flow id to be configured */
3097 	flow->tc_id = group;
3098 	flow->tc_index = attr->priority;
3099 
3100 	if (dpaa2_pattern->in_tunnel) {
		DPAA2_PMD_ERR("Tunnel-GRE distribution not supported");
3102 		return -ENOTSUP;
3103 	}
3104 
3105 	if (!spec) {
3106 		ret = dpaa2_flow_identify_by_faf(priv, flow,
3107 				FAF_GRE_FRAM, DPAA2_FLOW_QOS_TYPE,
3108 				group, &local_cfg);
3109 		if (ret)
3110 			return ret;
3111 
3112 		ret = dpaa2_flow_identify_by_faf(priv, flow,
3113 				FAF_GRE_FRAM, DPAA2_FLOW_FS_TYPE,
3114 				group, &local_cfg);
3115 		if (ret)
3116 			return ret;
3117 
3118 		(*device_configured) |= local_cfg;
3119 		return 0;
3120 	}
3121 
3122 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
3123 		RTE_FLOW_ITEM_TYPE_GRE)) {
		DPAA2_PMD_WARN("Extract field(s) of GRE not supported.");

		return -EINVAL;
3127 	}
3128 
3129 	if (!mask->protocol)
3130 		return 0;
3131 
3132 	ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_GRE,
3133 			NH_FLD_GRE_TYPE, &spec->protocol,
3134 			&mask->protocol, sizeof(rte_be16_t),
3135 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
3136 	if (ret)
3137 		return ret;
3138 
3139 	ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_GRE,
3140 			NH_FLD_GRE_TYPE, &spec->protocol,
3141 			&mask->protocol, sizeof(rte_be16_t),
3142 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
3143 	if (ret)
3144 		return ret;
3145 
3146 	(*device_configured) |= local_cfg;
3147 
3148 	return 0;
3149 }
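/*
 * Illustrative testpmd usage (a sketch): GRE matching is limited to the
 * protocol field (see dpaa2_flow_extract_support()), e.g.:
 *
 *   flow create 0 ingress pattern gre protocol is 0x0800 / end
 *        actions queue index 1 / end
 */
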
3150 
3151 static int
3152 dpaa2_configure_flow_vxlan(struct dpaa2_dev_flow *flow,
3153 	struct rte_eth_dev *dev,
3154 	const struct rte_flow_attr *attr,
3155 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
3156 	const struct rte_flow_action actions[] __rte_unused,
3157 	struct rte_flow_error *error __rte_unused,
3158 	int *device_configured)
3159 {
3160 	int ret, local_cfg = 0;
3161 	uint32_t group;
3162 	const struct rte_flow_item_vxlan *spec, *mask;
3163 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3164 	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
3165 
3166 	group = attr->group;
3167 
3168 	/* Parse pattern list to get the matching parameters */
3169 	spec = pattern->spec;
3170 	mask = pattern->mask ?
3171 		pattern->mask : &dpaa2_flow_item_vxlan_mask;
3172 
3173 	/* Get traffic class index and flow id to be configured */
3174 	flow->tc_id = group;
3175 	flow->tc_index = attr->priority;
3176 
3177 	if (dpaa2_pattern->in_tunnel) {
		DPAA2_PMD_ERR("Tunnel-VXLAN distribution not supported");
3179 		return -ENOTSUP;
3180 	}
3181 
3182 	if (!spec) {
3183 		ret = dpaa2_flow_identify_by_faf(priv, flow,
3184 				FAF_VXLAN_FRAM, DPAA2_FLOW_QOS_TYPE,
3185 				group, &local_cfg);
3186 		if (ret)
3187 			return ret;
3188 
3189 		ret = dpaa2_flow_identify_by_faf(priv, flow,
3190 				FAF_VXLAN_FRAM, DPAA2_FLOW_FS_TYPE,
3191 				group, &local_cfg);
3192 		if (ret)
3193 			return ret;
3194 
3195 		(*device_configured) |= local_cfg;
3196 		return 0;
3197 	}
3198 
3199 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
3200 		RTE_FLOW_ITEM_TYPE_VXLAN)) {
		DPAA2_PMD_WARN("Extract field(s) of VXLAN not supported.");

		return -EINVAL;
3204 	}
3205 
3206 	if (mask->flags) {
3207 		if (spec->flags != VXLAN_HF_VNI) {
			DPAA2_PMD_ERR("VXLAN flags(0x%02x) must be 0x%02x.",
				spec->flags, VXLAN_HF_VNI);
			return -EINVAL;
		}
		if (mask->flags != 0xff) {
			DPAA2_PMD_ERR("Partial match of VXLAN flags is not supported.");
3214 			return -EINVAL;
3215 		}
3216 	}
3217 
3218 	if (mask->vni[0] || mask->vni[1] || mask->vni[2]) {
3219 		ret = dpaa2_flow_add_pr_extract_rule(flow,
3220 			DPAA2_VXLAN_VNI_OFFSET,
3221 			sizeof(mask->vni), spec->vni,
3222 			mask->vni,
3223 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
3224 		if (ret)
3225 			return ret;
3226 
3227 		ret = dpaa2_flow_add_pr_extract_rule(flow,
3228 			DPAA2_VXLAN_VNI_OFFSET,
3229 			sizeof(mask->vni), spec->vni,
3230 			mask->vni,
3231 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
3232 		if (ret)
3233 			return ret;
3234 	}
3235 
3236 	(*device_configured) |= local_cfg;
3237 
3238 	return 0;
3239 }
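/*
 * Illustrative testpmd usage (a sketch): the VNI is matched through a
 * parser-result extract at DPAA2_VXLAN_VNI_OFFSET, e.g.:
 *
 *   flow create 0 ingress pattern vxlan vni is 42 / end
 *        actions queue index 1 / end
 */
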
3240 
3241 static int
3242 dpaa2_configure_flow_ecpri(struct dpaa2_dev_flow *flow,
3243 	struct rte_eth_dev *dev,
3244 	const struct rte_flow_attr *attr,
3245 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
3246 	const struct rte_flow_action actions[] __rte_unused,
3247 	struct rte_flow_error *error __rte_unused,
3248 	int *device_configured)
3249 {
3250 	int ret, local_cfg = 0;
3251 	uint32_t group;
3252 	const struct rte_flow_item_ecpri *spec, *mask;
3253 	struct rte_flow_item_ecpri local_mask;
3254 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3255 	const struct rte_flow_item *pattern =
3256 		&dpaa2_pattern->generic_item;
3257 	uint8_t extract_nb = 0, i;
3258 	uint64_t rule_data[DPAA2_ECPRI_MAX_EXTRACT_NB];
3259 	uint64_t mask_data[DPAA2_ECPRI_MAX_EXTRACT_NB];
3260 	uint8_t extract_size[DPAA2_ECPRI_MAX_EXTRACT_NB];
3261 	uint8_t extract_off[DPAA2_ECPRI_MAX_EXTRACT_NB];
3262 
3263 	group = attr->group;
3264 
3265 	/* Parse pattern list to get the matching parameters */
3266 	spec = pattern->spec;
3267 	if (pattern->mask) {
3268 		memcpy(&local_mask, pattern->mask,
3269 			sizeof(struct rte_flow_item_ecpri));
3270 		local_mask.hdr.common.u32 =
3271 			rte_be_to_cpu_32(local_mask.hdr.common.u32);
3272 		mask = &local_mask;
3273 	} else {
3274 		mask = &dpaa2_flow_item_ecpri_mask;
3275 	}
3276 
3277 	/* Get traffic class index and flow id to be configured */
3278 	flow->tc_id = group;
3279 	flow->tc_index = attr->priority;
3280 
3281 	if (dpaa2_pattern->in_tunnel) {
		DPAA2_PMD_ERR("Tunnel-ECPRI distribution not supported");
3283 		return -ENOTSUP;
3284 	}
3285 
3286 	if (!spec) {
3287 		ret = dpaa2_flow_identify_by_faf(priv, flow,
3288 			FAFE_ECPRI_FRAM, DPAA2_FLOW_QOS_TYPE,
3289 			group, &local_cfg);
3290 		if (ret)
3291 			return ret;
3292 
3293 		ret = dpaa2_flow_identify_by_faf(priv, flow,
3294 			FAFE_ECPRI_FRAM, DPAA2_FLOW_FS_TYPE,
3295 			group, &local_cfg);
3296 		if (ret)
3297 			return ret;
3298 
3299 		(*device_configured) |= local_cfg;
3300 		return 0;
3301 	}
3302 
3303 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
3304 		RTE_FLOW_ITEM_TYPE_ECPRI)) {
		DPAA2_PMD_WARN("Extract field(s) of ECPRI not supported.");

		return -EINVAL;
3308 	}
3309 
3310 	if (mask->hdr.common.type != 0xff) {
		DPAA2_PMD_WARN("ECPRI header type not fully specified.");

		return -EINVAL;
3314 	}
3315 
3316 	if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_IQ_DATA) {
3317 		rule_data[extract_nb] = ECPRI_FAFE_TYPE_0;
3318 		mask_data[extract_nb] = 0xff;
3319 		extract_size[extract_nb] = sizeof(uint8_t);
3320 		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
3321 		extract_nb++;
3322 
3323 		if (mask->hdr.type0.pc_id) {
3324 			rule_data[extract_nb] = spec->hdr.type0.pc_id;
3325 			mask_data[extract_nb] = mask->hdr.type0.pc_id;
3326 			extract_size[extract_nb] = sizeof(rte_be16_t);
3327 			extract_off[extract_nb] =
3328 				DPAA2_ECPRI_MSG_OFFSET +
3329 				offsetof(struct rte_ecpri_msg_iq_data, pc_id);
3330 			extract_nb++;
3331 		}
3332 		if (mask->hdr.type0.seq_id) {
3333 			rule_data[extract_nb] = spec->hdr.type0.seq_id;
3334 			mask_data[extract_nb] = mask->hdr.type0.seq_id;
3335 			extract_size[extract_nb] = sizeof(rte_be16_t);
3336 			extract_off[extract_nb] =
3337 				DPAA2_ECPRI_MSG_OFFSET +
3338 				offsetof(struct rte_ecpri_msg_iq_data, seq_id);
3339 			extract_nb++;
3340 		}
3341 	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_BIT_SEQ) {
3342 		rule_data[extract_nb] = ECPRI_FAFE_TYPE_1;
3343 		mask_data[extract_nb] = 0xff;
3344 		extract_size[extract_nb] = sizeof(uint8_t);
3345 		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
3346 		extract_nb++;
3347 
3348 		if (mask->hdr.type1.pc_id) {
3349 			rule_data[extract_nb] = spec->hdr.type1.pc_id;
3350 			mask_data[extract_nb] = mask->hdr.type1.pc_id;
3351 			extract_size[extract_nb] = sizeof(rte_be16_t);
3352 			extract_off[extract_nb] =
3353 				DPAA2_ECPRI_MSG_OFFSET +
3354 				offsetof(struct rte_ecpri_msg_bit_seq, pc_id);
3355 			extract_nb++;
3356 		}
3357 		if (mask->hdr.type1.seq_id) {
3358 			rule_data[extract_nb] = spec->hdr.type1.seq_id;
3359 			mask_data[extract_nb] = mask->hdr.type1.seq_id;
3360 			extract_size[extract_nb] = sizeof(rte_be16_t);
3361 			extract_off[extract_nb] =
3362 				DPAA2_ECPRI_MSG_OFFSET +
3363 				offsetof(struct rte_ecpri_msg_bit_seq, seq_id);
3364 			extract_nb++;
3365 		}
3366 	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_RTC_CTRL) {
3367 		rule_data[extract_nb] = ECPRI_FAFE_TYPE_2;
3368 		mask_data[extract_nb] = 0xff;
3369 		extract_size[extract_nb] = sizeof(uint8_t);
3370 		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
3371 		extract_nb++;
3372 
3373 		if (mask->hdr.type2.rtc_id) {
3374 			rule_data[extract_nb] = spec->hdr.type2.rtc_id;
3375 			mask_data[extract_nb] = mask->hdr.type2.rtc_id;
3376 			extract_size[extract_nb] = sizeof(rte_be16_t);
3377 			extract_off[extract_nb] =
3378 				DPAA2_ECPRI_MSG_OFFSET +
3379 				offsetof(struct rte_ecpri_msg_rtc_ctrl, rtc_id);
3380 			extract_nb++;
3381 		}
3382 		if (mask->hdr.type2.seq_id) {
3383 			rule_data[extract_nb] = spec->hdr.type2.seq_id;
3384 			mask_data[extract_nb] = mask->hdr.type2.seq_id;
3385 			extract_size[extract_nb] = sizeof(rte_be16_t);
3386 			extract_off[extract_nb] =
3387 				DPAA2_ECPRI_MSG_OFFSET +
3388 				offsetof(struct rte_ecpri_msg_rtc_ctrl, seq_id);
3389 			extract_nb++;
3390 		}
3391 	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_GEN_DATA) {
3392 		rule_data[extract_nb] = ECPRI_FAFE_TYPE_3;
3393 		mask_data[extract_nb] = 0xff;
3394 		extract_size[extract_nb] = sizeof(uint8_t);
3395 		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
3396 		extract_nb++;
3397 
3398 		if (mask->hdr.type3.pc_id || mask->hdr.type3.seq_id)
			DPAA2_PMD_WARN("Extracting type3 msg fields is not supported.");
3400 	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_RM_ACC) {
3401 		rule_data[extract_nb] = ECPRI_FAFE_TYPE_4;
3402 		mask_data[extract_nb] = 0xff;
3403 		extract_size[extract_nb] = sizeof(uint8_t);
3404 		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
3405 		extract_nb++;
3406 
3407 		if (mask->hdr.type4.rma_id) {
3408 			rule_data[extract_nb] = spec->hdr.type4.rma_id;
3409 			mask_data[extract_nb] = mask->hdr.type4.rma_id;
3410 			extract_size[extract_nb] = sizeof(uint8_t);
3411 			extract_off[extract_nb] =
3412 				DPAA2_ECPRI_MSG_OFFSET + 0;
				/* The compiler does not support taking the
				 * address of a bit-field:
				 * offsetof(struct rte_ecpri_msg_rm_access,
				 * rma_id);
				 */
3418 			extract_nb++;
3419 		}
3420 		if (mask->hdr.type4.ele_id) {
3421 			rule_data[extract_nb] = spec->hdr.type4.ele_id;
3422 			mask_data[extract_nb] = mask->hdr.type4.ele_id;
3423 			extract_size[extract_nb] = sizeof(rte_be16_t);
3424 			extract_off[extract_nb] =
3425 				DPAA2_ECPRI_MSG_OFFSET + 2;
				/* The compiler does not support taking the
				 * address of a bit-field:
				 * offsetof(struct rte_ecpri_msg_rm_access,
				 * ele_id);
				 */
3431 			extract_nb++;
3432 		}
3433 	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_DLY_MSR) {
3434 		rule_data[extract_nb] = ECPRI_FAFE_TYPE_5;
3435 		mask_data[extract_nb] = 0xff;
3436 		extract_size[extract_nb] = sizeof(uint8_t);
3437 		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
3438 		extract_nb++;
3439 
3440 		if (mask->hdr.type5.msr_id) {
3441 			rule_data[extract_nb] = spec->hdr.type5.msr_id;
3442 			mask_data[extract_nb] = mask->hdr.type5.msr_id;
3443 			extract_size[extract_nb] = sizeof(uint8_t);
3444 			extract_off[extract_nb] =
3445 				DPAA2_ECPRI_MSG_OFFSET +
3446 				offsetof(struct rte_ecpri_msg_delay_measure,
3447 					msr_id);
3448 			extract_nb++;
3449 		}
3450 		if (mask->hdr.type5.act_type) {
3451 			rule_data[extract_nb] = spec->hdr.type5.act_type;
3452 			mask_data[extract_nb] = mask->hdr.type5.act_type;
3453 			extract_size[extract_nb] = sizeof(uint8_t);
3454 			extract_off[extract_nb] =
3455 				DPAA2_ECPRI_MSG_OFFSET +
3456 				offsetof(struct rte_ecpri_msg_delay_measure,
3457 					act_type);
3458 			extract_nb++;
3459 		}
3460 	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_RMT_RST) {
3461 		rule_data[extract_nb] = ECPRI_FAFE_TYPE_6;
3462 		mask_data[extract_nb] = 0xff;
3463 		extract_size[extract_nb] = sizeof(uint8_t);
3464 		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
3465 		extract_nb++;
3466 
3467 		if (mask->hdr.type6.rst_id) {
3468 			rule_data[extract_nb] = spec->hdr.type6.rst_id;
3469 			mask_data[extract_nb] = mask->hdr.type6.rst_id;
3470 			extract_size[extract_nb] = sizeof(rte_be16_t);
3471 			extract_off[extract_nb] =
3472 				DPAA2_ECPRI_MSG_OFFSET +
3473 				offsetof(struct rte_ecpri_msg_remote_reset,
3474 					rst_id);
3475 			extract_nb++;
3476 		}
3477 		if (mask->hdr.type6.rst_op) {
3478 			rule_data[extract_nb] = spec->hdr.type6.rst_op;
3479 			mask_data[extract_nb] = mask->hdr.type6.rst_op;
3480 			extract_size[extract_nb] = sizeof(uint8_t);
3481 			extract_off[extract_nb] =
3482 				DPAA2_ECPRI_MSG_OFFSET +
3483 				offsetof(struct rte_ecpri_msg_remote_reset,
3484 					rst_op);
3485 			extract_nb++;
3486 		}
3487 	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_EVT_IND) {
3488 		rule_data[extract_nb] = ECPRI_FAFE_TYPE_7;
3489 		mask_data[extract_nb] = 0xff;
3490 		extract_size[extract_nb] = sizeof(uint8_t);
3491 		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
3492 		extract_nb++;
3493 
3494 		if (mask->hdr.type7.evt_id) {
3495 			rule_data[extract_nb] = spec->hdr.type7.evt_id;
3496 			mask_data[extract_nb] = mask->hdr.type7.evt_id;
3497 			extract_size[extract_nb] = sizeof(uint8_t);
3498 			extract_off[extract_nb] =
3499 				DPAA2_ECPRI_MSG_OFFSET +
3500 				offsetof(struct rte_ecpri_msg_event_ind,
3501 					evt_id);
3502 			extract_nb++;
3503 		}
3504 		if (mask->hdr.type7.evt_type) {
3505 			rule_data[extract_nb] = spec->hdr.type7.evt_type;
3506 			mask_data[extract_nb] = mask->hdr.type7.evt_type;
3507 			extract_size[extract_nb] = sizeof(uint8_t);
3508 			extract_off[extract_nb] =
3509 				DPAA2_ECPRI_MSG_OFFSET +
3510 				offsetof(struct rte_ecpri_msg_event_ind,
3511 					evt_type);
3512 			extract_nb++;
3513 		}
3514 		if (mask->hdr.type7.seq) {
3515 			rule_data[extract_nb] = spec->hdr.type7.seq;
3516 			mask_data[extract_nb] = mask->hdr.type7.seq;
3517 			extract_size[extract_nb] = sizeof(uint8_t);
3518 			extract_off[extract_nb] =
3519 				DPAA2_ECPRI_MSG_OFFSET +
3520 				offsetof(struct rte_ecpri_msg_event_ind,
3521 					seq);
3522 			extract_nb++;
3523 		}
3524 		if (mask->hdr.type7.number) {
3525 			rule_data[extract_nb] = spec->hdr.type7.number;
3526 			mask_data[extract_nb] = mask->hdr.type7.number;
3527 			extract_size[extract_nb] = sizeof(uint8_t);
3528 			extract_off[extract_nb] =
3529 				DPAA2_ECPRI_MSG_OFFSET +
3530 				offsetof(struct rte_ecpri_msg_event_ind,
3531 					number);
3532 			extract_nb++;
3533 		}
3534 	} else {
		DPAA2_PMD_ERR("Invalid eCPRI header type(%d)",
3536 				spec->hdr.common.type);
3537 		return -EINVAL;
3538 	}
3539 
3540 	for (i = 0; i < extract_nb; i++) {
3541 		ret = dpaa2_flow_add_pr_extract_rule(flow,
3542 			extract_off[i],
3543 			extract_size[i], &rule_data[i], &mask_data[i],
3544 			priv, group,
3545 			device_configured,
3546 			DPAA2_FLOW_QOS_TYPE);
3547 		if (ret)
3548 			return ret;
3549 
3550 		ret = dpaa2_flow_add_pr_extract_rule(flow,
3551 			extract_off[i],
3552 			extract_size[i], &rule_data[i], &mask_data[i],
3553 			priv, group,
3554 			device_configured,
3555 			DPAA2_FLOW_FS_TYPE);
3556 		if (ret)
3557 			return ret;
3558 	}
3559 
3560 	(*device_configured) |= local_cfg;
3561 
3562 	return 0;
3563 }
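/*
 * Illustrative testpmd usage (a sketch, assuming testpmd's ecpri item
 * syntax): IQ-data message classification by PC_ID, e.g.:
 *
 *   flow create 0 ingress
 *        pattern ecpri common type is iq_data pc_id is 1 / end
 *        actions queue index 1 / end
 */
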
3564 
3565 static int
3566 dpaa2_configure_flow_raw(struct dpaa2_dev_flow *flow,
3567 	struct rte_eth_dev *dev,
3568 	const struct rte_flow_attr *attr,
3569 	const struct rte_dpaa2_flow_item *dpaa2_pattern,
3570 	const struct rte_flow_action actions[] __rte_unused,
3571 	struct rte_flow_error *error __rte_unused,
3572 	int *device_configured)
3573 {
3574 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3575 	int local_cfg = 0, ret;
3576 	uint32_t group;
3577 	struct dpaa2_key_extract *qos_key_extract;
3578 	struct dpaa2_key_extract *tc_key_extract;
3579 	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
3580 	const struct rte_flow_item_raw *spec = pattern->spec;
3581 	const struct rte_flow_item_raw *mask = pattern->mask;
3582 
3583 	/* Need both spec and mask */
3584 	if (!spec || !mask) {
3585 		DPAA2_PMD_ERR("spec or mask not present.");
3586 		return -EINVAL;
3587 	}
3588 
3589 	if (spec->relative) {
		/* TBD: relative offset support.
		 * To support an offset relative to a previous L3 protocol
		 * item, the extracts would need to be expanded to identify
		 * whether the frame is VLAN or non-VLAN.
		 *
		 * To support an offset relative to a previous L4 protocol
		 * item, the extracts would need to be expanded to identify
		 * whether the frame is VLAN/IPv4, VLAN/IPv6, non-VLAN/IPv4
		 * or non-VLAN/IPv6.
		 */
		DPAA2_PMD_ERR("relative offset not supported.");
3600 		return -EINVAL;
3601 	}
3602 
3603 	if (spec->search) {
3604 		DPAA2_PMD_ERR("search not supported.");
3605 		return -EINVAL;
3606 	}
3607 
3608 	/* Spec len and mask len should be same */
3609 	if (spec->length != mask->length) {
3610 		DPAA2_PMD_ERR("Spec len and mask len mismatch.");
3611 		return -EINVAL;
3612 	}
3613 
3614 	/* Get traffic class index and flow id to be configured */
3615 	group = attr->group;
3616 	flow->tc_id = group;
3617 	flow->tc_index = attr->priority;
3618 
3619 	qos_key_extract = &priv->extract.qos_key_extract;
3620 	tc_key_extract = &priv->extract.tc_key_extract[group];
3621 
3622 	ret = dpaa2_flow_extract_add_raw(priv,
3623 			spec->offset, spec->length,
3624 			DPAA2_FLOW_QOS_TYPE, 0, &local_cfg);
3625 	if (ret) {
3626 		DPAA2_PMD_ERR("QoS Extract RAW add failed.");
3627 		return -EINVAL;
3628 	}
3629 
3630 	ret = dpaa2_flow_extract_add_raw(priv,
3631 			spec->offset, spec->length,
3632 			DPAA2_FLOW_FS_TYPE, group, &local_cfg);
3633 	if (ret) {
3634 		DPAA2_PMD_ERR("FS[%d] Extract RAW add failed.",
3635 			group);
3636 		return -EINVAL;
3637 	}
3638 
3639 	ret = dpaa2_flow_raw_rule_data_set(flow,
3640 			&qos_key_extract->key_profile,
3641 			spec->offset, spec->length,
3642 			spec->pattern, mask->pattern,
3643 			DPAA2_FLOW_QOS_TYPE);
3644 	if (ret) {
3645 		DPAA2_PMD_ERR("QoS RAW rule data set failed");
3646 		return -EINVAL;
3647 	}
3648 
3649 	ret = dpaa2_flow_raw_rule_data_set(flow,
3650 			&tc_key_extract->key_profile,
3651 			spec->offset, spec->length,
3652 			spec->pattern, mask->pattern,
3653 			DPAA2_FLOW_FS_TYPE);
3654 	if (ret) {
3655 		DPAA2_PMD_ERR("FS RAW rule data set failed");
3656 		return -EINVAL;
3657 	}
3658 
3659 	(*device_configured) |= local_cfg;
3660 
3661 	return 0;
3662 }
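
/*
 * Usage sketch for the RAW item handled above (illustrative only, not
 * part of the driver; the offset and byte values are hypothetical).
 * The checks above require both spec and mask, equal spec/mask lengths
 * and absolute matching (no relative offset, no search):
 *
 *	static const uint8_t raw_pattern[4] = { 0x45, 0x00, 0x00, 0x2e };
 *	static const uint8_t raw_mask[4] = { 0xff, 0x00, 0x00, 0xff };
 *	struct rte_flow_item_raw raw_spec = {
 *		.relative = 0,
 *		.search = 0,
 *		.offset = 14,	// absolute offset past the Ethernet header
 *		.length = sizeof(raw_pattern),
 *		.pattern = raw_pattern,
 *	};
 *	struct rte_flow_item_raw raw_mask_conf = raw_spec;
 *	raw_mask_conf.pattern = raw_mask;	// same length, mask bytes
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_RAW,
 *		  .spec = &raw_spec, .mask = &raw_mask_conf },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */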
3663 
3664 static inline int
3665 dpaa2_flow_verify_attr(struct dpaa2_dev_priv *priv,
3666 	const struct rte_flow_attr *attr)
3667 {
3668 	struct dpaa2_dev_flow *curr = LIST_FIRST(&priv->flows);
3669 
3670 	while (curr) {
3671 		if (curr->tc_id == attr->group &&
3672 			curr->tc_index == attr->priority) {
			DPAA2_PMD_ERR("Flow(TC[%d].entry[%d]) exists",
3674 				attr->group, attr->priority);
3675 
3676 			return -EINVAL;
3677 		}
3678 		curr = LIST_NEXT(curr, next);
3679 	}
3680 
3681 	return 0;
3682 }
3683 
3684 static inline struct rte_eth_dev *
3685 dpaa2_flow_redirect_dev(struct dpaa2_dev_priv *priv,
3686 	const struct rte_flow_action *action)
3687 {
3688 	const struct rte_flow_action_port_id *port_id;
3689 	const struct rte_flow_action_ethdev *ethdev;
3690 	int idx = -1;
3691 	struct rte_eth_dev *dest_dev;
3692 
3693 	if (action->type == RTE_FLOW_ACTION_TYPE_PORT_ID) {
3694 		port_id = action->conf;
3695 		if (!port_id->original)
3696 			idx = port_id->id;
3697 	} else if (action->type == RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT) {
3698 		ethdev = action->conf;
3699 		idx = ethdev->port_id;
3700 	} else {
3701 		return NULL;
3702 	}
3703 
3704 	if (idx >= 0) {
3705 		if (!rte_eth_dev_is_valid_port(idx))
3706 			return NULL;
3707 		if (!rte_pmd_dpaa2_dev_is_dpaa2(idx))
3708 			return NULL;
3709 		dest_dev = &rte_eth_devices[idx];
3710 	} else {
3711 		dest_dev = priv->eth_dev;
3712 	}
3713 
3714 	return dest_dev;
3715 }
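
/*
 * Redirect action sketch (illustrative; the destination port id is
 * hypothetical). Either action form below resolves to a destination
 * DPAA2 device through dpaa2_flow_redirect_dev():
 *
 *	struct rte_flow_action_ethdev ethdev = { .port_id = 1 };
 *	struct rte_flow_action_port_id pid = { .original = 0, .id = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
 *		  .conf = &ethdev },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 * A PORT_ID action with .original set leaves idx at -1, so the flow is
 * redirected to the local device itself. In every case the target must
 * be a valid DPAA2 port.
 */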
3716 
3717 static inline int
3718 dpaa2_flow_verify_action(struct dpaa2_dev_priv *priv,
3719 	const struct rte_flow_attr *attr,
3720 	const struct rte_flow_action actions[])
3721 {
3722 	int end_of_list = 0, i, j = 0;
3723 	const struct rte_flow_action_queue *dest_queue;
3724 	const struct rte_flow_action_rss *rss_conf;
3725 	struct dpaa2_queue *rxq;
3726 
3727 	while (!end_of_list) {
3728 		switch (actions[j].type) {
3729 		case RTE_FLOW_ACTION_TYPE_QUEUE:
3730 			dest_queue = actions[j].conf;
3731 			rxq = priv->rx_vq[dest_queue->index];
3732 			if (attr->group != rxq->tc_index) {
3733 				DPAA2_PMD_ERR("FSQ(%d.%d) not in TC[%d]",
3734 					rxq->tc_index, rxq->flow_id,
3735 					attr->group);
3736 
3737 				return -ENOTSUP;
3738 			}
3739 			break;
3740 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
3741 		case RTE_FLOW_ACTION_TYPE_PORT_ID:
3742 			if (!dpaa2_flow_redirect_dev(priv, &actions[j])) {
3743 				DPAA2_PMD_ERR("Invalid port id of action");
3744 				return -ENOTSUP;
3745 			}
3746 			break;
3747 		case RTE_FLOW_ACTION_TYPE_RSS:
3748 			rss_conf = (const struct rte_flow_action_rss *)
3749 					(actions[j].conf);
3750 			if (rss_conf->queue_num > priv->dist_queues) {
3751 				DPAA2_PMD_ERR("RSS number too large");
3752 				return -ENOTSUP;
3753 			}
3754 			for (i = 0; i < (int)rss_conf->queue_num; i++) {
3755 				if (rss_conf->queue[i] >= priv->nb_rx_queues) {
3756 					DPAA2_PMD_ERR("RSS queue not in range");
3757 					return -ENOTSUP;
3758 				}
3759 				rxq = priv->rx_vq[rss_conf->queue[i]];
3760 				if (rxq->tc_index != attr->group) {
3761 					DPAA2_PMD_ERR("RSS queue not in group");
3762 					return -ENOTSUP;
3763 				}
3764 			}
3765 
3766 			break;
3767 		case RTE_FLOW_ACTION_TYPE_PF:
			/* Skip this action; it is accepted only to support
			 * VXLAN flows.
			 */
3769 			break;
3770 		case RTE_FLOW_ACTION_TYPE_END:
3771 			end_of_list = 1;
3772 			break;
3773 		default:
3774 			DPAA2_PMD_ERR("Invalid action type");
3775 			return -ENOTSUP;
3776 		}
3777 		j++;
3778 	}
3779 
3780 	return 0;
3781 }
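
/*
 * RSS action sketch accepted by the check above (illustrative; queue
 * indices are hypothetical). All queues must belong to the traffic
 * class selected by attr->group and queue_num may not exceed
 * priv->dist_queues:
 *
 *	uint16_t queues[2] = { 0, 1 };
 *	struct rte_flow_action_rss rss = {
 *		.types = RTE_ETH_RSS_IP,
 *		.queue_num = 2,
 *		.queue = queues,
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */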
3782 
3783 static int
3784 dpaa2_configure_flow_fs_action(struct dpaa2_dev_priv *priv,
3785 	struct dpaa2_dev_flow *flow,
3786 	const struct rte_flow_action *rte_action)
3787 {
3788 	struct rte_eth_dev *dest_dev;
3789 	struct dpaa2_dev_priv *dest_priv;
3790 	const struct rte_flow_action_queue *dest_queue;
3791 	struct dpaa2_queue *dest_q;
3792 
3793 	memset(&flow->fs_action_cfg, 0,
3794 		sizeof(struct dpni_fs_action_cfg));
3795 	flow->action_type = rte_action->type;
3796 
3797 	if (flow->action_type == RTE_FLOW_ACTION_TYPE_QUEUE) {
3798 		dest_queue = rte_action->conf;
3799 		dest_q = priv->rx_vq[dest_queue->index];
3800 		flow->fs_action_cfg.flow_id = dest_q->flow_id;
3801 	} else if (flow->action_type == RTE_FLOW_ACTION_TYPE_PORT_ID ||
3802 		   flow->action_type == RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT) {
3803 		dest_dev = dpaa2_flow_redirect_dev(priv, rte_action);
3804 		if (!dest_dev) {
3805 			DPAA2_PMD_ERR("Invalid device to redirect");
3806 			return -EINVAL;
3807 		}
3808 
3809 		dest_priv = dest_dev->data->dev_private;
3810 		dest_q = dest_priv->tx_vq[0];
3811 		flow->fs_action_cfg.options =
3812 			DPNI_FS_OPT_REDIRECT_TO_DPNI_TX;
3813 		flow->fs_action_cfg.redirect_obj_token =
3814 			dest_priv->token;
3815 		flow->fs_action_cfg.flow_id = dest_q->flow_id;
3816 	}
3817 
3818 	return 0;
3819 }
3820 
3821 static inline uint16_t
3822 dpaa2_flow_entry_size(uint16_t key_max_size)
3823 {
3824 	if (key_max_size > DPAA2_FLOW_ENTRY_MAX_SIZE) {
3825 		DPAA2_PMD_ERR("Key size(%d) > max(%d)",
3826 			key_max_size,
3827 			DPAA2_FLOW_ENTRY_MAX_SIZE);
3828 
3829 		return 0;
3830 	}
3831 
3832 	if (key_max_size > DPAA2_FLOW_ENTRY_MIN_SIZE)
3833 		return DPAA2_FLOW_ENTRY_MAX_SIZE;
3834 
	/* Current MC only supports a fixed entry size (56). */
3836 	return DPAA2_FLOW_ENTRY_MAX_SIZE;
3837 }
3838 
3839 static inline int
3840 dpaa2_flow_clear_fs_table(struct dpaa2_dev_priv *priv,
3841 	uint8_t tc_id)
3842 {
3843 	struct dpaa2_dev_flow *curr = LIST_FIRST(&priv->flows);
3844 	int need_clear = 0, ret;
3845 	struct fsl_mc_io *dpni = priv->hw;
3846 
3847 	while (curr) {
3848 		if (curr->tc_id == tc_id) {
3849 			need_clear = 1;
3850 			break;
3851 		}
3852 		curr = LIST_NEXT(curr, next);
3853 	}
3854 
3855 	if (need_clear) {
3856 		ret = dpni_clear_fs_entries(dpni, CMD_PRI_LOW,
3857 				priv->token, tc_id);
3858 		if (ret) {
3859 			DPAA2_PMD_ERR("TC[%d] clear failed", tc_id);
3860 			return ret;
3861 		}
3862 	}
3863 
3864 	return 0;
3865 }
3866 
3867 static int
3868 dpaa2_configure_fs_rss_table(struct dpaa2_dev_priv *priv,
3869 	uint8_t tc_id, uint16_t dist_size, int rss_dist)
3870 {
3871 	struct dpaa2_key_extract *tc_extract;
3872 	uint8_t *key_cfg_buf;
3873 	uint64_t key_cfg_iova;
3874 	int ret;
3875 	struct dpni_rx_dist_cfg tc_cfg;
3876 	struct fsl_mc_io *dpni = priv->hw;
3877 	uint16_t entry_size;
3878 	uint16_t key_max_size;
3879 
3880 	ret = dpaa2_flow_clear_fs_table(priv, tc_id);
3881 	if (ret < 0) {
3882 		DPAA2_PMD_ERR("TC[%d] clear failed", tc_id);
3883 		return ret;
3884 	}
3885 
3886 	tc_extract = &priv->extract.tc_key_extract[tc_id];
3887 	key_cfg_buf = priv->extract.tc_extract_param[tc_id];
3888 	key_cfg_iova = DPAA2_VADDR_TO_IOVA(key_cfg_buf);
3889 
3890 	key_max_size = tc_extract->key_profile.key_max_size;
3891 	entry_size = dpaa2_flow_entry_size(key_max_size);
3892 
3893 	dpaa2_flow_fs_extracts_log(priv, tc_id);
3894 	ret = dpkg_prepare_key_cfg(&tc_extract->dpkg,
3895 			key_cfg_buf);
3896 	if (ret < 0) {
3897 		DPAA2_PMD_ERR("TC[%d] prepare key failed", tc_id);
3898 		return ret;
3899 	}
3900 
3901 	memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));
3902 	tc_cfg.dist_size = dist_size;
3903 	tc_cfg.key_cfg_iova = key_cfg_iova;
3904 	if (rss_dist)
3905 		tc_cfg.enable = true;
3906 	else
3907 		tc_cfg.enable = false;
3908 	tc_cfg.tc = tc_id;
3909 	ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW,
3910 			priv->token, &tc_cfg);
3911 	if (ret < 0) {
3912 		if (rss_dist) {
3913 			DPAA2_PMD_ERR("RSS TC[%d] set failed",
3914 				tc_id);
3915 		} else {
3916 			DPAA2_PMD_ERR("FS TC[%d] hash disable failed",
3917 				tc_id);
3918 		}
3919 
3920 		return ret;
3921 	}
3922 
3923 	if (rss_dist)
3924 		return 0;
3925 
3926 	tc_cfg.enable = true;
3927 	tc_cfg.fs_miss_flow_id = dpaa2_flow_miss_flow_id;
3928 	ret = dpni_set_rx_fs_dist(dpni, CMD_PRI_LOW,
3929 			priv->token, &tc_cfg);
3930 	if (ret < 0) {
		DPAA2_PMD_ERR("TC[%d] FS configuration failed", tc_id);
3932 		return ret;
3933 	}
3934 
3935 	ret = dpaa2_flow_rule_add_all(priv, DPAA2_FLOW_FS_TYPE,
3936 			entry_size, tc_id);
3937 	if (ret)
3938 		return ret;
3939 
3940 	return 0;
3941 }
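
/*
 * Sizing note (illustrative figures): for a DPNI with 2 TCs and 16 Rx
 * queues, the FS (non-RSS) caller below passes
 * dist_size = nb_rx_queues / num_rx_tc = 8, while the RSS caller
 * passes the queue count from the RSS action. Both program the same
 * key extraction; only the hash-distribution enable differs.
 */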
3942 
3943 static int
3944 dpaa2_configure_qos_table(struct dpaa2_dev_priv *priv,
3945 	int rss_dist)
3946 {
3947 	struct dpaa2_key_extract *qos_extract;
3948 	uint8_t *key_cfg_buf;
3949 	uint64_t key_cfg_iova;
3950 	int ret;
3951 	struct dpni_qos_tbl_cfg qos_cfg;
3952 	struct fsl_mc_io *dpni = priv->hw;
3953 	uint16_t entry_size;
3954 	uint16_t key_max_size;
3955 
3956 	if (!rss_dist && priv->num_rx_tc <= 1) {
		/* QoS table is effective only for multi-TC FS or RSS. */
3958 		return 0;
3959 	}
3960 
3961 	if (LIST_FIRST(&priv->flows)) {
3962 		ret = dpni_clear_qos_table(dpni, CMD_PRI_LOW,
3963 				priv->token);
3964 		if (ret < 0) {
3965 			DPAA2_PMD_ERR("QoS table clear failed");
3966 			return ret;
3967 		}
3968 	}
3969 
3970 	qos_extract = &priv->extract.qos_key_extract;
3971 	key_cfg_buf = priv->extract.qos_extract_param;
3972 	key_cfg_iova = DPAA2_VADDR_TO_IOVA(key_cfg_buf);
3973 
3974 	key_max_size = qos_extract->key_profile.key_max_size;
3975 	entry_size = dpaa2_flow_entry_size(key_max_size);
3976 
3977 	dpaa2_flow_qos_extracts_log(priv);
3978 
3979 	ret = dpkg_prepare_key_cfg(&qos_extract->dpkg,
3980 			key_cfg_buf);
3981 	if (ret < 0) {
3982 		DPAA2_PMD_ERR("QoS prepare extract failed");
3983 		return ret;
3984 	}
3985 	memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
3986 	qos_cfg.keep_entries = true;
3987 	qos_cfg.key_cfg_iova = key_cfg_iova;
3988 	if (rss_dist) {
3989 		qos_cfg.discard_on_miss = true;
3990 	} else {
3991 		qos_cfg.discard_on_miss = false;
3992 		qos_cfg.default_tc = 0;
3993 	}
3994 
3995 	ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
3996 			priv->token, &qos_cfg);
3997 	if (ret < 0) {
3998 		DPAA2_PMD_ERR("QoS table set failed");
3999 		return ret;
4000 	}
4001 
4002 	ret = dpaa2_flow_rule_add_all(priv, DPAA2_FLOW_QOS_TYPE,
4003 			entry_size, 0);
4004 	if (ret)
4005 		return ret;
4006 
4007 	return 0;
4008 }
4009 
4010 static int
4011 dpaa2_flow_item_convert(const struct rte_flow_item pattern[],
4012 			struct rte_dpaa2_flow_item **dpaa2_pattern)
4013 {
4014 	struct rte_dpaa2_flow_item *new_pattern;
4015 	int num = 0, tunnel_start = 0;
4016 
	/* Count the pattern items preceding the END terminator. */
	while (pattern[num].type != RTE_FLOW_ITEM_TYPE_END)
		num++;

	/* Allocate one extra item to hold the END terminator, which is
	 * written to new_pattern[num] below.
	 */
	new_pattern = rte_malloc(NULL,
			sizeof(struct rte_dpaa2_flow_item) * (num + 1),
			RTE_CACHE_LINE_SIZE);
4025 	if (!new_pattern) {
4026 		DPAA2_PMD_ERR("Failed to alloc %d flow items", num);
4027 		return -ENOMEM;
4028 	}
4029 
4030 	num = 0;
4031 	while (pattern[num].type != RTE_FLOW_ITEM_TYPE_END) {
4032 		memcpy(&new_pattern[num].generic_item, &pattern[num],
4033 		       sizeof(struct rte_flow_item));
4034 		new_pattern[num].in_tunnel = 0;
4035 
4036 		if (pattern[num].type == RTE_FLOW_ITEM_TYPE_VXLAN)
4037 			tunnel_start = 1;
4038 		else if (tunnel_start)
4039 			new_pattern[num].in_tunnel = 1;
4040 		num++;
4041 	}
4042 
4043 	new_pattern[num].generic_item.type = RTE_FLOW_ITEM_TYPE_END;
4044 	*dpaa2_pattern = new_pattern;
4045 
4046 	return 0;
4047 }
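
/*
 * For example, the pattern ETH / IPV4 / UDP / VXLAN / ETH / IPV4 / END
 * converts so that the items following VXLAN (the inner ETH and IPV4)
 * carry in_tunnel = 1, while the outer items and the VXLAN item itself
 * keep in_tunnel = 0.
 */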
4048 
4049 static int
4050 dpaa2_generic_flow_set(struct dpaa2_dev_flow *flow,
4051 	struct rte_eth_dev *dev,
4052 	const struct rte_flow_attr *attr,
4053 	const struct rte_flow_item pattern[],
4054 	const struct rte_flow_action actions[],
4055 	struct rte_flow_error *error)
4056 {
4057 	const struct rte_flow_action_rss *rss_conf;
4058 	int is_keycfg_configured = 0, end_of_list = 0;
4059 	int ret = 0, i = 0, j = 0;
4060 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4061 	struct dpaa2_dev_flow *curr = LIST_FIRST(&priv->flows);
4062 	uint16_t dist_size, key_size;
4063 	struct dpaa2_key_extract *qos_key_extract;
4064 	struct dpaa2_key_extract *tc_key_extract;
4065 	struct rte_dpaa2_flow_item *dpaa2_pattern = NULL;
4066 
4067 	ret = dpaa2_flow_verify_attr(priv, attr);
4068 	if (ret)
4069 		return ret;
4070 
4071 	ret = dpaa2_flow_verify_action(priv, attr, actions);
4072 	if (ret)
4073 		return ret;
4074 
4075 	ret = dpaa2_flow_item_convert(pattern, &dpaa2_pattern);
4076 	if (ret)
4077 		return ret;
4078 
4079 	/* Parse pattern list to get the matching parameters */
4080 	while (!end_of_list) {
4081 		switch (pattern[i].type) {
4082 		case RTE_FLOW_ITEM_TYPE_ETH:
4083 			ret = dpaa2_configure_flow_eth(flow, dev, attr,
4084 						       &dpaa2_pattern[i],
4085 						       actions, error,
4086 						       &is_keycfg_configured);
4087 			if (ret) {
4088 				DPAA2_PMD_ERR("ETH flow config failed!");
4089 				goto end_flow_set;
4090 			}
4091 			break;
4092 		case RTE_FLOW_ITEM_TYPE_VLAN:
4093 			ret = dpaa2_configure_flow_vlan(flow, dev, attr,
4094 							&dpaa2_pattern[i],
4095 							actions, error,
4096 							&is_keycfg_configured);
4097 			if (ret) {
				DPAA2_PMD_ERR("VLAN flow config failed!");
4099 				goto end_flow_set;
4100 			}
4101 			break;
4102 		case RTE_FLOW_ITEM_TYPE_IPV4:
4103 			ret = dpaa2_configure_flow_ipv4(flow, dev, attr,
4104 							&dpaa2_pattern[i],
4105 							actions, error,
4106 							&is_keycfg_configured);
4107 			if (ret) {
4108 				DPAA2_PMD_ERR("IPV4 flow config failed!");
4109 				goto end_flow_set;
4110 			}
4111 			break;
4112 		case RTE_FLOW_ITEM_TYPE_IPV6:
4113 			ret = dpaa2_configure_flow_ipv6(flow, dev, attr,
4114 							&dpaa2_pattern[i],
4115 							actions, error,
4116 							&is_keycfg_configured);
4117 			if (ret) {
4118 				DPAA2_PMD_ERR("IPV6 flow config failed!");
4119 				goto end_flow_set;
4120 			}
4121 			break;
4122 		case RTE_FLOW_ITEM_TYPE_ICMP:
4123 			ret = dpaa2_configure_flow_icmp(flow, dev, attr,
4124 							&dpaa2_pattern[i],
4125 							actions, error,
4126 							&is_keycfg_configured);
4127 			if (ret) {
4128 				DPAA2_PMD_ERR("ICMP flow config failed!");
4129 				goto end_flow_set;
4130 			}
4131 			break;
4132 		case RTE_FLOW_ITEM_TYPE_UDP:
4133 			ret = dpaa2_configure_flow_udp(flow, dev, attr,
4134 						       &dpaa2_pattern[i],
4135 						       actions, error,
4136 						       &is_keycfg_configured);
4137 			if (ret) {
4138 				DPAA2_PMD_ERR("UDP flow config failed!");
4139 				goto end_flow_set;
4140 			}
4141 			break;
4142 		case RTE_FLOW_ITEM_TYPE_TCP:
4143 			ret = dpaa2_configure_flow_tcp(flow, dev, attr,
4144 						       &dpaa2_pattern[i],
4145 						       actions, error,
4146 						       &is_keycfg_configured);
4147 			if (ret) {
4148 				DPAA2_PMD_ERR("TCP flow config failed!");
4149 				goto end_flow_set;
4150 			}
4151 			break;
4152 		case RTE_FLOW_ITEM_TYPE_SCTP:
4153 			ret = dpaa2_configure_flow_sctp(flow, dev, attr,
4154 							&dpaa2_pattern[i],
4155 							actions, error,
4156 							&is_keycfg_configured);
4157 			if (ret) {
4158 				DPAA2_PMD_ERR("SCTP flow config failed!");
4159 				goto end_flow_set;
4160 			}
4161 			break;
4162 		case RTE_FLOW_ITEM_TYPE_GRE:
4163 			ret = dpaa2_configure_flow_gre(flow, dev, attr,
4164 						       &dpaa2_pattern[i],
4165 						       actions, error,
4166 						       &is_keycfg_configured);
4167 			if (ret) {
4168 				DPAA2_PMD_ERR("GRE flow config failed!");
4169 				goto end_flow_set;
4170 			}
4171 			break;
4172 		case RTE_FLOW_ITEM_TYPE_VXLAN:
4173 			ret = dpaa2_configure_flow_vxlan(flow, dev, attr,
4174 							 &dpaa2_pattern[i],
4175 							 actions, error,
4176 							 &is_keycfg_configured);
4177 			if (ret) {
4178 				DPAA2_PMD_ERR("VXLAN flow config failed!");
4179 				goto end_flow_set;
4180 			}
4181 			break;
4182 		case RTE_FLOW_ITEM_TYPE_ECPRI:
4183 			ret = dpaa2_configure_flow_ecpri(flow,
4184 					dev, attr, &dpaa2_pattern[i],
4185 					actions, error,
4186 					&is_keycfg_configured);
4187 			if (ret) {
4188 				DPAA2_PMD_ERR("ECPRI flow config failed!");
4189 				goto end_flow_set;
4190 			}
4191 			break;
4192 		case RTE_FLOW_ITEM_TYPE_RAW:
4193 			ret = dpaa2_configure_flow_raw(flow, dev, attr,
4194 						       &dpaa2_pattern[i],
4195 						       actions, error,
4196 						       &is_keycfg_configured);
4197 			if (ret) {
4198 				DPAA2_PMD_ERR("RAW flow config failed!");
4199 				goto end_flow_set;
4200 			}
4201 			break;
4202 		case RTE_FLOW_ITEM_TYPE_END:
4203 			end_of_list = 1;
4204 			break; /*End of List*/
4205 		default:
4206 			DPAA2_PMD_ERR("Invalid flow item[%d] type(%d)",
4207 				i, pattern[i].type);
			ret = -ENOTSUP;
			/* Stop parsing so a later action cannot reset ret
			 * and install a partially-parsed rule.
			 */
			goto end_flow_set;
4210 		}
4211 		i++;
4212 	}
4213 
4214 	qos_key_extract = &priv->extract.qos_key_extract;
4215 	key_size = qos_key_extract->key_profile.key_max_size;
4216 	flow->qos_rule.key_size = dpaa2_flow_entry_size(key_size);
4217 
4218 	tc_key_extract = &priv->extract.tc_key_extract[flow->tc_id];
4219 	key_size = tc_key_extract->key_profile.key_max_size;
4220 	flow->fs_rule.key_size = dpaa2_flow_entry_size(key_size);
4221 
4222 	/* Let's parse action on matching traffic */
4223 	end_of_list = 0;
4224 	while (!end_of_list) {
4225 		switch (actions[j].type) {
4226 		case RTE_FLOW_ACTION_TYPE_QUEUE:
4227 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
4228 		case RTE_FLOW_ACTION_TYPE_PORT_ID:
4229 			ret = dpaa2_configure_flow_fs_action(priv, flow,
4230 							     &actions[j]);
4231 			if (ret)
4232 				goto end_flow_set;
4233 
			/* Configure the FS table first. */
4235 			dist_size = priv->nb_rx_queues / priv->num_rx_tc;
4236 			if (is_keycfg_configured & DPAA2_FLOW_FS_TYPE) {
4237 				ret = dpaa2_configure_fs_rss_table(priv,
4238 								   flow->tc_id,
4239 								   dist_size,
4240 								   false);
4241 				if (ret)
4242 					goto end_flow_set;
4243 			}
4244 
			/* Then configure the QoS table. */
4246 			if (is_keycfg_configured & DPAA2_FLOW_QOS_TYPE) {
4247 				ret = dpaa2_configure_qos_table(priv, false);
4248 				if (ret)
4249 					goto end_flow_set;
4250 			}
4251 
4252 			if (priv->num_rx_tc > 1) {
4253 				ret = dpaa2_flow_add_qos_rule(priv, flow);
4254 				if (ret)
4255 					goto end_flow_set;
4256 			}
4257 
			if (flow->tc_index >= priv->fs_entries) {
				DPAA2_PMD_ERR("FS table with %d entries full",
					priv->fs_entries);
				/* Exit through end_flow_set so that
				 * dpaa2_pattern is freed.
				 */
				ret = -EINVAL;
				goto end_flow_set;
			}
4263 
4264 			ret = dpaa2_flow_add_fs_rule(priv, flow);
4265 			if (ret)
4266 				goto end_flow_set;
4267 
4268 			break;
4269 		case RTE_FLOW_ACTION_TYPE_RSS:
4270 			rss_conf = actions[j].conf;
4271 			flow->action_type = RTE_FLOW_ACTION_TYPE_RSS;
4272 
4273 			ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types,
4274 					&tc_key_extract->dpkg);
4275 			if (ret < 0) {
4276 				DPAA2_PMD_ERR("TC[%d] distset RSS failed",
4277 					      flow->tc_id);
4278 				goto end_flow_set;
4279 			}
4280 
4281 			dist_size = rss_conf->queue_num;
4282 			if (is_keycfg_configured & DPAA2_FLOW_FS_TYPE) {
4283 				ret = dpaa2_configure_fs_rss_table(priv,
4284 								   flow->tc_id,
4285 								   dist_size,
4286 								   true);
4287 				if (ret)
4288 					goto end_flow_set;
4289 			}
4290 
4291 			if (is_keycfg_configured & DPAA2_FLOW_QOS_TYPE) {
4292 				ret = dpaa2_configure_qos_table(priv, true);
4293 				if (ret)
4294 					goto end_flow_set;
4295 			}
4296 
4297 			ret = dpaa2_flow_add_qos_rule(priv, flow);
4298 			if (ret)
4299 				goto end_flow_set;
4300 
4301 			ret = dpaa2_flow_add_fs_rule(priv, flow);
4302 			if (ret)
4303 				goto end_flow_set;
4304 
4305 			break;
4306 		case RTE_FLOW_ACTION_TYPE_PF:
			/* Skip this action; it is accepted only to support
			 * VXLAN flows.
			 */
4308 			break;
4309 		case RTE_FLOW_ACTION_TYPE_END:
4310 			end_of_list = 1;
4311 			break;
4312 		default:
4313 			DPAA2_PMD_ERR("Invalid action type");
			ret = -ENOTSUP;
			goto end_flow_set;
4316 		}
4317 		j++;
4318 	}
4319 
4320 end_flow_set:
4321 	if (!ret) {
4322 		/* New rules are inserted. */
4323 		if (!curr) {
4324 			LIST_INSERT_HEAD(&priv->flows, flow, next);
4325 		} else {
4326 			while (LIST_NEXT(curr, next))
4327 				curr = LIST_NEXT(curr, next);
4328 			LIST_INSERT_AFTER(curr, flow, next);
4329 		}
4330 	}
4331 
4332 	if (dpaa2_pattern)
4333 		rte_free(dpaa2_pattern);
4334 
4335 	return ret;
4336 }
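
/*
 * End-to-end sketch of a rule this function programs (illustrative;
 * the queue index is hypothetical):
 *
 *	struct rte_flow_attr attr = {
 *		.group = 0, .priority = 0, .ingress = 1,
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.dst_port = RTE_BE16(4789),
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr.dst_port = RTE_BE16(0xffff),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 * The UDP item feeds both the QoS and the FS extracts; the QUEUE
 * action then takes the first switch case above, programming the FS
 * table of TC 0 and, when multiple TCs exist, a QoS entry as well.
 */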
4337 
4338 static inline int
4339 dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr,
4340 	const struct rte_flow_attr *attr)
4341 {
4342 	int ret = 0;
4343 
4344 	if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) {
4345 		DPAA2_PMD_ERR("Priority group is out of range");
4346 		ret = -ENOTSUP;
4347 	}
4348 	if (unlikely(attr->priority >= dpni_attr->fs_entries)) {
4349 		DPAA2_PMD_ERR("Priority within the group is out of range");
4350 		ret = -ENOTSUP;
4351 	}
4352 	if (unlikely(attr->egress)) {
4353 		DPAA2_PMD_ERR(
4354 			"Flow configuration is not supported on egress side");
4355 		ret = -ENOTSUP;
4356 	}
4357 	if (unlikely(!attr->ingress)) {
4358 		DPAA2_PMD_ERR("Ingress flag must be configured");
4359 		ret = -EINVAL;
4360 	}
4361 	return ret;
4362 }
4363 
4364 static inline int
4365 dpaa2_dev_verify_patterns(const struct rte_flow_item pattern[])
4366 {
4367 	unsigned int i, j, is_found = 0;
4368 	int ret = 0;
4369 
4370 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
4371 		for (i = 0; i < RTE_DIM(dpaa2_supported_pattern_type); i++) {
4372 			if (dpaa2_supported_pattern_type[i]
4373 					== pattern[j].type) {
4374 				is_found = 1;
4375 				break;
4376 			}
4377 		}
4378 		if (!is_found) {
4379 			ret = -ENOTSUP;
4380 			break;
4381 		}
4382 	}
	/* Every supported pattern item must also provide a spec. */
4384 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
4385 		if (!pattern[j].spec) {
4386 			ret = -EINVAL;
4387 			break;
4388 		}
4389 	}
4390 
4391 	return ret;
4392 }
4393 
4394 static inline int
4395 dpaa2_dev_verify_actions(const struct rte_flow_action actions[])
4396 {
4397 	unsigned int i, j, is_found = 0;
4398 	int ret = 0;
4399 
4400 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
4401 		for (i = 0; i < RTE_DIM(dpaa2_supported_action_type); i++) {
4402 			if (dpaa2_supported_action_type[i] == actions[j].type) {
4403 				is_found = 1;
4404 				break;
4405 			}
4406 		}
4407 		if (!is_found) {
4408 			ret = -ENOTSUP;
4409 			break;
4410 		}
4411 	}
4412 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
4413 		if (actions[j].type != RTE_FLOW_ACTION_TYPE_DROP &&
4414 		    !actions[j].conf)
4415 			ret = -EINVAL;
4416 	}
4417 	return ret;
4418 }
4419 
4420 static int
4421 dpaa2_flow_validate(struct rte_eth_dev *dev,
4422 	const struct rte_flow_attr *flow_attr,
4423 	const struct rte_flow_item pattern[],
4424 	const struct rte_flow_action actions[],
4425 	struct rte_flow_error *error)
4426 {
4427 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4428 	struct dpni_attr dpni_attr;
4429 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
4430 	uint16_t token = priv->token;
4431 	int ret = 0;
4432 
4433 	memset(&dpni_attr, 0, sizeof(struct dpni_attr));
4434 	ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr);
4435 	if (ret < 0) {
4436 		DPAA2_PMD_ERR(
			"Failed to get dpni@%p attributes, err code %d",
4438 			dpni, ret);
4439 		rte_flow_error_set(error, EPERM,
4440 			   RTE_FLOW_ERROR_TYPE_ATTR,
4441 			   flow_attr, "invalid");
4442 		return ret;
4443 	}
4444 
4445 	/* Verify input attributes */
4446 	ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr);
4447 	if (ret < 0) {
		DPAA2_PMD_ERR("Invalid attributes given");
4450 		rte_flow_error_set(error, EPERM,
4451 			   RTE_FLOW_ERROR_TYPE_ATTR,
4452 			   flow_attr, "invalid");
4453 		goto not_valid_params;
4454 	}
4455 	/* Verify input pattern list */
4456 	ret = dpaa2_dev_verify_patterns(pattern);
4457 	if (ret < 0) {
		DPAA2_PMD_ERR("Invalid pattern list given");
4460 		rte_flow_error_set(error, EPERM,
4461 			   RTE_FLOW_ERROR_TYPE_ITEM,
4462 			   pattern, "invalid");
4463 		goto not_valid_params;
4464 	}
4465 	/* Verify input action list */
4466 	ret = dpaa2_dev_verify_actions(actions);
4467 	if (ret < 0) {
		DPAA2_PMD_ERR("Invalid action list given");
4470 		rte_flow_error_set(error, EPERM,
4471 			   RTE_FLOW_ERROR_TYPE_ACTION,
4472 			   actions, "invalid");
4473 		goto not_valid_params;
4474 	}
4475 not_valid_params:
4476 	return ret;
4477 }
4478 
4479 static struct rte_flow *
4480 dpaa2_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
4481 		  const struct rte_flow_item pattern[],
4482 		  const struct rte_flow_action actions[],
4483 		  struct rte_flow_error *error)
4484 {
4485 	struct dpaa2_dev_flow *flow = NULL;
4486 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4487 	int ret;
4488 
4489 	dpaa2_flow_control_log =
4490 		getenv("DPAA2_FLOW_CONTROL_LOG");
4491 
4492 	if (getenv("DPAA2_FLOW_CONTROL_MISS_FLOW")) {
4493 		dpaa2_flow_miss_flow_id =
4494 			(uint16_t)atoi(getenv("DPAA2_FLOW_CONTROL_MISS_FLOW"));
4495 		if (dpaa2_flow_miss_flow_id >= priv->dist_queues) {
			DPAA2_PMD_ERR("Miss flow ID %d >= dist size (%d)",
4497 				      dpaa2_flow_miss_flow_id,
4498 				      priv->dist_queues);
4499 			return NULL;
4500 		}
4501 	}
4502 
4503 	flow = rte_zmalloc(NULL, sizeof(struct dpaa2_dev_flow),
4504 			   RTE_CACHE_LINE_SIZE);
4505 	if (!flow) {
4506 		DPAA2_PMD_ERR("Failure to allocate memory for flow");
4507 		goto mem_failure;
4508 	}
4509 
4510 	/* Allocate DMA'ble memory to write the qos rules */
4511 	flow->qos_key_addr = rte_zmalloc(NULL, 256, 64);
4512 	if (!flow->qos_key_addr) {
4513 		DPAA2_PMD_ERR("Memory allocation failed");
4514 		goto mem_failure;
4515 	}
4516 	flow->qos_rule.key_iova = DPAA2_VADDR_TO_IOVA(flow->qos_key_addr);
4517 
4518 	flow->qos_mask_addr = rte_zmalloc(NULL, 256, 64);
4519 	if (!flow->qos_mask_addr) {
4520 		DPAA2_PMD_ERR("Memory allocation failed");
4521 		goto mem_failure;
4522 	}
4523 	flow->qos_rule.mask_iova = DPAA2_VADDR_TO_IOVA(flow->qos_mask_addr);
4524 
4525 	/* Allocate DMA'ble memory to write the FS rules */
4526 	flow->fs_key_addr = rte_zmalloc(NULL, 256, 64);
4527 	if (!flow->fs_key_addr) {
4528 		DPAA2_PMD_ERR("Memory allocation failed");
4529 		goto mem_failure;
4530 	}
4531 	flow->fs_rule.key_iova = DPAA2_VADDR_TO_IOVA(flow->fs_key_addr);
4532 
4533 	flow->fs_mask_addr = rte_zmalloc(NULL, 256, 64);
4534 	if (!flow->fs_mask_addr) {
4535 		DPAA2_PMD_ERR("Memory allocation failed");
4536 		goto mem_failure;
4537 	}
4538 	flow->fs_rule.mask_iova = DPAA2_VADDR_TO_IOVA(flow->fs_mask_addr);
4539 
4540 	priv->curr = flow;
4541 
4542 	ret = dpaa2_generic_flow_set(flow, dev, attr, pattern, actions, error);
4543 	if (ret < 0) {
4544 		if (error && error->type > RTE_FLOW_ERROR_TYPE_ACTION)
4545 			rte_flow_error_set(error, EPERM,
4546 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4547 					   attr, "unknown");
4548 		DPAA2_PMD_ERR("Create flow failed (%d)", ret);
4549 		goto creation_error;
4550 	}
4551 
4552 	priv->curr = NULL;
4553 	return (struct rte_flow *)flow;
4554 
4555 mem_failure:
4556 	rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4557 			   "memory alloc");
4558 
4559 creation_error:
4560 	if (flow) {
4561 		if (flow->qos_key_addr)
4562 			rte_free(flow->qos_key_addr);
4563 		if (flow->qos_mask_addr)
4564 			rte_free(flow->qos_mask_addr);
4565 		if (flow->fs_key_addr)
4566 			rte_free(flow->fs_key_addr);
4567 		if (flow->fs_mask_addr)
4568 			rte_free(flow->fs_mask_addr);
4569 		rte_free(flow);
4570 	}
4571 	priv->curr = NULL;
4572 
4573 	return NULL;
4574 }
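
/*
 * Environment knobs read at creation time above (values illustrative):
 *
 *	export DPAA2_FLOW_CONTROL_LOG=1        # enable flow dump logging
 *	export DPAA2_FLOW_CONTROL_MISS_FLOW=7  # send FS misses to flow 7
 *
 * The miss flow id must stay below the number of distribution queues,
 * otherwise creation fails before any rule is programmed.
 */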
4575 
4576 static int
4577 dpaa2_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *_flow,
4578 		   struct rte_flow_error *error)
4579 {
4580 	int ret = 0;
4581 	struct dpaa2_dev_flow *flow;
4582 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4583 	struct fsl_mc_io *dpni = priv->hw;
4584 
4585 	flow = (struct dpaa2_dev_flow *)_flow;
4586 
4587 	switch (flow->action_type) {
4588 	case RTE_FLOW_ACTION_TYPE_QUEUE:
4589 	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
4590 	case RTE_FLOW_ACTION_TYPE_PORT_ID:
4591 		if (priv->num_rx_tc > 1) {
4592 			/* Remove entry from QoS table first */
4593 			ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW,
4594 						    priv->token,
4595 						    &flow->qos_rule);
4596 			if (ret < 0) {
4597 				DPAA2_PMD_ERR("Remove FS QoS entry failed");
4598 				dpaa2_flow_qos_entry_log("Delete failed", flow,
4599 							 -1);
				/* Do not abort here; report the failure
				 * through the normal error path below.
				 */
4601 				goto error;
4602 			}
4603 		}
4604 
4605 		/* Then remove entry from FS table */
4606 		ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token,
4607 					   flow->tc_id, &flow->fs_rule);
4608 		if (ret < 0) {
4609 			DPAA2_PMD_ERR("Remove entry from FS[%d] failed",
4610 				      flow->tc_id);
4611 			goto error;
4612 		}
4613 		break;
4614 	case RTE_FLOW_ACTION_TYPE_RSS:
4615 		if (priv->num_rx_tc > 1) {
4616 			ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW,
4617 						    priv->token,
4618 						    &flow->qos_rule);
4619 			if (ret < 0) {
4620 				DPAA2_PMD_ERR("Remove RSS QoS entry failed");
4621 				goto error;
4622 			}
4623 		}
4624 		break;
4625 	default:
4626 		DPAA2_PMD_ERR("Action(%d) not supported", flow->action_type);
4627 		ret = -ENOTSUP;
4628 		break;
4629 	}
4630 
4631 	LIST_REMOVE(flow, next);
4632 	if (flow->qos_key_addr)
4633 		rte_free(flow->qos_key_addr);
4634 	if (flow->qos_mask_addr)
4635 		rte_free(flow->qos_mask_addr);
4636 	if (flow->fs_key_addr)
4637 		rte_free(flow->fs_key_addr);
4638 	if (flow->fs_mask_addr)
4639 		rte_free(flow->fs_mask_addr);
4640 	/* Now free the flow */
4641 	rte_free(flow);
4642 
4643 error:
4644 	if (ret)
4645 		rte_flow_error_set(error, EPERM,
4646 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4647 				   NULL, "unknown");
4648 	return ret;
4649 }
4650 
4651 /**
4652  * Destroy user-configured flow rules.
4653  *
 * This function skips internal flow rules.
4655  *
4656  * @see rte_flow_flush()
4657  * @see rte_flow_ops
4658  */
4659 static int
4660 dpaa2_flow_flush(struct rte_eth_dev *dev,
4661 		struct rte_flow_error *error)
4662 {
4663 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4664 	struct dpaa2_dev_flow *flow = LIST_FIRST(&priv->flows);
4665 
4666 	while (flow) {
4667 		struct dpaa2_dev_flow *next = LIST_NEXT(flow, next);
4668 
4669 		dpaa2_flow_destroy(dev, (struct rte_flow *)flow, error);
4670 		flow = next;
4671 	}
4672 	return 0;
4673 }
4674 
4675 static int
4676 dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused,
4677 	struct rte_flow *_flow __rte_unused,
4678 	const struct rte_flow_action *actions __rte_unused,
4679 	void *data __rte_unused,
4680 	struct rte_flow_error *error __rte_unused)
4681 {
4682 	return 0;
4683 }
4684 
4685 /**
4686  * Clean up all flow rules.
4687  *
4688  * Unlike dpaa2_flow_flush(), this function takes care of all remaining flow
4689  * rules regardless of whether they are internal or user-configured.
4690  *
 * @param dev
 *   Pointer to the Ethernet device structure.
4693  */
4694 void
4695 dpaa2_flow_clean(struct rte_eth_dev *dev)
4696 {
4697 	struct dpaa2_dev_flow *flow;
4698 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
4699 
4700 	while ((flow = LIST_FIRST(&priv->flows)))
4701 		dpaa2_flow_destroy(dev, (struct rte_flow *)flow, NULL);
4702 }
4703 
4704 const struct rte_flow_ops dpaa2_flow_ops = {
4705 	.create	= dpaa2_flow_create,
4706 	.validate = dpaa2_flow_validate,
4707 	.destroy = dpaa2_flow_destroy,
4708 	.flush	= dpaa2_flow_flush,
4709 	.query	= dpaa2_flow_query,
4710 };
4711