xref: /dpdk/drivers/net/dpaa2/dpaa2_flow.c (revision 200a33e4c2b0f401a9bfb3cc4a8f3fe8ad8923ad)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2022 NXP
3  */
4 
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 
13 #include <rte_ethdev.h>
14 #include <rte_log.h>
15 #include <rte_malloc.h>
16 #include <rte_flow_driver.h>
17 #include <rte_tailq.h>
18 
19 #include <fsl_dpni.h>
20 #include <fsl_dpkg.h>
21 
22 #include <dpaa2_ethdev.h>
23 #include <dpaa2_pmd_logs.h>
24 
25 static char *dpaa2_flow_control_log;
26 static uint16_t dpaa2_flow_miss_flow_id; /* Default miss flow id is 0. */
27 
28 enum dpaa2_flow_entry_size {
29 	DPAA2_FLOW_ENTRY_MIN_SIZE = (DPNI_MAX_KEY_SIZE / 2),
30 	DPAA2_FLOW_ENTRY_MAX_SIZE = DPNI_MAX_KEY_SIZE
31 };
32 
33 enum dpaa2_flow_dist_type {
34 	DPAA2_FLOW_QOS_TYPE = 1 << 0,
35 	DPAA2_FLOW_FS_TYPE = 1 << 1
36 };
37 
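/* A raw (payload) extract is identified by a synthetic field value
 * encoded as (offset << DPAA2_FLOW_RAW_OFFSET_FIELD_SHIFT) | size.
 */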
38 #define DPAA2_FLOW_RAW_OFFSET_FIELD_SHIFT	16
39 #define DPAA2_FLOW_MAX_KEY_SIZE			16
40 
41 struct dpaa2_dev_flow {
42 	LIST_ENTRY(dpaa2_dev_flow) next;
43 	struct dpni_rule_cfg qos_rule;
44 	uint8_t *qos_key_addr;
45 	uint8_t *qos_mask_addr;
46 	uint16_t qos_rule_size;
47 	struct dpni_rule_cfg fs_rule;
48 	uint8_t qos_real_key_size;
49 	uint8_t fs_real_key_size;
50 	uint8_t *fs_key_addr;
51 	uint8_t *fs_mask_addr;
52 	uint16_t fs_rule_size;
53 	uint8_t tc_id; /**< Traffic Class ID. */
54 	uint8_t tc_index; /**< Index within this Traffic Class. */
55 	enum rte_flow_action_type action_type;
56 	struct dpni_fs_action_cfg fs_action_cfg;
57 };
58 
59 static const
60 enum rte_flow_item_type dpaa2_supported_pattern_type[] = {
61 	RTE_FLOW_ITEM_TYPE_END,
62 	RTE_FLOW_ITEM_TYPE_ETH,
63 	RTE_FLOW_ITEM_TYPE_VLAN,
64 	RTE_FLOW_ITEM_TYPE_IPV4,
65 	RTE_FLOW_ITEM_TYPE_IPV6,
66 	RTE_FLOW_ITEM_TYPE_ICMP,
67 	RTE_FLOW_ITEM_TYPE_UDP,
68 	RTE_FLOW_ITEM_TYPE_TCP,
69 	RTE_FLOW_ITEM_TYPE_SCTP,
70 	RTE_FLOW_ITEM_TYPE_GRE,
71 };
72 
73 static const
74 enum rte_flow_action_type dpaa2_supported_action_type[] = {
75 	RTE_FLOW_ACTION_TYPE_END,
76 	RTE_FLOW_ACTION_TYPE_QUEUE,
77 	RTE_FLOW_ACTION_TYPE_PORT_ID,
78 	RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
79 	RTE_FLOW_ACTION_TYPE_RSS
80 };
81 
82 #ifndef __cplusplus
83 static const struct rte_flow_item_eth dpaa2_flow_item_eth_mask = {
84 	.hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
85 	.hdr.src_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
86 	.hdr.ether_type = RTE_BE16(0xffff),
87 };
88 
89 static const struct rte_flow_item_vlan dpaa2_flow_item_vlan_mask = {
90 	.hdr.vlan_tci = RTE_BE16(0xffff),
91 };
92 
93 static const struct rte_flow_item_ipv4 dpaa2_flow_item_ipv4_mask = {
94 	.hdr.src_addr = RTE_BE32(0xffffffff),
95 	.hdr.dst_addr = RTE_BE32(0xffffffff),
96 	.hdr.next_proto_id = 0xff,
97 };
98 
99 static const struct rte_flow_item_ipv6 dpaa2_flow_item_ipv6_mask = {
100 	.hdr = {
101 		.src_addr = RTE_IPV6_MASK_FULL,
102 		.dst_addr = RTE_IPV6_MASK_FULL,
103 		.proto = 0xff
104 	},
105 };
106 
107 static const struct rte_flow_item_icmp dpaa2_flow_item_icmp_mask = {
108 	.hdr.icmp_type = 0xff,
109 	.hdr.icmp_code = 0xff,
110 };
111 
112 static const struct rte_flow_item_udp dpaa2_flow_item_udp_mask = {
113 	.hdr = {
114 		.src_port = RTE_BE16(0xffff),
115 		.dst_port = RTE_BE16(0xffff),
116 	},
117 };
118 
119 static const struct rte_flow_item_tcp dpaa2_flow_item_tcp_mask = {
120 	.hdr = {
121 		.src_port = RTE_BE16(0xffff),
122 		.dst_port = RTE_BE16(0xffff),
123 	},
124 };
125 
126 static const struct rte_flow_item_sctp dpaa2_flow_item_sctp_mask = {
127 	.hdr = {
128 		.src_port = RTE_BE16(0xffff),
129 		.dst_port = RTE_BE16(0xffff),
130 	},
131 };
132 
133 static const struct rte_flow_item_gre dpaa2_flow_item_gre_mask = {
134 	.protocol = RTE_BE16(0xffff),
135 };
136 #endif
137 
138 #define DPAA2_FLOW_DUMP printf
139 
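/* Render a protocol/field pair as a readable name for flow dumps. */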
140 static inline void
141 dpaa2_prot_field_string(uint32_t prot, uint32_t field,
142 	char *string)
143 {
144 	if (!dpaa2_flow_control_log)
145 		return;
146 
147 	if (prot == NET_PROT_ETH) {
148 		strcpy(string, "eth");
149 		if (field == NH_FLD_ETH_DA)
150 			strcat(string, ".dst");
151 		else if (field == NH_FLD_ETH_SA)
152 			strcat(string, ".src");
153 		else if (field == NH_FLD_ETH_TYPE)
154 			strcat(string, ".type");
155 		else
156 			strcat(string, ".unknown field");
157 	} else if (prot == NET_PROT_VLAN) {
158 		strcpy(string, "vlan");
159 		if (field == NH_FLD_VLAN_TCI)
160 			strcat(string, ".tci");
161 		else
162 			strcat(string, ".unknown field");
163 	} else if (prot == NET_PROT_IP) {
164 		strcpy(string, "ip");
165 		if (field == NH_FLD_IP_SRC)
166 			strcat(string, ".src");
167 		else if (field == NH_FLD_IP_DST)
168 			strcat(string, ".dst");
169 		else if (field == NH_FLD_IP_PROTO)
170 			strcat(string, ".proto");
171 		else
172 			strcat(string, ".unknown field");
173 	} else if (prot == NET_PROT_TCP) {
174 		strcpy(string, "tcp");
175 		if (field == NH_FLD_TCP_PORT_SRC)
176 			strcat(string, ".src");
177 		else if (field == NH_FLD_TCP_PORT_DST)
178 			strcat(string, ".dst");
179 		else
180 			strcat(string, ".unknown field");
181 	} else if (prot == NET_PROT_UDP) {
182 		strcpy(string, "udp");
183 		if (field == NH_FLD_UDP_PORT_SRC)
184 			strcat(string, ".src");
185 		else if (field == NH_FLD_UDP_PORT_DST)
186 			strcat(string, ".dst");
187 		else
188 			strcat(string, ".unknown field");
189 	} else if (prot == NET_PROT_ICMP) {
190 		strcpy(string, "icmp");
191 		if (field == NH_FLD_ICMP_TYPE)
192 			strcat(string, ".type");
193 		else if (field == NH_FLD_ICMP_CODE)
194 			strcat(string, ".code");
195 		else
196 			strcat(string, ".unknown field");
197 	} else if (prot == NET_PROT_SCTP) {
198 		strcpy(string, "sctp");
199 		if (field == NH_FLD_SCTP_PORT_SRC)
200 			strcat(string, ".src");
201 		else if (field == NH_FLD_SCTP_PORT_DST)
202 			strcat(string, ".dst");
203 		else
204 			strcat(string, ".unknown field");
205 	} else if (prot == NET_PROT_GRE) {
206 		strcpy(string, "gre");
207 		if (field == NH_FLD_GRE_TYPE)
208 			strcat(string, ".type");
209 		else
210 			strcat(string, ".unknown field");
211 	} else {
212 		strcpy(string, "unknown protocol");
213 	}
214 }
215 
216 static inline void
217 dpaa2_flow_qos_extracts_log(const struct dpaa2_dev_priv *priv)
218 {
219 	int idx;
220 	char string[32];
221 	const struct dpkg_profile_cfg *dpkg =
222 		&priv->extract.qos_key_extract.dpkg;
223 	const struct dpkg_extract *extract;
224 	enum dpkg_extract_type type;
225 	enum net_prot prot;
226 	uint32_t field;
227 
228 	if (!dpaa2_flow_control_log)
229 		return;
230 
231 	DPAA2_FLOW_DUMP("QoS table: %d extracts\r\n",
232 		dpkg->num_extracts);
233 	for (idx = 0; idx < dpkg->num_extracts; idx++) {
234 		extract = &dpkg->extracts[idx];
235 		type = extract->type;
236 		if (type == DPKG_EXTRACT_FROM_HDR) {
237 			prot = extract->extract.from_hdr.prot;
238 			field = extract->extract.from_hdr.field;
239 			dpaa2_prot_field_string(prot, field,
240 				string);
241 		} else if (type == DPKG_EXTRACT_FROM_DATA) {
242 			sprintf(string, "raw offset/len: %d/%d",
243 				extract->extract.from_data.offset,
244 				extract->extract.from_data.size);
245 		} else if (type == DPKG_EXTRACT_FROM_PARSE) {
246 			sprintf(string, "parse offset/len: %d/%d",
247 				extract->extract.from_parse.offset,
248 				extract->extract.from_parse.size);
249 		}
250 		DPAA2_FLOW_DUMP("%s", string);
251 		if ((idx + 1) < dpkg->num_extracts)
252 			DPAA2_FLOW_DUMP(" / ");
253 	}
254 	DPAA2_FLOW_DUMP("\r\n");
255 }
256 
257 static inline void
258 dpaa2_flow_fs_extracts_log(const struct dpaa2_dev_priv *priv,
259 	int tc_id)
260 {
261 	int idx;
262 	char string[32];
263 	const struct dpkg_profile_cfg *dpkg =
264 		&priv->extract.tc_key_extract[tc_id].dpkg;
265 	const struct dpkg_extract *extract;
266 	enum dpkg_extract_type type;
267 	enum net_prot prot;
268 	uint32_t field;
269 
270 	if (!dpaa2_flow_control_log)
271 		return;
272 
273 	DPAA2_FLOW_DUMP("FS table: %d extracts in TC[%d]\r\n",
274 		dpkg->num_extracts, tc_id);
275 	for (idx = 0; idx < dpkg->num_extracts; idx++) {
276 		extract = &dpkg->extracts[idx];
277 		type = extract->type;
278 		if (type == DPKG_EXTRACT_FROM_HDR) {
279 			prot = extract->extract.from_hdr.prot;
280 			field = extract->extract.from_hdr.field;
281 			dpaa2_prot_field_string(prot, field,
282 				string);
283 		} else if (type == DPKG_EXTRACT_FROM_DATA) {
284 			sprintf(string, "raw offset/len: %d/%d",
285 				extract->extract.from_data.offset,
286 				extract->extract.from_data.size);
287 		} else if (type == DPKG_EXTRACT_FROM_PARSE) {
288 			sprintf(string, "parse offset/len: %d/%d",
289 				extract->extract.from_parse.offset,
290 				extract->extract.from_parse.size);
291 		}
292 		DPAA2_FLOW_DUMP("%s", string);
293 		if ((idx + 1) < dpkg->num_extracts)
294 			DPAA2_FLOW_DUMP(" / ");
295 	}
296 	DPAA2_FLOW_DUMP("\r\n");
297 }
298 
299 static inline void
300 dpaa2_flow_qos_entry_log(const char *log_info,
301 	const struct dpaa2_dev_flow *flow, int qos_index)
302 {
303 	int idx;
304 	uint8_t *key, *mask;
305 
306 	if (!dpaa2_flow_control_log)
307 		return;
308 
309 	if (qos_index >= 0) {
310 		DPAA2_FLOW_DUMP("%s QoS entry[%d](size %d/%d) for TC[%d]\r\n",
311 			log_info, qos_index, flow->qos_rule_size,
312 			flow->qos_rule.key_size,
313 			flow->tc_id);
314 	} else {
315 		DPAA2_FLOW_DUMP("%s QoS entry(size %d/%d) for TC[%d]\r\n",
316 			log_info, flow->qos_rule_size,
317 			flow->qos_rule.key_size,
318 			flow->tc_id);
319 	}
320 
321 	key = flow->qos_key_addr;
322 	mask = flow->qos_mask_addr;
323 
324 	DPAA2_FLOW_DUMP("key:\r\n");
325 	for (idx = 0; idx < flow->qos_rule_size; idx++)
326 		DPAA2_FLOW_DUMP("%02x ", key[idx]);
327 
328 	DPAA2_FLOW_DUMP("\r\nmask:\r\n");
329 	for (idx = 0; idx < flow->qos_rule_size; idx++)
330 		DPAA2_FLOW_DUMP("%02x ", mask[idx]);
331 	DPAA2_FLOW_DUMP("\r\n");
332 }
333 
334 static inline void
335 dpaa2_flow_fs_entry_log(const char *log_info,
336 	const struct dpaa2_dev_flow *flow)
337 {
338 	int idx;
339 	uint8_t *key, *mask;
340 
341 	if (!dpaa2_flow_control_log)
342 		return;
343 
344 	DPAA2_FLOW_DUMP("%s FS/TC entry[%d](size %d/%d) of TC[%d]\r\n",
345 		log_info, flow->tc_index,
346 		flow->fs_rule_size, flow->fs_rule.key_size,
347 		flow->tc_id);
348 
349 	key = flow->fs_key_addr;
350 	mask = flow->fs_mask_addr;
351 
352 	DPAA2_FLOW_DUMP("key:\r\n");
353 	for (idx = 0; idx < flow->fs_rule_size; idx++)
354 		DPAA2_FLOW_DUMP("%02x ", key[idx]);
355 
356 	DPAA2_FLOW_DUMP("\r\nmask:\r\n");
357 	for (idx = 0; idx < flow->fs_rule_size; idx++)
358 		DPAA2_FLOW_DUMP("%02x ", mask[idx]);
359 	DPAA2_FLOW_DUMP("\r\n");
360 }
361 
362 static int
363 dpaa2_flow_ip_address_extract(enum net_prot prot,
364 	uint32_t field)
365 {
366 	if (prot == NET_PROT_IPV4 &&
367 		(field == NH_FLD_IPV4_SRC_IP ||
368 		field == NH_FLD_IPV4_DST_IP))
369 		return true;
370 	else if (prot == NET_PROT_IPV6 &&
371 		(field == NH_FLD_IPV6_SRC_IP ||
372 		field == NH_FLD_IPV6_DST_IP))
373 		return true;
374 	else if (prot == NET_PROT_IP &&
375 		(field == NH_FLD_IP_SRC ||
376 		field == NH_FLD_IP_DST))
377 		return true;
378 
379 	return false;
380 }
381 
382 static int
383 dpaa2_flow_l4_src_port_extract(enum net_prot prot,
384 	uint32_t field)
385 {
386 	if (prot == NET_PROT_TCP &&
387 		field == NH_FLD_TCP_PORT_SRC)
388 		return true;
389 	else if (prot == NET_PROT_UDP &&
390 		field == NH_FLD_UDP_PORT_SRC)
391 		return true;
392 	else if (prot == NET_PROT_SCTP &&
393 		field == NH_FLD_SCTP_PORT_SRC)
394 		return true;
395 
396 	return false;
397 }
398 
399 static int
400 dpaa2_flow_l4_dst_port_extract(enum net_prot prot,
401 	uint32_t field)
402 {
403 	if (prot == NET_PROT_TCP &&
404 		field == NH_FLD_TCP_PORT_DST)
405 		return true;
406 	else if (prot == NET_PROT_UDP &&
407 		field == NH_FLD_UDP_PORT_DST)
408 		return true;
409 	else if (prot == NET_PROT_SCTP &&
410 		field == NH_FLD_SCTP_PORT_DST)
411 		return true;
412 
413 	return false;
414 }
415 
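/* Program one entry of the DPNI QoS (TC selection) table; the entry
 * index is derived from the flow's TC id and its index within the TC.
 */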
416 static int
417 dpaa2_flow_add_qos_rule(struct dpaa2_dev_priv *priv,
418 	struct dpaa2_dev_flow *flow)
419 {
420 	uint16_t qos_index;
421 	int ret;
422 	struct fsl_mc_io *dpni = priv->hw;
423 
424 	if (priv->num_rx_tc <= 1 &&
425 		flow->action_type != RTE_FLOW_ACTION_TYPE_RSS) {
426 		DPAA2_PMD_WARN("No QoS Table for FS");
427 		return -EINVAL;
428 	}
429 
430 	/* A QoS entry is only effective when multiple TCs are in use. */
431 	qos_index = flow->tc_id * priv->fs_entries + flow->tc_index;
432 	if (qos_index >= priv->qos_entries) {
433 		DPAA2_PMD_ERR("QoS table full (%d >= %d)",
434 			qos_index, priv->qos_entries);
435 		return -EINVAL;
436 	}
437 
438 	dpaa2_flow_qos_entry_log("Start add", flow, qos_index);
439 
440 	ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
441 			priv->token, &flow->qos_rule,
442 			flow->tc_id, qos_index,
443 			0, 0);
444 	if (ret < 0) {
445 		DPAA2_PMD_ERR("Add entry(%d) to table(%d) failed",
446 			qos_index, flow->tc_id);
447 		return ret;
448 	}
449 
450 	return 0;
451 }
452 
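/* Program one entry of the Flow Steering table of the flow's TC. */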
453 static int
454 dpaa2_flow_add_fs_rule(struct dpaa2_dev_priv *priv,
455 	struct dpaa2_dev_flow *flow)
456 {
457 	int ret;
458 	struct fsl_mc_io *dpni = priv->hw;
459 
460 	if (flow->tc_index >= priv->fs_entries) {
461 		DPAA2_PMD_ERR("FS table full (%d >= %d)",
462 			flow->tc_index, priv->fs_entries);
463 		return -EINVAL;
464 	}
465 
466 	dpaa2_flow_fs_entry_log("Start add", flow);
467 
468 	ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW,
469 			priv->token, flow->tc_id,
470 			flow->tc_index, &flow->fs_rule,
471 			&flow->fs_action_cfg);
472 	if (ret < 0) {
473 		DPAA2_PMD_ERR("Add rule(%d) to FS table(%d) failed",
474 			flow->tc_index, flow->tc_id);
475 		return ret;
476 	}
477 
478 	return 0;
479 }
480 
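/* Open a zero-filled hole of 'size' bytes at 'offset' in the key and
 * mask of a rule, shifting the bytes that follow towards the end.
 */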
481 static int
482 dpaa2_flow_rule_insert_hole(struct dpaa2_dev_flow *flow,
483 	int offset, int size,
484 	enum dpaa2_flow_dist_type dist_type)
485 {
486 	int end;
487 
488 	if (dist_type & DPAA2_FLOW_QOS_TYPE) {
489 		end = flow->qos_rule_size;
490 		if (end > offset) {
491 			memmove(flow->qos_key_addr + offset + size,
492 					flow->qos_key_addr + offset,
493 					end - offset);
494 			memset(flow->qos_key_addr + offset,
495 					0, size);
496 
497 			memmove(flow->qos_mask_addr + offset + size,
498 					flow->qos_mask_addr + offset,
499 					end - offset);
500 			memset(flow->qos_mask_addr + offset,
501 					0, size);
502 		}
503 		flow->qos_rule_size += size;
504 	}
505 
506 	if (dist_type & DPAA2_FLOW_FS_TYPE) {
507 		end = flow->fs_rule_size;
508 		if (end > offset) {
509 			memmove(flow->fs_key_addr + offset + size,
510 					flow->fs_key_addr + offset,
511 					end - offset);
512 			memset(flow->fs_key_addr + offset,
513 					0, size);
514 
515 			memmove(flow->fs_mask_addr + offset + size,
516 					flow->fs_mask_addr + offset,
517 					end - offset);
518 			memset(flow->fs_mask_addr + offset,
519 					0, size);
520 		}
521 		flow->fs_rule_size += size;
522 	}
523 
524 	return 0;
525 }
526 
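/* Re-apply all flows already in the list to hardware with the new
 * entry size, after the extract layout has changed.
 */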
527 static int
528 dpaa2_flow_rule_add_all(struct dpaa2_dev_priv *priv,
529 	enum dpaa2_flow_dist_type dist_type,
530 	uint16_t entry_size, uint8_t tc_id)
531 {
532 	struct dpaa2_dev_flow *curr = LIST_FIRST(&priv->flows);
533 	int ret;
534 
535 	while (curr) {
536 		if (dist_type & DPAA2_FLOW_QOS_TYPE) {
537 			if (priv->num_rx_tc > 1 ||
538 				curr->action_type ==
539 				RTE_FLOW_ACTION_TYPE_RSS) {
540 				curr->qos_rule.key_size = entry_size;
541 				ret = dpaa2_flow_add_qos_rule(priv, curr);
542 				if (ret)
543 					return ret;
544 			}
545 		}
546 		if (dist_type & DPAA2_FLOW_FS_TYPE &&
547 			curr->tc_id == tc_id) {
548 			curr->fs_rule.key_size = entry_size;
549 			ret = dpaa2_flow_add_fs_rule(priv, curr);
550 			if (ret)
551 				return ret;
552 		}
553 		curr = LIST_NEXT(curr, next);
554 	}
555 
556 	return 0;
557 }
558 
559 static int
560 dpaa2_flow_qos_rule_insert_hole(struct dpaa2_dev_priv *priv,
561 	int offset, int size)
562 {
563 	struct dpaa2_dev_flow *curr;
564 	int ret;
565 
566 	curr = priv->curr;
567 	if (!curr) {
568 		DPAA2_PMD_ERR("No current QoS flow to insert hole into");
569 		return -EINVAL;
570 	} else {
571 		ret = dpaa2_flow_rule_insert_hole(curr, offset, size,
572 				DPAA2_FLOW_QOS_TYPE);
573 		if (ret)
574 			return ret;
575 	}
576 
577 	curr = LIST_FIRST(&priv->flows);
578 	while (curr) {
579 		ret = dpaa2_flow_rule_insert_hole(curr, offset, size,
580 				DPAA2_FLOW_QOS_TYPE);
581 		if (ret)
582 			return ret;
583 		curr = LIST_NEXT(curr, next);
584 	}
585 
586 	return 0;
587 }
588 
589 static int
590 dpaa2_flow_fs_rule_insert_hole(struct dpaa2_dev_priv *priv,
591 	int offset, int size, int tc_id)
592 {
593 	struct dpaa2_dev_flow *curr;
594 	int ret;
595 
596 	curr = priv->curr;
597 	if (!curr || curr->tc_id != tc_id) {
598 		DPAA2_PMD_ERR("No current FS flow to insert hole into");
599 		return -EINVAL;
600 	} else {
601 		ret = dpaa2_flow_rule_insert_hole(curr, offset, size,
602 				DPAA2_FLOW_FS_TYPE);
603 		if (ret)
604 			return ret;
605 	}
606 
607 	curr = LIST_FIRST(&priv->flows);
608 
609 	while (curr) {
610 		if (curr->tc_id != tc_id) {
611 			curr = LIST_NEXT(curr, next);
612 			continue;
613 		}
614 		ret = dpaa2_flow_rule_insert_hole(curr, offset, size,
615 				DPAA2_FLOW_FS_TYPE);
616 		if (ret)
617 			return ret;
618 		curr = LIST_NEXT(curr, next);
619 	}
620 
621 	return 0;
622 }
623 
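/* Reserve a one-byte slot in the key profile for a parser frame
 * attribute flags (FAF) byte, keeping IP address extracts at the end.
 */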
624 static int
625 dpaa2_flow_faf_advance(struct dpaa2_dev_priv *priv,
626 	int faf_byte, enum dpaa2_flow_dist_type dist_type, int tc_id,
627 	int *insert_offset)
628 {
629 	int offset, ret;
630 	struct dpaa2_key_profile *key_profile;
631 	int num, pos;
632 
633 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
634 		key_profile = &priv->extract.qos_key_extract.key_profile;
635 	else
636 		key_profile = &priv->extract.tc_key_extract[tc_id].key_profile;
637 
638 	num = key_profile->num;
639 
640 	if (num >= DPKG_MAX_NUM_OF_EXTRACTS) {
641 		DPAA2_PMD_ERR("Number of extracts overflows");
642 		return -EINVAL;
643 	}
644 
645 	if (key_profile->ip_addr_type != IP_NONE_ADDR_EXTRACT) {
646 		offset = key_profile->ip_addr_extract_off;
647 		pos = key_profile->ip_addr_extract_pos;
648 		key_profile->ip_addr_extract_pos++;
649 		key_profile->ip_addr_extract_off++;
650 		if (dist_type == DPAA2_FLOW_QOS_TYPE) {
651 			ret = dpaa2_flow_qos_rule_insert_hole(priv,
652 					offset, 1);
653 		} else {
654 			ret = dpaa2_flow_fs_rule_insert_hole(priv,
655 				offset, 1, tc_id);
656 		}
657 		if (ret)
658 			return ret;
659 	} else {
660 		pos = num;
661 	}
662 
663 	if (pos > 0) {
664 		key_profile->key_offset[pos] =
665 			key_profile->key_offset[pos - 1] +
666 			key_profile->key_size[pos - 1];
667 	} else {
668 		key_profile->key_offset[pos] = 0;
669 	}
670 
671 	key_profile->key_size[pos] = 1;
672 	key_profile->prot_field[pos].type = DPAA2_FAF_KEY;
673 	key_profile->prot_field[pos].key_field = faf_byte;
674 	key_profile->num++;
675 
676 	if (insert_offset)
677 		*insert_offset = key_profile->key_offset[pos];
678 
679 	key_profile->key_max_size++;
680 
681 	return pos;
682 }
683 
684 /* Make room for a new extract ahead of any IP address extract.
685  * Current MC/WRIOP only supports a generic IP extract whose size is
686  * not fixed (IPv4 vs IPv6), so IP addresses must stay at the end of
687  * the key; otherwise the offsets of extracts after them are unknown.
688  */
689 static int
690 dpaa2_flow_key_profile_advance(enum net_prot prot,
691 	uint32_t field, uint8_t field_size,
692 	struct dpaa2_dev_priv *priv,
693 	enum dpaa2_flow_dist_type dist_type, int tc_id,
694 	int *insert_offset)
695 {
696 	int offset, ret;
697 	struct dpaa2_key_profile *key_profile;
698 	int num, pos;
699 
700 	if (dpaa2_flow_ip_address_extract(prot, field)) {
701 		DPAA2_PMD_ERR("%s is only for non-IP-address extracts",
702 			__func__);
703 		return -EINVAL;
704 	}
705 
706 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
707 		key_profile = &priv->extract.qos_key_extract.key_profile;
708 	else
709 		key_profile = &priv->extract.tc_key_extract[tc_id].key_profile;
710 
711 	num = key_profile->num;
712 
713 	if (num >= DPKG_MAX_NUM_OF_EXTRACTS) {
714 		DPAA2_PMD_ERR("Number of extracts overflows");
715 		return -EINVAL;
716 	}
717 
718 	if (key_profile->ip_addr_type != IP_NONE_ADDR_EXTRACT) {
719 		offset = key_profile->ip_addr_extract_off;
720 		pos = key_profile->ip_addr_extract_pos;
721 		key_profile->ip_addr_extract_pos++;
722 		key_profile->ip_addr_extract_off += field_size;
723 		if (dist_type == DPAA2_FLOW_QOS_TYPE) {
724 			ret = dpaa2_flow_qos_rule_insert_hole(priv,
725 					offset, field_size);
726 		} else {
727 			ret = dpaa2_flow_fs_rule_insert_hole(priv,
728 				offset, field_size, tc_id);
729 		}
730 		if (ret)
731 			return ret;
732 	} else {
733 		pos = num;
734 	}
735 
736 	if (pos > 0) {
737 		key_profile->key_offset[pos] =
738 			key_profile->key_offset[pos - 1] +
739 			key_profile->key_size[pos - 1];
740 	} else {
741 		key_profile->key_offset[pos] = 0;
742 	}
743 
744 	key_profile->key_size[pos] = field_size;
745 	key_profile->prot_field[pos].type = DPAA2_NET_PROT_KEY;
746 	key_profile->prot_field[pos].prot = prot;
747 	key_profile->prot_field[pos].key_field = field;
748 	key_profile->num++;
749 
750 	if (insert_offset)
751 		*insert_offset = key_profile->key_offset[pos];
752 
753 	if (dpaa2_flow_l4_src_port_extract(prot, field)) {
754 		key_profile->l4_src_port_present = 1;
755 		key_profile->l4_src_port_pos = pos;
756 		key_profile->l4_src_port_offset =
757 			key_profile->key_offset[pos];
758 	} else if (dpaa2_flow_l4_dst_port_extract(prot, field)) {
759 		key_profile->l4_dst_port_present = 1;
760 		key_profile->l4_dst_port_pos = pos;
761 		key_profile->l4_dst_port_offset =
762 			key_profile->key_offset[pos];
763 	}
764 	key_profile->key_max_size += field_size;
765 
766 	return pos;
767 }
768 
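/* Add a DPKG extract pulling one FAF byte from the parse result. */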
769 static int
770 dpaa2_flow_faf_add_hdr(int faf_byte,
771 	struct dpaa2_dev_priv *priv,
772 	enum dpaa2_flow_dist_type dist_type, int tc_id,
773 	int *insert_offset)
774 {
775 	int pos, i, offset;
776 	struct dpaa2_key_extract *key_extract;
777 	struct dpkg_profile_cfg *dpkg;
778 	struct dpkg_extract *extracts;
779 
780 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
781 		key_extract = &priv->extract.qos_key_extract;
782 	else
783 		key_extract = &priv->extract.tc_key_extract[tc_id];
784 
785 	dpkg = &key_extract->dpkg;
786 	extracts = dpkg->extracts;
787 
788 	if (dpkg->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
789 		DPAA2_PMD_ERR("Number of extracts overflows");
790 		return -EINVAL;
791 	}
792 
793 	pos = dpaa2_flow_faf_advance(priv,
794 			faf_byte, dist_type, tc_id,
795 			insert_offset);
796 	if (pos < 0)
797 		return pos;
798 
799 	if (pos != dpkg->num_extracts) {
800 		/* Not the last pos, so an IP address extract must follow. */
801 		for (i = dpkg->num_extracts - 1; i >= pos; i--) {
802 			memcpy(&extracts[i + 1],
803 				&extracts[i], sizeof(struct dpkg_extract));
804 		}
805 	}
806 
807 	offset = DPAA2_FAFE_PSR_OFFSET + faf_byte;
808 
809 	extracts[pos].type = DPKG_EXTRACT_FROM_PARSE;
810 	extracts[pos].extract.from_parse.offset = offset;
811 	extracts[pos].extract.from_parse.size = 1;
812 
813 	dpkg->num_extracts++;
814 
815 	return 0;
816 }
817 
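/* Add a full-field header extract to the DPKG profile and key profile. */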
818 static int
819 dpaa2_flow_extract_add_hdr(enum net_prot prot,
820 	uint32_t field, uint8_t field_size,
821 	struct dpaa2_dev_priv *priv,
822 	enum dpaa2_flow_dist_type dist_type, int tc_id,
823 	int *insert_offset)
824 {
825 	int pos, i;
826 	struct dpaa2_key_extract *key_extract;
827 	struct dpkg_profile_cfg *dpkg;
828 	struct dpkg_extract *extracts;
829 
830 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
831 		key_extract = &priv->extract.qos_key_extract;
832 	else
833 		key_extract = &priv->extract.tc_key_extract[tc_id];
834 
835 	dpkg = &key_extract->dpkg;
836 	extracts = dpkg->extracts;
837 
838 	if (dpaa2_flow_ip_address_extract(prot, field)) {
839 		DPAA2_PMD_ERR("%s is only for non-IP-address extracts",
840 			__func__);
841 		return -EINVAL;
842 	}
843 
844 	if (dpkg->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
845 		DPAA2_PMD_ERR("Number of extracts overflows");
846 		return -EINVAL;
847 	}
848 
849 	pos = dpaa2_flow_key_profile_advance(prot,
850 			field, field_size, priv,
851 			dist_type, tc_id,
852 			insert_offset);
853 	if (pos < 0)
854 		return pos;
855 
856 	if (pos != dpkg->num_extracts) {
857 		/* Not the last pos, so an IP address extract must follow. */
858 		for (i = dpkg->num_extracts - 1; i >= pos; i--) {
859 			memcpy(&extracts[i + 1],
860 				&extracts[i], sizeof(struct dpkg_extract));
861 		}
862 	}
863 
864 	extracts[pos].type = DPKG_EXTRACT_FROM_HDR;
865 	extracts[pos].extract.from_hdr.prot = prot;
866 	extracts[pos].extract.from_hdr.type = DPKG_FULL_FIELD;
867 	extracts[pos].extract.from_hdr.field = field;
868 
869 	dpkg->num_extracts++;
870 
871 	return 0;
872 }
873 
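/* Create a new raw (payload) extract region, split into chunks of at
 * most DPAA2_FLOW_MAX_KEY_SIZE bytes.
 */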
874 static int
875 dpaa2_flow_extract_new_raw(struct dpaa2_dev_priv *priv,
876 	int offset, int size,
877 	enum dpaa2_flow_dist_type dist_type, int tc_id)
878 {
879 	struct dpaa2_key_extract *key_extract;
880 	struct dpkg_profile_cfg *dpkg;
881 	struct dpaa2_key_profile *key_profile;
882 	int last_extract_size, index, pos, item_size;
883 	uint8_t num_extracts;
884 	uint32_t field;
885 
886 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
887 		key_extract = &priv->extract.qos_key_extract;
888 	else
889 		key_extract = &priv->extract.tc_key_extract[tc_id];
890 
891 	dpkg = &key_extract->dpkg;
892 	key_profile = &key_extract->key_profile;
893 
894 	key_profile->raw_region.raw_start = 0;
895 	key_profile->raw_region.raw_size = 0;
896 
897 	last_extract_size = (size % DPAA2_FLOW_MAX_KEY_SIZE);
898 	num_extracts = (size / DPAA2_FLOW_MAX_KEY_SIZE);
899 	if (last_extract_size)
900 		num_extracts++;
901 	else
902 		last_extract_size = DPAA2_FLOW_MAX_KEY_SIZE;
903 
904 	for (index = 0; index < num_extracts; index++) {
905 		if (index == num_extracts - 1)
906 			item_size = last_extract_size;
907 		else
908 			item_size = DPAA2_FLOW_MAX_KEY_SIZE;
909 		field = offset << DPAA2_FLOW_RAW_OFFSET_FIELD_SHIFT;
910 		field |= item_size;
911 
912 		pos = dpaa2_flow_key_profile_advance(NET_PROT_PAYLOAD,
913 				field, item_size, priv, dist_type,
914 				tc_id, NULL);
915 		if (pos < 0)
916 			return pos;
917 
918 		dpkg->extracts[pos].type = DPKG_EXTRACT_FROM_DATA;
919 		dpkg->extracts[pos].extract.from_data.size = item_size;
920 		dpkg->extracts[pos].extract.from_data.offset = offset;
921 
922 		if (index == 0) {
923 			key_profile->raw_extract_pos = pos;
924 			key_profile->raw_extract_off =
925 				key_profile->key_offset[pos];
926 			key_profile->raw_region.raw_start = offset;
927 		}
928 		key_profile->raw_extract_num++;
929 		key_profile->raw_region.raw_size +=
930 			key_profile->key_size[pos];
931 
932 		offset += item_size;
933 		dpkg->num_extracts++;
934 	}
935 
936 	return 0;
937 }
938 
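/* Extend an existing raw region to cover [offset, offset + size):
 * insert holes into the rules already programmed, then rebuild the raw
 * extracts and restore the extracts that follow them.
 */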
939 static int
940 dpaa2_flow_extract_add_raw(struct dpaa2_dev_priv *priv,
941 	int offset, int size, enum dpaa2_flow_dist_type dist_type,
942 	int tc_id, int *recfg)
943 {
944 	struct dpaa2_key_profile *key_profile;
945 	struct dpaa2_raw_region *raw_region;
946 	int end = offset + size, ret = 0, extract_extended, sz_extend;
947 	int start_cmp, end_cmp, new_size, index, pos, end_pos;
948 	int last_extract_size, item_size, num_extracts, bk_num = 0;
949 	struct dpkg_extract extract_bk[DPKG_MAX_NUM_OF_EXTRACTS];
950 	uint8_t key_offset_bk[DPKG_MAX_NUM_OF_EXTRACTS];
951 	uint8_t key_size_bk[DPKG_MAX_NUM_OF_EXTRACTS];
952 	struct key_prot_field prot_field_bk[DPKG_MAX_NUM_OF_EXTRACTS];
953 	struct dpaa2_raw_region raw_hole;
954 	struct dpkg_profile_cfg *dpkg;
955 	enum net_prot prot;
956 	uint32_t field;
957 
958 	if (dist_type == DPAA2_FLOW_QOS_TYPE) {
959 		key_profile = &priv->extract.qos_key_extract.key_profile;
960 		dpkg = &priv->extract.qos_key_extract.dpkg;
961 	} else {
962 		key_profile = &priv->extract.tc_key_extract[tc_id].key_profile;
963 		dpkg = &priv->extract.tc_key_extract[tc_id].dpkg;
964 	}
965 
966 	raw_region = &key_profile->raw_region;
967 	if (!raw_region->raw_size) {
968 		/* New RAW region*/
969 		ret = dpaa2_flow_extract_new_raw(priv, offset, size,
970 			dist_type, tc_id);
971 		if (!ret && recfg)
972 			(*recfg) |= dist_type;
973 
974 		return ret;
975 	}
976 	start_cmp = raw_region->raw_start;
977 	end_cmp = raw_region->raw_start + raw_region->raw_size;
978 
979 	if (offset >= start_cmp && end <= end_cmp)
980 		return 0;
981 
982 	sz_extend = 0;
983 	new_size = raw_region->raw_size;
984 	if (offset < start_cmp) {
985 		sz_extend += start_cmp - offset;
986 		new_size += (start_cmp - offset);
987 	}
988 	if (end > end_cmp) {
989 		sz_extend += end - end_cmp;
990 		new_size += (end - end_cmp);
991 	}
992 
993 	last_extract_size = (new_size % DPAA2_FLOW_MAX_KEY_SIZE);
994 	num_extracts = (new_size / DPAA2_FLOW_MAX_KEY_SIZE);
995 	if (last_extract_size)
996 		num_extracts++;
997 	else
998 		last_extract_size = DPAA2_FLOW_MAX_KEY_SIZE;
999 
1000 	if ((key_profile->num + num_extracts -
1001 		key_profile->raw_extract_num) >=
1002 		DPKG_MAX_NUM_OF_EXTRACTS) {
1003 		DPAA2_PMD_ERR("%s: failed to expand raw extracts",
1004 			__func__);
1005 		return -EINVAL;
1006 	}
1007 
1008 	if (offset < start_cmp) {
1009 		raw_hole.raw_start = key_profile->raw_extract_off;
1010 		raw_hole.raw_size = start_cmp - offset;
1011 		raw_region->raw_start = offset;
1012 		raw_region->raw_size += start_cmp - offset;
1013 
1014 		if (dist_type & DPAA2_FLOW_QOS_TYPE) {
1015 			ret = dpaa2_flow_qos_rule_insert_hole(priv,
1016 					raw_hole.raw_start,
1017 					raw_hole.raw_size);
1018 			if (ret)
1019 				return ret;
1020 		}
1021 		if (dist_type & DPAA2_FLOW_FS_TYPE) {
1022 			ret = dpaa2_flow_fs_rule_insert_hole(priv,
1023 					raw_hole.raw_start,
1024 					raw_hole.raw_size, tc_id);
1025 			if (ret)
1026 				return ret;
1027 		}
1028 	}
1029 
1030 	if (end > end_cmp) {
1031 		raw_hole.raw_start =
1032 			key_profile->raw_extract_off +
1033 			raw_region->raw_size;
1034 		raw_hole.raw_size = end - end_cmp;
1035 		raw_region->raw_size += end - end_cmp;
1036 
1037 		if (dist_type & DPAA2_FLOW_QOS_TYPE) {
1038 			ret = dpaa2_flow_qos_rule_insert_hole(priv,
1039 					raw_hole.raw_start,
1040 					raw_hole.raw_size);
1041 			if (ret)
1042 				return ret;
1043 		}
1044 		if (dist_type & DPAA2_FLOW_FS_TYPE) {
1045 			ret = dpaa2_flow_fs_rule_insert_hole(priv,
1046 					raw_hole.raw_start,
1047 					raw_hole.raw_size, tc_id);
1048 			if (ret)
1049 				return ret;
1050 		}
1051 	}
1052 
1053 	end_pos = key_profile->raw_extract_pos +
1054 		key_profile->raw_extract_num;
1055 	if (key_profile->num > end_pos) {
1056 		bk_num = key_profile->num - end_pos;
1057 		memcpy(extract_bk, &dpkg->extracts[end_pos],
1058 			bk_num * sizeof(struct dpkg_extract));
1059 		memcpy(key_offset_bk, &key_profile->key_offset[end_pos],
1060 			bk_num * sizeof(uint8_t));
1061 		memcpy(key_size_bk, &key_profile->key_size[end_pos],
1062 			bk_num * sizeof(uint8_t));
1063 		memcpy(prot_field_bk, &key_profile->prot_field[end_pos],
1064 			bk_num * sizeof(struct key_prot_field));
1065 
1066 		for (index = 0; index < bk_num; index++) {
1067 			key_offset_bk[index] += sz_extend;
1068 			prot = prot_field_bk[index].prot;
1069 			field = prot_field_bk[index].key_field;
1070 			if (dpaa2_flow_l4_src_port_extract(prot,
1071 				field)) {
1072 				key_profile->l4_src_port_present = 1;
1073 				key_profile->l4_src_port_pos = end_pos + index;
1074 				key_profile->l4_src_port_offset =
1075 					key_offset_bk[index];
1076 			} else if (dpaa2_flow_l4_dst_port_extract(prot,
1077 				field)) {
1078 				key_profile->l4_dst_port_present = 1;
1079 				key_profile->l4_dst_port_pos = end_pos + index;
1080 				key_profile->l4_dst_port_offset =
1081 					key_offset_bk[index];
1082 			}
1083 		}
1084 	}
1085 
1086 	pos = key_profile->raw_extract_pos;
1087 
1088 	for (index = 0; index < num_extracts; index++) {
1089 		if (index == num_extracts - 1)
1090 			item_size = last_extract_size;
1091 		else
1092 			item_size = DPAA2_FLOW_MAX_KEY_SIZE;
1093 		field = offset << DPAA2_FLOW_RAW_OFFSET_FIELD_SHIFT;
1094 		field |= item_size;
1095 
1096 		if (pos > 0) {
1097 			key_profile->key_offset[pos] =
1098 				key_profile->key_offset[pos - 1] +
1099 				key_profile->key_size[pos - 1];
1100 		} else {
1101 			key_profile->key_offset[pos] = 0;
1102 		}
1103 		key_profile->key_size[pos] = item_size;
1104 		key_profile->prot_field[pos].type = DPAA2_NET_PROT_KEY;
1105 		key_profile->prot_field[pos].prot = NET_PROT_PAYLOAD;
1106 		key_profile->prot_field[pos].key_field = field;
1107 
1108 		dpkg->extracts[pos].type = DPKG_EXTRACT_FROM_DATA;
1109 		dpkg->extracts[pos].extract.from_data.size = item_size;
1110 		dpkg->extracts[pos].extract.from_data.offset = offset;
1111 		offset += item_size;
1112 		pos++;
1113 	}
1114 
1115 	if (bk_num) {
1116 		memcpy(&dpkg->extracts[pos], extract_bk,
1117 			bk_num * sizeof(struct dpkg_extract));
1118 		memcpy(&key_profile->key_offset[end_pos],
1119 			key_offset_bk, bk_num * sizeof(uint8_t));
1120 		memcpy(&key_profile->key_size[end_pos],
1121 			key_size_bk, bk_num * sizeof(uint8_t));
1122 		memcpy(&key_profile->prot_field[end_pos],
1123 			prot_field_bk, bk_num * sizeof(struct key_prot_field));
1124 	}
1125 
1126 	extract_extended = num_extracts - key_profile->raw_extract_num;
1127 	if (key_profile->ip_addr_type != IP_NONE_ADDR_EXTRACT) {
1128 		key_profile->ip_addr_extract_pos += extract_extended;
1129 		key_profile->ip_addr_extract_off += sz_extend;
1130 	}
1131 	key_profile->raw_extract_num = num_extracts;
1132 	key_profile->num += extract_extended;
1133 	key_profile->key_max_size += sz_extend;
1134 
1135 	dpkg->num_extracts += extract_extended;
1136 	if (!ret && recfg)
1137 		(*recfg) |= dist_type;
1138 
1139 	return ret;
1140 }
1141 
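/* Find the position of an extract in the key profile; fall back to a
 * recorded L4 port extract, or return -ENXIO if none matches.
 */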
1142 static inline int
1143 dpaa2_flow_extract_search(struct dpaa2_key_profile *key_profile,
1144 	enum key_prot_type type, enum net_prot prot, uint32_t key_field)
1145 {
1146 	int pos;
1147 	struct key_prot_field *prot_field;
1148 
1149 	if (dpaa2_flow_ip_address_extract(prot, key_field)) {
1150 		DPAA2_PMD_ERR("%s is only for non-IP-address extracts",
1151 			__func__);
1152 		return -EINVAL;
1153 	}
1154 
1155 	prot_field = key_profile->prot_field;
1156 	for (pos = 0; pos < key_profile->num; pos++) {
1157 		if (type == DPAA2_NET_PROT_KEY &&
1158 			prot_field[pos].prot == prot &&
1159 			prot_field[pos].key_field == key_field &&
1160 			prot_field[pos].type == type)
1161 			return pos;
1162 		else if (type == DPAA2_FAF_KEY &&
1163 			prot_field[pos].key_field == key_field &&
1164 			prot_field[pos].type == type)
1165 			return pos;
1166 	}
1167 
1168 	if (type == DPAA2_NET_PROT_KEY &&
1169 		dpaa2_flow_l4_src_port_extract(prot, key_field)) {
1170 		if (key_profile->l4_src_port_present)
1171 			return key_profile->l4_src_port_pos;
1172 	} else if (type == DPAA2_NET_PROT_KEY &&
1173 		dpaa2_flow_l4_dst_port_extract(prot, key_field)) {
1174 		if (key_profile->l4_dst_port_present)
1175 			return key_profile->l4_dst_port_pos;
1176 	}
1177 
1178 	return -ENXIO;
1179 }
1180 
1181 static inline int
1182 dpaa2_flow_extract_key_offset(struct dpaa2_key_profile *key_profile,
1183 	enum key_prot_type type, enum net_prot prot, uint32_t key_field)
1184 {
1185 	int i;
1186 
1187 	i = dpaa2_flow_extract_search(key_profile, type, prot, key_field);
1188 	if (i >= 0)
1189 		return key_profile->key_offset[i];
1190 	else
1191 		return i;
1192 }
1193 
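/* Set one FAF bit in both the key and the mask of the flow rule. */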
1194 static int
1195 dpaa2_flow_faf_add_rule(struct dpaa2_dev_priv *priv,
1196 	struct dpaa2_dev_flow *flow,
1197 	enum dpaa2_rx_faf_offset faf_bit_off,
1198 	int group,
1199 	enum dpaa2_flow_dist_type dist_type)
1200 {
1201 	int offset;
1202 	uint8_t *key_addr;
1203 	uint8_t *mask_addr;
1204 	struct dpaa2_key_extract *key_extract;
1205 	struct dpaa2_key_profile *key_profile;
1206 	uint8_t faf_byte = faf_bit_off / 8;
1207 	uint8_t faf_bit_in_byte = faf_bit_off % 8;
1208 
1209 	faf_bit_in_byte = 7 - faf_bit_in_byte;
1210 
1211 	if (dist_type & DPAA2_FLOW_QOS_TYPE) {
1212 		key_extract = &priv->extract.qos_key_extract;
1213 		key_profile = &key_extract->key_profile;
1214 
1215 		offset = dpaa2_flow_extract_key_offset(key_profile,
1216 				DPAA2_FAF_KEY, NET_PROT_NONE, faf_byte);
1217 		if (offset < 0) {
1218 			DPAA2_PMD_ERR("%s QoS key extract failed", __func__);
1219 			return -EINVAL;
1220 		}
1221 		key_addr = flow->qos_key_addr + offset;
1222 		mask_addr = flow->qos_mask_addr + offset;
1223 
1224 		if (!(*key_addr) &&
1225 			key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT)
1226 			flow->qos_rule_size++;
1227 
1228 		*key_addr |= (1 << faf_bit_in_byte);
1229 		*mask_addr |= (1 << faf_bit_in_byte);
1230 	}
1231 
1232 	if (dist_type & DPAA2_FLOW_FS_TYPE) {
1233 		key_extract = &priv->extract.tc_key_extract[group];
1234 		key_profile = &key_extract->key_profile;
1235 
1236 		offset = dpaa2_flow_extract_key_offset(key_profile,
1237 				DPAA2_FAF_KEY, NET_PROT_NONE, faf_byte);
1238 		if (offset < 0) {
1239 			DPAA2_PMD_ERR("%s TC[%d] key extract failed",
1240 				__func__, group);
1241 			return -EINVAL;
1242 		}
1243 		key_addr = flow->fs_key_addr + offset;
1244 		mask_addr = flow->fs_mask_addr + offset;
1245 
1246 		if (!(*key_addr) &&
1247 			key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT)
1248 			flow->fs_rule_size++;
1249 
1250 		*key_addr |= (1 << faf_bit_in_byte);
1251 		*mask_addr |= (1 << faf_bit_in_byte);
1252 	}
1253 
1254 	return 0;
1255 }
1256 
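/* Copy key/mask data of one header field into the flow rule. */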
1257 static inline int
1258 dpaa2_flow_hdr_rule_data_set(struct dpaa2_dev_flow *flow,
1259 	struct dpaa2_key_profile *key_profile,
1260 	enum net_prot prot, uint32_t field, int size,
1261 	const void *key, const void *mask,
1262 	enum dpaa2_flow_dist_type dist_type)
1263 {
1264 	int offset;
1265 
1266 	if (dpaa2_flow_ip_address_extract(prot, field)) {
1267 		DPAA2_PMD_ERR("%s is only for non-IP-address extracts",
1268 			__func__);
1269 		return -EINVAL;
1270 	}
1271 
1272 	offset = dpaa2_flow_extract_key_offset(key_profile,
1273 			DPAA2_NET_PROT_KEY, prot, field);
1274 	if (offset < 0) {
1275 		DPAA2_PMD_ERR("P(%d)/F(%d) does not exist!",
1276 			prot, field);
1277 		return -EINVAL;
1278 	}
1279 
1280 	if (dist_type & DPAA2_FLOW_QOS_TYPE) {
1281 		memcpy((flow->qos_key_addr + offset), key, size);
1282 		memcpy((flow->qos_mask_addr + offset), mask, size);
1283 		if (key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT)
1284 			flow->qos_rule_size = offset + size;
1285 	}
1286 
1287 	if (dist_type & DPAA2_FLOW_FS_TYPE) {
1288 		memcpy((flow->fs_key_addr + offset), key, size);
1289 		memcpy((flow->fs_mask_addr + offset), mask, size);
1290 		if (key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT)
1291 			flow->fs_rule_size = offset + size;
1292 	}
1293 
1294 	return 0;
1295 }
1296 
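/* Copy key/mask data of a raw (payload) match into the flow rule. */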
1297 static inline int
1298 dpaa2_flow_raw_rule_data_set(struct dpaa2_dev_flow *flow,
1299 	struct dpaa2_key_profile *key_profile,
1300 	uint32_t extract_offset, int size,
1301 	const void *key, const void *mask,
1302 	enum dpaa2_flow_dist_type dist_type)
1303 {
1304 	int extract_size = size > DPAA2_FLOW_MAX_KEY_SIZE ?
1305 		DPAA2_FLOW_MAX_KEY_SIZE : size;
1306 	int offset, field;
1307 
1308 	field = extract_offset << DPAA2_FLOW_RAW_OFFSET_FIELD_SHIFT;
1309 	field |= extract_size;
1310 	offset = dpaa2_flow_extract_key_offset(key_profile,
1311 			DPAA2_NET_PROT_KEY, NET_PROT_PAYLOAD, field);
1312 	if (offset < 0) {
1313 		DPAA2_PMD_ERR("offset(%d)/size(%d) raw extract failed",
1314 			extract_offset, size);
1315 		return -EINVAL;
1316 	}
1317 
1318 	if (dist_type & DPAA2_FLOW_QOS_TYPE) {
1319 		memcpy((flow->qos_key_addr + offset), key, size);
1320 		memcpy((flow->qos_mask_addr + offset), mask, size);
1321 		flow->qos_rule_size = offset + size;
1322 	}
1323 
1324 	if (dist_type & DPAA2_FLOW_FS_TYPE) {
1325 		memcpy((flow->fs_key_addr + offset), key, size);
1326 		memcpy((flow->fs_mask_addr + offset), mask, size);
1327 		flow->fs_rule_size = offset + size;
1328 	}
1329 
1330 	return 0;
1331 }
1332 
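/* Check the item mask is a subset of what the driver can extract by
 * OR-ing it into the supported mask and verifying nothing changed.
 */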
1333 static int
1334 dpaa2_flow_extract_support(const uint8_t *mask_src,
1335 	enum rte_flow_item_type type)
1336 {
1337 	char mask[64];
1338 	int i, size = 0;
1339 	const char *mask_support = NULL;
1340 
1341 	switch (type) {
1342 	case RTE_FLOW_ITEM_TYPE_ETH:
1343 		mask_support = (const char *)&dpaa2_flow_item_eth_mask;
1344 		size = sizeof(struct rte_flow_item_eth);
1345 		break;
1346 	case RTE_FLOW_ITEM_TYPE_VLAN:
1347 		mask_support = (const char *)&dpaa2_flow_item_vlan_mask;
1348 		size = sizeof(struct rte_flow_item_vlan);
1349 		break;
1350 	case RTE_FLOW_ITEM_TYPE_IPV4:
1351 		mask_support = (const char *)&dpaa2_flow_item_ipv4_mask;
1352 		size = sizeof(struct rte_flow_item_ipv4);
1353 		break;
1354 	case RTE_FLOW_ITEM_TYPE_IPV6:
1355 		mask_support = (const char *)&dpaa2_flow_item_ipv6_mask;
1356 		size = sizeof(struct rte_flow_item_ipv6);
1357 		break;
1358 	case RTE_FLOW_ITEM_TYPE_ICMP:
1359 		mask_support = (const char *)&dpaa2_flow_item_icmp_mask;
1360 		size = sizeof(struct rte_flow_item_icmp);
1361 		break;
1362 	case RTE_FLOW_ITEM_TYPE_UDP:
1363 		mask_support = (const char *)&dpaa2_flow_item_udp_mask;
1364 		size = sizeof(struct rte_flow_item_udp);
1365 		break;
1366 	case RTE_FLOW_ITEM_TYPE_TCP:
1367 		mask_support = (const char *)&dpaa2_flow_item_tcp_mask;
1368 		size = sizeof(struct rte_flow_item_tcp);
1369 		break;
1370 	case RTE_FLOW_ITEM_TYPE_SCTP:
1371 		mask_support = (const char *)&dpaa2_flow_item_sctp_mask;
1372 		size = sizeof(struct rte_flow_item_sctp);
1373 		break;
1374 	case RTE_FLOW_ITEM_TYPE_GRE:
1375 		mask_support = (const char *)&dpaa2_flow_item_gre_mask;
1376 		size = sizeof(struct rte_flow_item_gre);
1377 		break;
1378 	default:
1379 		return -EINVAL;
1380 	}
1381 
1382 	memcpy(mask, mask_support, size);
1383 
1384 	for (i = 0; i < size; i++)
1385 		mask[i] = (mask[i] | mask_src[i]);
1386 
1387 	if (memcmp(mask, mask_support, size))
1388 		return -1;
1389 
1390 	return 0;
1391 }
1392 
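/* Identify the frame type via a FAF bit: add the FAF byte extract if
 * it is not present yet, then set the bit in the rule.
 */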
1393 static int
1394 dpaa2_flow_identify_by_faf(struct dpaa2_dev_priv *priv,
1395 	struct dpaa2_dev_flow *flow,
1396 	enum dpaa2_rx_faf_offset faf_off,
1397 	enum dpaa2_flow_dist_type dist_type,
1398 	int group, int *recfg)
1399 {
1400 	int ret, index, local_cfg = 0;
1401 	struct dpaa2_key_extract *extract;
1402 	struct dpaa2_key_profile *key_profile;
1403 	uint8_t faf_byte = faf_off / 8;
1404 
1405 	if (dist_type & DPAA2_FLOW_QOS_TYPE) {
1406 		extract = &priv->extract.qos_key_extract;
1407 		key_profile = &extract->key_profile;
1408 
1409 		index = dpaa2_flow_extract_search(key_profile,
1410 				DPAA2_FAF_KEY, NET_PROT_NONE, faf_byte);
1411 		if (index < 0) {
1412 			ret = dpaa2_flow_faf_add_hdr(faf_byte,
1413 					priv, DPAA2_FLOW_QOS_TYPE, group,
1414 					NULL);
1415 			if (ret) {
1416 				DPAA2_PMD_ERR("QoS faf extract add failed");
1417 
1418 				return -EINVAL;
1419 			}
1420 			local_cfg |= DPAA2_FLOW_QOS_TYPE;
1421 		}
1422 
1423 		ret = dpaa2_flow_faf_add_rule(priv, flow, faf_off, group,
1424 				DPAA2_FLOW_QOS_TYPE);
1425 		if (ret) {
1426 			DPAA2_PMD_ERR("QoS faf rule set failed");
1427 			return -EINVAL;
1428 		}
1429 	}
1430 
1431 	if (dist_type & DPAA2_FLOW_FS_TYPE) {
1432 		extract = &priv->extract.tc_key_extract[group];
1433 		key_profile = &extract->key_profile;
1434 
1435 		index = dpaa2_flow_extract_search(key_profile,
1436 				DPAA2_FAF_KEY, NET_PROT_NONE, faf_byte);
1437 		if (index < 0) {
1438 			ret = dpaa2_flow_faf_add_hdr(faf_byte,
1439 					priv, DPAA2_FLOW_FS_TYPE, group,
1440 					NULL);
1441 			if (ret) {
1442 				DPAA2_PMD_ERR("FS[%d] faf extract add failed",
1443 					group);
1444 
1445 				return -EINVAL;
1446 			}
1447 			local_cfg |= DPAA2_FLOW_FS_TYPE;
1448 		}
1449 
1450 		ret = dpaa2_flow_faf_add_rule(priv, flow, faf_off, group,
1451 				DPAA2_FLOW_FS_TYPE);
1452 		if (ret) {
1453 			DPAA2_PMD_ERR("FS[%d] faf rule set failed",
1454 				group);
1455 			return -EINVAL;
1456 		}
1457 	}
1458 
1459 	if (recfg)
1460 		*recfg |= local_cfg;
1461 
1462 	return 0;
1463 }
1464 
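/* Ensure the header field extract exists, then fill the rule data. */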
1465 static int
1466 dpaa2_flow_add_hdr_extract_rule(struct dpaa2_dev_flow *flow,
1467 	enum net_prot prot, uint32_t field,
1468 	const void *key, const void *mask, int size,
1469 	struct dpaa2_dev_priv *priv, int tc_id, int *recfg,
1470 	enum dpaa2_flow_dist_type dist_type)
1471 {
1472 	int index, ret, local_cfg = 0;
1473 	struct dpaa2_key_extract *key_extract;
1474 	struct dpaa2_key_profile *key_profile;
1475 
1476 	if (dpaa2_flow_ip_address_extract(prot, field))
1477 		return -EINVAL;
1478 
1479 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
1480 		key_extract = &priv->extract.qos_key_extract;
1481 	else
1482 		key_extract = &priv->extract.tc_key_extract[tc_id];
1483 
1484 	key_profile = &key_extract->key_profile;
1485 
1486 	index = dpaa2_flow_extract_search(key_profile,
1487 			DPAA2_NET_PROT_KEY, prot, field);
1488 	if (index < 0) {
1489 		ret = dpaa2_flow_extract_add_hdr(prot,
1490 				field, size, priv,
1491 				dist_type, tc_id, NULL);
1492 		if (ret) {
1493 			DPAA2_PMD_ERR("Extract P(%d)/F(%d) add failed",
1494 				prot, field);
1495 
1496 			return ret;
1497 		}
1498 		local_cfg |= dist_type;
1499 	}
1500 
1501 	ret = dpaa2_flow_hdr_rule_data_set(flow, key_profile,
1502 			prot, field, size, key, mask, dist_type);
1503 	if (ret) {
1504 		DPAA2_PMD_ERR("P(%d)/F(%d) rule data set failed",
1505 			prot, field);
1506 
1507 		return ret;
1508 	}
1509 
1510 	if (recfg)
1511 		*recfg |= local_cfg;
1512 
1513 	return 0;
1514 }
1515 
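/* IP address extracts always live at the tail of the key, so adding
 * one only grows the key and never shifts the other fields.
 */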
1516 static int
1517 dpaa2_flow_add_ipaddr_extract_rule(struct dpaa2_dev_flow *flow,
1518 	enum net_prot prot, uint32_t field,
1519 	const void *key, const void *mask, int size,
1520 	struct dpaa2_dev_priv *priv, int tc_id, int *recfg,
1521 	enum dpaa2_flow_dist_type dist_type)
1522 {
1523 	int local_cfg = 0, num, ipaddr_extract_len = 0;
1524 	struct dpaa2_key_extract *key_extract;
1525 	struct dpaa2_key_profile *key_profile;
1526 	struct dpkg_profile_cfg *dpkg;
1527 	uint8_t *key_addr, *mask_addr;
1528 	union ip_addr_extract_rule *ip_addr_data;
1529 	union ip_addr_extract_rule *ip_addr_mask;
1530 	enum net_prot orig_prot;
1531 	uint32_t orig_field;
1532 
1533 	if (prot != NET_PROT_IPV4 && prot != NET_PROT_IPV6)
1534 		return -EINVAL;
1535 
1536 	if (prot == NET_PROT_IPV4 && field != NH_FLD_IPV4_SRC_IP &&
1537 		field != NH_FLD_IPV4_DST_IP) {
1538 		return -EINVAL;
1539 	}
1540 
1541 	if (prot == NET_PROT_IPV6 && field != NH_FLD_IPV6_SRC_IP &&
1542 		field != NH_FLD_IPV6_DST_IP) {
1543 		return -EINVAL;
1544 	}
1545 
1546 	orig_prot = prot;
1547 	orig_field = field;
1548 
1549 	if (prot == NET_PROT_IPV4 &&
1550 		field == NH_FLD_IPV4_SRC_IP) {
1551 		prot = NET_PROT_IP;
1552 		field = NH_FLD_IP_SRC;
1553 	} else if (prot == NET_PROT_IPV4 &&
1554 		field == NH_FLD_IPV4_DST_IP) {
1555 		prot = NET_PROT_IP;
1556 		field = NH_FLD_IP_DST;
1557 	} else if (prot == NET_PROT_IPV6 &&
1558 		field == NH_FLD_IPV6_SRC_IP) {
1559 		prot = NET_PROT_IP;
1560 		field = NH_FLD_IP_SRC;
1561 	} else if (prot == NET_PROT_IPV6 &&
1562 		field == NH_FLD_IPV6_DST_IP) {
1563 		prot = NET_PROT_IP;
1564 		field = NH_FLD_IP_DST;
1565 	} else {
1566 		DPAA2_PMD_ERR("Invalid P(%d)/F(%d) to extract IP address",
1567 			prot, field);
1568 		return -EINVAL;
1569 	}
1570 
1571 	if (dist_type == DPAA2_FLOW_QOS_TYPE) {
1572 		key_extract = &priv->extract.qos_key_extract;
1573 		key_profile = &key_extract->key_profile;
1574 		dpkg = &key_extract->dpkg;
1575 		num = key_profile->num;
1576 		key_addr = flow->qos_key_addr;
1577 		mask_addr = flow->qos_mask_addr;
1578 	} else {
1579 		key_extract = &priv->extract.tc_key_extract[tc_id];
1580 		key_profile = &key_extract->key_profile;
1581 		dpkg = &key_extract->dpkg;
1582 		num = key_profile->num;
1583 		key_addr = flow->fs_key_addr;
1584 		mask_addr = flow->fs_mask_addr;
1585 	}
1586 
1587 	if (num >= DPKG_MAX_NUM_OF_EXTRACTS) {
1588 		DPAA2_PMD_ERR("Number of extracts overflows");
1589 		return -EINVAL;
1590 	}
1591 
1592 	if (key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT) {
1593 		if (field == NH_FLD_IP_SRC)
1594 			key_profile->ip_addr_type = IP_SRC_EXTRACT;
1595 		else
1596 			key_profile->ip_addr_type = IP_DST_EXTRACT;
1597 		ipaddr_extract_len = size;
1598 
1599 		key_profile->ip_addr_extract_pos = num;
1600 		if (num > 0) {
1601 			key_profile->ip_addr_extract_off =
1602 				key_profile->key_offset[num - 1] +
1603 				key_profile->key_size[num - 1];
1604 		} else {
1605 			key_profile->ip_addr_extract_off = 0;
1606 		}
1607 		key_profile->key_max_size += NH_FLD_IPV6_ADDR_SIZE;
1608 	} else if (key_profile->ip_addr_type == IP_SRC_EXTRACT) {
1609 		if (field == NH_FLD_IP_SRC) {
1610 			ipaddr_extract_len = size;
1611 			goto rule_configure;
1612 		}
1613 		key_profile->ip_addr_type = IP_SRC_DST_EXTRACT;
1614 		ipaddr_extract_len = size * 2;
1615 		key_profile->key_max_size += NH_FLD_IPV6_ADDR_SIZE;
1616 	} else if (key_profile->ip_addr_type == IP_DST_EXTRACT) {
1617 		if (field == NH_FLD_IP_DST) {
1618 			ipaddr_extract_len = size;
1619 			goto rule_configure;
1620 		}
1621 		key_profile->ip_addr_type = IP_DST_SRC_EXTRACT;
1622 		ipaddr_extract_len = size * 2;
1623 		key_profile->key_max_size += NH_FLD_IPV6_ADDR_SIZE;
1624 	}
1625 	key_profile->num++;
1626 	key_profile->prot_field[num].type = DPAA2_NET_PROT_KEY;
1627 
1628 	dpkg->extracts[num].extract.from_hdr.prot = prot;
1629 	dpkg->extracts[num].extract.from_hdr.field = field;
1630 	dpkg->extracts[num].extract.from_hdr.type = DPKG_FULL_FIELD;
1631 	dpkg->num_extracts++;
1632 
1633 	if (dist_type == DPAA2_FLOW_QOS_TYPE)
1634 		local_cfg = DPAA2_FLOW_QOS_TYPE;
1635 	else
1636 		local_cfg = DPAA2_FLOW_FS_TYPE;
1637 
1638 rule_configure:
1639 	key_addr += key_profile->ip_addr_extract_off;
1640 	ip_addr_data = (union ip_addr_extract_rule *)key_addr;
1641 	mask_addr += key_profile->ip_addr_extract_off;
1642 	ip_addr_mask = (union ip_addr_extract_rule *)mask_addr;
1643 
1644 	if (orig_prot == NET_PROT_IPV4 &&
1645 		orig_field == NH_FLD_IPV4_SRC_IP) {
1646 		if (key_profile->ip_addr_type == IP_SRC_EXTRACT ||
1647 			key_profile->ip_addr_type == IP_SRC_DST_EXTRACT) {
1648 			memcpy(&ip_addr_data->ipv4_sd_addr.ipv4_src,
1649 				key, size);
1650 			memcpy(&ip_addr_mask->ipv4_sd_addr.ipv4_src,
1651 				mask, size);
1652 		} else {
1653 			memcpy(&ip_addr_data->ipv4_ds_addr.ipv4_src,
1654 				key, size);
1655 			memcpy(&ip_addr_mask->ipv4_ds_addr.ipv4_src,
1656 				mask, size);
1657 		}
1658 	} else if (orig_prot == NET_PROT_IPV4 &&
1659 		orig_field == NH_FLD_IPV4_DST_IP) {
1660 		if (key_profile->ip_addr_type == IP_DST_EXTRACT ||
1661 			key_profile->ip_addr_type == IP_DST_SRC_EXTRACT) {
1662 			memcpy(&ip_addr_data->ipv4_ds_addr.ipv4_dst,
1663 				key, size);
1664 			memcpy(&ip_addr_mask->ipv4_ds_addr.ipv4_dst,
1665 				mask, size);
1666 		} else {
1667 			memcpy(&ip_addr_data->ipv4_sd_addr.ipv4_dst,
1668 				key, size);
1669 			memcpy(&ip_addr_mask->ipv4_sd_addr.ipv4_dst,
1670 				mask, size);
1671 		}
1672 	} else if (orig_prot == NET_PROT_IPV6 &&
1673 		orig_field == NH_FLD_IPV6_SRC_IP) {
1674 		if (key_profile->ip_addr_type == IP_SRC_EXTRACT ||
1675 			key_profile->ip_addr_type == IP_SRC_DST_EXTRACT) {
1676 			memcpy(ip_addr_data->ipv6_sd_addr.ipv6_src,
1677 				key, size);
1678 			memcpy(ip_addr_mask->ipv6_sd_addr.ipv6_src,
1679 				mask, size);
1680 		} else {
1681 			memcpy(ip_addr_data->ipv6_ds_addr.ipv6_src,
1682 				key, size);
1683 			memcpy(ip_addr_mask->ipv6_ds_addr.ipv6_src,
1684 				mask, size);
1685 		}
1686 	} else if (orig_prot == NET_PROT_IPV6 &&
1687 		orig_field == NH_FLD_IPV6_DST_IP) {
1688 		if (key_profile->ip_addr_type == IP_DST_EXTRACT ||
1689 			key_profile->ip_addr_type == IP_DST_SRC_EXTRACT) {
1690 			memcpy(ip_addr_data->ipv6_ds_addr.ipv6_dst,
1691 				key, size);
1692 			memcpy(ip_addr_mask->ipv6_ds_addr.ipv6_dst,
1693 				mask, size);
1694 		} else {
1695 			memcpy(ip_addr_data->ipv6_sd_addr.ipv6_dst,
1696 				key, size);
1697 			memcpy(ip_addr_mask->ipv6_sd_addr.ipv6_dst,
1698 				mask, size);
1699 		}
1700 	}
1701 
1702 	if (dist_type == DPAA2_FLOW_QOS_TYPE) {
1703 		flow->qos_rule_size =
1704 			key_profile->ip_addr_extract_off + ipaddr_extract_len;
1705 	} else {
1706 		flow->fs_rule_size =
1707 			key_profile->ip_addr_extract_off + ipaddr_extract_len;
1708 	}
1709 
1710 	if (recfg)
1711 		*recfg |= local_cfg;
1712 
1713 	return 0;
1714 }
1715 
1716 static int
1717 dpaa2_configure_flow_eth(struct dpaa2_dev_flow *flow,
1718 	struct rte_eth_dev *dev,
1719 	const struct rte_flow_attr *attr,
1720 	const struct rte_flow_item *pattern,
1721 	const struct rte_flow_action actions[] __rte_unused,
1722 	struct rte_flow_error *error __rte_unused,
1723 	int *device_configured)
1724 {
1725 	int ret, local_cfg = 0;
1726 	uint32_t group;
1727 	const struct rte_flow_item_eth *spec, *mask;
1728 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1729 	const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0};
1730 
1731 	group = attr->group;
1732 
1733 	/* Parse pattern list to get the matching parameters */
1734 	spec = pattern->spec;
1735 	mask = pattern->mask ?
1736 			pattern->mask : &dpaa2_flow_item_eth_mask;
1737 
1738 	/* Get traffic class index and flow id to be configured */
1739 	flow->tc_id = group;
1740 	flow->tc_index = attr->priority;
1741 
1742 	if (!spec) {
1743 		ret = dpaa2_flow_identify_by_faf(priv, flow,
1744 				FAF_ETH_FRAM, DPAA2_FLOW_QOS_TYPE,
1745 				group, &local_cfg);
1746 		if (ret)
1747 			return ret;
1748 
1749 		ret = dpaa2_flow_identify_by_faf(priv, flow,
1750 				FAF_ETH_FRAM, DPAA2_FLOW_FS_TYPE,
1751 				group, &local_cfg);
1752 		if (ret)
1753 			return ret;
1754 
1755 		(*device_configured) |= local_cfg;
1756 		return 0;
1757 	}
1758 
1759 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
1760 		RTE_FLOW_ITEM_TYPE_ETH)) {
1761 		DPAA2_PMD_WARN("Extract field(s) of Ethernet not supported");
1762 
1763 		return -EINVAL;
1764 	}
1765 
1766 	if (memcmp((const char *)&mask->src,
1767 		zero_cmp, RTE_ETHER_ADDR_LEN)) {
1768 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH,
1769 			NH_FLD_ETH_SA, &spec->src.addr_bytes,
1770 			&mask->src.addr_bytes, RTE_ETHER_ADDR_LEN,
1771 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
1772 		if (ret)
1773 			return ret;
1774 
1775 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH,
1776 			NH_FLD_ETH_SA, &spec->src.addr_bytes,
1777 			&mask->src.addr_bytes, RTE_ETHER_ADDR_LEN,
1778 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
1779 		if (ret)
1780 			return ret;
1781 	}
1782 
1783 	if (memcmp((const char *)&mask->dst,
1784 		zero_cmp, RTE_ETHER_ADDR_LEN)) {
1785 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH,
1786 			NH_FLD_ETH_DA, &spec->dst.addr_bytes,
1787 			&mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN,
1788 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
1789 		if (ret)
1790 			return ret;
1791 
1792 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH,
1793 			NH_FLD_ETH_DA, &spec->dst.addr_bytes,
1794 			&mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN,
1795 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
1796 		if (ret)
1797 			return ret;
1798 	}
1799 
1800 	if (memcmp((const char *)&mask->type,
1801 		zero_cmp, sizeof(rte_be16_t))) {
1802 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH,
1803 			NH_FLD_ETH_TYPE, &spec->type,
1804 			&mask->type, sizeof(rte_be16_t),
1805 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
1806 		if (ret)
1807 			return ret;
1808 
1809 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH,
1810 			NH_FLD_ETH_TYPE, &spec->type,
1811 			&mask->type, sizeof(rte_be16_t),
1812 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
1813 		if (ret)
1814 			return ret;
1815 	}
1816 
1817 	(*device_configured) |= local_cfg;
1818 
1819 	return 0;
1820 }
1821 
1822 static int
1823 dpaa2_configure_flow_vlan(struct dpaa2_dev_flow *flow,
1824 	struct rte_eth_dev *dev,
1825 	const struct rte_flow_attr *attr,
1826 	const struct rte_flow_item *pattern,
1827 	const struct rte_flow_action actions[] __rte_unused,
1828 	struct rte_flow_error *error __rte_unused,
1829 	int *device_configured)
1830 {
1831 	int ret, local_cfg = 0;
1832 	uint32_t group;
1833 	const struct rte_flow_item_vlan *spec, *mask;
1834 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1835 
1836 	group = attr->group;
1837 
1838 	/* Parse pattern list to get the matching parameters */
1839 	spec = pattern->spec;
1840 	mask = pattern->mask ? pattern->mask : &dpaa2_flow_item_vlan_mask;
1841 
1842 	/* Get traffic class index and flow id to be configured */
1843 	flow->tc_id = group;
1844 	flow->tc_index = attr->priority;
1845 
1846 	if (!spec) {
1847 		ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_VLAN_FRAM,
1848 						 DPAA2_FLOW_QOS_TYPE, group,
1849 						 &local_cfg);
1850 		if (ret)
1851 			return ret;
1852 
1853 		ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_VLAN_FRAM,
1854 						 DPAA2_FLOW_FS_TYPE, group,
1855 						 &local_cfg);
1856 		if (ret)
1857 			return ret;
1858 
1859 		(*device_configured) |= local_cfg;
1860 		return 0;
1861 	}
1862 
1863 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
1864 				       RTE_FLOW_ITEM_TYPE_VLAN)) {
1865 		DPAA2_PMD_WARN("Extract field(s) of VLAN not supported.");
1866 		return -EINVAL;
1867 	}
1868 
1869 	if (!mask->tci)
1870 		return 0;
1871 
1872 	ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_VLAN,
1873 					      NH_FLD_VLAN_TCI, &spec->tci,
1874 					      &mask->tci, sizeof(rte_be16_t),
1875 					      priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
1876 	if (ret)
1877 		return ret;
1878 
1879 	ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_VLAN,
1880 					      NH_FLD_VLAN_TCI, &spec->tci,
1881 					      &mask->tci, sizeof(rte_be16_t),
1882 					      priv, group, &local_cfg,
1883 					      DPAA2_FLOW_FS_TYPE);
1884 	if (ret)
1885 		return ret;
1886 
1887 	(*device_configured) |= local_cfg;
1888 	return 0;
1889 }
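
/*
 * For example, the VLAN path above can be exercised from testpmd with a
 * rule such as (illustrative port and queue numbers):
 *
 *	flow create 0 ingress pattern eth / vlan tci is 100 / end
 *		actions queue index 1 / end
 */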
1890 
1891 static int
1892 dpaa2_configure_flow_ipv4(struct dpaa2_dev_flow *flow, struct rte_eth_dev *dev,
1893 			  const struct rte_flow_attr *attr,
1894 			  const struct rte_flow_item *pattern,
1895 			  const struct rte_flow_action actions[] __rte_unused,
1896 			  struct rte_flow_error *error __rte_unused,
1897 			  int *device_configured)
1898 {
1899 	int ret, local_cfg = 0;
1900 	uint32_t group;
1901 	const struct rte_flow_item_ipv4 *spec_ipv4 = NULL, *mask_ipv4 = NULL;
1902 	const void *key, *mask;
1903 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1904 	int size;
1905 
1906 	group = attr->group;
1907 
1908 	/* Parse pattern list to get the matching parameters */
1909 	spec_ipv4 = pattern->spec;
1910 	mask_ipv4 = pattern->mask ?
1911 		    pattern->mask : &dpaa2_flow_item_ipv4_mask;
1912 
1913 	/* Get traffic class index and flow id to be configured */
1914 	flow->tc_id = group;
1915 	flow->tc_index = attr->priority;
1916 
1917 	ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_IPV4_FRAM,
1918 					 DPAA2_FLOW_QOS_TYPE, group,
1919 					 &local_cfg);
1920 	if (ret)
1921 		return ret;
1922 
1923 	ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_IPV4_FRAM,
1924 					 DPAA2_FLOW_FS_TYPE, group, &local_cfg);
1925 	if (ret)
1926 		return ret;
1927 
1928 	if (!spec_ipv4) {
1929 		(*device_configured) |= local_cfg;
1930 		return 0;
1931 	}
1932 
1933 	if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv4,
1934 				       RTE_FLOW_ITEM_TYPE_IPV4)) {
1935 		DPAA2_PMD_WARN("Extract field(s) of IPv4 not supported.");
1936 		return -EINVAL;
1937 	}
1938 
1939 	if (mask_ipv4->hdr.src_addr) {
1940 		key = &spec_ipv4->hdr.src_addr;
1941 		mask = &mask_ipv4->hdr.src_addr;
1942 		size = sizeof(rte_be32_t);
1943 
1944 		ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV4,
1945 							 NH_FLD_IPV4_SRC_IP,
1946 							 key, mask, size, priv,
1947 							 group, &local_cfg,
1948 							 DPAA2_FLOW_QOS_TYPE);
1949 		if (ret)
1950 			return ret;
1951 
1952 		ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV4,
1953 							 NH_FLD_IPV4_SRC_IP,
1954 							 key, mask, size, priv,
1955 							 group, &local_cfg,
1956 							 DPAA2_FLOW_FS_TYPE);
1957 		if (ret)
1958 			return ret;
1959 	}
1960 
1961 	if (mask_ipv4->hdr.dst_addr) {
1962 		key = &spec_ipv4->hdr.dst_addr;
1963 		mask = &mask_ipv4->hdr.dst_addr;
1964 		size = sizeof(rte_be32_t);
1965 
1966 		ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV4,
1967 							 NH_FLD_IPV4_DST_IP,
1968 							 key, mask, size, priv,
1969 							 group, &local_cfg,
1970 							 DPAA2_FLOW_QOS_TYPE);
1971 		if (ret)
1972 			return ret;
1973 		ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV4,
1974 							 NH_FLD_IPV4_DST_IP,
1975 							 key, mask, size, priv,
1976 							 group, &local_cfg,
1977 							 DPAA2_FLOW_FS_TYPE);
1978 		if (ret)
1979 			return ret;
1980 	}
1981 
1982 	if (mask_ipv4->hdr.next_proto_id) {
1983 		key = &spec_ipv4->hdr.next_proto_id;
1984 		mask = &mask_ipv4->hdr.next_proto_id;
1985 		size = sizeof(uint8_t);
1986 
1987 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IP,
1988 						      NH_FLD_IP_PROTO, key,
1989 						      mask, size, priv, group,
1990 						      &local_cfg,
1991 						      DPAA2_FLOW_QOS_TYPE);
1992 		if (ret)
1993 			return ret;
1994 
1995 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IP,
1996 						      NH_FLD_IP_PROTO, key,
1997 						      mask, size, priv, group,
1998 						      &local_cfg,
1999 						      DPAA2_FLOW_FS_TYPE);
2000 		if (ret)
2001 			return ret;
2002 	}
2003 
2004 	(*device_configured) |= local_cfg;
2005 	return 0;
2006 }
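
/*
 * Note that the FAF "IPv4 frame" match is programmed even when no spec is
 * given, so a bare "ipv4" item classifies all IPv4 traffic. A prefix match
 * is expressed through the mask, e.g. 192.168.1.0/24 (illustrative values):
 *
 *	struct rte_flow_item_ipv4 ipv4_spec = {
 *		.hdr.src_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 0)),
 *	};
 *	struct rte_flow_item_ipv4 ipv4_mask = {
 *		.hdr.src_addr = RTE_BE32(0xffffff00),
 *	};
 */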
2007 
2008 static int
2009 dpaa2_configure_flow_ipv6(struct dpaa2_dev_flow *flow, struct rte_eth_dev *dev,
2010 			  const struct rte_flow_attr *attr,
2011 			  const struct rte_flow_item *pattern,
2012 			  const struct rte_flow_action actions[] __rte_unused,
2013 			  struct rte_flow_error *error __rte_unused,
2014 			  int *device_configured)
2015 {
2016 	int ret, local_cfg = 0;
2017 	uint32_t group;
2018 	const struct rte_flow_item_ipv6 *spec_ipv6 = NULL, *mask_ipv6 = NULL;
2019 	const void *key, *mask;
2020 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2021 	const char zero_cmp[NH_FLD_IPV6_ADDR_SIZE] = {0};
2022 	int size;
2023 
2024 	group = attr->group;
2025 
2026 	/* Parse pattern list to get the matching parameters */
2027 	spec_ipv6 = pattern->spec;
2028 	mask_ipv6 = pattern->mask ? pattern->mask : &dpaa2_flow_item_ipv6_mask;
2029 
2030 	/* Get traffic class index and flow id to be configured */
2031 	flow->tc_id = group;
2032 	flow->tc_index = attr->priority;
2033 
2034 	ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_IPV6_FRAM,
2035 					 DPAA2_FLOW_QOS_TYPE, group,
2036 					 &local_cfg);
2037 	if (ret)
2038 		return ret;
2039 
2040 	ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_IPV6_FRAM,
2041 					 DPAA2_FLOW_FS_TYPE, group, &local_cfg);
2042 	if (ret)
2043 		return ret;
2044 
2045 	if (!spec_ipv6) {
2046 		(*device_configured) |= local_cfg;
2047 		return 0;
2048 	}
2049 
2050 	if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv6,
2051 				       RTE_FLOW_ITEM_TYPE_IPV6)) {
2052 		DPAA2_PMD_WARN("Extract field(s) of IPv6 not supported.");
2053 		return -EINVAL;
2054 	}
2055 
2056 	if (memcmp((const char *)&mask_ipv6->hdr.src_addr, zero_cmp, NH_FLD_IPV6_ADDR_SIZE)) {
2057 		key = &spec_ipv6->hdr.src_addr;
2058 		mask = &mask_ipv6->hdr.src_addr;
2059 		size = NH_FLD_IPV6_ADDR_SIZE;
2060 
2061 		ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV6,
2062 							 NH_FLD_IPV6_SRC_IP,
2063 							 key, mask, size, priv,
2064 							 group, &local_cfg,
2065 							 DPAA2_FLOW_QOS_TYPE);
2066 		if (ret)
2067 			return ret;
2068 
2069 		ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV6,
2070 							 NH_FLD_IPV6_SRC_IP,
2071 							 key, mask, size, priv,
2072 							 group, &local_cfg,
2073 							 DPAA2_FLOW_FS_TYPE);
2074 		if (ret)
2075 			return ret;
2076 	}
2077 
2078 	if (memcmp((const char *)&mask_ipv6->hdr.dst_addr, zero_cmp, NH_FLD_IPV6_ADDR_SIZE)) {
2079 		key = &spec_ipv6->hdr.dst_addr;
2080 		mask = &mask_ipv6->hdr.dst_addr;
2081 		size = NH_FLD_IPV6_ADDR_SIZE;
2082 
2083 		ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV6,
2084 							 NH_FLD_IPV6_DST_IP,
2085 							 key, mask, size, priv,
2086 							 group, &local_cfg,
2087 							 DPAA2_FLOW_QOS_TYPE);
2088 		if (ret)
2089 			return ret;
2090 
2091 		ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV6,
2092 							 NH_FLD_IPV6_DST_IP,
2093 							 key, mask, size, priv,
2094 							 group, &local_cfg,
2095 							 DPAA2_FLOW_FS_TYPE);
2096 		if (ret)
2097 			return ret;
2098 	}
2099 
2100 	if (mask_ipv6->hdr.proto) {
2101 		key = &spec_ipv6->hdr.proto;
2102 		mask = &mask_ipv6->hdr.proto;
2103 		size = sizeof(uint8_t);
2104 
2105 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IP,
2106 						      NH_FLD_IP_PROTO, key,
2107 						      mask, size, priv, group,
2108 						      &local_cfg,
2109 						      DPAA2_FLOW_QOS_TYPE);
2110 		if (ret)
2111 			return ret;
2112 
2113 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IP,
2114 						      NH_FLD_IP_PROTO, key,
2115 						      mask, size, priv, group,
2116 						      &local_cfg,
2117 						      DPAA2_FLOW_FS_TYPE);
2118 		if (ret)
2119 			return ret;
2120 	}
2121 
2122 	(*device_configured) |= local_cfg;
2123 	return 0;
2124 }
2125 
2126 static int
2127 dpaa2_configure_flow_icmp(struct dpaa2_dev_flow *flow,
2128 	struct rte_eth_dev *dev,
2129 	const struct rte_flow_attr *attr,
2130 	const struct rte_flow_item *pattern,
2131 	const struct rte_flow_action actions[] __rte_unused,
2132 	struct rte_flow_error *error __rte_unused,
2133 	int *device_configured)
2134 {
2135 	int ret, local_cfg = 0;
2136 	uint32_t group;
2137 	const struct rte_flow_item_icmp *spec, *mask;
2138 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2139 
2140 	group = attr->group;
2141 
2142 	/* Parse pattern list to get the matching parameters */
2143 	spec = pattern->spec;
2144 	mask = pattern->mask ?
2145 		pattern->mask : &dpaa2_flow_item_icmp_mask;
2146 
2147 	/* Get traffic class index and flow id to be configured */
2148 	flow->tc_id = group;
2149 	flow->tc_index = attr->priority;
2150 
2151 	if (!spec) {
2152 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2153 				FAF_ICMP_FRAM, DPAA2_FLOW_QOS_TYPE,
2154 				group, &local_cfg);
2155 		if (ret)
2156 			return ret;
2157 
2158 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2159 				FAF_ICMP_FRAM, DPAA2_FLOW_FS_TYPE,
2160 				group, &local_cfg);
2161 		if (ret)
2162 			return ret;
2163 
2164 		(*device_configured) |= local_cfg;
2165 		return 0;
2166 	}
2167 
2168 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2169 		RTE_FLOW_ITEM_TYPE_ICMP)) {
2170 		DPAA2_PMD_WARN("Extract field(s) of ICMP not supported.");
2171 
2172 		return -EINVAL;
2173 	}
2174 
2175 	if (mask->hdr.icmp_type) {
2176 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ICMP,
2177 			NH_FLD_ICMP_TYPE, &spec->hdr.icmp_type,
2178 			&mask->hdr.icmp_type, sizeof(uint8_t),
2179 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2180 		if (ret)
2181 			return ret;
2182 
2183 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ICMP,
2184 			NH_FLD_ICMP_TYPE, &spec->hdr.icmp_type,
2185 			&mask->hdr.icmp_type, sizeof(uint8_t),
2186 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2187 		if (ret)
2188 			return ret;
2189 	}
2190 
2191 	if (mask->hdr.icmp_code) {
2192 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ICMP,
2193 			NH_FLD_ICMP_CODE, &spec->hdr.icmp_code,
2194 			&mask->hdr.icmp_code, sizeof(uint8_t),
2195 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2196 		if (ret)
2197 			return ret;
2198 
2199 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ICMP,
2200 			NH_FLD_ICMP_CODE, &spec->hdr.icmp_code,
2201 			&mask->hdr.icmp_code, sizeof(uint8_t),
2202 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2203 		if (ret)
2204 			return ret;
2205 	}
2206 
2207 	(*device_configured) |= local_cfg;
2208 
2209 	return 0;
2210 }
2211 
2212 static int
2213 dpaa2_configure_flow_udp(struct dpaa2_dev_flow *flow,
2214 	struct rte_eth_dev *dev,
2215 	const struct rte_flow_attr *attr,
2216 	const struct rte_flow_item *pattern,
2217 	const struct rte_flow_action actions[] __rte_unused,
2218 	struct rte_flow_error *error __rte_unused,
2219 	int *device_configured)
2220 {
2221 	int ret, local_cfg = 0;
2222 	uint32_t group;
2223 	const struct rte_flow_item_udp *spec, *mask;
2224 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2225 
2226 	group = attr->group;
2227 
2228 	/* Parse pattern list to get the matching parameters */
2229 	spec = pattern->spec;
2230 	mask = pattern->mask ?
2231 		pattern->mask : &dpaa2_flow_item_udp_mask;
2232 
2233 	/* Get traffic class index and flow id to be configured */
2234 	flow->tc_id = group;
2235 	flow->tc_index = attr->priority;
2236 
2237 	ret = dpaa2_flow_identify_by_faf(priv, flow,
2238 			FAF_UDP_FRAM, DPAA2_FLOW_QOS_TYPE,
2239 			group, &local_cfg);
2240 	if (ret)
2241 		return ret;
2242 
2243 	ret = dpaa2_flow_identify_by_faf(priv, flow,
2244 			FAF_UDP_FRAM, DPAA2_FLOW_FS_TYPE,
2245 			group, &local_cfg);
2246 	if (ret)
2247 		return ret;
2248 
2249 	if (!spec) {
2250 		(*device_configured) |= local_cfg;
2251 		return 0;
2252 	}
2253 
2254 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2255 		RTE_FLOW_ITEM_TYPE_UDP)) {
2256 		DPAA2_PMD_WARN("Extract field(s) of UDP not supported.");
2257 
2258 		return -EINVAL;
2259 	}
2260 
2261 	if (mask->hdr.src_port) {
2262 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_UDP,
2263 			NH_FLD_UDP_PORT_SRC, &spec->hdr.src_port,
2264 			&mask->hdr.src_port, sizeof(rte_be16_t),
2265 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2266 		if (ret)
2267 			return ret;
2268 
2269 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_UDP,
2270 			NH_FLD_UDP_PORT_SRC, &spec->hdr.src_port,
2271 			&mask->hdr.src_port, sizeof(rte_be16_t),
2272 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2273 		if (ret)
2274 			return ret;
2275 	}
2276 
2277 	if (mask->hdr.dst_port) {
2278 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_UDP,
2279 			NH_FLD_UDP_PORT_DST, &spec->hdr.dst_port,
2280 			&mask->hdr.dst_port, sizeof(rte_be16_t),
2281 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2282 		if (ret)
2283 			return ret;
2284 
2285 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_UDP,
2286 			NH_FLD_UDP_PORT_DST, &spec->hdr.dst_port,
2287 			&mask->hdr.dst_port, sizeof(rte_be16_t),
2288 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2289 		if (ret)
2290 			return ret;
2291 	}
2292 
2293 	(*device_configured) |= local_cfg;
2294 
2295 	return 0;
2296 }
2297 
2298 static int
2299 dpaa2_configure_flow_tcp(struct dpaa2_dev_flow *flow,
2300 	struct rte_eth_dev *dev,
2301 	const struct rte_flow_attr *attr,
2302 	const struct rte_flow_item *pattern,
2303 	const struct rte_flow_action actions[] __rte_unused,
2304 	struct rte_flow_error *error __rte_unused,
2305 	int *device_configured)
2306 {
2307 	int ret, local_cfg = 0;
2308 	uint32_t group;
2309 	const struct rte_flow_item_tcp *spec, *mask;
2310 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2311 
2312 	group = attr->group;
2313 
2314 	/* Parse pattern list to get the matching parameters */
2315 	spec = pattern->spec;
2316 	mask = pattern->mask ?
2317 		pattern->mask : &dpaa2_flow_item_tcp_mask;
2318 
2319 	/* Get traffic class index and flow id to be configured */
2320 	flow->tc_id = group;
2321 	flow->tc_index = attr->priority;
2322 
2323 	ret = dpaa2_flow_identify_by_faf(priv, flow,
2324 			FAF_TCP_FRAM, DPAA2_FLOW_QOS_TYPE,
2325 			group, &local_cfg);
2326 	if (ret)
2327 		return ret;
2328 
2329 	ret = dpaa2_flow_identify_by_faf(priv, flow,
2330 			FAF_TCP_FRAM, DPAA2_FLOW_FS_TYPE,
2331 			group, &local_cfg);
2332 	if (ret)
2333 		return ret;
2334 
2335 	if (!spec) {
2336 		(*device_configured) |= local_cfg;
2337 		return 0;
2338 	}
2339 
2340 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2341 		RTE_FLOW_ITEM_TYPE_TCP)) {
2342 		DPAA2_PMD_WARN("Extract field(s) of TCP not supported.");
2343 
2344 		return -EINVAL;
2345 	}
2346 
2347 	if (mask->hdr.src_port) {
2348 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_TCP,
2349 			NH_FLD_TCP_PORT_SRC, &spec->hdr.src_port,
2350 			&mask->hdr.src_port, sizeof(rte_be16_t),
2351 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2352 		if (ret)
2353 			return ret;
2354 
2355 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_TCP,
2356 			NH_FLD_TCP_PORT_SRC, &spec->hdr.src_port,
2357 			&mask->hdr.src_port, sizeof(rte_be16_t),
2358 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2359 		if (ret)
2360 			return ret;
2361 	}
2362 
2363 	if (mask->hdr.dst_port) {
2364 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_TCP,
2365 			NH_FLD_TCP_PORT_DST, &spec->hdr.dst_port,
2366 			&mask->hdr.dst_port, sizeof(rte_be16_t),
2367 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2368 		if (ret)
2369 			return ret;
2370 
2371 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_TCP,
2372 			NH_FLD_TCP_PORT_DST, &spec->hdr.dst_port,
2373 			&mask->hdr.dst_port, sizeof(rte_be16_t),
2374 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2375 		if (ret)
2376 			return ret;
2377 	}
2378 
2379 	(*device_configured) |= local_cfg;
2380 
2381 	return 0;
2382 }
2383 
2384 static int
2385 dpaa2_configure_flow_sctp(struct dpaa2_dev_flow *flow,
2386 	struct rte_eth_dev *dev,
2387 	const struct rte_flow_attr *attr,
2388 	const struct rte_flow_item *pattern,
2389 	const struct rte_flow_action actions[] __rte_unused,
2390 	struct rte_flow_error *error __rte_unused,
2391 	int *device_configured)
2392 {
2393 	int ret, local_cfg = 0;
2394 	uint32_t group;
2395 	const struct rte_flow_item_sctp *spec, *mask;
2396 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2397 
2398 	group = attr->group;
2399 
2400 	/* Parse pattern list to get the matching parameters */
2401 	spec = pattern->spec;
2402 	mask = pattern->mask ?
2403 		pattern->mask : &dpaa2_flow_item_sctp_mask;
2404 
2405 	/* Get traffic class index and flow id to be configured */
2406 	flow->tc_id = group;
2407 	flow->tc_index = attr->priority;
2408 
2409 	ret = dpaa2_flow_identify_by_faf(priv, flow,
2410 			FAF_SCTP_FRAM, DPAA2_FLOW_QOS_TYPE,
2411 			group, &local_cfg);
2412 	if (ret)
2413 		return ret;
2414 
2415 	ret = dpaa2_flow_identify_by_faf(priv, flow,
2416 			FAF_SCTP_FRAM, DPAA2_FLOW_FS_TYPE,
2417 			group, &local_cfg);
2418 	if (ret)
2419 		return ret;
2420 
2421 	if (!spec) {
2422 		(*device_configured) |= local_cfg;
2423 		return 0;
2424 	}
2425 
2426 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2427 		RTE_FLOW_ITEM_TYPE_SCTP)) {
2428 		DPAA2_PMD_WARN("Extract field(s) of SCTP not supported.");
2429 
2430 		return -EINVAL;
2431 	}
2432 
2433 	if (mask->hdr.src_port) {
2434 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_SCTP,
2435 			NH_FLD_SCTP_PORT_SRC, &spec->hdr.src_port,
2436 			&mask->hdr.src_port, sizeof(rte_be16_t),
2437 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2438 		if (ret)
2439 			return ret;
2440 
2441 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_SCTP,
2442 			NH_FLD_SCTP_PORT_SRC, &spec->hdr.src_port,
2443 			&mask->hdr.src_port, sizeof(rte_be16_t),
2444 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2445 		if (ret)
2446 			return ret;
2447 	}
2448 
2449 	if (mask->hdr.dst_port) {
2450 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_SCTP,
2451 			NH_FLD_SCTP_PORT_DST, &spec->hdr.dst_port,
2452 			&mask->hdr.dst_port, sizeof(rte_be16_t),
2453 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2454 		if (ret)
2455 			return ret;
2456 
2457 		ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_SCTP,
2458 			NH_FLD_SCTP_PORT_DST, &spec->hdr.dst_port,
2459 			&mask->hdr.dst_port, sizeof(rte_be16_t),
2460 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2461 		if (ret)
2462 			return ret;
2463 	}
2464 
2465 	(*device_configured) |= local_cfg;
2466 
2467 	return 0;
2468 }
2469 
2470 static int
2471 dpaa2_configure_flow_gre(struct dpaa2_dev_flow *flow,
2472 	struct rte_eth_dev *dev,
2473 	const struct rte_flow_attr *attr,
2474 	const struct rte_flow_item *pattern,
2475 	const struct rte_flow_action actions[] __rte_unused,
2476 	struct rte_flow_error *error __rte_unused,
2477 	int *device_configured)
2478 {
2479 	int ret, local_cfg = 0;
2480 	uint32_t group;
2481 	const struct rte_flow_item_gre *spec, *mask;
2482 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2483 
2484 	group = attr->group;
2485 
2486 	/* Parse pattern list to get the matching parameters */
2487 	spec = pattern->spec;
2488 	mask = pattern->mask ?
2489 		pattern->mask : &dpaa2_flow_item_gre_mask;
2490 
2491 	/* Get traffic class index and flow id to be configured */
2492 	flow->tc_id = group;
2493 	flow->tc_index = attr->priority;
2494 
2495 	if (!spec) {
2496 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2497 				FAF_GRE_FRAM, DPAA2_FLOW_QOS_TYPE,
2498 				group, &local_cfg);
2499 		if (ret)
2500 			return ret;
2501 
2502 		ret = dpaa2_flow_identify_by_faf(priv, flow,
2503 				FAF_GRE_FRAM, DPAA2_FLOW_FS_TYPE,
2504 				group, &local_cfg);
2505 		if (ret)
2506 			return ret;
2507 
2508 		(*device_configured) |= local_cfg;
2509 		return 0;
2510 	}
2511 
2512 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
2513 		RTE_FLOW_ITEM_TYPE_GRE)) {
2514 		DPAA2_PMD_WARN("Extract field(s) of GRE not supported.");
2515 
2516 		return -EINVAL;
2517 	}
2518 
2519 	if (!mask->protocol)
2520 		return 0;
2521 
2522 	ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_GRE,
2523 			NH_FLD_GRE_TYPE, &spec->protocol,
2524 			&mask->protocol, sizeof(rte_be16_t),
2525 			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
2526 	if (ret)
2527 		return ret;
2528 
2529 	ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_GRE,
2530 			NH_FLD_GRE_TYPE, &spec->protocol,
2531 			&mask->protocol, sizeof(rte_be16_t),
2532 			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
2533 	if (ret)
2534 		return ret;
2535 
2536 	(*device_configured) |= local_cfg;
2537 
2538 	return 0;
2539 }
2540 
2541 static int
2542 dpaa2_configure_flow_raw(struct dpaa2_dev_flow *flow,
2543 	struct rte_eth_dev *dev,
2544 	const struct rte_flow_attr *attr,
2545 	const struct rte_flow_item *pattern,
2546 	const struct rte_flow_action actions[] __rte_unused,
2547 	struct rte_flow_error *error __rte_unused,
2548 	int *device_configured)
2549 {
2550 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2551 	const struct rte_flow_item_raw *spec = pattern->spec;
2552 	const struct rte_flow_item_raw *mask = pattern->mask;
2553 	int local_cfg = 0, ret;
2554 	uint32_t group;
2555 	struct dpaa2_key_extract *qos_key_extract;
2556 	struct dpaa2_key_extract *tc_key_extract;
2557 
2558 	/* Need both spec and mask */
2559 	if (!spec || !mask) {
2560 		DPAA2_PMD_ERR("spec or mask not present.");
2561 		return -EINVAL;
2562 	}
2563 
2564 	if (spec->relative) {
2565 		/* TBD: relative offset support.
2566 		 * To support a relative offset from a previous L3 protocol
2567 		 * item, the extracts would need to be expanded to identify
2568 		 * whether the frame is VLAN or non-VLAN.
2569 		 *
2570 		 * To support a relative offset from a previous L4 protocol
2571 		 * item, the extracts would need to identify whether the frame
2572 		 * is VLAN/IPv4, VLAN/IPv6, non-VLAN/IPv4 or non-VLAN/IPv6.
2573 		 */
2574 		DPAA2_PMD_ERR("relative not supported.");
2575 		return -EINVAL;
2576 	}
2577 
2578 	if (spec->search) {
2579 		DPAA2_PMD_ERR("search not supported.");
2580 		return -EINVAL;
2581 	}
2582 
2583 	/* Spec len and mask len should be same */
2584 	if (spec->length != mask->length) {
2585 		DPAA2_PMD_ERR("Spec len and mask len mismatch.");
2586 		return -EINVAL;
2587 	}
2588 
2589 	/* Get traffic class index and flow id to be configured */
2590 	group = attr->group;
2591 	flow->tc_id = group;
2592 	flow->tc_index = attr->priority;
2593 
2594 	qos_key_extract = &priv->extract.qos_key_extract;
2595 	tc_key_extract = &priv->extract.tc_key_extract[group];
2596 
2597 	ret = dpaa2_flow_extract_add_raw(priv,
2598 			spec->offset, spec->length,
2599 			DPAA2_FLOW_QOS_TYPE, 0, &local_cfg);
2600 	if (ret) {
2601 		DPAA2_PMD_ERR("QoS Extract RAW add failed.");
2602 		return -EINVAL;
2603 	}
2604 
2605 	ret = dpaa2_flow_extract_add_raw(priv,
2606 			spec->offset, spec->length,
2607 			DPAA2_FLOW_FS_TYPE, group, &local_cfg);
2608 	if (ret) {
2609 		DPAA2_PMD_ERR("FS[%d] Extract RAW add failed.",
2610 			group);
2611 		return -EINVAL;
2612 	}
2613 
2614 	ret = dpaa2_flow_raw_rule_data_set(flow,
2615 			&qos_key_extract->key_profile,
2616 			spec->offset, spec->length,
2617 			spec->pattern, mask->pattern,
2618 			DPAA2_FLOW_QOS_TYPE);
2619 	if (ret) {
2620 		DPAA2_PMD_ERR("QoS RAW rule data set failed");
2621 		return -EINVAL;
2622 	}
2623 
2624 	ret = dpaa2_flow_raw_rule_data_set(flow,
2625 			&tc_key_extract->key_profile,
2626 			spec->offset, spec->length,
2627 			spec->pattern, mask->pattern,
2628 			DPAA2_FLOW_FS_TYPE);
2629 	if (ret) {
2630 		DPAA2_PMD_ERR("FS RAW rule data set failed");
2631 		return -EINVAL;
2632 	}
2633 
2634 	(*device_configured) |= local_cfg;
2635 
2636 	return 0;
2637 }
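
/*
 * Illustrative RAW item accepted by the function above (offset and bytes
 * are placeholders): spec and mask must both be present, relative/search
 * are rejected, and the two lengths must be equal.
 *
 *	static const uint8_t raw_bytes[] = {0xde, 0xad, 0xbe, 0xef};
 *	static const uint8_t raw_mask_bytes[] = {0xff, 0xff, 0xff, 0xff};
 *	struct rte_flow_item_raw raw_spec = {
 *		.offset = 14, .length = 4, .pattern = raw_bytes,
 *	};
 *	struct rte_flow_item_raw raw_mask = {
 *		.offset = 14, .length = 4, .pattern = raw_mask_bytes,
 *	};
 */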
2638 
2639 static inline int
2640 dpaa2_flow_verify_attr(struct dpaa2_dev_priv *priv,
2641 	const struct rte_flow_attr *attr)
2642 {
2643 	struct dpaa2_dev_flow *curr = LIST_FIRST(&priv->flows);
2644 
2645 	while (curr) {
2646 		if (curr->tc_id == attr->group &&
2647 			curr->tc_index == attr->priority) {
2648 			DPAA2_PMD_ERR("Flow(TC[%d].entry[%d]) already exists",
2649 				attr->group, attr->priority);
2650 
2651 			return -EINVAL;
2652 		}
2653 		curr = LIST_NEXT(curr, next);
2654 	}
2655 
2656 	return 0;
2657 }
2658 
2659 static inline struct rte_eth_dev *
2660 dpaa2_flow_redirect_dev(struct dpaa2_dev_priv *priv,
2661 	const struct rte_flow_action *action)
2662 {
2663 	const struct rte_flow_action_port_id *port_id;
2664 	const struct rte_flow_action_ethdev *ethdev;
2665 	int idx = -1;
2666 	struct rte_eth_dev *dest_dev;
2667 
2668 	if (action->type == RTE_FLOW_ACTION_TYPE_PORT_ID) {
2669 		port_id = action->conf;
2670 		if (!port_id->original)
2671 			idx = port_id->id;
2672 	} else if (action->type == RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT) {
2673 		ethdev = action->conf;
2674 		idx = ethdev->port_id;
2675 	} else {
2676 		return NULL;
2677 	}
2678 
2679 	if (idx >= 0) {
2680 		if (!rte_eth_dev_is_valid_port(idx))
2681 			return NULL;
2682 		if (!rte_pmd_dpaa2_dev_is_dpaa2(idx))
2683 			return NULL;
2684 		dest_dev = &rte_eth_devices[idx];
2685 	} else {
2686 		dest_dev = priv->eth_dev;
2687 	}
2688 
2689 	return dest_dev;
2690 }
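
/*
 * A redirect action resolving through the helper above could look like
 * this (a sketch; port 1 is assumed to be another dpaa2 port):
 *
 *	struct rte_flow_action_ethdev rep = {.port_id = 1};
 *	struct rte_flow_action actions[] = {
 *		{.type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT, .conf = &rep},
 *		{.type = RTE_FLOW_ACTION_TYPE_END},
 *	};
 */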
2691 
2692 static inline int
2693 dpaa2_flow_verify_action(struct dpaa2_dev_priv *priv,
2694 	const struct rte_flow_attr *attr,
2695 	const struct rte_flow_action actions[])
2696 {
2697 	int end_of_list = 0, i, j = 0;
2698 	const struct rte_flow_action_queue *dest_queue;
2699 	const struct rte_flow_action_rss *rss_conf;
2700 	struct dpaa2_queue *rxq;
2701 
2702 	while (!end_of_list) {
2703 		switch (actions[j].type) {
2704 		case RTE_FLOW_ACTION_TYPE_QUEUE:
2705 			dest_queue = actions[j].conf;
2706 			rxq = priv->rx_vq[dest_queue->index];
2707 			if (attr->group != rxq->tc_index) {
2708 				DPAA2_PMD_ERR("FSQ(%d.%d) not in TC[%d]",
2709 					rxq->tc_index, rxq->flow_id,
2710 					attr->group);
2711 
2712 				return -ENOTSUP;
2713 			}
2714 			break;
2715 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
2716 		case RTE_FLOW_ACTION_TYPE_PORT_ID:
2717 			if (!dpaa2_flow_redirect_dev(priv, &actions[j])) {
2718 				DPAA2_PMD_ERR("Invalid port id of action");
2719 				return -ENOTSUP;
2720 			}
2721 			break;
2722 		case RTE_FLOW_ACTION_TYPE_RSS:
2723 			rss_conf = (const struct rte_flow_action_rss *)
2724 					(actions[j].conf);
2725 			if (rss_conf->queue_num > priv->dist_queues) {
2726 				DPAA2_PMD_ERR("RSS number too large");
2727 				return -ENOTSUP;
2728 			}
2729 			for (i = 0; i < (int)rss_conf->queue_num; i++) {
2730 				if (rss_conf->queue[i] >= priv->nb_rx_queues) {
2731 					DPAA2_PMD_ERR("RSS queue not in range");
2732 					return -ENOTSUP;
2733 				}
2734 				rxq = priv->rx_vq[rss_conf->queue[i]];
2735 				if (rxq->tc_index != attr->group) {
2736 					DPAA2_PMD_ERR("RSS queue not in group");
2737 					return -ENOTSUP;
2738 				}
2739 			}
2740 
2741 			break;
2742 		case RTE_FLOW_ACTION_TYPE_END:
2743 			end_of_list = 1;
2744 			break;
2745 		default:
2746 			DPAA2_PMD_ERR("Invalid action type");
2747 			return -ENOTSUP;
2748 		}
2749 		j++;
2750 	}
2751 
2752 	return 0;
2753 }
2754 
2755 static int
2756 dpaa2_configure_flow_fs_action(struct dpaa2_dev_priv *priv,
2757 	struct dpaa2_dev_flow *flow,
2758 	const struct rte_flow_action *rte_action)
2759 {
2760 	struct rte_eth_dev *dest_dev;
2761 	struct dpaa2_dev_priv *dest_priv;
2762 	const struct rte_flow_action_queue *dest_queue;
2763 	struct dpaa2_queue *dest_q;
2764 
2765 	memset(&flow->fs_action_cfg, 0,
2766 		sizeof(struct dpni_fs_action_cfg));
2767 	flow->action_type = rte_action->type;
2768 
2769 	if (flow->action_type == RTE_FLOW_ACTION_TYPE_QUEUE) {
2770 		dest_queue = rte_action->conf;
2771 		dest_q = priv->rx_vq[dest_queue->index];
2772 		flow->fs_action_cfg.flow_id = dest_q->flow_id;
2773 	} else if (flow->action_type == RTE_FLOW_ACTION_TYPE_PORT_ID ||
2774 		   flow->action_type == RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT) {
2775 		dest_dev = dpaa2_flow_redirect_dev(priv, rte_action);
2776 		if (!dest_dev) {
2777 			DPAA2_PMD_ERR("Invalid device to redirect");
2778 			return -EINVAL;
2779 		}
2780 
2781 		dest_priv = dest_dev->data->dev_private;
2782 		dest_q = dest_priv->tx_vq[0];
2783 		flow->fs_action_cfg.options =
2784 			DPNI_FS_OPT_REDIRECT_TO_DPNI_TX;
2785 		flow->fs_action_cfg.redirect_obj_token =
2786 			dest_priv->token;
2787 		flow->fs_action_cfg.flow_id = dest_q->flow_id;
2788 	}
2789 
2790 	return 0;
2791 }
2792 
2793 static inline uint16_t
2794 dpaa2_flow_entry_size(uint16_t key_max_size)
2795 {
2796 	if (key_max_size > DPAA2_FLOW_ENTRY_MAX_SIZE) {
2797 		DPAA2_PMD_ERR("Key size(%d) > max(%d)",
2798 			key_max_size,
2799 			DPAA2_FLOW_ENTRY_MAX_SIZE);
2800 
2801 		return 0;
2802 	}
2803 
2804 	/* Both branches returned the same value: the current MC firmware
2805 	 * only supports the fixed entry size (56 bytes), so any key profile
2806 	 * within the maximum uses a full-size entry.
2807 	 */
2808 	return DPAA2_FLOW_ENTRY_MAX_SIZE;
2809 }
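
/*
 * Worked numbers, assuming DPNI_MAX_KEY_SIZE is 56: every key profile of
 * up to 56 bytes is padded out to a fixed 56-byte table entry, and a
 * profile larger than 56 bytes is rejected (the function returns 0).
 */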
2810 
2811 static inline int
2812 dpaa2_flow_clear_fs_table(struct dpaa2_dev_priv *priv,
2813 	uint8_t tc_id)
2814 {
2815 	struct dpaa2_dev_flow *curr = LIST_FIRST(&priv->flows);
2816 	int need_clear = 0, ret;
2817 	struct fsl_mc_io *dpni = priv->hw;
2818 
2819 	while (curr) {
2820 		if (curr->tc_id == tc_id) {
2821 			need_clear = 1;
2822 			break;
2823 		}
2824 		curr = LIST_NEXT(curr, next);
2825 	}
2826 
2827 	if (need_clear) {
2828 		ret = dpni_clear_fs_entries(dpni, CMD_PRI_LOW,
2829 				priv->token, tc_id);
2830 		if (ret) {
2831 			DPAA2_PMD_ERR("TC[%d] clear failed", tc_id);
2832 			return ret;
2833 		}
2834 	}
2835 
2836 	return 0;
2837 }
2838 
2839 static int
2840 dpaa2_configure_fs_rss_table(struct dpaa2_dev_priv *priv,
2841 	uint8_t tc_id, uint16_t dist_size, int rss_dist)
2842 {
2843 	struct dpaa2_key_extract *tc_extract;
2844 	uint8_t *key_cfg_buf;
2845 	uint64_t key_cfg_iova;
2846 	int ret;
2847 	struct dpni_rx_dist_cfg tc_cfg;
2848 	struct fsl_mc_io *dpni = priv->hw;
2849 	uint16_t entry_size;
2850 	uint16_t key_max_size;
2851 
2852 	ret = dpaa2_flow_clear_fs_table(priv, tc_id);
2853 	if (ret < 0) {
2854 		DPAA2_PMD_ERR("TC[%d] clear failed", tc_id);
2855 		return ret;
2856 	}
2857 
2858 	tc_extract = &priv->extract.tc_key_extract[tc_id];
2859 	key_cfg_buf = priv->extract.tc_extract_param[tc_id];
2860 	key_cfg_iova = DPAA2_VADDR_TO_IOVA(key_cfg_buf);
2861 
2862 	key_max_size = tc_extract->key_profile.key_max_size;
2863 	entry_size = dpaa2_flow_entry_size(key_max_size);
2864 
2865 	dpaa2_flow_fs_extracts_log(priv, tc_id);
2866 	ret = dpkg_prepare_key_cfg(&tc_extract->dpkg,
2867 			key_cfg_buf);
2868 	if (ret < 0) {
2869 		DPAA2_PMD_ERR("TC[%d] prepare key failed", tc_id);
2870 		return ret;
2871 	}
2872 
2873 	memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));
2874 	tc_cfg.dist_size = dist_size;
2875 	tc_cfg.key_cfg_iova = key_cfg_iova;
2876 	if (rss_dist)
2877 		tc_cfg.enable = true;
2878 	else
2879 		tc_cfg.enable = false;
2880 	tc_cfg.tc = tc_id;
2881 	ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW,
2882 			priv->token, &tc_cfg);
2883 	if (ret < 0) {
2884 		if (rss_dist) {
2885 			DPAA2_PMD_ERR("RSS TC[%d] set failed",
2886 				tc_id);
2887 		} else {
2888 			DPAA2_PMD_ERR("FS TC[%d] hash disable failed",
2889 				tc_id);
2890 		}
2891 
2892 		return ret;
2893 	}
2894 
2895 	if (rss_dist)
2896 		return 0;
2897 
2898 	tc_cfg.enable = true;
2899 	tc_cfg.fs_miss_flow_id = dpaa2_flow_miss_flow_id;
2900 	ret = dpni_set_rx_fs_dist(dpni, CMD_PRI_LOW,
2901 			priv->token, &tc_cfg);
2902 	if (ret < 0) {
2903 		DPAA2_PMD_ERR("TC[%d] FS configuration failed", tc_id);
2904 		return ret;
2905 	}
2906 
2907 	ret = dpaa2_flow_rule_add_all(priv, DPAA2_FLOW_FS_TYPE,
2908 			entry_size, tc_id);
2909 	if (ret)
2910 		return ret;
2911 
2912 	return 0;
2913 }
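
/*
 * Sequence performed above: clear the TC's existing FS entries, serialize
 * the key-extract configuration with dpkg_prepare_key_cfg(), enable (RSS)
 * or disable (FS) hash distribution, and, for FS only, enable the FS
 * distribution with the miss flow id and re-add all stored FS rules at
 * the new entry size.
 */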
2914 
2915 static int
2916 dpaa2_configure_qos_table(struct dpaa2_dev_priv *priv,
2917 	int rss_dist)
2918 {
2919 	struct dpaa2_key_extract *qos_extract;
2920 	uint8_t *key_cfg_buf;
2921 	uint64_t key_cfg_iova;
2922 	int ret;
2923 	struct dpni_qos_tbl_cfg qos_cfg;
2924 	struct fsl_mc_io *dpni = priv->hw;
2925 	uint16_t entry_size;
2926 	uint16_t key_max_size;
2927 
2928 	if (!rss_dist && priv->num_rx_tc <= 1) {
2929 		/* QoS table is effective only for FS with multiple TCs or for RSS. */
2930 		return 0;
2931 	}
2932 
2933 	if (LIST_FIRST(&priv->flows)) {
2934 		ret = dpni_clear_qos_table(dpni, CMD_PRI_LOW,
2935 				priv->token);
2936 		if (ret < 0) {
2937 			DPAA2_PMD_ERR("QoS table clear failed");
2938 			return ret;
2939 		}
2940 	}
2941 
2942 	qos_extract = &priv->extract.qos_key_extract;
2943 	key_cfg_buf = priv->extract.qos_extract_param;
2944 	key_cfg_iova = DPAA2_VADDR_TO_IOVA(key_cfg_buf);
2945 
2946 	key_max_size = qos_extract->key_profile.key_max_size;
2947 	entry_size = dpaa2_flow_entry_size(key_max_size);
2948 
2949 	dpaa2_flow_qos_extracts_log(priv);
2950 
2951 	ret = dpkg_prepare_key_cfg(&qos_extract->dpkg,
2952 			key_cfg_buf);
2953 	if (ret < 0) {
2954 		DPAA2_PMD_ERR("QoS prepare extract failed");
2955 		return ret;
2956 	}
2957 	memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
2958 	qos_cfg.keep_entries = true;
2959 	qos_cfg.key_cfg_iova = key_cfg_iova;
2960 	if (rss_dist) {
2961 		qos_cfg.discard_on_miss = true;
2962 	} else {
2963 		qos_cfg.discard_on_miss = false;
2964 		qos_cfg.default_tc = 0;
2965 	}
2966 
2967 	ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
2968 			priv->token, &qos_cfg);
2969 	if (ret < 0) {
2970 		DPAA2_PMD_ERR("QoS table set failed");
2971 		return ret;
2972 	}
2973 
2974 	ret = dpaa2_flow_rule_add_all(priv, DPAA2_FLOW_QOS_TYPE,
2975 			entry_size, 0);
2976 	if (ret)
2977 		return ret;
2978 
2979 	return 0;
2980 }
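
/*
 * The QoS table steers frames to a traffic class before FS/RSS applies
 * within that class, which is why it is skipped for a single-TC, non-RSS
 * setup. On a QoS miss, RSS mode discards the frame while FS mode falls
 * back to default TC 0.
 */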
2981 
2982 static int
2983 dpaa2_generic_flow_set(struct dpaa2_dev_flow *flow,
2984 	struct rte_eth_dev *dev,
2985 	const struct rte_flow_attr *attr,
2986 	const struct rte_flow_item pattern[],
2987 	const struct rte_flow_action actions[],
2988 	struct rte_flow_error *error)
2989 {
2990 	const struct rte_flow_action_rss *rss_conf;
2991 	int is_keycfg_configured = 0, end_of_list = 0;
2992 	int ret = 0, i = 0, j = 0;
2993 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2994 	struct dpaa2_dev_flow *curr = LIST_FIRST(&priv->flows);
2995 	uint16_t dist_size, key_size;
2996 	struct dpaa2_key_extract *qos_key_extract;
2997 	struct dpaa2_key_extract *tc_key_extract;
2998 
2999 	ret = dpaa2_flow_verify_attr(priv, attr);
3000 	if (ret)
3001 		return ret;
3002 
3003 	ret = dpaa2_flow_verify_action(priv, attr, actions);
3004 	if (ret)
3005 		return ret;
3006 
3007 	/* Parse pattern list to get the matching parameters */
3008 	while (!end_of_list) {
3009 		switch (pattern[i].type) {
3010 		case RTE_FLOW_ITEM_TYPE_ETH:
3011 			ret = dpaa2_configure_flow_eth(flow,
3012 					dev, attr, &pattern[i], actions, error,
3013 					&is_keycfg_configured);
3014 			if (ret) {
3015 				DPAA2_PMD_ERR("ETH flow config failed!");
3016 				return ret;
3017 			}
3018 			break;
3019 		case RTE_FLOW_ITEM_TYPE_VLAN:
3020 			ret = dpaa2_configure_flow_vlan(flow,
3021 					dev, attr, &pattern[i], actions, error,
3022 					&is_keycfg_configured);
3023 			if (ret) {
3024 				DPAA2_PMD_ERR("VLAN flow config failed!");
3025 				return ret;
3026 			}
3027 			break;
3028 		case RTE_FLOW_ITEM_TYPE_IPV4:
3029 			ret = dpaa2_configure_flow_ipv4(flow,
3030 					dev, attr, &pattern[i], actions, error,
3031 					&is_keycfg_configured);
3032 			if (ret) {
3033 				DPAA2_PMD_ERR("IPV4 flow config failed!");
3034 				return ret;
3035 			}
3036 			break;
3037 		case RTE_FLOW_ITEM_TYPE_IPV6:
3038 			ret = dpaa2_configure_flow_ipv6(flow,
3039 					dev, attr, &pattern[i], actions, error,
3040 					&is_keycfg_configured);
3041 			if (ret) {
3042 				DPAA2_PMD_ERR("IPV6 flow config failed!");
3043 				return ret;
3044 			}
3045 			break;
3046 		case RTE_FLOW_ITEM_TYPE_ICMP:
3047 			ret = dpaa2_configure_flow_icmp(flow,
3048 					dev, attr, &pattern[i], actions, error,
3049 					&is_keycfg_configured);
3050 			if (ret) {
3051 				DPAA2_PMD_ERR("ICMP flow config failed!");
3052 				return ret;
3053 			}
3054 			break;
3055 		case RTE_FLOW_ITEM_TYPE_UDP:
3056 			ret = dpaa2_configure_flow_udp(flow,
3057 					dev, attr, &pattern[i], actions, error,
3058 					&is_keycfg_configured);
3059 			if (ret) {
3060 				DPAA2_PMD_ERR("UDP flow config failed!");
3061 				return ret;
3062 			}
3063 			break;
3064 		case RTE_FLOW_ITEM_TYPE_TCP:
3065 			ret = dpaa2_configure_flow_tcp(flow,
3066 					dev, attr, &pattern[i], actions, error,
3067 					&is_keycfg_configured);
3068 			if (ret) {
3069 				DPAA2_PMD_ERR("TCP flow config failed!");
3070 				return ret;
3071 			}
3072 			break;
3073 		case RTE_FLOW_ITEM_TYPE_SCTP:
3074 			ret = dpaa2_configure_flow_sctp(flow,
3075 					dev, attr, &pattern[i], actions, error,
3076 					&is_keycfg_configured);
3077 			if (ret) {
3078 				DPAA2_PMD_ERR("SCTP flow config failed!");
3079 				return ret;
3080 			}
3081 			break;
3082 		case RTE_FLOW_ITEM_TYPE_GRE:
3083 			ret = dpaa2_configure_flow_gre(flow,
3084 					dev, attr, &pattern[i], actions, error,
3085 					&is_keycfg_configured);
3086 			if (ret) {
3087 				DPAA2_PMD_ERR("GRE flow config failed!");
3088 				return ret;
3089 			}
3090 			break;
3091 		case RTE_FLOW_ITEM_TYPE_RAW:
3092 			ret = dpaa2_configure_flow_raw(flow,
3093 					dev, attr, &pattern[i],
3094 					actions, error,
3095 					&is_keycfg_configured);
3096 			if (ret) {
3097 				DPAA2_PMD_ERR("RAW flow config failed!");
3098 				return ret;
3099 			}
3100 			break;
3101 		case RTE_FLOW_ITEM_TYPE_END:
3102 			end_of_list = 1;
3103 			break; /*End of List*/
3104 		default:
3105 			DPAA2_PMD_ERR("Invalid pattern item type");
3106 			ret = -ENOTSUP;
3107 			break;
3108 		}
3109 		i++;
3110 	}
3111 
3112 	qos_key_extract = &priv->extract.qos_key_extract;
3113 	key_size = qos_key_extract->key_profile.key_max_size;
3114 	flow->qos_rule.key_size = dpaa2_flow_entry_size(key_size);
3115 
3116 	tc_key_extract = &priv->extract.tc_key_extract[flow->tc_id];
3117 	key_size = tc_key_extract->key_profile.key_max_size;
3118 	flow->fs_rule.key_size = dpaa2_flow_entry_size(key_size);
3119 
3120 	/* Let's parse action on matching traffic */
3121 	end_of_list = 0;
3122 	while (!end_of_list) {
3123 		switch (actions[j].type) {
3124 		case RTE_FLOW_ACTION_TYPE_QUEUE:
3125 		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
3126 		case RTE_FLOW_ACTION_TYPE_PORT_ID:
3127 			ret = dpaa2_configure_flow_fs_action(priv, flow,
3128 							     &actions[j]);
3129 			if (ret)
3130 				return ret;
3131 
3132 			/* Configure FS table first*/
3133 			dist_size = priv->nb_rx_queues / priv->num_rx_tc;
3134 			if (is_keycfg_configured & DPAA2_FLOW_FS_TYPE) {
3135 				ret = dpaa2_configure_fs_rss_table(priv,
3136 								   flow->tc_id,
3137 								   dist_size,
3138 								   false);
3139 				if (ret)
3140 					return ret;
3141 			}
3142 
3143 			/* Configure QoS table then.*/
3144 			if (is_keycfg_configured & DPAA2_FLOW_QOS_TYPE) {
3145 				ret = dpaa2_configure_qos_table(priv, false);
3146 				if (ret)
3147 					return ret;
3148 			}
3149 
3150 			if (priv->num_rx_tc > 1) {
3151 				ret = dpaa2_flow_add_qos_rule(priv, flow);
3152 				if (ret)
3153 					return ret;
3154 			}
3155 
3156 			if (flow->tc_index >= priv->fs_entries) {
3157 				DPAA2_PMD_ERR("FS table (%d entries) is full",
3158 					priv->fs_entries);
3159 				return -ENOSPC;
3160 			}
3161 
3162 			ret = dpaa2_flow_add_fs_rule(priv, flow);
3163 			if (ret)
3164 				return ret;
3165 
3166 			break;
3167 		case RTE_FLOW_ACTION_TYPE_RSS:
3168 			rss_conf = actions[j].conf;
3169 			flow->action_type = RTE_FLOW_ACTION_TYPE_RSS;
3170 
3171 			ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types,
3172 					&tc_key_extract->dpkg);
3173 			if (ret < 0) {
3174 				DPAA2_PMD_ERR("TC[%d] distset RSS failed",
3175 					      flow->tc_id);
3176 				return ret;
3177 			}
3178 
3179 			dist_size = rss_conf->queue_num;
3180 			if (is_keycfg_configured & DPAA2_FLOW_FS_TYPE) {
3181 				ret = dpaa2_configure_fs_rss_table(priv,
3182 								   flow->tc_id,
3183 								   dist_size,
3184 								   true);
3185 				if (ret)
3186 					return ret;
3187 			}
3188 
3189 			if (is_keycfg_configured & DPAA2_FLOW_QOS_TYPE) {
3190 				ret = dpaa2_configure_qos_table(priv, true);
3191 				if (ret)
3192 					return ret;
3193 			}
3194 
3195 			ret = dpaa2_flow_add_qos_rule(priv, flow);
3196 			if (ret)
3197 				return ret;
3198 
3199 			ret = dpaa2_flow_add_fs_rule(priv, flow);
3200 			if (ret)
3201 				return ret;
3202 
3203 			break;
3204 		case RTE_FLOW_ACTION_TYPE_END:
3205 			end_of_list = 1;
3206 			break;
3207 		default:
3208 			DPAA2_PMD_ERR("Invalid action type");
3209 			ret = -ENOTSUP;
3210 			break;
3211 		}
3212 		j++;
3213 	}
3214 
3215 	if (!ret) {
3216 		/* New rules are inserted. */
3217 		if (!curr) {
3218 			LIST_INSERT_HEAD(&priv->flows, flow, next);
3219 		} else {
3220 			while (LIST_NEXT(curr, next))
3221 				curr = LIST_NEXT(curr, next);
3222 			LIST_INSERT_AFTER(curr, flow, next);
3223 		}
3224 	}
3225 	return ret;
3226 }
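
/*
 * End-to-end illustration of the dispatcher above (a sketch; the queue
 * index is a placeholder and port 0 is assumed to be a dpaa2 port):
 *
 *	struct rte_flow_attr attr = {.group = 0, .priority = 0, .ingress = 1};
 *	struct rte_flow_item pattern[] = {
 *		{.type = RTE_FLOW_ITEM_TYPE_ETH},
 *		{.type = RTE_FLOW_ITEM_TYPE_IPV4},
 *		{.type = RTE_FLOW_ITEM_TYPE_UDP},
 *		{.type = RTE_FLOW_ITEM_TYPE_END},
 *	};
 *	struct rte_flow_action_queue queue = {.index = 0};
 *	struct rte_flow_action actions[] = {
 *		{.type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue},
 *		{.type = RTE_FLOW_ACTION_TYPE_END},
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(0, &attr, pattern, actions, &err);
 */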
3227 
3228 static inline int
3229 dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr,
3230 	const struct rte_flow_attr *attr)
3231 {
3232 	int ret = 0;
3233 
3234 	if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) {
3235 		DPAA2_PMD_ERR("Priority group is out of range");
3236 		ret = -ENOTSUP;
3237 	}
3238 	if (unlikely(attr->priority >= dpni_attr->fs_entries)) {
3239 		DPAA2_PMD_ERR("Priority within the group is out of range");
3240 		ret = -ENOTSUP;
3241 	}
3242 	if (unlikely(attr->egress)) {
3243 		DPAA2_PMD_ERR(
3244 			"Flow configuration is not supported on egress side");
3245 		ret = -ENOTSUP;
3246 	}
3247 	if (unlikely(!attr->ingress)) {
3248 		DPAA2_PMD_ERR("Ingress flag must be configured");
3249 		ret = -EINVAL;
3250 	}
3251 	return ret;
3252 }
3253 
3254 static inline int
3255 dpaa2_dev_verify_patterns(const struct rte_flow_item pattern[])
3256 {
3257 	unsigned int i, j, is_found = 0;
3258 	int ret = 0;
3259 
3260 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3261 		for (i = 0; i < RTE_DIM(dpaa2_supported_pattern_type); i++) {
3262 			if (dpaa2_supported_pattern_type[i]
3263 					== pattern[j].type) {
3264 				is_found = 1;
3265 				break;
3266 			}
3267 		}
3268 		if (!is_found) {
3269 			ret = -ENOTSUP;
3270 			break;
3271 		}
3272 	}
3273 	/* Also verify that each of the given pattern items provides a spec */
3274 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3275 		if (!pattern[j].spec) {
3276 			ret = -EINVAL;
3277 			break;
3278 		}
3279 	}
3280 
3281 	return ret;
3282 }
3283 
3284 static inline int
3285 dpaa2_dev_verify_actions(const struct rte_flow_action actions[])
3286 {
3287 	unsigned int i, j, is_found = 0;
3288 	int ret = 0;
3289 
3290 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3291 		for (i = 0; i < RTE_DIM(dpaa2_supported_action_type); i++) {
3292 			if (dpaa2_supported_action_type[i] == actions[j].type) {
3293 				is_found = 1;
3294 				break;
3295 			}
3296 		}
3297 		if (!is_found) {
3298 			ret = -ENOTSUP;
3299 			break;
3300 		}
3301 	}
3302 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3303 		if (actions[j].type != RTE_FLOW_ACTION_TYPE_DROP &&
3304 		    !actions[j].conf)
3305 			ret = -EINVAL;
3306 	}
3307 	return ret;
3308 }
3309 
3310 static int
3311 dpaa2_flow_validate(struct rte_eth_dev *dev,
3312 	const struct rte_flow_attr *flow_attr,
3313 	const struct rte_flow_item pattern[],
3314 	const struct rte_flow_action actions[],
3315 	struct rte_flow_error *error)
3316 {
3317 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3318 	struct dpni_attr dpni_attr;
3319 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3320 	uint16_t token = priv->token;
3321 	int ret = 0;
3322 
3323 	memset(&dpni_attr, 0, sizeof(struct dpni_attr));
3324 	ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr);
3325 	if (ret < 0) {
3326 		DPAA2_PMD_ERR(
3327 			"Failed to get dpni@%p attributes, err code %d",
3328 			dpni, ret);
3329 		rte_flow_error_set(error, EPERM,
3330 			   RTE_FLOW_ERROR_TYPE_ATTR,
3331 			   flow_attr, "invalid");
3332 		return ret;
3333 	}
3334 
3335 	/* Verify input attributes */
3336 	ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr);
3337 	if (ret < 0) {
3338 		DPAA2_PMD_ERR(
3339 			"Invalid attributes are given");
3340 		rte_flow_error_set(error, EPERM,
3341 			   RTE_FLOW_ERROR_TYPE_ATTR,
3342 			   flow_attr, "invalid");
3343 		goto not_valid_params;
3344 	}
3345 	/* Verify input pattern list */
3346 	ret = dpaa2_dev_verify_patterns(pattern);
3347 	if (ret < 0) {
3348 		DPAA2_PMD_ERR(
3349 			"Invalid pattern list is given");
3350 		rte_flow_error_set(error, EPERM,
3351 			   RTE_FLOW_ERROR_TYPE_ITEM,
3352 			   pattern, "invalid");
3353 		goto not_valid_params;
3354 	}
3355 	/* Verify input action list */
3356 	ret = dpaa2_dev_verify_actions(actions);
3357 	if (ret < 0) {
3358 		DPAA2_PMD_ERR(
3359 			"Invalid action list is given");
3360 		rte_flow_error_set(error, EPERM,
3361 			   RTE_FLOW_ERROR_TYPE_ACTION,
3362 			   actions, "invalid");
3363 		goto not_valid_params;
3364 	}
3365 not_valid_params:
3366 	return ret;
3367 }
3368 
3369 static struct rte_flow *
3370 dpaa2_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
3371 		  const struct rte_flow_item pattern[],
3372 		  const struct rte_flow_action actions[],
3373 		  struct rte_flow_error *error)
3374 {
3375 	struct dpaa2_dev_flow *flow = NULL;
3376 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3377 	int ret;
3378 
3379 	dpaa2_flow_control_log =
3380 		getenv("DPAA2_FLOW_CONTROL_LOG");
3381 
3382 	if (getenv("DPAA2_FLOW_CONTROL_MISS_FLOW")) {
3383 		dpaa2_flow_miss_flow_id =
3384 			(uint16_t)atoi(getenv("DPAA2_FLOW_CONTROL_MISS_FLOW"));
3385 		if (dpaa2_flow_miss_flow_id >= priv->dist_queues) {
3386 			DPAA2_PMD_ERR("Miss flow ID %d >= dist size (%d)",
3387 				      dpaa2_flow_miss_flow_id,
3388 				      priv->dist_queues);
3389 			return NULL;
3390 		}
3391 	}
3392 
3393 	flow = rte_zmalloc(NULL, sizeof(struct dpaa2_dev_flow),
3394 			   RTE_CACHE_LINE_SIZE);
3395 	if (!flow) {
3396 		DPAA2_PMD_ERR("Failure to allocate memory for flow");
3397 		goto mem_failure;
3398 	}
3399 
3400 	/* Allocate DMA'ble memory to write the qos rules */
3401 	flow->qos_key_addr = rte_zmalloc(NULL, 256, 64);
3402 	if (!flow->qos_key_addr) {
3403 		DPAA2_PMD_ERR("Memory allocation failed");
3404 		goto mem_failure;
3405 	}
3406 	flow->qos_rule.key_iova = DPAA2_VADDR_TO_IOVA(flow->qos_key_addr);
3407 
3408 	flow->qos_mask_addr = rte_zmalloc(NULL, 256, 64);
3409 	if (!flow->qos_mask_addr) {
3410 		DPAA2_PMD_ERR("Memory allocation failed");
3411 		goto mem_failure;
3412 	}
3413 	flow->qos_rule.mask_iova = DPAA2_VADDR_TO_IOVA(flow->qos_mask_addr);
3414 
3415 	/* Allocate DMA'ble memory to write the FS rules */
3416 	flow->fs_key_addr = rte_zmalloc(NULL, 256, 64);
3417 	if (!flow->fs_key_addr) {
3418 		DPAA2_PMD_ERR("Memory allocation failed");
3419 		goto mem_failure;
3420 	}
3421 	flow->fs_rule.key_iova = DPAA2_VADDR_TO_IOVA(flow->fs_key_addr);
3422 
3423 	flow->fs_mask_addr = rte_zmalloc(NULL, 256, 64);
3424 	if (!flow->fs_mask_addr) {
3425 		DPAA2_PMD_ERR("Memory allocation failed");
3426 		goto mem_failure;
3427 	}
3428 	flow->fs_rule.mask_iova = DPAA2_VADDR_TO_IOVA(flow->fs_mask_addr);
3429 
3430 	priv->curr = flow;
3431 
3432 	ret = dpaa2_generic_flow_set(flow, dev, attr, pattern, actions, error);
3433 	if (ret < 0) {
3434 		if (error && error->type > RTE_FLOW_ERROR_TYPE_ACTION)
3435 			rte_flow_error_set(error, EPERM,
3436 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3437 					   attr, "unknown");
3438 		DPAA2_PMD_ERR("Create flow failed (%d)", ret);
3439 		goto creation_error;
3440 	}
3441 
3442 	priv->curr = NULL;
3443 	return (struct rte_flow *)flow;
3444 
3445 mem_failure:
3446 	rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3447 			   "memory alloc");
3448 
3449 creation_error:
3450 	if (flow) {
3451 		if (flow->qos_key_addr)
3452 			rte_free(flow->qos_key_addr);
3453 		if (flow->qos_mask_addr)
3454 			rte_free(flow->qos_mask_addr);
3455 		if (flow->fs_key_addr)
3456 			rte_free(flow->fs_key_addr);
3457 		if (flow->fs_mask_addr)
3458 			rte_free(flow->fs_mask_addr);
3459 		rte_free(flow);
3460 	}
3461 	priv->curr = NULL;
3462 
3463 	return NULL;
3464 }
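
/*
 * The two environment variables read above act as runtime knobs; shell
 * usage could be (values illustrative):
 *
 *	export DPAA2_FLOW_CONTROL_LOG=1        # gates the dpaa2_flow_*_log dumps
 *	export DPAA2_FLOW_CONTROL_MISS_FLOW=3  # steer FS misses to flow id 3
 */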
3465 
3466 static int
3467 dpaa2_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *_flow,
3468 		   struct rte_flow_error *error)
3469 {
3470 	int ret = 0;
3471 	struct dpaa2_dev_flow *flow;
3472 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3473 	struct fsl_mc_io *dpni = priv->hw;
3474 
3475 	flow = (struct dpaa2_dev_flow *)_flow;
3476 
3477 	switch (flow->action_type) {
3478 	case RTE_FLOW_ACTION_TYPE_QUEUE:
3479 	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
3480 	case RTE_FLOW_ACTION_TYPE_PORT_ID:
3481 		if (priv->num_rx_tc > 1) {
3482 			/* Remove entry from QoS table first */
3483 			ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW,
3484 						    priv->token,
3485 						    &flow->qos_rule);
3486 			if (ret < 0) {
3487 				DPAA2_PMD_ERR("Remove FS QoS entry failed");
3488 				dpaa2_flow_qos_entry_log("Delete failed", flow,
3489 							 -1);
3491 				goto error;
3492 			}
3493 		}
3494 
3495 		/* Then remove entry from FS table */
3496 		ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token,
3497 					   flow->tc_id, &flow->fs_rule);
3498 		if (ret < 0) {
3499 			DPAA2_PMD_ERR("Remove entry from FS[%d] failed",
3500 				      flow->tc_id);
3501 			goto error;
3502 		}
3503 		break;
3504 	case RTE_FLOW_ACTION_TYPE_RSS:
3505 		if (priv->num_rx_tc > 1) {
3506 			ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW,
3507 						    priv->token,
3508 						    &flow->qos_rule);
3509 			if (ret < 0) {
3510 				DPAA2_PMD_ERR("Remove RSS QoS entry failed");
3511 				goto error;
3512 			}
3513 		}
3514 		break;
3515 	default:
3516 		DPAA2_PMD_ERR("Action(%d) not supported", flow->action_type);
3517 		ret = -ENOTSUP;
3518 		break;
3519 	}
3520 
3521 	LIST_REMOVE(flow, next);
3522 	if (flow->qos_key_addr)
3523 		rte_free(flow->qos_key_addr);
3524 	if (flow->qos_mask_addr)
3525 		rte_free(flow->qos_mask_addr);
3526 	if (flow->fs_key_addr)
3527 		rte_free(flow->fs_key_addr);
3528 	if (flow->fs_mask_addr)
3529 		rte_free(flow->fs_mask_addr);
3530 	/* Now free the flow */
3531 	rte_free(flow);
3532 
3533 error:
3534 	if (ret)
3535 		rte_flow_error_set(error, EPERM,
3536 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3537 				   NULL, "unknown");
3538 	return ret;
3539 }
3540 
3541 /**
3542  * Destroy user-configured flow rules.
3543  *
3544  * This function skips internal flow rules.
3545  *
3546  * @see rte_flow_flush()
3547  * @see rte_flow_ops
3548  */
3549 static int
3550 dpaa2_flow_flush(struct rte_eth_dev *dev,
3551 		struct rte_flow_error *error)
3552 {
3553 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3554 	struct dpaa2_dev_flow *flow = LIST_FIRST(&priv->flows);
3555 
3556 	while (flow) {
3557 		struct dpaa2_dev_flow *next = LIST_NEXT(flow, next);
3558 
3559 		dpaa2_flow_destroy(dev, (struct rte_flow *)flow, error);
3560 		flow = next;
3561 	}
3562 	return 0;
3563 }
3564 
3565 static int
3566 dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused,
3567 	struct rte_flow *_flow __rte_unused,
3568 	const struct rte_flow_action *actions __rte_unused,
3569 	void *data __rte_unused,
3570 	struct rte_flow_error *error __rte_unused)
3571 {
3572 	return 0;
3573 }
3574 
3575 /**
3576  * Clean up all flow rules.
3577  *
3578  * Unlike dpaa2_flow_flush(), this function takes care of all remaining flow
3579  * rules regardless of whether they are internal or user-configured.
3580  *
3581  * @param dev
3582  *   Pointer to the Ethernet device structure.
3583  */
3584 void
3585 dpaa2_flow_clean(struct rte_eth_dev *dev)
3586 {
3587 	struct dpaa2_dev_flow *flow;
3588 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
3589 
3590 	while ((flow = LIST_FIRST(&priv->flows)))
3591 		dpaa2_flow_destroy(dev, (struct rte_flow *)flow, NULL);
3592 }
3593 
3594 const struct rte_flow_ops dpaa2_flow_ops = {
3595 	.create	= dpaa2_flow_create,
3596 	.validate = dpaa2_flow_validate,
3597 	.destroy = dpaa2_flow_destroy,
3598 	.flush	= dpaa2_flow_flush,
3599 	.query	= dpaa2_flow_query,
3600 };
3601