xref: /dpdk/drivers/net/dpaa2/dpaa2_flow.c (revision 4e445633a11a01c74f663378c9d8bc86159cbcb8)
/* SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright 2018 NXP
3  */
4 
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 
13 #include <rte_ethdev.h>
14 #include <rte_log.h>
15 #include <rte_malloc.h>
16 #include <rte_flow_driver.h>
17 #include <rte_tailq.h>
18 
19 #include <fsl_dpni.h>
20 #include <fsl_dpkg.h>
21 
22 #include <dpaa2_ethdev.h>
23 #include <dpaa2_pmd_logs.h>
24 
/* Driver-private representation of one installed flow rule: the raw
 * key/mask buffers handed to the DPNI plus the bookkeeping needed to
 * place the entry in the QoS and FS classification tables.
 */
struct rte_flow {
	LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
	struct dpni_rule_cfg rule; /**< Holds key_iova/mask_iova buffers filled by the pattern parsers. */
	uint8_t key_size; /**< Bytes of key/mask written so far; grows as pattern items are parsed. */
	uint8_t tc_id; /**< Traffic class index; taken from the flow attribute's group. */
	uint8_t flow_type; /**< Pattern item type of this rule — presumably rte_flow_item_type; confirm at set site. */
	uint8_t index; /**< Table entry position; taken from the flow attribute's priority. */
	enum rte_flow_action_type action; /**< Action applied on match (QUEUE or RSS per supported list). */
	uint16_t flow_id; /**< Destination flow id — set by action handling (not visible in this chunk). */
};
35 
/* Pattern item types this PMD can translate into DPNI key extracts. */
static const
enum rte_flow_item_type dpaa2_supported_pattern_type[] = {
	RTE_FLOW_ITEM_TYPE_END,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_GRE,
};
49 
/* Flow actions this PMD supports: steer to a queue or spread via RSS. */
static const
enum rte_flow_action_type dpaa2_supported_action_type[] = {
	RTE_FLOW_ACTION_TYPE_END,
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_RSS
};
56 
/* Currently active legacy filter type; NONE until a filter is installed. */
enum rte_filter_type dpaa2_filter_type = RTE_ETH_FILTER_NONE;
/* Fallback mask used when a pattern item supplies no mask of its own;
 * presumably set per item type before each parser runs — confirm at caller.
 */
static const void *default_mask;
59 
/*
 * Translate an ETH pattern item into DPNI key extracts (src MAC, dst MAC,
 * ether type) and append the item's spec/mask bytes to the flow's rule.
 *
 * Returns a bitmask of DPAA2_QOS_TABLE_RECONFIGURE and/or
 * DPAA2_FS_TABLE_RECONFIGURE telling the caller which tables need to be
 * reprogrammed, or -ENOTSUP when the hardware extract limit is hit.
 */
static int
dpaa2_configure_flow_eth(struct rte_flow *flow,
			 struct rte_eth_dev *dev,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_item *pattern,
			 const struct rte_flow_action actions[] __rte_unused,
			 struct rte_flow_error *error __rte_unused)
{
	int index, j = 0;
	size_t key_iova;
	size_t mask_iova;
	int device_configured = 0, entry_found = 0;
	uint32_t group;
	const struct rte_flow_item_eth *spec, *mask;

	/* TODO: Currently upper bound of range parameter is not implemented */
	const struct rte_flow_item_eth *last __rte_unused;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	group = attr->group;

	/* DPAA2 platform has a limitation that extract parameter can not be */
	/* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
	/* TODO: pattern is an array of 9 elements where 9th pattern element */
	/* is for QoS table and 1-8th pattern element is for FS tables. */
	/* It can be changed to macro. */
	if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
						DPKG_MAX_NUM_OF_EXTRACTS);
		return -ENOTSUP;
	}

	if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
						DPKG_MAX_NUM_OF_EXTRACTS);
		return -ENOTSUP;
	}

	/* Check whether this item type is already part of the QoS key
	 * (slot 8 tracks the QoS table).
	 */
	for (j = 0; j < priv->pattern[8].item_count; j++) {
		if (priv->pattern[8].pattern_type[j] != pattern->type) {
			continue;
		} else {
			entry_found = 1;
			break;
		}
	}

	if (!entry_found) {
		priv->pattern[8].pattern_type[j] = pattern->type;
		priv->pattern[8].item_count++;
		device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
	}

	/* Same check for the per-group FS table key. */
	entry_found = 0;
	for (j = 0; j < priv->pattern[group].item_count; j++) {
		if (priv->pattern[group].pattern_type[j] != pattern->type) {
			continue;
		} else {
			entry_found = 1;
			break;
		}
	}

	if (!entry_found) {
		priv->pattern[group].pattern_type[j] = pattern->type;
		priv->pattern[group].item_count++;
		device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
	}

	/* Get traffic class index and flow id to be configured */
	flow->tc_id = group;
	flow->index = attr->priority;

	/* Append ETH SA/DA/TYPE header extracts to the QoS key layout. */
	if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
		index = priv->extract.qos_key_cfg.num_extracts;
		priv->extract.qos_key_cfg.extracts[index].type =
							DPKG_EXTRACT_FROM_HDR;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_SA;
		index++;

		priv->extract.qos_key_cfg.extracts[index].type =
							DPKG_EXTRACT_FROM_HDR;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_DA;
		index++;

		priv->extract.qos_key_cfg.extracts[index].type =
							DPKG_EXTRACT_FROM_HDR;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_TYPE;
		index++;

		priv->extract.qos_key_cfg.num_extracts = index;
	}

	/* Same three extracts for this group's FS key layout. */
	if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
		index = priv->extract.fs_key_cfg[group].num_extracts;
		priv->extract.fs_key_cfg[group].extracts[index].type =
							DPKG_EXTRACT_FROM_HDR;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_SA;
		index++;

		priv->extract.fs_key_cfg[group].extracts[index].type =
							DPKG_EXTRACT_FROM_HDR;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_DA;
		index++;

		priv->extract.fs_key_cfg[group].extracts[index].type =
							DPKG_EXTRACT_FROM_HDR;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_TYPE;
		index++;

		priv->extract.fs_key_cfg[group].num_extracts = index;
	}

	/* Parse pattern list to get the matching parameters */
	spec	= (const struct rte_flow_item_eth *)pattern->spec;
	last	= (const struct rte_flow_item_eth *)pattern->last;
	mask	= (const struct rte_flow_item_eth *)
			(pattern->mask ? pattern->mask : default_mask);

	/* Key rule: copy src MAC, dst MAC, ether type after the bytes of
	 * any previously parsed items.
	 */
	key_iova = flow->rule.key_iova + flow->key_size;
	memcpy((void *)key_iova, (const void *)(spec->src.addr_bytes),
						sizeof(struct rte_ether_addr));
	key_iova += sizeof(struct rte_ether_addr);
	memcpy((void *)key_iova, (const void *)(spec->dst.addr_bytes),
						sizeof(struct rte_ether_addr));
	key_iova += sizeof(struct rte_ether_addr);
	memcpy((void *)key_iova, (const void *)(&spec->type),
						sizeof(rte_be16_t));

	/* Key mask: same layout as the key rule above. */
	mask_iova = flow->rule.mask_iova + flow->key_size;
	memcpy((void *)mask_iova, (const void *)(mask->src.addr_bytes),
						sizeof(struct rte_ether_addr));
	mask_iova += sizeof(struct rte_ether_addr);
	memcpy((void *)mask_iova, (const void *)(mask->dst.addr_bytes),
						sizeof(struct rte_ether_addr));
	mask_iova += sizeof(struct rte_ether_addr);
	memcpy((void *)mask_iova, (const void *)(&mask->type),
						sizeof(rte_be16_t));

	flow->key_size += ((2  * sizeof(struct rte_ether_addr)) +
					sizeof(rte_be16_t));

	return device_configured;
}
218 
/*
 * Translate a VLAN pattern item into a DPNI TCI extract and append the
 * item's spec/mask TCI bytes to the flow's rule.
 *
 * Returns a bitmask of DPAA2_QOS_TABLE_RECONFIGURE and/or
 * DPAA2_FS_TABLE_RECONFIGURE, or -ENOTSUP when the extract limit is hit.
 */
static int
dpaa2_configure_flow_vlan(struct rte_flow *flow,
			  struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item *pattern,
			  const struct rte_flow_action actions[] __rte_unused,
			  struct rte_flow_error *error __rte_unused)
{
	int index, j = 0;
	size_t key_iova;
	size_t mask_iova;
	int device_configured = 0, entry_found = 0;
	uint32_t group;
	const struct rte_flow_item_vlan *spec, *mask;

	/* TODO: range (last) matching is not implemented. */
	const struct rte_flow_item_vlan *last __rte_unused;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	group = attr->group;

	/* DPAA2 platform has a limitation that extract parameter can not be */
	/*  more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
	if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
						DPKG_MAX_NUM_OF_EXTRACTS);
		return -ENOTSUP;
	}

	if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
						DPKG_MAX_NUM_OF_EXTRACTS);
		return -ENOTSUP;
	}

	/* Check whether VLAN is already part of the QoS key (slot 8). */
	for (j = 0; j < priv->pattern[8].item_count; j++) {
		if (priv->pattern[8].pattern_type[j] != pattern->type) {
			continue;
		} else {
			entry_found = 1;
			break;
		}
	}

	if (!entry_found) {
		priv->pattern[8].pattern_type[j] = pattern->type;
		priv->pattern[8].item_count++;
		device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
	}

	/* Same check for this group's FS key. */
	entry_found = 0;
	for (j = 0; j < priv->pattern[group].item_count; j++) {
		if (priv->pattern[group].pattern_type[j] != pattern->type) {
			continue;
		} else {
			entry_found = 1;
			break;
		}
	}

	if (!entry_found) {
		priv->pattern[group].pattern_type[j] = pattern->type;
		priv->pattern[group].item_count++;
		device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
	}


	/* Get traffic class index and flow id to be configured */
	flow->tc_id = group;
	flow->index = attr->priority;

	/* Append a VLAN TCI header extract to the QoS key layout. */
	if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
		index = priv->extract.qos_key_cfg.num_extracts;
		priv->extract.qos_key_cfg.extracts[index].type =
							DPKG_EXTRACT_FROM_HDR;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_VLAN;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_VLAN_TCI;
		priv->extract.qos_key_cfg.num_extracts++;
	}

	/* Same extract for this group's FS key layout. */
	if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
		index = priv->extract.fs_key_cfg[group].num_extracts;
		priv->extract.fs_key_cfg[group].extracts[index].type =
							DPKG_EXTRACT_FROM_HDR;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_VLAN;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_VLAN_TCI;
		priv->extract.fs_key_cfg[group].num_extracts++;
	}

	/* Parse pattern list to get the matching parameters */
	spec	= (const struct rte_flow_item_vlan *)pattern->spec;
	last	= (const struct rte_flow_item_vlan *)pattern->last;
	mask	= (const struct rte_flow_item_vlan *)
			(pattern->mask ? pattern->mask : default_mask);

	/* Copy the TCI spec and mask after any previously parsed items. */
	key_iova = flow->rule.key_iova + flow->key_size;
	memcpy((void *)key_iova, (const void *)(&spec->tci),
							sizeof(rte_be16_t));

	mask_iova = flow->rule.mask_iova + flow->key_size;
	memcpy((void *)mask_iova, (const void *)(&mask->tci),
							sizeof(rte_be16_t));

	flow->key_size += sizeof(rte_be16_t);
	return device_configured;
}
326 
/*
 * Translate an IPV4 pattern item into DPNI extracts (src addr, dst addr,
 * protocol) and append the item's spec/mask bytes to the flow's rule.
 *
 * Returns a bitmask of DPAA2_QOS_TABLE_RECONFIGURE and/or
 * DPAA2_FS_TABLE_RECONFIGURE, or -ENOTSUP when the extract limit is hit.
 */
static int
dpaa2_configure_flow_ipv4(struct rte_flow *flow,
			  struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item *pattern,
			  const struct rte_flow_action actions[] __rte_unused,
			  struct rte_flow_error *error __rte_unused)
{
	int index, j = 0;
	size_t key_iova;
	size_t mask_iova;
	int device_configured = 0, entry_found = 0;
	uint32_t group;
	const struct rte_flow_item_ipv4 *spec, *mask;

	/* TODO: range (last) matching is not implemented. */
	const struct rte_flow_item_ipv4 *last __rte_unused;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	group = attr->group;

	/* DPAA2 platform has a limitation that extract parameter can not be */
	/* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
	if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
						DPKG_MAX_NUM_OF_EXTRACTS);
		return -ENOTSUP;
	}

	if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
						DPKG_MAX_NUM_OF_EXTRACTS);
		return -ENOTSUP;
	}

	/* Check whether IPV4 is already part of the QoS key (slot 8). */
	for (j = 0; j < priv->pattern[8].item_count; j++) {
		if (priv->pattern[8].pattern_type[j] != pattern->type) {
			continue;
		} else {
			entry_found = 1;
			break;
		}
	}

	if (!entry_found) {
		priv->pattern[8].pattern_type[j] = pattern->type;
		priv->pattern[8].item_count++;
		device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
	}

	/* Same check for this group's FS key. */
	entry_found = 0;
	for (j = 0; j < priv->pattern[group].item_count; j++) {
		if (priv->pattern[group].pattern_type[j] != pattern->type) {
			continue;
		} else {
			entry_found = 1;
			break;
		}
	}

	if (!entry_found) {
		priv->pattern[group].pattern_type[j] = pattern->type;
		priv->pattern[group].item_count++;
		device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
	}

	/* Get traffic class index and flow id to be configured */
	flow->tc_id = group;
	flow->index = attr->priority;

	/* Append IP SRC/DST/PROTO header extracts to the QoS key layout. */
	if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
		index = priv->extract.qos_key_cfg.num_extracts;
		priv->extract.qos_key_cfg.extracts[index].type =
							DPKG_EXTRACT_FROM_HDR;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
		index++;

		priv->extract.qos_key_cfg.extracts[index].type =
							DPKG_EXTRACT_FROM_HDR;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
		index++;

		priv->extract.qos_key_cfg.extracts[index].type =
							DPKG_EXTRACT_FROM_HDR;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
		index++;

		priv->extract.qos_key_cfg.num_extracts = index;
	}

	/* Same three extracts for this group's FS key layout. */
	if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
		index = priv->extract.fs_key_cfg[group].num_extracts;
		priv->extract.fs_key_cfg[group].extracts[index].type =
							DPKG_EXTRACT_FROM_HDR;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
		index++;

		priv->extract.fs_key_cfg[group].extracts[index].type =
							DPKG_EXTRACT_FROM_HDR;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
		index++;

		priv->extract.fs_key_cfg[group].extracts[index].type =
							DPKG_EXTRACT_FROM_HDR;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
		index++;

		priv->extract.fs_key_cfg[group].num_extracts = index;
	}

	/* Parse pattern list to get the matching parameters */
	spec	= (const struct rte_flow_item_ipv4 *)pattern->spec;
	last	= (const struct rte_flow_item_ipv4 *)pattern->last;
	mask	= (const struct rte_flow_item_ipv4 *)
			(pattern->mask ? pattern->mask : default_mask);

	/* Copy src addr, dst addr, protocol after previously parsed items. */
	key_iova = flow->rule.key_iova + flow->key_size;
	memcpy((void *)key_iova, (const void *)&spec->hdr.src_addr,
							sizeof(uint32_t));
	key_iova += sizeof(uint32_t);
	memcpy((void *)key_iova, (const void *)&spec->hdr.dst_addr,
							sizeof(uint32_t));
	key_iova += sizeof(uint32_t);
	memcpy((void *)key_iova, (const void *)&spec->hdr.next_proto_id,
							sizeof(uint8_t));

	mask_iova = flow->rule.mask_iova + flow->key_size;
	memcpy((void *)mask_iova, (const void *)&mask->hdr.src_addr,
							sizeof(uint32_t));
	mask_iova += sizeof(uint32_t);
	memcpy((void *)mask_iova, (const void *)&mask->hdr.dst_addr,
							sizeof(uint32_t));
	mask_iova += sizeof(uint32_t);
	memcpy((void *)mask_iova, (const void *)&mask->hdr.next_proto_id,
							sizeof(uint8_t));

	flow->key_size += (2 * sizeof(uint32_t)) + sizeof(uint8_t);
	return device_configured;
}
477 
478 static int
479 dpaa2_configure_flow_ipv6(struct rte_flow *flow,
480 			  struct rte_eth_dev *dev,
481 			  const struct rte_flow_attr *attr,
482 			  const struct rte_flow_item *pattern,
483 			  const struct rte_flow_action actions[] __rte_unused,
484 			  struct rte_flow_error *error __rte_unused)
485 {
486 	int index, j = 0;
487 	size_t key_iova;
488 	size_t mask_iova;
489 	int device_configured = 0, entry_found = 0;
490 	uint32_t group;
491 	const struct rte_flow_item_ipv6 *spec, *mask;
492 
493 	const struct rte_flow_item_ipv6 *last __rte_unused;
494 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
495 
496 	group = attr->group;
497 
498 	/* DPAA2 platform has a limitation that extract parameter can not be */
499 	/* more	than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
500 	if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
501 		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
502 						DPKG_MAX_NUM_OF_EXTRACTS);
503 		return -ENOTSUP;
504 	}
505 
506 	if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
507 		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
508 						DPKG_MAX_NUM_OF_EXTRACTS);
509 		return -ENOTSUP;
510 	}
511 
512 	for (j = 0; j < priv->pattern[8].item_count; j++) {
513 		if (priv->pattern[8].pattern_type[j] != pattern->type) {
514 			continue;
515 		} else {
516 			entry_found = 1;
517 			break;
518 		}
519 	}
520 
521 	if (!entry_found) {
522 		priv->pattern[8].pattern_type[j] = pattern->type;
523 		priv->pattern[8].item_count++;
524 		device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
525 	}
526 
527 	entry_found = 0;
528 	for (j = 0; j < priv->pattern[group].item_count; j++) {
529 		if (priv->pattern[group].pattern_type[j] != pattern->type) {
530 			continue;
531 		} else {
532 			entry_found = 1;
533 			break;
534 		}
535 	}
536 
537 	if (!entry_found) {
538 		priv->pattern[group].pattern_type[j] = pattern->type;
539 		priv->pattern[group].item_count++;
540 		device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
541 	}
542 
543 	/* Get traffic class index and flow id to be configured */
544 	flow->tc_id = group;
545 	flow->index = attr->priority;
546 
547 	if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
548 		index = priv->extract.qos_key_cfg.num_extracts;
549 		priv->extract.qos_key_cfg.extracts[index].type =
550 							DPKG_EXTRACT_FROM_HDR;
551 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
552 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
553 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
554 		index++;
555 
556 		priv->extract.qos_key_cfg.extracts[index].type =
557 							DPKG_EXTRACT_FROM_HDR;
558 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
559 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
560 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
561 		index++;
562 
563 		priv->extract.qos_key_cfg.num_extracts = index;
564 	}
565 
566 	if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
567 		index = priv->extract.fs_key_cfg[group].num_extracts;
568 		priv->extract.fs_key_cfg[group].extracts[index].type =
569 							DPKG_EXTRACT_FROM_HDR;
570 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
571 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
572 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
573 		index++;
574 
575 		priv->extract.fs_key_cfg[group].extracts[index].type =
576 							DPKG_EXTRACT_FROM_HDR;
577 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
578 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
579 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
580 		index++;
581 
582 		priv->extract.fs_key_cfg[group].num_extracts = index;
583 	}
584 
585 	/* Parse pattern list to get the matching parameters */
586 	spec	= (const struct rte_flow_item_ipv6 *)pattern->spec;
587 	last	= (const struct rte_flow_item_ipv6 *)pattern->last;
588 	mask	= (const struct rte_flow_item_ipv6 *)
589 			(pattern->mask ? pattern->mask : default_mask);
590 
591 	key_iova = flow->rule.key_iova + flow->key_size;
592 	memcpy((void *)key_iova, (const void *)(spec->hdr.src_addr),
593 						sizeof(spec->hdr.src_addr));
594 	key_iova += sizeof(spec->hdr.src_addr);
595 	memcpy((void *)key_iova, (const void *)(spec->hdr.dst_addr),
596 						sizeof(spec->hdr.dst_addr));
597 
598 	mask_iova = flow->rule.mask_iova + flow->key_size;
599 	memcpy((void *)mask_iova, (const void *)(mask->hdr.src_addr),
600 						sizeof(mask->hdr.src_addr));
601 	mask_iova += sizeof(mask->hdr.src_addr);
602 	memcpy((void *)mask_iova, (const void *)(mask->hdr.dst_addr),
603 						sizeof(mask->hdr.dst_addr));
604 
605 	flow->key_size += sizeof(spec->hdr.src_addr) +
606 					sizeof(mask->hdr.dst_addr);
607 	return device_configured;
608 }
609 
610 static int
611 dpaa2_configure_flow_icmp(struct rte_flow *flow,
612 			  struct rte_eth_dev *dev,
613 			  const struct rte_flow_attr *attr,
614 			  const struct rte_flow_item *pattern,
615 			  const struct rte_flow_action actions[] __rte_unused,
616 			  struct rte_flow_error *error __rte_unused)
617 {
618 	int index, j = 0;
619 	size_t key_iova;
620 	size_t mask_iova;
621 	int device_configured = 0, entry_found = 0;
622 	uint32_t group;
623 	const struct rte_flow_item_icmp *spec, *mask;
624 
625 	const struct rte_flow_item_icmp *last __rte_unused;
626 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
627 
628 	group = attr->group;
629 
630 	/* DPAA2 platform has a limitation that extract parameter can not be */
631 	/* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
632 	if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
633 		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
634 						DPKG_MAX_NUM_OF_EXTRACTS);
635 		return -ENOTSUP;
636 	}
637 
638 	if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
639 		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
640 						DPKG_MAX_NUM_OF_EXTRACTS);
641 		return -ENOTSUP;
642 	}
643 
644 	for (j = 0; j < priv->pattern[8].item_count; j++) {
645 		if (priv->pattern[8].pattern_type[j] != pattern->type) {
646 			continue;
647 		} else {
648 			entry_found = 1;
649 			break;
650 		}
651 	}
652 
653 	if (!entry_found) {
654 		priv->pattern[8].pattern_type[j] = pattern->type;
655 		priv->pattern[8].item_count++;
656 		device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
657 	}
658 
659 	entry_found = 0;
660 	for (j = 0; j < priv->pattern[group].item_count; j++) {
661 		if (priv->pattern[group].pattern_type[j] != pattern->type) {
662 			continue;
663 		} else {
664 			entry_found = 1;
665 			break;
666 		}
667 	}
668 
669 	if (!entry_found) {
670 		priv->pattern[group].pattern_type[j] = pattern->type;
671 		priv->pattern[group].item_count++;
672 		device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
673 	}
674 
675 	/* Get traffic class index and flow id to be configured */
676 	flow->tc_id = group;
677 	flow->index = attr->priority;
678 
679 	if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
680 		index = priv->extract.qos_key_cfg.num_extracts;
681 		priv->extract.qos_key_cfg.extracts[index].type =
682 							DPKG_EXTRACT_FROM_HDR;
683 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
684 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
685 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ICMP_TYPE;
686 		index++;
687 
688 		priv->extract.qos_key_cfg.extracts[index].type =
689 							DPKG_EXTRACT_FROM_HDR;
690 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
691 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
692 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ICMP_CODE;
693 		index++;
694 
695 		priv->extract.qos_key_cfg.num_extracts = index;
696 	}
697 
698 	if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
699 		index = priv->extract.fs_key_cfg[group].num_extracts;
700 		priv->extract.fs_key_cfg[group].extracts[index].type =
701 							DPKG_EXTRACT_FROM_HDR;
702 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
703 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
704 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ICMP_TYPE;
705 		index++;
706 
707 		priv->extract.fs_key_cfg[group].extracts[index].type =
708 							DPKG_EXTRACT_FROM_HDR;
709 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
710 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
711 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ICMP_CODE;
712 		index++;
713 
714 		priv->extract.fs_key_cfg[group].num_extracts = index;
715 	}
716 
717 	/* Parse pattern list to get the matching parameters */
718 	spec	= (const struct rte_flow_item_icmp *)pattern->spec;
719 	last	= (const struct rte_flow_item_icmp *)pattern->last;
720 	mask	= (const struct rte_flow_item_icmp *)
721 			(pattern->mask ? pattern->mask : default_mask);
722 
723 	key_iova = flow->rule.key_iova + flow->key_size;
724 	memcpy((void *)key_iova, (const void *)&spec->hdr.icmp_type,
725 							sizeof(uint8_t));
726 	key_iova += sizeof(uint8_t);
727 	memcpy((void *)key_iova, (const void *)&spec->hdr.icmp_code,
728 							sizeof(uint8_t));
729 
730 	mask_iova = flow->rule.mask_iova + flow->key_size;
731 	memcpy((void *)mask_iova, (const void *)&mask->hdr.icmp_type,
732 							sizeof(uint8_t));
733 	key_iova += sizeof(uint8_t);
734 	memcpy((void *)mask_iova, (const void *)&mask->hdr.icmp_code,
735 							sizeof(uint8_t));
736 
737 	flow->key_size += 2 * sizeof(uint8_t);
738 
739 	return device_configured;
740 }
741 
/*
 * Translate a UDP pattern item into DPNI extracts (src port, dst port)
 * and append the item's spec/mask bytes to the flow's rule.
 *
 * Returns a bitmask of DPAA2_QOS_TABLE_RECONFIGURE and/or
 * DPAA2_FS_TABLE_RECONFIGURE, or -ENOTSUP when the extract limit is hit.
 */
static int
dpaa2_configure_flow_udp(struct rte_flow *flow,
			 struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item *pattern,
			  const struct rte_flow_action actions[] __rte_unused,
			  struct rte_flow_error *error __rte_unused)
{
	int index, j = 0;
	size_t key_iova;
	size_t mask_iova;
	int device_configured = 0, entry_found = 0;
	uint32_t group;
	const struct rte_flow_item_udp *spec, *mask;

	/* TODO: range (last) matching is not implemented. */
	const struct rte_flow_item_udp *last __rte_unused;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	group = attr->group;

	/* DPAA2 platform has a limitation that extract parameter can not be */
	/* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
	if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
						DPKG_MAX_NUM_OF_EXTRACTS);
		return -ENOTSUP;
	}

	if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
						DPKG_MAX_NUM_OF_EXTRACTS);
		return -ENOTSUP;
	}

	/* Check whether UDP is already part of the QoS key (slot 8). */
	for (j = 0; j < priv->pattern[8].item_count; j++) {
		if (priv->pattern[8].pattern_type[j] != pattern->type) {
			continue;
		} else {
			 entry_found = 1;
			break;
		}
	}

	if (!entry_found) {
		priv->pattern[8].pattern_type[j] = pattern->type;
		priv->pattern[8].item_count++;
		device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
	}

	/* Same check for this group's FS key. */
	entry_found = 0;
	for (j = 0; j < priv->pattern[group].item_count; j++) {
		if (priv->pattern[group].pattern_type[j] != pattern->type) {
			continue;
		} else {
			entry_found = 1;
			break;
		}
	}

	if (!entry_found) {
		priv->pattern[group].pattern_type[j] = pattern->type;
		priv->pattern[group].item_count++;
		device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
	}

	/* Get traffic class index and flow id to be configured */
	flow->tc_id = group;
	flow->index = attr->priority;

	/* Append UDP SRC/DST port header extracts to the QoS key layout. */
	if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
		index = priv->extract.qos_key_cfg.num_extracts;
		priv->extract.qos_key_cfg.extracts[index].type =
							DPKG_EXTRACT_FROM_HDR;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_SRC;
		index++;

		priv->extract.qos_key_cfg.extracts[index].type = DPKG_EXTRACT_FROM_HDR;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_DST;
		index++;

		priv->extract.qos_key_cfg.num_extracts = index;
	}

	/* Same two extracts for this group's FS key layout. */
	if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
		index = priv->extract.fs_key_cfg[group].num_extracts;
		priv->extract.fs_key_cfg[group].extracts[index].type =
							DPKG_EXTRACT_FROM_HDR;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_SRC;
		index++;

		priv->extract.fs_key_cfg[group].extracts[index].type =
							DPKG_EXTRACT_FROM_HDR;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_DST;
		index++;

		priv->extract.fs_key_cfg[group].num_extracts = index;
	}

	/* Parse pattern list to get the matching parameters */
	spec	= (const struct rte_flow_item_udp *)pattern->spec;
	last	= (const struct rte_flow_item_udp *)pattern->last;
	mask	= (const struct rte_flow_item_udp *)
			(pattern->mask ? pattern->mask : default_mask);

	/* Copy src port then dst port after previously parsed items. */
	key_iova = flow->rule.key_iova + flow->key_size;
	memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port),
							sizeof(uint16_t));
	key_iova +=  sizeof(uint16_t);
	memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port),
							sizeof(uint16_t));

	mask_iova = flow->rule.mask_iova + flow->key_size;
	memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port),
							sizeof(uint16_t));
	mask_iova +=  sizeof(uint16_t);
	memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port),
							sizeof(uint16_t));

	flow->key_size += (2 * sizeof(uint16_t));

	return device_configured;
}
872 
873 static int
874 dpaa2_configure_flow_tcp(struct rte_flow *flow,
875 			 struct rte_eth_dev *dev,
876 			 const struct rte_flow_attr *attr,
877 			 const struct rte_flow_item *pattern,
878 			 const struct rte_flow_action actions[] __rte_unused,
879 			 struct rte_flow_error *error __rte_unused)
880 {
881 	int index, j = 0;
882 	size_t key_iova;
883 	size_t mask_iova;
884 	int device_configured = 0, entry_found = 0;
885 	uint32_t group;
886 	const struct rte_flow_item_tcp *spec, *mask;
887 
888 	const struct rte_flow_item_tcp *last __rte_unused;
889 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
890 
891 	group = attr->group;
892 
893 	/* DPAA2 platform has a limitation that extract parameter can not be */
894 	/* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
895 	if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
896 		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
897 						DPKG_MAX_NUM_OF_EXTRACTS);
898 		return -ENOTSUP;
899 	}
900 
901 	if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
902 		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
903 						DPKG_MAX_NUM_OF_EXTRACTS);
904 		return -ENOTSUP;
905 	}
906 
907 	for (j = 0; j < priv->pattern[8].item_count; j++) {
908 		if (priv->pattern[8].pattern_type[j] != pattern->type) {
909 			continue;
910 		} else {
911 			entry_found = 1;
912 			break;
913 		}
914 	}
915 
916 	if (!entry_found) {
917 		priv->pattern[8].pattern_type[j] = pattern->type;
918 		priv->pattern[8].item_count++;
919 		device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
920 	}
921 
922 	entry_found = 0;
923 	for (j = 0; j < priv->pattern[group].item_count; j++) {
924 		if (priv->pattern[group].pattern_type[j] != pattern->type) {
925 			continue;
926 		} else {
927 			entry_found = 1;
928 			break;
929 		}
930 	}
931 
932 	if (!entry_found) {
933 		priv->pattern[group].pattern_type[j] = pattern->type;
934 		priv->pattern[group].item_count++;
935 		device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
936 	}
937 
938 	/* Get traffic class index and flow id to be configured */
939 	flow->tc_id = group;
940 	flow->index = attr->priority;
941 
942 	if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
943 		index = priv->extract.qos_key_cfg.num_extracts;
944 		priv->extract.qos_key_cfg.extracts[index].type =
945 							DPKG_EXTRACT_FROM_HDR;
946 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
947 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
948 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_SRC;
949 		index++;
950 
951 		priv->extract.qos_key_cfg.extracts[index].type =
952 							DPKG_EXTRACT_FROM_HDR;
953 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
954 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
955 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_DST;
956 		index++;
957 
958 		priv->extract.qos_key_cfg.num_extracts = index;
959 	}
960 
961 	if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
962 		index = priv->extract.fs_key_cfg[group].num_extracts;
963 		priv->extract.fs_key_cfg[group].extracts[index].type =
964 							DPKG_EXTRACT_FROM_HDR;
965 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
966 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
967 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_SRC;
968 		index++;
969 
970 		priv->extract.fs_key_cfg[group].extracts[index].type =
971 							DPKG_EXTRACT_FROM_HDR;
972 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
973 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
974 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_DST;
975 		index++;
976 
977 		priv->extract.fs_key_cfg[group].num_extracts = index;
978 	}
979 
980 	/* Parse pattern list to get the matching parameters */
981 	spec	= (const struct rte_flow_item_tcp *)pattern->spec;
982 	last	= (const struct rte_flow_item_tcp *)pattern->last;
983 	mask	= (const struct rte_flow_item_tcp *)
984 			(pattern->mask ? pattern->mask : default_mask);
985 
986 	key_iova = flow->rule.key_iova + flow->key_size;
987 	memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port),
988 							sizeof(uint16_t));
989 	key_iova += sizeof(uint16_t);
990 	memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port),
991 							sizeof(uint16_t));
992 
993 	mask_iova = flow->rule.mask_iova + flow->key_size;
994 	memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port),
995 							sizeof(uint16_t));
996 	mask_iova += sizeof(uint16_t);
997 	memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port),
998 							sizeof(uint16_t));
999 
1000 	flow->key_size += 2 * sizeof(uint16_t);
1001 
1002 	return device_configured;
1003 }
1004 
1005 static int
1006 dpaa2_configure_flow_sctp(struct rte_flow *flow,
1007 			  struct rte_eth_dev *dev,
1008 			  const struct rte_flow_attr *attr,
1009 			  const struct rte_flow_item *pattern,
1010 			  const struct rte_flow_action actions[] __rte_unused,
1011 			  struct rte_flow_error *error __rte_unused)
1012 {
1013 	int index, j = 0;
1014 	size_t key_iova;
1015 	size_t mask_iova;
1016 	int device_configured = 0, entry_found = 0;
1017 	uint32_t group;
1018 	const struct rte_flow_item_sctp *spec, *mask;
1019 
1020 	const struct rte_flow_item_sctp *last __rte_unused;
1021 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1022 
1023 	group = attr->group;
1024 
1025 	/* DPAA2 platform has a limitation that extract parameter can not be */
1026 	/* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too. */
1027 	if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
1028 		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
1029 						DPKG_MAX_NUM_OF_EXTRACTS);
1030 		return -ENOTSUP;
1031 	}
1032 
1033 	if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
1034 		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
1035 						DPKG_MAX_NUM_OF_EXTRACTS);
1036 		return -ENOTSUP;
1037 	}
1038 
1039 	for (j = 0; j < priv->pattern[8].item_count; j++) {
1040 		if (priv->pattern[8].pattern_type[j] != pattern->type) {
1041 			continue;
1042 		} else {
1043 			entry_found = 1;
1044 			break;
1045 		}
1046 	}
1047 
1048 	if (!entry_found) {
1049 		priv->pattern[8].pattern_type[j] = pattern->type;
1050 		priv->pattern[8].item_count++;
1051 		device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
1052 	}
1053 
1054 	entry_found = 0;
1055 	for (j = 0; j < priv->pattern[group].item_count; j++) {
1056 		if (priv->pattern[group].pattern_type[j] != pattern->type) {
1057 			continue;
1058 		} else {
1059 			entry_found = 1;
1060 			break;
1061 		}
1062 	}
1063 
1064 	if (!entry_found) {
1065 		priv->pattern[group].pattern_type[j] = pattern->type;
1066 		priv->pattern[group].item_count++;
1067 		device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
1068 	}
1069 
1070 	/* Get traffic class index and flow id to be configured */
1071 	flow->tc_id = group;
1072 	flow->index = attr->priority;
1073 
1074 	if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
1075 		index = priv->extract.qos_key_cfg.num_extracts;
1076 		priv->extract.qos_key_cfg.extracts[index].type =
1077 							DPKG_EXTRACT_FROM_HDR;
1078 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1079 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
1080 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_SRC;
1081 		index++;
1082 
1083 		priv->extract.qos_key_cfg.extracts[index].type =
1084 							DPKG_EXTRACT_FROM_HDR;
1085 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1086 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
1087 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_DST;
1088 		index++;
1089 
1090 		priv->extract.qos_key_cfg.num_extracts = index;
1091 	}
1092 
1093 	if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
1094 		index = priv->extract.fs_key_cfg[group].num_extracts;
1095 		priv->extract.fs_key_cfg[group].extracts[index].type =
1096 							DPKG_EXTRACT_FROM_HDR;
1097 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1098 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
1099 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_SRC;
1100 		index++;
1101 
1102 		priv->extract.fs_key_cfg[group].extracts[index].type =
1103 							DPKG_EXTRACT_FROM_HDR;
1104 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1105 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
1106 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_DST;
1107 		index++;
1108 
1109 		priv->extract.fs_key_cfg[group].num_extracts = index;
1110 	}
1111 
1112 	/* Parse pattern list to get the matching parameters */
1113 	spec	= (const struct rte_flow_item_sctp *)pattern->spec;
1114 	last	= (const struct rte_flow_item_sctp *)pattern->last;
1115 	mask	= (const struct rte_flow_item_sctp *)
1116 			(pattern->mask ? pattern->mask : default_mask);
1117 
1118 	key_iova = flow->rule.key_iova + flow->key_size;
1119 	memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port),
1120 							sizeof(uint16_t));
1121 	key_iova += sizeof(uint16_t);
1122 	memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port),
1123 							sizeof(uint16_t));
1124 
1125 	mask_iova = flow->rule.mask_iova + flow->key_size;
1126 	memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port),
1127 							sizeof(uint16_t));
1128 	mask_iova += sizeof(uint16_t);
1129 	memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port),
1130 							sizeof(uint16_t));
1131 
1132 	flow->key_size += 2 * sizeof(uint16_t);
1133 
1134 	return device_configured;
1135 }
1136 
1137 static int
1138 dpaa2_configure_flow_gre(struct rte_flow *flow,
1139 			 struct rte_eth_dev *dev,
1140 			 const struct rte_flow_attr *attr,
1141 			 const struct rte_flow_item *pattern,
1142 			 const struct rte_flow_action actions[] __rte_unused,
1143 			 struct rte_flow_error *error __rte_unused)
1144 {
1145 	int index, j = 0;
1146 	size_t key_iova;
1147 	size_t mask_iova;
1148 	int device_configured = 0, entry_found = 0;
1149 	uint32_t group;
1150 	const struct rte_flow_item_gre *spec, *mask;
1151 
1152 	const struct rte_flow_item_gre *last __rte_unused;
1153 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1154 
1155 	group = attr->group;
1156 
1157 	/* DPAA2 platform has a limitation that extract parameter can not be */
1158 	/* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too. */
1159 	if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
1160 		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
1161 						DPKG_MAX_NUM_OF_EXTRACTS);
1162 		return -ENOTSUP;
1163 	}
1164 
1165 	if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
1166 		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
1167 						DPKG_MAX_NUM_OF_EXTRACTS);
1168 		return -ENOTSUP;
1169 	}
1170 
1171 	for (j = 0; j < priv->pattern[8].item_count; j++) {
1172 		if (priv->pattern[8].pattern_type[j] != pattern->type) {
1173 			continue;
1174 		} else {
1175 			entry_found = 1;
1176 			break;
1177 		}
1178 	}
1179 
1180 	if (!entry_found) {
1181 		priv->pattern[8].pattern_type[j] = pattern->type;
1182 		priv->pattern[8].item_count++;
1183 		device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
1184 	}
1185 
1186 	entry_found = 0;
1187 	for (j = 0; j < priv->pattern[group].item_count; j++) {
1188 		if (priv->pattern[group].pattern_type[j] != pattern->type) {
1189 			continue;
1190 		} else {
1191 			entry_found = 1;
1192 			break;
1193 		}
1194 	}
1195 
1196 	if (!entry_found) {
1197 		priv->pattern[group].pattern_type[j] = pattern->type;
1198 		priv->pattern[group].item_count++;
1199 		device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
1200 	}
1201 
1202 	/* Get traffic class index and flow id to be configured */
1203 	flow->tc_id = group;
1204 	flow->index = attr->priority;
1205 
1206 	if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
1207 		index = priv->extract.qos_key_cfg.num_extracts;
1208 		priv->extract.qos_key_cfg.extracts[index].type =
1209 							DPKG_EXTRACT_FROM_HDR;
1210 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1211 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_GRE;
1212 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_GRE_TYPE;
1213 		index++;
1214 
1215 		priv->extract.qos_key_cfg.num_extracts = index;
1216 	}
1217 
1218 	if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
1219 		index = priv->extract.fs_key_cfg[group].num_extracts;
1220 		priv->extract.fs_key_cfg[group].extracts[index].type =
1221 							DPKG_EXTRACT_FROM_HDR;
1222 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1223 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_GRE;
1224 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_GRE_TYPE;
1225 		index++;
1226 
1227 		priv->extract.fs_key_cfg[group].num_extracts = index;
1228 	}
1229 
1230 	/* Parse pattern list to get the matching parameters */
1231 	spec	= (const struct rte_flow_item_gre *)pattern->spec;
1232 	last	= (const struct rte_flow_item_gre *)pattern->last;
1233 	mask	= (const struct rte_flow_item_gre *)
1234 			(pattern->mask ? pattern->mask : default_mask);
1235 
1236 	key_iova = flow->rule.key_iova + flow->key_size;
1237 	memcpy((void *)key_iova, (const void *)(&spec->protocol),
1238 							sizeof(rte_be16_t));
1239 
1240 	mask_iova = flow->rule.mask_iova + flow->key_size;
1241 	memcpy((void *)mask_iova, (const void *)(&mask->protocol),
1242 							sizeof(rte_be16_t));
1243 
1244 	flow->key_size += sizeof(rte_be16_t);
1245 
1246 	return device_configured;
1247 }
1248 
1249 static int
1250 dpaa2_generic_flow_set(struct rte_flow *flow,
1251 		       struct rte_eth_dev *dev,
1252 		       const struct rte_flow_attr *attr,
1253 		       const struct rte_flow_item pattern[],
1254 		       const struct rte_flow_action actions[],
1255 		       struct rte_flow_error *error)
1256 {
1257 	const struct rte_flow_action_queue *dest_queue;
1258 	const struct rte_flow_action_rss *rss_conf;
1259 	uint16_t index;
1260 	int is_keycfg_configured = 0, end_of_list = 0;
1261 	int ret = 0, i = 0, j = 0;
1262 	struct dpni_attr nic_attr;
1263 	struct dpni_rx_tc_dist_cfg tc_cfg;
1264 	struct dpni_qos_tbl_cfg qos_cfg;
1265 	struct dpkg_profile_cfg key_cfg;
1266 	struct dpni_fs_action_cfg action;
1267 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1268 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1269 	size_t param;
1270 	struct rte_flow *curr = LIST_FIRST(&priv->flows);
1271 
1272 	/* Parse pattern list to get the matching parameters */
1273 	while (!end_of_list) {
1274 		switch (pattern[i].type) {
1275 		case RTE_FLOW_ITEM_TYPE_ETH:
1276 			is_keycfg_configured = dpaa2_configure_flow_eth(flow,
1277 									dev,
1278 									attr,
1279 									&pattern[i],
1280 									actions,
1281 									error);
1282 			break;
1283 		case RTE_FLOW_ITEM_TYPE_VLAN:
1284 			is_keycfg_configured = dpaa2_configure_flow_vlan(flow,
1285 									dev,
1286 									attr,
1287 									&pattern[i],
1288 									actions,
1289 									error);
1290 			break;
1291 		case RTE_FLOW_ITEM_TYPE_IPV4:
1292 			is_keycfg_configured = dpaa2_configure_flow_ipv4(flow,
1293 									dev,
1294 									attr,
1295 									&pattern[i],
1296 									actions,
1297 									error);
1298 			break;
1299 		case RTE_FLOW_ITEM_TYPE_IPV6:
1300 			is_keycfg_configured = dpaa2_configure_flow_ipv6(flow,
1301 									dev,
1302 									attr,
1303 									&pattern[i],
1304 									actions,
1305 									error);
1306 			break;
1307 		case RTE_FLOW_ITEM_TYPE_ICMP:
1308 			is_keycfg_configured = dpaa2_configure_flow_icmp(flow,
1309 									dev,
1310 									attr,
1311 									&pattern[i],
1312 									actions,
1313 									error);
1314 			break;
1315 		case RTE_FLOW_ITEM_TYPE_UDP:
1316 			is_keycfg_configured = dpaa2_configure_flow_udp(flow,
1317 									dev,
1318 									attr,
1319 									&pattern[i],
1320 									actions,
1321 									error);
1322 			break;
1323 		case RTE_FLOW_ITEM_TYPE_TCP:
1324 			is_keycfg_configured = dpaa2_configure_flow_tcp(flow,
1325 									dev,
1326 									attr,
1327 									&pattern[i],
1328 									actions,
1329 									error);
1330 			break;
1331 		case RTE_FLOW_ITEM_TYPE_SCTP:
1332 			is_keycfg_configured = dpaa2_configure_flow_sctp(flow,
1333 									dev, attr,
1334 									&pattern[i],
1335 									actions,
1336 									error);
1337 			break;
1338 		case RTE_FLOW_ITEM_TYPE_GRE:
1339 			is_keycfg_configured = dpaa2_configure_flow_gre(flow,
1340 									dev,
1341 									attr,
1342 									&pattern[i],
1343 									actions,
1344 									error);
1345 			break;
1346 		case RTE_FLOW_ITEM_TYPE_END:
1347 			end_of_list = 1;
1348 			break; /*End of List*/
1349 		default:
1350 			DPAA2_PMD_ERR("Invalid action type");
1351 			ret = -ENOTSUP;
1352 			break;
1353 		}
1354 		i++;
1355 	}
1356 
1357 	/* Let's parse action on matching traffic */
1358 	end_of_list = 0;
1359 	while (!end_of_list) {
1360 		switch (actions[j].type) {
1361 		case RTE_FLOW_ACTION_TYPE_QUEUE:
1362 			dest_queue = (const struct rte_flow_action_queue *)(actions[j].conf);
1363 			flow->flow_id = dest_queue->index;
1364 			flow->action = RTE_FLOW_ACTION_TYPE_QUEUE;
1365 			memset(&action, 0, sizeof(struct dpni_fs_action_cfg));
1366 			action.flow_id = flow->flow_id;
1367 			if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
1368 				if (dpkg_prepare_key_cfg(&priv->extract.qos_key_cfg,
1369 							 (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
1370 					DPAA2_PMD_ERR(
1371 					"Unable to prepare extract parameters");
1372 					return -1;
1373 				}
1374 
1375 				memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
1376 				qos_cfg.discard_on_miss = true;
1377 				qos_cfg.keep_entries = true;
1378 				qos_cfg.key_cfg_iova = (size_t)priv->extract.qos_extract_param;
1379 				ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
1380 							 priv->token, &qos_cfg);
1381 				if (ret < 0) {
1382 					DPAA2_PMD_ERR(
1383 					"Distribution cannot be configured.(%d)"
1384 					, ret);
1385 					return -1;
1386 				}
1387 			}
1388 			if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
1389 				if (dpkg_prepare_key_cfg(&priv->extract.fs_key_cfg[flow->tc_id],
1390 						(uint8_t *)(size_t)priv->extract.fs_extract_param[flow->tc_id]) < 0) {
1391 					DPAA2_PMD_ERR(
1392 					"Unable to prepare extract parameters");
1393 					return -1;
1394 				}
1395 
1396 				memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
1397 				tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc;
1398 				tc_cfg.dist_mode = DPNI_DIST_MODE_FS;
1399 				tc_cfg.key_cfg_iova =
1400 					(uint64_t)priv->extract.fs_extract_param[flow->tc_id];
1401 				tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;
1402 				tc_cfg.fs_cfg.keep_entries = true;
1403 				ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
1404 							 priv->token,
1405 							 flow->tc_id, &tc_cfg);
1406 				if (ret < 0) {
1407 					DPAA2_PMD_ERR(
1408 					"Distribution cannot be configured.(%d)"
1409 					, ret);
1410 					return -1;
1411 				}
1412 			}
1413 			/* Configure QoS table first */
1414 			memset(&nic_attr, 0, sizeof(struct dpni_attr));
1415 			ret = dpni_get_attributes(dpni, CMD_PRI_LOW,
1416 						 priv->token, &nic_attr);
1417 			if (ret < 0) {
1418 				DPAA2_PMD_ERR(
1419 				"Failure to get attribute. dpni@%p err code(%d)\n",
1420 				dpni, ret);
1421 				return ret;
1422 			}
1423 
1424 			action.flow_id = action.flow_id % nic_attr.num_rx_tcs;
1425 			index = flow->index + (flow->tc_id * nic_attr.fs_entries);
1426 			flow->rule.key_size = flow->key_size;
1427 			ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
1428 						priv->token, &flow->rule,
1429 						flow->tc_id, index,
1430 						0, 0);
1431 			if (ret < 0) {
1432 				DPAA2_PMD_ERR(
1433 				"Error in addnig entry to QoS table(%d)", ret);
1434 				return ret;
1435 			}
1436 
1437 			/* Then Configure FS table */
1438 			ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token,
1439 						flow->tc_id, flow->index,
1440 						&flow->rule, &action);
1441 			if (ret < 0) {
1442 				DPAA2_PMD_ERR(
1443 				"Error in adding entry to FS table(%d)", ret);
1444 				return ret;
1445 			}
1446 			break;
1447 		case RTE_FLOW_ACTION_TYPE_RSS:
1448 			ret = dpni_get_attributes(dpni, CMD_PRI_LOW,
1449 						 priv->token, &nic_attr);
1450 			if (ret < 0) {
1451 				DPAA2_PMD_ERR(
1452 				"Failure to get attribute. dpni@%p err code(%d)\n",
1453 				dpni, ret);
1454 				return ret;
1455 			}
1456 			rss_conf = (const struct rte_flow_action_rss *)(actions[j].conf);
1457 			for (i = 0; i < (int)rss_conf->queue_num; i++) {
1458 				if (rss_conf->queue[i] < (attr->group * nic_attr.num_queues) ||
1459 				    rss_conf->queue[i] >= ((attr->group + 1) * nic_attr.num_queues)) {
1460 					DPAA2_PMD_ERR(
1461 					"Queue/Group combination are not supported\n");
1462 					return -ENOTSUP;
1463 				}
1464 			}
1465 
1466 			flow->action = RTE_FLOW_ACTION_TYPE_RSS;
1467 			ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types,
1468 								&key_cfg);
1469 			if (ret < 0) {
1470 				DPAA2_PMD_ERR(
1471 				"unable to set flow distribution.please check queue config\n");
1472 				return ret;
1473 			}
1474 
1475 			/* Allocate DMA'ble memory to write the rules */
1476 			param = (size_t)rte_malloc(NULL, 256, 64);
1477 			if (!param) {
1478 				DPAA2_PMD_ERR("Memory allocation failure\n");
1479 				return -1;
1480 			}
1481 
1482 			if (dpkg_prepare_key_cfg(&key_cfg, (uint8_t *)param) < 0) {
1483 				DPAA2_PMD_ERR(
1484 				"Unable to prepare extract parameters");
1485 				rte_free((void *)param);
1486 				return -1;
1487 			}
1488 
1489 			memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
1490 			tc_cfg.dist_size = rss_conf->queue_num;
1491 			tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
1492 			tc_cfg.key_cfg_iova = (size_t)param;
1493 			tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;
1494 
1495 			ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
1496 						 priv->token, flow->tc_id,
1497 						 &tc_cfg);
1498 			if (ret < 0) {
1499 				DPAA2_PMD_ERR(
1500 				"Distribution cannot be configured: %d\n", ret);
1501 				rte_free((void *)param);
1502 				return -1;
1503 			}
1504 
1505 			rte_free((void *)param);
1506 			if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
1507 				if (dpkg_prepare_key_cfg(&priv->extract.qos_key_cfg,
1508 					(uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
1509 					DPAA2_PMD_ERR(
1510 					"Unable to prepare extract parameters");
1511 					return -1;
1512 				}
1513 				memset(&qos_cfg, 0,
1514 					sizeof(struct dpni_qos_tbl_cfg));
1515 				qos_cfg.discard_on_miss = true;
1516 				qos_cfg.keep_entries = true;
1517 				qos_cfg.key_cfg_iova = (size_t)priv->extract.qos_extract_param;
1518 				ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
1519 							 priv->token, &qos_cfg);
1520 				if (ret < 0) {
1521 					DPAA2_PMD_ERR(
1522 					"Distribution can not be configured(%d)\n",
1523 					ret);
1524 					return -1;
1525 				}
1526 			}
1527 
1528 			/* Add Rule into QoS table */
1529 			index = flow->index + (flow->tc_id * nic_attr.fs_entries);
1530 			flow->rule.key_size = flow->key_size;
1531 			ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token,
1532 						&flow->rule, flow->tc_id,
1533 						index, 0, 0);
1534 			if (ret < 0) {
1535 				DPAA2_PMD_ERR(
1536 				"Error in entry addition in QoS table(%d)",
1537 				ret);
1538 				return ret;
1539 			}
1540 			break;
1541 		case RTE_FLOW_ACTION_TYPE_END:
1542 			end_of_list = 1;
1543 			break;
1544 		default:
1545 			DPAA2_PMD_ERR("Invalid action type");
1546 			ret = -ENOTSUP;
1547 			break;
1548 		}
1549 		j++;
1550 	}
1551 
1552 	if (!ret) {
1553 		/* New rules are inserted. */
1554 		if (!curr) {
1555 			LIST_INSERT_HEAD(&priv->flows, flow, next);
1556 		} else {
1557 			while (LIST_NEXT(curr, next))
1558 				curr = LIST_NEXT(curr, next);
1559 			LIST_INSERT_AFTER(curr, flow, next);
1560 		}
1561 	}
1562 	return ret;
1563 }
1564 
1565 static inline int
1566 dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr,
1567 		      const struct rte_flow_attr *attr)
1568 {
1569 	int ret = 0;
1570 
1571 	if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) {
1572 		DPAA2_PMD_ERR("Priority group is out of range\n");
1573 		ret = -ENOTSUP;
1574 	}
1575 	if (unlikely(attr->priority >= dpni_attr->fs_entries)) {
1576 		DPAA2_PMD_ERR("Priority within the group is out of range\n");
1577 		ret = -ENOTSUP;
1578 	}
1579 	if (unlikely(attr->egress)) {
1580 		DPAA2_PMD_ERR(
1581 			"Flow configuration is not supported on egress side\n");
1582 		ret = -ENOTSUP;
1583 	}
1584 	if (unlikely(!attr->ingress)) {
1585 		DPAA2_PMD_ERR("Ingress flag must be configured\n");
1586 		ret = -EINVAL;
1587 	}
1588 	return ret;
1589 }
1590 
1591 static inline void
1592 dpaa2_dev_update_default_mask(const struct rte_flow_item *pattern)
1593 {
1594 	switch (pattern->type) {
1595 	case RTE_FLOW_ITEM_TYPE_ETH:
1596 		default_mask = (const void *)&rte_flow_item_eth_mask;
1597 		break;
1598 	case RTE_FLOW_ITEM_TYPE_VLAN:
1599 		default_mask = (const void *)&rte_flow_item_vlan_mask;
1600 		break;
1601 	case RTE_FLOW_ITEM_TYPE_IPV4:
1602 		default_mask = (const void *)&rte_flow_item_ipv4_mask;
1603 		break;
1604 	case RTE_FLOW_ITEM_TYPE_IPV6:
1605 		default_mask = (const void *)&rte_flow_item_ipv6_mask;
1606 		break;
1607 	case RTE_FLOW_ITEM_TYPE_ICMP:
1608 		default_mask = (const void *)&rte_flow_item_icmp_mask;
1609 		break;
1610 	case RTE_FLOW_ITEM_TYPE_UDP:
1611 		default_mask = (const void *)&rte_flow_item_udp_mask;
1612 		break;
1613 	case RTE_FLOW_ITEM_TYPE_TCP:
1614 		default_mask = (const void *)&rte_flow_item_tcp_mask;
1615 		break;
1616 	case RTE_FLOW_ITEM_TYPE_SCTP:
1617 		default_mask = (const void *)&rte_flow_item_sctp_mask;
1618 		break;
1619 	case RTE_FLOW_ITEM_TYPE_GRE:
1620 		default_mask = (const void *)&rte_flow_item_gre_mask;
1621 		break;
1622 	default:
1623 		DPAA2_PMD_ERR("Invalid pattern type");
1624 	}
1625 }
1626 
1627 static inline int
1628 dpaa2_dev_verify_patterns(struct dpaa2_dev_priv *dev_priv,
1629 			  const struct rte_flow_item pattern[])
1630 {
1631 	unsigned int i, j, k, is_found = 0;
1632 	int ret = 0;
1633 
1634 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
1635 		for (i = 0; i < RTE_DIM(dpaa2_supported_pattern_type); i++) {
1636 			if (dpaa2_supported_pattern_type[i] == pattern[j].type) {
1637 				is_found = 1;
1638 				break;
1639 			}
1640 		}
1641 		if (!is_found) {
1642 			ret = -ENOTSUP;
1643 			break;
1644 		}
1645 	}
1646 	/* Lets verify other combinations of given pattern rules */
1647 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
1648 		if (!pattern[j].spec) {
1649 			ret = -EINVAL;
1650 			break;
1651 		}
1652 		if ((pattern[j].last) && (!pattern[j].mask))
1653 			dpaa2_dev_update_default_mask(&pattern[j]);
1654 	}
1655 
1656 	/* DPAA2 platform has a limitation that extract parameter can not be */
1657 	/* more	than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too. */
1658 	for (i = 0; pattern[i].type != RTE_FLOW_ITEM_TYPE_END; i++) {
1659 		for (j = 0; j < MAX_TCS + 1; j++) {
1660 				for (k = 0; k < DPKG_MAX_NUM_OF_EXTRACTS; k++) {
1661 					if (dev_priv->pattern[j].pattern_type[k] == pattern[i].type)
1662 						break;
1663 				}
1664 			if (dev_priv->pattern[j].item_count >= DPKG_MAX_NUM_OF_EXTRACTS)
1665 				ret = -ENOTSUP;
1666 		}
1667 	}
1668 	return ret;
1669 }
1670 
1671 static inline int
1672 dpaa2_dev_verify_actions(const struct rte_flow_action actions[])
1673 {
1674 	unsigned int i, j, is_found = 0;
1675 	int ret = 0;
1676 
1677 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
1678 		for (i = 0; i < RTE_DIM(dpaa2_supported_action_type); i++) {
1679 			if (dpaa2_supported_action_type[i] == actions[j].type) {
1680 				is_found = 1;
1681 				break;
1682 			}
1683 		}
1684 		if (!is_found) {
1685 			ret = -ENOTSUP;
1686 			break;
1687 		}
1688 	}
1689 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
1690 		if ((actions[j].type != RTE_FLOW_ACTION_TYPE_DROP) && (!actions[j].conf))
1691 			ret = -EINVAL;
1692 	}
1693 	return ret;
1694 }
1695 
1696 static
1697 int dpaa2_flow_validate(struct rte_eth_dev *dev,
1698 			const struct rte_flow_attr *flow_attr,
1699 			const struct rte_flow_item pattern[],
1700 			const struct rte_flow_action actions[],
1701 			struct rte_flow_error *error)
1702 {
1703 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1704 	struct dpni_attr dpni_attr;
1705 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1706 	uint16_t token = priv->token;
1707 	int ret = 0;
1708 
1709 	memset(&dpni_attr, 0, sizeof(struct dpni_attr));
1710 	ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr);
1711 	if (ret < 0) {
1712 		DPAA2_PMD_ERR(
1713 			"Failure to get dpni@%p attribute, err code  %d\n",
1714 			dpni, ret);
1715 		rte_flow_error_set(error, EPERM,
1716 			   RTE_FLOW_ERROR_TYPE_ATTR,
1717 			   flow_attr, "invalid");
1718 		return ret;
1719 	}
1720 
1721 	/* Verify input attributes */
1722 	ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr);
1723 	if (ret < 0) {
1724 		DPAA2_PMD_ERR(
1725 			"Invalid attributes are given\n");
1726 		rte_flow_error_set(error, EPERM,
1727 			   RTE_FLOW_ERROR_TYPE_ATTR,
1728 			   flow_attr, "invalid");
1729 		goto not_valid_params;
1730 	}
1731 	/* Verify input pattern list */
1732 	ret = dpaa2_dev_verify_patterns(priv, pattern);
1733 	if (ret < 0) {
1734 		DPAA2_PMD_ERR(
1735 			"Invalid pattern list is given\n");
1736 		rte_flow_error_set(error, EPERM,
1737 			   RTE_FLOW_ERROR_TYPE_ITEM,
1738 			   pattern, "invalid");
1739 		goto not_valid_params;
1740 	}
1741 	/* Verify input action list */
1742 	ret = dpaa2_dev_verify_actions(actions);
1743 	if (ret < 0) {
1744 		DPAA2_PMD_ERR(
1745 			"Invalid action list is given\n");
1746 		rte_flow_error_set(error, EPERM,
1747 			   RTE_FLOW_ERROR_TYPE_ACTION,
1748 			   actions, "invalid");
1749 		goto not_valid_params;
1750 	}
1751 not_valid_params:
1752 	return ret;
1753 }
1754 
1755 static
1756 struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev,
1757 				   const struct rte_flow_attr *attr,
1758 				   const struct rte_flow_item pattern[],
1759 				   const struct rte_flow_action actions[],
1760 				   struct rte_flow_error *error)
1761 {
1762 	struct rte_flow *flow = NULL;
1763 	size_t key_iova = 0, mask_iova = 0;
1764 	int ret;
1765 
1766 	flow = rte_malloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE);
1767 	if (!flow) {
1768 		DPAA2_PMD_ERR("Failure to allocate memory for flow");
1769 		goto mem_failure;
1770 	}
1771 	/* Allocate DMA'ble memory to write the rules */
1772 	key_iova = (size_t)rte_malloc(NULL, 256, 64);
1773 	if (!key_iova) {
1774 		DPAA2_PMD_ERR(
1775 			"Memory allocation failure for rule configuration\n");
1776 		goto mem_failure;
1777 	}
1778 	mask_iova = (size_t)rte_malloc(NULL, 256, 64);
1779 	if (!mask_iova) {
1780 		DPAA2_PMD_ERR(
1781 			"Memory allocation failure for rule configuration\n");
1782 		goto mem_failure;
1783 	}
1784 
1785 	flow->rule.key_iova = key_iova;
1786 	flow->rule.mask_iova = mask_iova;
1787 	flow->key_size = 0;
1788 
1789 	switch (dpaa2_filter_type) {
1790 	case RTE_ETH_FILTER_GENERIC:
1791 		ret = dpaa2_generic_flow_set(flow, dev, attr, pattern,
1792 					     actions, error);
1793 		if (ret < 0) {
1794 			if (error->type > RTE_FLOW_ERROR_TYPE_ACTION)
1795 				rte_flow_error_set(error, EPERM,
1796 						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1797 						attr, "unknown");
1798 			DPAA2_PMD_ERR(
1799 			"Failure to create flow, return code (%d)", ret);
1800 			goto creation_error;
1801 		}
1802 		break;
1803 	default:
1804 		DPAA2_PMD_ERR("Filter type (%d) not supported",
1805 		dpaa2_filter_type);
1806 		break;
1807 	}
1808 
1809 	return flow;
1810 mem_failure:
1811 	rte_flow_error_set(error, EPERM,
1812 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1813 			   NULL, "memory alloc");
1814 creation_error:
1815 	rte_free((void *)flow);
1816 	rte_free((void *)key_iova);
1817 	rte_free((void *)mask_iova);
1818 
1819 	return NULL;
1820 }
1821 
1822 static
1823 int dpaa2_flow_destroy(struct rte_eth_dev *dev,
1824 		       struct rte_flow *flow,
1825 		       struct rte_flow_error *error)
1826 {
1827 	int ret = 0;
1828 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1829 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1830 
1831 	switch (flow->action) {
1832 	case RTE_FLOW_ACTION_TYPE_QUEUE:
1833 		/* Remove entry from QoS table first */
1834 		ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
1835 					   &flow->rule);
1836 		if (ret < 0) {
1837 			DPAA2_PMD_ERR(
1838 				"Error in adding entry to QoS table(%d)", ret);
1839 			goto error;
1840 		}
1841 
1842 		/* Then remove entry from FS table */
1843 		ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token,
1844 					   flow->tc_id, &flow->rule);
1845 		if (ret < 0) {
1846 			DPAA2_PMD_ERR(
1847 				"Error in entry addition in FS table(%d)", ret);
1848 			goto error;
1849 		}
1850 		break;
1851 	case RTE_FLOW_ACTION_TYPE_RSS:
1852 		ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
1853 					   &flow->rule);
1854 		if (ret < 0) {
1855 			DPAA2_PMD_ERR(
1856 			"Error in entry addition in QoS table(%d)", ret);
1857 			goto error;
1858 		}
1859 		break;
1860 	default:
1861 		DPAA2_PMD_ERR(
1862 		"Action type (%d) is not supported", flow->action);
1863 		ret = -ENOTSUP;
1864 		break;
1865 	}
1866 
1867 	LIST_REMOVE(flow, next);
1868 	/* Now free the flow */
1869 	rte_free(flow);
1870 
1871 error:
1872 	if (ret)
1873 		rte_flow_error_set(error, EPERM,
1874 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1875 				   NULL, "unknown");
1876 	return ret;
1877 }
1878 
1879 /**
1880  * Destroy user-configured flow rules.
1881  *
1882  * This function skips internal flow rules.
1883  *
1884  * @see rte_flow_flush()
1885  * @see rte_flow_ops
1886  */
1887 static int
1888 dpaa2_flow_flush(struct rte_eth_dev *dev,
1889 		struct rte_flow_error *error)
1890 {
1891 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1892 	struct rte_flow *flow = LIST_FIRST(&priv->flows);
1893 
1894 	while (flow) {
1895 		struct rte_flow *next = LIST_NEXT(flow, next);
1896 
1897 		dpaa2_flow_destroy(dev, flow, error);
1898 		flow = next;
1899 	}
1900 	return 0;
1901 }
1902 
1903 static int
1904 dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused,
1905 		struct rte_flow *flow __rte_unused,
1906 		const struct rte_flow_action *actions __rte_unused,
1907 		void *data __rte_unused,
1908 		struct rte_flow_error *error __rte_unused)
1909 {
1910 	return 0;
1911 }
1912 
1913 /**
1914  * Clean up all flow rules.
1915  *
1916  * Unlike dpaa2_flow_flush(), this function takes care of all remaining flow
1917  * rules regardless of whether they are internal or user-configured.
1918  *
1919  * @param priv
1920  *   Pointer to private structure.
1921  */
1922 void
1923 dpaa2_flow_clean(struct rte_eth_dev *dev)
1924 {
1925 	struct rte_flow *flow;
1926 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1927 
1928 	while ((flow = LIST_FIRST(&priv->flows)))
1929 		dpaa2_flow_destroy(dev, flow, NULL);
1930 }
1931 
1932 const struct rte_flow_ops dpaa2_flow_ops = {
1933 	.create	= dpaa2_flow_create,
1934 	.validate = dpaa2_flow_validate,
1935 	.destroy = dpaa2_flow_destroy,
1936 	.flush	= dpaa2_flow_flush,
1937 	.query	= dpaa2_flow_query,
1938 };
1939