xref: /dpdk/drivers/net/dpaa2/dpaa2_flow.c (revision e11bdd37745229bf26b557305c07d118c3dbaad7)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright 2018 NXP
3  */
4 
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 
13 #include <rte_ethdev.h>
14 #include <rte_log.h>
15 #include <rte_malloc.h>
16 #include <rte_flow_driver.h>
17 #include <rte_tailq.h>
18 
19 #include <fsl_dpni.h>
20 #include <fsl_dpkg.h>
21 
22 #include <dpaa2_ethdev.h>
23 #include <dpaa2_pmd_logs.h>
24 
25 struct rte_flow {
26 	LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
27 	struct dpni_rule_cfg rule;
28 	uint8_t key_size;
29 	uint8_t tc_id;
30 	uint8_t flow_type;
31 	uint8_t index;
32 	enum rte_flow_action_type action;
33 	uint16_t flow_id;
34 };
35 
36 /* Layout for rule compositions for supported patterns */
37 /* TODO: Current design only supports Ethernet + IPv4 based classification. */
38 /* So corresponding offset macros are valid only. Rest are placeholder for */
39 /* now. Once support for other network headers will be added then */
40 /* corresponding macros will be updated with correct values*/
41 #define DPAA2_CLS_RULE_OFFSET_ETH	0	/*Start of buffer*/
42 #define DPAA2_CLS_RULE_OFFSET_VLAN	14	/* DPAA2_CLS_RULE_OFFSET_ETH */
43 						/*	+ Sizeof Eth fields  */
44 #define DPAA2_CLS_RULE_OFFSET_IPV4	14	/* DPAA2_CLS_RULE_OFFSET_VLAN */
45 						/*	+ Sizeof VLAN fields */
46 #define DPAA2_CLS_RULE_OFFSET_IPV6	25	/* DPAA2_CLS_RULE_OFFSET_IPV4 */
47 						/*	+ Sizeof IPV4 fields */
48 #define DPAA2_CLS_RULE_OFFSET_ICMP	58	/* DPAA2_CLS_RULE_OFFSET_IPV6 */
49 						/*	+ Sizeof IPV6 fields */
50 #define DPAA2_CLS_RULE_OFFSET_UDP	60	/* DPAA2_CLS_RULE_OFFSET_ICMP */
51 						/*	+ Sizeof ICMP fields */
52 #define DPAA2_CLS_RULE_OFFSET_TCP	64	/* DPAA2_CLS_RULE_OFFSET_UDP  */
53 						/*	+ Sizeof UDP fields  */
54 #define DPAA2_CLS_RULE_OFFSET_SCTP	68	/* DPAA2_CLS_RULE_OFFSET_TCP  */
55 						/*	+ Sizeof TCP fields  */
56 #define DPAA2_CLS_RULE_OFFSET_GRE	72	/* DPAA2_CLS_RULE_OFFSET_SCTP */
57 						/*	+ Sizeof SCTP fields */
58 
59 static const
60 enum rte_flow_item_type dpaa2_supported_pattern_type[] = {
61 	RTE_FLOW_ITEM_TYPE_END,
62 	RTE_FLOW_ITEM_TYPE_ETH,
63 	RTE_FLOW_ITEM_TYPE_VLAN,
64 	RTE_FLOW_ITEM_TYPE_IPV4,
65 	RTE_FLOW_ITEM_TYPE_IPV6,
66 	RTE_FLOW_ITEM_TYPE_ICMP,
67 	RTE_FLOW_ITEM_TYPE_UDP,
68 	RTE_FLOW_ITEM_TYPE_TCP,
69 	RTE_FLOW_ITEM_TYPE_SCTP,
70 	RTE_FLOW_ITEM_TYPE_GRE,
71 };
72 
73 static const
74 enum rte_flow_action_type dpaa2_supported_action_type[] = {
75 	RTE_FLOW_ACTION_TYPE_END,
76 	RTE_FLOW_ACTION_TYPE_QUEUE,
77 	RTE_FLOW_ACTION_TYPE_RSS
78 };
79 
80 enum rte_filter_type dpaa2_filter_type = RTE_ETH_FILTER_NONE;
81 static const void *default_mask;
82 
83 static int
84 dpaa2_configure_flow_eth(struct rte_flow *flow,
85 			 struct rte_eth_dev *dev,
86 			 const struct rte_flow_attr *attr,
87 			 const struct rte_flow_item *pattern,
88 			 const struct rte_flow_action actions[] __rte_unused,
89 			 struct rte_flow_error *error __rte_unused)
90 {
91 	int index, j = 0;
92 	size_t key_iova;
93 	size_t mask_iova;
94 	int device_configured = 0, entry_found = 0;
95 	uint32_t group;
96 	const struct rte_flow_item_eth *spec, *mask;
97 
98 	/* TODO: Currently upper bound of range parameter is not implemented */
99 	const struct rte_flow_item_eth *last __rte_unused;
100 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
101 
102 	group = attr->group;
103 
104 	/* DPAA2 platform has a limitation that extract parameter can not be */
105 	/* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
106 	/* TODO: pattern is an array of 9 elements where 9th pattern element */
107 	/* is for QoS table and 1-8th pattern element is for FS tables. */
108 	/* It can be changed to macro. */
109 	if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
110 		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
111 						DPKG_MAX_NUM_OF_EXTRACTS);
112 		return -ENOTSUP;
113 	}
114 
115 	if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
116 		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
117 						DPKG_MAX_NUM_OF_EXTRACTS);
118 		return -ENOTSUP;
119 	}
120 
121 	for (j = 0; j < priv->pattern[8].item_count; j++) {
122 		if (priv->pattern[8].pattern_type[j] != pattern->type) {
123 			continue;
124 		} else {
125 			entry_found = 1;
126 			break;
127 		}
128 	}
129 
130 	if (!entry_found) {
131 		priv->pattern[8].pattern_type[j] = pattern->type;
132 		priv->pattern[8].item_count++;
133 		device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
134 	}
135 
136 	entry_found = 0;
137 	for (j = 0; j < priv->pattern[group].item_count; j++) {
138 		if (priv->pattern[group].pattern_type[j] != pattern->type) {
139 			continue;
140 		} else {
141 			entry_found = 1;
142 			break;
143 		}
144 	}
145 
146 	if (!entry_found) {
147 		priv->pattern[group].pattern_type[j] = pattern->type;
148 		priv->pattern[group].item_count++;
149 		device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
150 	}
151 
152 	/* Get traffic class index and flow id to be configured */
153 	flow->tc_id = group;
154 	flow->index = attr->priority;
155 
156 	if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
157 		index = priv->extract.qos_key_cfg.num_extracts;
158 		priv->extract.qos_key_cfg.extracts[index].type =
159 							DPKG_EXTRACT_FROM_HDR;
160 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
161 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
162 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_SA;
163 		index++;
164 
165 		priv->extract.qos_key_cfg.extracts[index].type =
166 							DPKG_EXTRACT_FROM_HDR;
167 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
168 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
169 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_DA;
170 		index++;
171 
172 		priv->extract.qos_key_cfg.extracts[index].type =
173 							DPKG_EXTRACT_FROM_HDR;
174 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
175 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
176 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_TYPE;
177 		index++;
178 
179 		priv->extract.qos_key_cfg.num_extracts = index;
180 	}
181 
182 	if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
183 		index = priv->extract.fs_key_cfg[group].num_extracts;
184 		priv->extract.fs_key_cfg[group].extracts[index].type =
185 							DPKG_EXTRACT_FROM_HDR;
186 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
187 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
188 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_SA;
189 		index++;
190 
191 		priv->extract.fs_key_cfg[group].extracts[index].type =
192 							DPKG_EXTRACT_FROM_HDR;
193 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
194 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
195 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_DA;
196 		index++;
197 
198 		priv->extract.fs_key_cfg[group].extracts[index].type =
199 							DPKG_EXTRACT_FROM_HDR;
200 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
201 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
202 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_TYPE;
203 		index++;
204 
205 		priv->extract.fs_key_cfg[group].num_extracts = index;
206 	}
207 
208 	/* Parse pattern list to get the matching parameters */
209 	spec	= (const struct rte_flow_item_eth *)pattern->spec;
210 	last	= (const struct rte_flow_item_eth *)pattern->last;
211 	mask	= (const struct rte_flow_item_eth *)
212 			(pattern->mask ? pattern->mask : default_mask);
213 
214 	/* Key rule */
215 	key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_ETH;
216 	memcpy((void *)key_iova, (const void *)(spec->src.addr_bytes),
217 						sizeof(struct rte_ether_addr));
218 	key_iova += sizeof(struct rte_ether_addr);
219 	memcpy((void *)key_iova, (const void *)(spec->dst.addr_bytes),
220 						sizeof(struct rte_ether_addr));
221 	key_iova += sizeof(struct rte_ether_addr);
222 	memcpy((void *)key_iova, (const void *)(&spec->type),
223 						sizeof(rte_be16_t));
224 
225 	/* Key mask */
226 	mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_ETH;
227 	memcpy((void *)mask_iova, (const void *)(mask->src.addr_bytes),
228 						sizeof(struct rte_ether_addr));
229 	mask_iova += sizeof(struct rte_ether_addr);
230 	memcpy((void *)mask_iova, (const void *)(mask->dst.addr_bytes),
231 						sizeof(struct rte_ether_addr));
232 	mask_iova += sizeof(struct rte_ether_addr);
233 	memcpy((void *)mask_iova, (const void *)(&mask->type),
234 						sizeof(rte_be16_t));
235 
236 	flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_ETH +
237 				((2  * sizeof(struct rte_ether_addr)) +
238 				sizeof(rte_be16_t)));
239 	return device_configured;
240 }
241 
242 static int
243 dpaa2_configure_flow_vlan(struct rte_flow *flow,
244 			  struct rte_eth_dev *dev,
245 			  const struct rte_flow_attr *attr,
246 			  const struct rte_flow_item *pattern,
247 			  const struct rte_flow_action actions[] __rte_unused,
248 			  struct rte_flow_error *error __rte_unused)
249 {
250 	int index, j = 0;
251 	size_t key_iova;
252 	size_t mask_iova;
253 	int device_configured = 0, entry_found = 0;
254 	uint32_t group;
255 	const struct rte_flow_item_vlan *spec, *mask;
256 
257 	const struct rte_flow_item_vlan *last __rte_unused;
258 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
259 
260 	group = attr->group;
261 
262 	/* DPAA2 platform has a limitation that extract parameter can not be */
263 	/*  more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
264 	if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
265 		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
266 						DPKG_MAX_NUM_OF_EXTRACTS);
267 		return -ENOTSUP;
268 	}
269 
270 	if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
271 		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
272 						DPKG_MAX_NUM_OF_EXTRACTS);
273 		return -ENOTSUP;
274 	}
275 
276 	for (j = 0; j < priv->pattern[8].item_count; j++) {
277 		if (priv->pattern[8].pattern_type[j] != pattern->type) {
278 			continue;
279 		} else {
280 			entry_found = 1;
281 			break;
282 		}
283 	}
284 
285 	if (!entry_found) {
286 		priv->pattern[8].pattern_type[j] = pattern->type;
287 		priv->pattern[8].item_count++;
288 		device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
289 	}
290 
291 	entry_found = 0;
292 	for (j = 0; j < priv->pattern[group].item_count; j++) {
293 		if (priv->pattern[group].pattern_type[j] != pattern->type) {
294 			continue;
295 		} else {
296 			entry_found = 1;
297 			break;
298 		}
299 	}
300 
301 	if (!entry_found) {
302 		priv->pattern[group].pattern_type[j] = pattern->type;
303 		priv->pattern[group].item_count++;
304 		device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
305 	}
306 
307 
308 	/* Get traffic class index and flow id to be configured */
309 	flow->tc_id = group;
310 	flow->index = attr->priority;
311 
312 	if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
313 		index = priv->extract.qos_key_cfg.num_extracts;
314 		priv->extract.qos_key_cfg.extracts[index].type =
315 							DPKG_EXTRACT_FROM_HDR;
316 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
317 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_VLAN;
318 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_VLAN_TCI;
319 		priv->extract.qos_key_cfg.num_extracts++;
320 	}
321 
322 	if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
323 		index = priv->extract.fs_key_cfg[group].num_extracts;
324 		priv->extract.fs_key_cfg[group].extracts[index].type =
325 							DPKG_EXTRACT_FROM_HDR;
326 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
327 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_VLAN;
328 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_VLAN_TCI;
329 		priv->extract.fs_key_cfg[group].num_extracts++;
330 	}
331 
332 	/* Parse pattern list to get the matching parameters */
333 	spec	= (const struct rte_flow_item_vlan *)pattern->spec;
334 	last	= (const struct rte_flow_item_vlan *)pattern->last;
335 	mask	= (const struct rte_flow_item_vlan *)
336 			(pattern->mask ? pattern->mask : default_mask);
337 
338 	key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_VLAN;
339 	memcpy((void *)key_iova, (const void *)(&spec->tci),
340 							sizeof(rte_be16_t));
341 
342 	mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_VLAN;
343 	memcpy((void *)mask_iova, (const void *)(&mask->tci),
344 							sizeof(rte_be16_t));
345 
346 	flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_VLAN + sizeof(rte_be16_t));
347 	return device_configured;
348 }
349 
350 static int
351 dpaa2_configure_flow_ipv4(struct rte_flow *flow,
352 			  struct rte_eth_dev *dev,
353 			  const struct rte_flow_attr *attr,
354 			  const struct rte_flow_item *pattern,
355 			  const struct rte_flow_action actions[] __rte_unused,
356 			  struct rte_flow_error *error __rte_unused)
357 {
358 	int index, j = 0;
359 	size_t key_iova;
360 	size_t mask_iova;
361 	int device_configured = 0, entry_found = 0;
362 	uint32_t group;
363 	const struct rte_flow_item_ipv4 *spec, *mask;
364 
365 	const struct rte_flow_item_ipv4 *last __rte_unused;
366 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
367 
368 	group = attr->group;
369 
370 	/* DPAA2 platform has a limitation that extract parameter can not be */
371 	/* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
372 	if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
373 		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
374 						DPKG_MAX_NUM_OF_EXTRACTS);
375 		return -ENOTSUP;
376 	}
377 
378 	if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
379 		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
380 						DPKG_MAX_NUM_OF_EXTRACTS);
381 		return -ENOTSUP;
382 	}
383 
384 	for (j = 0; j < priv->pattern[8].item_count; j++) {
385 		if (priv->pattern[8].pattern_type[j] != pattern->type) {
386 			continue;
387 		} else {
388 			entry_found = 1;
389 			break;
390 		}
391 	}
392 
393 	if (!entry_found) {
394 		priv->pattern[8].pattern_type[j] = pattern->type;
395 		priv->pattern[8].item_count++;
396 		device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
397 	}
398 
399 	entry_found = 0;
400 	for (j = 0; j < priv->pattern[group].item_count; j++) {
401 		if (priv->pattern[group].pattern_type[j] != pattern->type) {
402 			continue;
403 		} else {
404 			entry_found = 1;
405 			break;
406 		}
407 	}
408 
409 	if (!entry_found) {
410 		priv->pattern[group].pattern_type[j] = pattern->type;
411 		priv->pattern[group].item_count++;
412 		device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
413 	}
414 
415 	/* Get traffic class index and flow id to be configured */
416 	flow->tc_id = group;
417 	flow->index = attr->priority;
418 
419 	if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
420 		index = priv->extract.qos_key_cfg.num_extracts;
421 		priv->extract.qos_key_cfg.extracts[index].type =
422 							DPKG_EXTRACT_FROM_HDR;
423 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
424 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
425 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
426 		index++;
427 
428 		priv->extract.qos_key_cfg.extracts[index].type =
429 							DPKG_EXTRACT_FROM_HDR;
430 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
431 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
432 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
433 		index++;
434 
435 		priv->extract.qos_key_cfg.extracts[index].type =
436 							DPKG_EXTRACT_FROM_HDR;
437 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
438 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
439 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
440 		index++;
441 
442 		priv->extract.qos_key_cfg.num_extracts = index;
443 	}
444 
445 	if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
446 		index = priv->extract.fs_key_cfg[group].num_extracts;
447 		priv->extract.fs_key_cfg[group].extracts[index].type =
448 							DPKG_EXTRACT_FROM_HDR;
449 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
450 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
451 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
452 		index++;
453 
454 		priv->extract.fs_key_cfg[group].extracts[index].type =
455 							DPKG_EXTRACT_FROM_HDR;
456 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
457 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
458 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
459 		index++;
460 
461 		priv->extract.fs_key_cfg[group].extracts[index].type =
462 							DPKG_EXTRACT_FROM_HDR;
463 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
464 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
465 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
466 		index++;
467 
468 		priv->extract.fs_key_cfg[group].num_extracts = index;
469 	}
470 
471 	/* Parse pattern list to get the matching parameters */
472 	spec	= (const struct rte_flow_item_ipv4 *)pattern->spec;
473 	last	= (const struct rte_flow_item_ipv4 *)pattern->last;
474 	mask	= (const struct rte_flow_item_ipv4 *)
475 			(pattern->mask ? pattern->mask : default_mask);
476 
477 	key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4;
478 	memcpy((void *)key_iova, (const void *)&spec->hdr.src_addr,
479 							sizeof(uint32_t));
480 	key_iova += sizeof(uint32_t);
481 	memcpy((void *)key_iova, (const void *)&spec->hdr.dst_addr,
482 							sizeof(uint32_t));
483 	key_iova += sizeof(uint32_t);
484 	memcpy((void *)key_iova, (const void *)&spec->hdr.next_proto_id,
485 							sizeof(uint8_t));
486 
487 	mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_IPV4;
488 	memcpy((void *)mask_iova, (const void *)&mask->hdr.src_addr,
489 							sizeof(uint32_t));
490 	mask_iova += sizeof(uint32_t);
491 	memcpy((void *)mask_iova, (const void *)&mask->hdr.dst_addr,
492 							sizeof(uint32_t));
493 	mask_iova += sizeof(uint32_t);
494 	memcpy((void *)mask_iova, (const void *)&mask->hdr.next_proto_id,
495 							sizeof(uint8_t));
496 
497 	flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_IPV4 +
498 				(2 * sizeof(uint32_t)) + sizeof(uint8_t));
499 
500 	return device_configured;
501 }
502 
503 static int
504 dpaa2_configure_flow_ipv6(struct rte_flow *flow,
505 			  struct rte_eth_dev *dev,
506 			  const struct rte_flow_attr *attr,
507 			  const struct rte_flow_item *pattern,
508 			  const struct rte_flow_action actions[] __rte_unused,
509 			  struct rte_flow_error *error __rte_unused)
510 {
511 	int index, j = 0;
512 	size_t key_iova;
513 	size_t mask_iova;
514 	int device_configured = 0, entry_found = 0;
515 	uint32_t group;
516 	const struct rte_flow_item_ipv6 *spec, *mask;
517 
518 	const struct rte_flow_item_ipv6 *last __rte_unused;
519 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
520 
521 	group = attr->group;
522 
523 	/* DPAA2 platform has a limitation that extract parameter can not be */
524 	/* more	than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
525 	if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
526 		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
527 						DPKG_MAX_NUM_OF_EXTRACTS);
528 		return -ENOTSUP;
529 	}
530 
531 	if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
532 		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
533 						DPKG_MAX_NUM_OF_EXTRACTS);
534 		return -ENOTSUP;
535 	}
536 
537 	for (j = 0; j < priv->pattern[8].item_count; j++) {
538 		if (priv->pattern[8].pattern_type[j] != pattern->type) {
539 			continue;
540 		} else {
541 			entry_found = 1;
542 			break;
543 		}
544 	}
545 
546 	if (!entry_found) {
547 		priv->pattern[8].pattern_type[j] = pattern->type;
548 		priv->pattern[8].item_count++;
549 		device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
550 	}
551 
552 	entry_found = 0;
553 	for (j = 0; j < priv->pattern[group].item_count; j++) {
554 		if (priv->pattern[group].pattern_type[j] != pattern->type) {
555 			continue;
556 		} else {
557 			entry_found = 1;
558 			break;
559 		}
560 	}
561 
562 	if (!entry_found) {
563 		priv->pattern[group].pattern_type[j] = pattern->type;
564 		priv->pattern[group].item_count++;
565 		device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
566 	}
567 
568 	/* Get traffic class index and flow id to be configured */
569 	flow->tc_id = group;
570 	flow->index = attr->priority;
571 
572 	if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
573 		index = priv->extract.qos_key_cfg.num_extracts;
574 		priv->extract.qos_key_cfg.extracts[index].type =
575 							DPKG_EXTRACT_FROM_HDR;
576 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
577 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
578 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
579 		index++;
580 
581 		priv->extract.qos_key_cfg.extracts[index].type =
582 							DPKG_EXTRACT_FROM_HDR;
583 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
584 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
585 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
586 		index++;
587 
588 		priv->extract.qos_key_cfg.num_extracts = index;
589 	}
590 
591 	if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
592 		index = priv->extract.fs_key_cfg[group].num_extracts;
593 		priv->extract.fs_key_cfg[group].extracts[index].type =
594 							DPKG_EXTRACT_FROM_HDR;
595 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
596 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
597 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
598 		index++;
599 
600 		priv->extract.fs_key_cfg[group].extracts[index].type =
601 							DPKG_EXTRACT_FROM_HDR;
602 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
603 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
604 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
605 		index++;
606 
607 		priv->extract.fs_key_cfg[group].num_extracts = index;
608 	}
609 
610 	/* Parse pattern list to get the matching parameters */
611 	spec	= (const struct rte_flow_item_ipv6 *)pattern->spec;
612 	last	= (const struct rte_flow_item_ipv6 *)pattern->last;
613 	mask	= (const struct rte_flow_item_ipv6 *)
614 			(pattern->mask ? pattern->mask : default_mask);
615 
616 	key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV6;
617 	memcpy((void *)key_iova, (const void *)(spec->hdr.src_addr),
618 						sizeof(spec->hdr.src_addr));
619 	key_iova += sizeof(spec->hdr.src_addr);
620 	memcpy((void *)key_iova, (const void *)(spec->hdr.dst_addr),
621 						sizeof(spec->hdr.dst_addr));
622 
623 	mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_IPV6;
624 	memcpy((void *)mask_iova, (const void *)(mask->hdr.src_addr),
625 						sizeof(mask->hdr.src_addr));
626 	mask_iova += sizeof(mask->hdr.src_addr);
627 	memcpy((void *)mask_iova, (const void *)(mask->hdr.dst_addr),
628 						sizeof(mask->hdr.dst_addr));
629 
630 	flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_IPV6 +
631 					sizeof(spec->hdr.src_addr) +
632 					sizeof(mask->hdr.dst_addr));
633 	return device_configured;
634 }
635 
636 static int
637 dpaa2_configure_flow_icmp(struct rte_flow *flow,
638 			  struct rte_eth_dev *dev,
639 			  const struct rte_flow_attr *attr,
640 			  const struct rte_flow_item *pattern,
641 			  const struct rte_flow_action actions[] __rte_unused,
642 			  struct rte_flow_error *error __rte_unused)
643 {
644 	int index, j = 0;
645 	size_t key_iova;
646 	size_t mask_iova;
647 	int device_configured = 0, entry_found = 0;
648 	uint32_t group;
649 	const struct rte_flow_item_icmp *spec, *mask;
650 
651 	const struct rte_flow_item_icmp *last __rte_unused;
652 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
653 
654 	group = attr->group;
655 
656 	/* DPAA2 platform has a limitation that extract parameter can not be */
657 	/* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
658 	if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
659 		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
660 						DPKG_MAX_NUM_OF_EXTRACTS);
661 		return -ENOTSUP;
662 	}
663 
664 	if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
665 		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
666 						DPKG_MAX_NUM_OF_EXTRACTS);
667 		return -ENOTSUP;
668 	}
669 
670 	for (j = 0; j < priv->pattern[8].item_count; j++) {
671 		if (priv->pattern[8].pattern_type[j] != pattern->type) {
672 			continue;
673 		} else {
674 			entry_found = 1;
675 			break;
676 		}
677 	}
678 
679 	if (!entry_found) {
680 		priv->pattern[8].pattern_type[j] = pattern->type;
681 		priv->pattern[8].item_count++;
682 		device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
683 	}
684 
685 	entry_found = 0;
686 	for (j = 0; j < priv->pattern[group].item_count; j++) {
687 		if (priv->pattern[group].pattern_type[j] != pattern->type) {
688 			continue;
689 		} else {
690 			entry_found = 1;
691 			break;
692 		}
693 	}
694 
695 	if (!entry_found) {
696 		priv->pattern[group].pattern_type[j] = pattern->type;
697 		priv->pattern[group].item_count++;
698 		device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
699 	}
700 
701 	/* Get traffic class index and flow id to be configured */
702 	flow->tc_id = group;
703 	flow->index = attr->priority;
704 
705 	if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
706 		index = priv->extract.qos_key_cfg.num_extracts;
707 		priv->extract.qos_key_cfg.extracts[index].type =
708 							DPKG_EXTRACT_FROM_HDR;
709 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
710 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
711 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ICMP_TYPE;
712 		index++;
713 
714 		priv->extract.qos_key_cfg.extracts[index].type =
715 							DPKG_EXTRACT_FROM_HDR;
716 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
717 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
718 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ICMP_CODE;
719 		index++;
720 
721 		priv->extract.qos_key_cfg.num_extracts = index;
722 	}
723 
724 	if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
725 		index = priv->extract.fs_key_cfg[group].num_extracts;
726 		priv->extract.fs_key_cfg[group].extracts[index].type =
727 							DPKG_EXTRACT_FROM_HDR;
728 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
729 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
730 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ICMP_TYPE;
731 		index++;
732 
733 		priv->extract.fs_key_cfg[group].extracts[index].type =
734 							DPKG_EXTRACT_FROM_HDR;
735 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
736 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
737 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ICMP_CODE;
738 		index++;
739 
740 		priv->extract.fs_key_cfg[group].num_extracts = index;
741 	}
742 
743 	/* Parse pattern list to get the matching parameters */
744 	spec	= (const struct rte_flow_item_icmp *)pattern->spec;
745 	last	= (const struct rte_flow_item_icmp *)pattern->last;
746 	mask	= (const struct rte_flow_item_icmp *)
747 			(pattern->mask ? pattern->mask : default_mask);
748 
749 	key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_ICMP;
750 	memcpy((void *)key_iova, (const void *)&spec->hdr.icmp_type,
751 							sizeof(uint8_t));
752 	key_iova += sizeof(uint8_t);
753 	memcpy((void *)key_iova, (const void *)&spec->hdr.icmp_code,
754 							sizeof(uint8_t));
755 
756 	mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_ICMP;
757 	memcpy((void *)mask_iova, (const void *)&mask->hdr.icmp_type,
758 							sizeof(uint8_t));
759 	key_iova += sizeof(uint8_t);
760 	memcpy((void *)mask_iova, (const void *)&mask->hdr.icmp_code,
761 							sizeof(uint8_t));
762 
763 	flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_ICMP +
764 				(2 * sizeof(uint8_t)));
765 
766 	return device_configured;
767 }
768 
769 static int
770 dpaa2_configure_flow_udp(struct rte_flow *flow,
771 			 struct rte_eth_dev *dev,
772 			  const struct rte_flow_attr *attr,
773 			  const struct rte_flow_item *pattern,
774 			  const struct rte_flow_action actions[] __rte_unused,
775 			  struct rte_flow_error *error __rte_unused)
776 {
777 	int index, j = 0;
778 	size_t key_iova;
779 	size_t mask_iova;
780 	int device_configured = 0, entry_found = 0;
781 	uint32_t group;
782 	const struct rte_flow_item_udp *spec, *mask;
783 
784 	const struct rte_flow_item_udp *last __rte_unused;
785 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
786 
787 	group = attr->group;
788 
789 	/* DPAA2 platform has a limitation that extract parameter can not be */
790 	/* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
791 	if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
792 		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
793 						DPKG_MAX_NUM_OF_EXTRACTS);
794 		return -ENOTSUP;
795 	}
796 
797 	if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
798 		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
799 						DPKG_MAX_NUM_OF_EXTRACTS);
800 		return -ENOTSUP;
801 	}
802 
803 	for (j = 0; j < priv->pattern[8].item_count; j++) {
804 		if (priv->pattern[8].pattern_type[j] != pattern->type) {
805 			continue;
806 		} else {
807 			 entry_found = 1;
808 			break;
809 		}
810 	}
811 
812 	if (!entry_found) {
813 		priv->pattern[8].pattern_type[j] = pattern->type;
814 		priv->pattern[8].item_count++;
815 		device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
816 	}
817 
818 	entry_found = 0;
819 	for (j = 0; j < priv->pattern[group].item_count; j++) {
820 		if (priv->pattern[group].pattern_type[j] != pattern->type) {
821 			continue;
822 		} else {
823 			entry_found = 1;
824 			break;
825 		}
826 	}
827 
828 	if (!entry_found) {
829 		priv->pattern[group].pattern_type[j] = pattern->type;
830 		priv->pattern[group].item_count++;
831 		device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
832 	}
833 
834 	/* Get traffic class index and flow id to be configured */
835 	flow->tc_id = group;
836 	flow->index = attr->priority;
837 
838 	if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
839 		index = priv->extract.qos_key_cfg.num_extracts;
840 		priv->extract.qos_key_cfg.extracts[index].type =
841 							DPKG_EXTRACT_FROM_HDR;
842 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
843 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
844 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
845 		index++;
846 
847 		priv->extract.qos_key_cfg.extracts[index].type =
848 							DPKG_EXTRACT_FROM_HDR;
849 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
850 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
851 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_SRC;
852 		index++;
853 
854 		priv->extract.qos_key_cfg.extracts[index].type = DPKG_EXTRACT_FROM_HDR;
855 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
856 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
857 		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_DST;
858 		index++;
859 
860 		priv->extract.qos_key_cfg.num_extracts = index;
861 	}
862 
863 	if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
864 		index = priv->extract.fs_key_cfg[group].num_extracts;
865 		priv->extract.fs_key_cfg[group].extracts[index].type =
866 							DPKG_EXTRACT_FROM_HDR;
867 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
868 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
869 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
870 		index++;
871 
872 		priv->extract.fs_key_cfg[group].extracts[index].type =
873 							DPKG_EXTRACT_FROM_HDR;
874 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
875 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
876 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_SRC;
877 		index++;
878 
879 		priv->extract.fs_key_cfg[group].extracts[index].type =
880 							DPKG_EXTRACT_FROM_HDR;
881 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
882 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
883 		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_DST;
884 		index++;
885 
886 		priv->extract.fs_key_cfg[group].num_extracts = index;
887 	}
888 
889 	/* Parse pattern list to get the matching parameters */
890 	spec	= (const struct rte_flow_item_udp *)pattern->spec;
891 	last	= (const struct rte_flow_item_udp *)pattern->last;
892 	mask	= (const struct rte_flow_item_udp *)
893 			(pattern->mask ? pattern->mask : default_mask);
894 
895 	key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4 +
896 					(2 * sizeof(uint32_t));
897 	memset((void *)key_iova, 0x11, sizeof(uint8_t));
898 	key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_UDP;
899 	memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port),
900 							sizeof(uint16_t));
901 	key_iova +=  sizeof(uint16_t);
902 	memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port),
903 							sizeof(uint16_t));
904 
905 	mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_UDP;
906 	memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port),
907 							sizeof(uint16_t));
908 	mask_iova +=  sizeof(uint16_t);
909 	memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port),
910 							sizeof(uint16_t));
911 
912 	flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_UDP +
913 				(2 * sizeof(uint16_t)));
914 
915 	return device_configured;
916 }
917 
/* Configure QoS/FS key extracts and classification rule bytes for a
 * TCP pattern item. Returns a bitmask of DPAA2_QOS_TABLE_RECONFIGURE /
 * DPAA2_FS_TABLE_RECONFIGURE telling the caller which tables need
 * reprogramming, or a negative errno on failure.
 */
static int
dpaa2_configure_flow_tcp(struct rte_flow *flow,
			 struct rte_eth_dev *dev,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_item *pattern,
			 const struct rte_flow_action actions[] __rte_unused,
			 struct rte_flow_error *error __rte_unused)
{
	int index, j = 0;
	size_t key_iova;
	size_t mask_iova;
	int device_configured = 0, entry_found = 0;
	uint32_t group;
	const struct rte_flow_item_tcp *spec, *mask;

	const struct rte_flow_item_tcp *last __rte_unused;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	group = attr->group;

	/* DPAA2 platform has a limitation that extract parameter can not be */
	/* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
	/* NOTE(review): slot 8 of priv->pattern appears to be the cross-TC
	 * QoS table bookkeeping entry (presumably MAX_TCS) — confirm against
	 * dpaa2_ethdev.h. Also, this checks item_count only; the QoS branch
	 * below appends THREE extracts, so num_extracts could still exceed
	 * DPKG_MAX_NUM_OF_EXTRACTS — verify.
	 */
	if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
						DPKG_MAX_NUM_OF_EXTRACTS);
		return -ENOTSUP;
	}

	if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
						DPKG_MAX_NUM_OF_EXTRACTS);
		return -ENOTSUP;
	}

	/* Has a TCP item already been registered in the QoS table slot? */
	for (j = 0; j < priv->pattern[8].item_count; j++) {
		if (priv->pattern[8].pattern_type[j] != pattern->type) {
			continue;
		} else {
			entry_found = 1;
			break;
		}
	}

	if (!entry_found) {
		priv->pattern[8].pattern_type[j] = pattern->type;
		priv->pattern[8].item_count++;
		device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
	}

	/* Same check for this traffic class's FS table slot. */
	entry_found = 0;
	for (j = 0; j < priv->pattern[group].item_count; j++) {
		if (priv->pattern[group].pattern_type[j] != pattern->type) {
			continue;
		} else {
			entry_found = 1;
			break;
		}
	}

	if (!entry_found) {
		priv->pattern[group].pattern_type[j] = pattern->type;
		priv->pattern[group].item_count++;
		device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
	}

	/* Get traffic class index and flow id to be configured */
	flow->tc_id = group;
	flow->index = attr->priority;

	/* Append IP-proto + TCP src/dst port extracts to the QoS key. */
	if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
		index = priv->extract.qos_key_cfg.num_extracts;
		priv->extract.qos_key_cfg.extracts[index].type =
							DPKG_EXTRACT_FROM_HDR;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
		index++;

		priv->extract.qos_key_cfg.extracts[index].type =
							DPKG_EXTRACT_FROM_HDR;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_SRC;
		index++;

		priv->extract.qos_key_cfg.extracts[index].type =
							DPKG_EXTRACT_FROM_HDR;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_DST;
		index++;

		priv->extract.qos_key_cfg.num_extracts = index;
	}

	/* Same three extracts for this traffic class's FS key. */
	if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
		index = priv->extract.fs_key_cfg[group].num_extracts;
		priv->extract.fs_key_cfg[group].extracts[index].type =
							DPKG_EXTRACT_FROM_HDR;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
		index++;

		priv->extract.fs_key_cfg[group].extracts[index].type =
							DPKG_EXTRACT_FROM_HDR;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_SRC;
		index++;

		priv->extract.fs_key_cfg[group].extracts[index].type =
							DPKG_EXTRACT_FROM_HDR;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_DST;
		index++;

		priv->extract.fs_key_cfg[group].num_extracts = index;
	}

	/* Parse pattern list to get the matching parameters */
	spec	= (const struct rte_flow_item_tcp *)pattern->spec;
	last	= (const struct rte_flow_item_tcp *)pattern->last;
	mask	= (const struct rte_flow_item_tcp *)
			(pattern->mask ? pattern->mask : default_mask);

	/* Force the IP-proto byte of the rule key to 0x06 (TCP). The
	 * IPV4 offset + 2*u32 points at the proto slot in the key layout.
	 */
	key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4 +
					(2 * sizeof(uint32_t));
	memset((void *)key_iova, 0x06, sizeof(uint8_t));
	key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_TCP;
	memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port),
							sizeof(uint16_t));
	key_iova += sizeof(uint16_t);
	memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port),
							sizeof(uint16_t));

	mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_TCP;
	memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port),
							sizeof(uint16_t));
	mask_iova += sizeof(uint16_t);
	memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port),
							sizeof(uint16_t));

	/* Key covers everything up to and including the two TCP ports. */
	flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_TCP +
				(2 * sizeof(uint16_t)));

	return device_configured;
}
1067 
/* Configure QoS/FS key extracts and classification rule bytes for an
 * SCTP pattern item. Mirrors dpaa2_configure_flow_tcp but uses the SCTP
 * protocol/fields. Returns the table-reconfigure bitmask or -ENOTSUP.
 */
static int
dpaa2_configure_flow_sctp(struct rte_flow *flow,
			  struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item *pattern,
			  const struct rte_flow_action actions[] __rte_unused,
			  struct rte_flow_error *error __rte_unused)
{
	int index, j = 0;
	size_t key_iova;
	size_t mask_iova;
	int device_configured = 0, entry_found = 0;
	uint32_t group;
	const struct rte_flow_item_sctp *spec, *mask;

	const struct rte_flow_item_sctp *last __rte_unused;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	group = attr->group;

	/* DPAA2 platform has a limitation that extract parameter can not be */
	/* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too. */
	/* NOTE(review): pattern[8] is the shared QoS-table slot (presumably
	 * MAX_TCS) — confirm. The check counts items, not the 3 extracts
	 * appended below, so num_extracts may still overflow — verify.
	 */
	if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
						DPKG_MAX_NUM_OF_EXTRACTS);
		return -ENOTSUP;
	}

	if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
						DPKG_MAX_NUM_OF_EXTRACTS);
		return -ENOTSUP;
	}

	/* Register the SCTP item in the QoS slot if not already present. */
	for (j = 0; j < priv->pattern[8].item_count; j++) {
		if (priv->pattern[8].pattern_type[j] != pattern->type) {
			continue;
		} else {
			entry_found = 1;
			break;
		}
	}

	if (!entry_found) {
		priv->pattern[8].pattern_type[j] = pattern->type;
		priv->pattern[8].item_count++;
		device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
	}

	/* And in this traffic class's FS slot. */
	entry_found = 0;
	for (j = 0; j < priv->pattern[group].item_count; j++) {
		if (priv->pattern[group].pattern_type[j] != pattern->type) {
			continue;
		} else {
			entry_found = 1;
			break;
		}
	}

	if (!entry_found) {
		priv->pattern[group].pattern_type[j] = pattern->type;
		priv->pattern[group].item_count++;
		device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
	}

	/* Get traffic class index and flow id to be configured */
	flow->tc_id = group;
	flow->index = attr->priority;

	/* Append IP-proto + SCTP src/dst port extracts to the QoS key. */
	if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
		index = priv->extract.qos_key_cfg.num_extracts;
		priv->extract.qos_key_cfg.extracts[index].type =
							DPKG_EXTRACT_FROM_HDR;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
		index++;

		priv->extract.qos_key_cfg.extracts[index].type =
							DPKG_EXTRACT_FROM_HDR;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_SRC;
		index++;

		priv->extract.qos_key_cfg.extracts[index].type =
							DPKG_EXTRACT_FROM_HDR;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_DST;
		index++;

		priv->extract.qos_key_cfg.num_extracts = index;
	}

	/* Same three extracts for this traffic class's FS key. */
	if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
		index = priv->extract.fs_key_cfg[group].num_extracts;
		priv->extract.fs_key_cfg[group].extracts[index].type =
							DPKG_EXTRACT_FROM_HDR;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
		index++;

		priv->extract.fs_key_cfg[group].extracts[index].type =
							DPKG_EXTRACT_FROM_HDR;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_SRC;
		index++;

		priv->extract.fs_key_cfg[group].extracts[index].type =
							DPKG_EXTRACT_FROM_HDR;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_DST;
		index++;

		priv->extract.fs_key_cfg[group].num_extracts = index;
	}

	/* Parse pattern list to get the matching parameters */
	spec	= (const struct rte_flow_item_sctp *)pattern->spec;
	last	= (const struct rte_flow_item_sctp *)pattern->last;
	mask	= (const struct rte_flow_item_sctp *)
			(pattern->mask ? pattern->mask : default_mask);

	/* Force the IP-proto byte of the rule key to 0x84 (SCTP). */
	key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4 +
						(2 * sizeof(uint32_t));
	memset((void *)key_iova, 0x84, sizeof(uint8_t));
	key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_SCTP;
	memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port),
							sizeof(uint16_t));
	key_iova += sizeof(uint16_t);
	memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port),
							sizeof(uint16_t));

	mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_SCTP;
	memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port),
							sizeof(uint16_t));
	mask_iova += sizeof(uint16_t);
	memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port),
							sizeof(uint16_t));

	/* Key covers everything up to and including the two SCTP ports. */
	flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_SCTP +
				(2 * sizeof(uint16_t)));
	return device_configured;
}
1216 
/* Configure QoS/FS key extracts and classification rule bytes for a
 * GRE pattern item. Unlike the TCP/UDP/SCTP variants this adds only a
 * single extract (the GRE protocol/type field). Returns the
 * table-reconfigure bitmask or -ENOTSUP.
 */
static int
dpaa2_configure_flow_gre(struct rte_flow *flow,
			 struct rte_eth_dev *dev,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_item *pattern,
			 const struct rte_flow_action actions[] __rte_unused,
			 struct rte_flow_error *error __rte_unused)
{
	int index, j = 0;
	size_t key_iova;
	size_t mask_iova;
	int device_configured = 0, entry_found = 0;
	uint32_t group;
	const struct rte_flow_item_gre *spec, *mask;

	const struct rte_flow_item_gre *last __rte_unused;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	group = attr->group;

	/* DPAA2 platform has a limitation that extract parameter can not be */
	/* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too. */
	/* NOTE(review): pattern[8] is the shared QoS-table slot (presumably
	 * MAX_TCS) — confirm against dpaa2_ethdev.h.
	 */
	if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
						DPKG_MAX_NUM_OF_EXTRACTS);
		return -ENOTSUP;
	}

	if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
						DPKG_MAX_NUM_OF_EXTRACTS);
		return -ENOTSUP;
	}

	/* Register the GRE item in the QoS slot if not already present. */
	for (j = 0; j < priv->pattern[8].item_count; j++) {
		if (priv->pattern[8].pattern_type[j] != pattern->type) {
			continue;
		} else {
			entry_found = 1;
			break;
		}
	}

	if (!entry_found) {
		priv->pattern[8].pattern_type[j] = pattern->type;
		priv->pattern[8].item_count++;
		device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
	}

	/* And in this traffic class's FS slot. */
	entry_found = 0;
	for (j = 0; j < priv->pattern[group].item_count; j++) {
		if (priv->pattern[group].pattern_type[j] != pattern->type) {
			continue;
		} else {
			entry_found = 1;
			break;
		}
	}

	if (!entry_found) {
		priv->pattern[group].pattern_type[j] = pattern->type;
		priv->pattern[group].item_count++;
		device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
	}

	/* Get traffic class index and flow id to be configured */
	flow->tc_id = group;
	flow->index = attr->priority;

	/* Append a single GRE-type extract to the QoS key. */
	if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
		index = priv->extract.qos_key_cfg.num_extracts;
		priv->extract.qos_key_cfg.extracts[index].type =
							DPKG_EXTRACT_FROM_HDR;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_GRE;
		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_GRE_TYPE;
		index++;

		priv->extract.qos_key_cfg.num_extracts = index;
	}

	/* Same extract for this traffic class's FS key. */
	if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
		index = priv->extract.fs_key_cfg[group].num_extracts;
		priv->extract.fs_key_cfg[group].extracts[index].type =
							DPKG_EXTRACT_FROM_HDR;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_GRE;
		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_GRE_TYPE;
		index++;

		priv->extract.fs_key_cfg[group].num_extracts = index;
	}

	/* Parse pattern list to get the matching parameters */
	spec	= (const struct rte_flow_item_gre *)pattern->spec;
	last	= (const struct rte_flow_item_gre *)pattern->last;
	mask	= (const struct rte_flow_item_gre *)
			(pattern->mask ? pattern->mask : default_mask);

	/* Copy the 16-bit GRE protocol field into key and mask. */
	key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_GRE;
	memcpy((void *)key_iova, (const void *)(&spec->protocol),
							sizeof(rte_be16_t));

	mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_GRE;
	memcpy((void *)mask_iova, (const void *)(&mask->protocol),
							sizeof(rte_be16_t));

	flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_GRE + sizeof(rte_be16_t));

	return device_configured;
}
1328 
/* Translate a validated rte_flow (attr + pattern list + action list)
 * into DPAA2 hardware state: per-item key-extract configuration, then
 * QoS/FS table programming through the MC firmware (dpni_* calls).
 * On success the flow is appended to priv->flows and 0 is returned;
 * otherwise a negative errno (or -1 from some MC failure paths).
 */
static int
dpaa2_generic_flow_set(struct rte_flow *flow,
		       struct rte_eth_dev *dev,
		       const struct rte_flow_attr *attr,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[],
		       struct rte_flow_error *error)
{
	const struct rte_flow_action_queue *dest_queue;
	const struct rte_flow_action_rss *rss_conf;
	uint16_t index;
	int is_keycfg_configured = 0, end_of_list = 0;
	int ret = 0, i = 0, j = 0;
	struct dpni_attr nic_attr;
	struct dpni_rx_tc_dist_cfg tc_cfg;
	struct dpni_qos_tbl_cfg qos_cfg;
	struct dpkg_profile_cfg key_cfg;
	struct dpni_fs_action_cfg action;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	size_t param;
	struct rte_flow *curr = LIST_FIRST(&priv->flows);

	/* Parse pattern list to get the matching parameters */
	/* NOTE(review): the dpaa2_configure_flow_* helpers return either a
	 * reconfigure bitmask or a negative errno, but a negative value is
	 * never checked here — an item failure is silently treated as
	 * "no reconfigure needed". Verify intended.
	 */
	while (!end_of_list) {
		switch (pattern[i].type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			is_keycfg_configured = dpaa2_configure_flow_eth(flow,
									dev,
									attr,
									&pattern[i],
									actions,
									error);
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			is_keycfg_configured = dpaa2_configure_flow_vlan(flow,
									dev,
									attr,
									&pattern[i],
									actions,
									error);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			is_keycfg_configured = dpaa2_configure_flow_ipv4(flow,
									dev,
									attr,
									&pattern[i],
									actions,
									error);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			is_keycfg_configured = dpaa2_configure_flow_ipv6(flow,
									dev,
									attr,
									&pattern[i],
									actions,
									error);
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP:
			is_keycfg_configured = dpaa2_configure_flow_icmp(flow,
									dev,
									attr,
									&pattern[i],
									actions,
									error);
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			is_keycfg_configured = dpaa2_configure_flow_udp(flow,
									dev,
									attr,
									&pattern[i],
									actions,
									error);
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			is_keycfg_configured = dpaa2_configure_flow_tcp(flow,
									dev,
									attr,
									&pattern[i],
									actions,
									error);
			break;
		case RTE_FLOW_ITEM_TYPE_SCTP:
			is_keycfg_configured = dpaa2_configure_flow_sctp(flow,
									dev, attr,
									&pattern[i],
									actions,
									error);
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			is_keycfg_configured = dpaa2_configure_flow_gre(flow,
									dev,
									attr,
									&pattern[i],
									actions,
									error);
			break;
		case RTE_FLOW_ITEM_TYPE_END:
			end_of_list = 1;
			break; /*End of List*/
		default:
			/* NOTE(review): message says "action" but this is the
			 * pattern loop — presumably should read "pattern type".
			 */
			DPAA2_PMD_ERR("Invalid action type");
			ret = -ENOTSUP;
			break;
		}
		i++;
	}

	/* Let's parse action on matching traffic */
	end_of_list = 0;
	while (!end_of_list) {
		switch (actions[j].type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			/* Steer matching traffic to a specific Rx queue:
			 * (re)program QoS and/or FS key layouts if any
			 * pattern item changed them, then add one entry
			 * to each table.
			 */
			dest_queue = (const struct rte_flow_action_queue *)(actions[j].conf);
			flow->flow_id = dest_queue->index;
			flow->action = RTE_FLOW_ACTION_TYPE_QUEUE;
			memset(&action, 0, sizeof(struct dpni_fs_action_cfg));
			action.flow_id = flow->flow_id;
			if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
				/* Serialize the QoS key layout into the
				 * DMA-able buffer the MC reads from.
				 */
				if (dpkg_prepare_key_cfg(&priv->extract.qos_key_cfg,
							 (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
					DPAA2_PMD_ERR(
					"Unable to prepare extract parameters");
					return -1;
				}

				memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
				qos_cfg.discard_on_miss = true;
				qos_cfg.keep_entries = true;
				qos_cfg.key_cfg_iova = (size_t)priv->extract.qos_extract_param;
				ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
							 priv->token, &qos_cfg);
				if (ret < 0) {
					DPAA2_PMD_ERR(
					"Distribution cannot be configured.(%d)"
					, ret);
					return -1;
				}
			}
			if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
				/* Same for this traffic class's FS key. */
				if (dpkg_prepare_key_cfg(&priv->extract.fs_key_cfg[flow->tc_id],
						(uint8_t *)(size_t)priv->extract.fs_extract_param[flow->tc_id]) < 0) {
					DPAA2_PMD_ERR(
					"Unable to prepare extract parameters");
					return -1;
				}

				memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
				tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc;
				tc_cfg.dist_mode = DPNI_DIST_MODE_FS;
				tc_cfg.key_cfg_iova =
					(uint64_t)priv->extract.fs_extract_param[flow->tc_id];
				tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;
				tc_cfg.fs_cfg.keep_entries = true;
				ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
							 priv->token,
							 flow->tc_id, &tc_cfg);
				if (ret < 0) {
					DPAA2_PMD_ERR(
					"Distribution cannot be configured.(%d)"
					, ret);
					return -1;
				}
			}
			/* Configure QoS table first */
			memset(&nic_attr, 0, sizeof(struct dpni_attr));
			ret = dpni_get_attributes(dpni, CMD_PRI_LOW,
						 priv->token, &nic_attr);
			if (ret < 0) {
				DPAA2_PMD_ERR(
				"Failure to get attribute. dpni@%p err code(%d)\n",
				dpni, ret);
				return ret;
			}

			/* Clamp flow id into the valid TC range; index the
			 * QoS entry by (priority, tc) so entries don't clash.
			 */
			action.flow_id = action.flow_id % nic_attr.num_rx_tcs;
			index = flow->index + (flow->tc_id * nic_attr.fs_entries);
			ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
						priv->token, &flow->rule,
						flow->tc_id, index,
						0, 0);
			if (ret < 0) {
				DPAA2_PMD_ERR(
				"Error in addnig entry to QoS table(%d)", ret);
				return ret;
			}

			/* Then Configure FS table */
			ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token,
						flow->tc_id, flow->index,
						&flow->rule, &action);
			if (ret < 0) {
				DPAA2_PMD_ERR(
				"Error in adding entry to FS table(%d)", ret);
				return ret;
			}
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			/* Hash-distribute matching traffic across the queues
			 * of the flow's traffic class.
			 */
			ret = dpni_get_attributes(dpni, CMD_PRI_LOW,
						 priv->token, &nic_attr);
			if (ret < 0) {
				DPAA2_PMD_ERR(
				"Failure to get attribute. dpni@%p err code(%d)\n",
				dpni, ret);
				return ret;
			}
			rss_conf = (const struct rte_flow_action_rss *)(actions[j].conf);
			/* All requested queues must belong to attr->group's
			 * queue range.
			 */
			for (i = 0; i < (int)rss_conf->queue_num; i++) {
				if (rss_conf->queue[i] < (attr->group * nic_attr.num_queues) ||
				    rss_conf->queue[i] >= ((attr->group + 1) * nic_attr.num_queues)) {
					DPAA2_PMD_ERR(
					"Queue/Group combination are not supported\n");
					return -ENOTSUP;
				}
			}

			flow->action = RTE_FLOW_ACTION_TYPE_RSS;
			ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types,
								&key_cfg);
			if (ret < 0) {
				DPAA2_PMD_ERR(
				"unable to set flow distribution.please check queue config\n");
				return ret;
			}

			/* Allocate DMA'ble memory to write the rules */
			param = (size_t)rte_malloc(NULL, 256, 64);
			if (!param) {
				DPAA2_PMD_ERR("Memory allocation failure\n");
				return -1;
			}

			if (dpkg_prepare_key_cfg(&key_cfg, (uint8_t *)param) < 0) {
				DPAA2_PMD_ERR(
				"Unable to prepare extract parameters");
				rte_free((void *)param);
				return -1;
			}

			memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
			tc_cfg.dist_size = rss_conf->queue_num;
			tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
			tc_cfg.key_cfg_iova = (size_t)param;
			tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;

			ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
						 priv->token, flow->tc_id,
						 &tc_cfg);
			if (ret < 0) {
				DPAA2_PMD_ERR(
				"Distribution cannot be configured: %d\n", ret);
				rte_free((void *)param);
				return -1;
			}

			/* param is only needed for the MC call above. */
			rte_free((void *)param);
			if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
				/* NOTE(review): this tests the FS bit but then
				 * reprograms the QoS table — looks like either
				 * the flag or the table is wrong; verify.
				 */
				if (dpkg_prepare_key_cfg(&priv->extract.qos_key_cfg,
					(uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
					DPAA2_PMD_ERR(
					"Unable to prepare extract parameters");
					return -1;
				}
				memset(&qos_cfg, 0,
					sizeof(struct dpni_qos_tbl_cfg));
				qos_cfg.discard_on_miss = true;
				qos_cfg.keep_entries = true;
				qos_cfg.key_cfg_iova = (size_t)priv->extract.qos_extract_param;
				ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
							 priv->token, &qos_cfg);
				if (ret < 0) {
					DPAA2_PMD_ERR(
					"Distribution can not be configured(%d)\n",
					ret);
					return -1;
				}
			}

			/* Add Rule into QoS table */
			index = flow->index + (flow->tc_id * nic_attr.fs_entries);
			ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token,
						&flow->rule, flow->tc_id,
						index, 0, 0);
			if (ret < 0) {
				DPAA2_PMD_ERR(
				"Error in entry addition in QoS table(%d)",
				ret);
				return ret;
			}
			break;
		case RTE_FLOW_ACTION_TYPE_END:
			end_of_list = 1;
			break;
		default:
			DPAA2_PMD_ERR("Invalid action type");
			ret = -ENOTSUP;
			break;
		}
		j++;
	}

	if (!ret) {
		/* New rules are inserted. */
		/* Append at the tail to keep flows in creation order. */
		if (!curr) {
			LIST_INSERT_HEAD(&priv->flows, flow, next);
		} else {
			while (LIST_NEXT(curr, next))
				curr = LIST_NEXT(curr, next);
			LIST_INSERT_AFTER(curr, flow, next);
		}
	}
	return ret;
}
1642 
1643 static inline int
1644 dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr,
1645 		      const struct rte_flow_attr *attr)
1646 {
1647 	int ret = 0;
1648 
1649 	if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) {
1650 		DPAA2_PMD_ERR("Priority group is out of range\n");
1651 		ret = -ENOTSUP;
1652 	}
1653 	if (unlikely(attr->priority >= dpni_attr->fs_entries)) {
1654 		DPAA2_PMD_ERR("Priority within the group is out of range\n");
1655 		ret = -ENOTSUP;
1656 	}
1657 	if (unlikely(attr->egress)) {
1658 		DPAA2_PMD_ERR(
1659 			"Flow configuration is not supported on egress side\n");
1660 		ret = -ENOTSUP;
1661 	}
1662 	if (unlikely(!attr->ingress)) {
1663 		DPAA2_PMD_ERR("Ingress flag must be configured\n");
1664 		ret = -EINVAL;
1665 	}
1666 	return ret;
1667 }
1668 
1669 static inline void
1670 dpaa2_dev_update_default_mask(const struct rte_flow_item *pattern)
1671 {
1672 	switch (pattern->type) {
1673 	case RTE_FLOW_ITEM_TYPE_ETH:
1674 		default_mask = (const void *)&rte_flow_item_eth_mask;
1675 		break;
1676 	case RTE_FLOW_ITEM_TYPE_VLAN:
1677 		default_mask = (const void *)&rte_flow_item_vlan_mask;
1678 		break;
1679 	case RTE_FLOW_ITEM_TYPE_IPV4:
1680 		default_mask = (const void *)&rte_flow_item_ipv4_mask;
1681 		break;
1682 	case RTE_FLOW_ITEM_TYPE_IPV6:
1683 		default_mask = (const void *)&rte_flow_item_ipv6_mask;
1684 		break;
1685 	case RTE_FLOW_ITEM_TYPE_ICMP:
1686 		default_mask = (const void *)&rte_flow_item_icmp_mask;
1687 		break;
1688 	case RTE_FLOW_ITEM_TYPE_UDP:
1689 		default_mask = (const void *)&rte_flow_item_udp_mask;
1690 		break;
1691 	case RTE_FLOW_ITEM_TYPE_TCP:
1692 		default_mask = (const void *)&rte_flow_item_tcp_mask;
1693 		break;
1694 	case RTE_FLOW_ITEM_TYPE_SCTP:
1695 		default_mask = (const void *)&rte_flow_item_sctp_mask;
1696 		break;
1697 	case RTE_FLOW_ITEM_TYPE_GRE:
1698 		default_mask = (const void *)&rte_flow_item_gre_mask;
1699 		break;
1700 	default:
1701 		DPAA2_PMD_ERR("Invalid pattern type");
1702 	}
1703 }
1704 
1705 static inline int
1706 dpaa2_dev_verify_patterns(struct dpaa2_dev_priv *dev_priv,
1707 			  const struct rte_flow_item pattern[])
1708 {
1709 	unsigned int i, j, k, is_found = 0;
1710 	int ret = 0;
1711 
1712 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
1713 		for (i = 0; i < RTE_DIM(dpaa2_supported_pattern_type); i++) {
1714 			if (dpaa2_supported_pattern_type[i] == pattern[j].type) {
1715 				is_found = 1;
1716 				break;
1717 			}
1718 		}
1719 		if (!is_found) {
1720 			ret = -ENOTSUP;
1721 			break;
1722 		}
1723 	}
1724 	/* Lets verify other combinations of given pattern rules */
1725 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
1726 		if (!pattern[j].spec) {
1727 			ret = -EINVAL;
1728 			break;
1729 		}
1730 		if ((pattern[j].last) && (!pattern[j].mask))
1731 			dpaa2_dev_update_default_mask(&pattern[j]);
1732 	}
1733 
1734 	/* DPAA2 platform has a limitation that extract parameter can not be */
1735 	/* more	than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too. */
1736 	for (i = 0; pattern[i].type != RTE_FLOW_ITEM_TYPE_END; i++) {
1737 		for (j = 0; j < MAX_TCS + 1; j++) {
1738 				for (k = 0; k < DPKG_MAX_NUM_OF_EXTRACTS; k++) {
1739 					if (dev_priv->pattern[j].pattern_type[k] == pattern[i].type)
1740 						break;
1741 				}
1742 			if (dev_priv->pattern[j].item_count >= DPKG_MAX_NUM_OF_EXTRACTS)
1743 				ret = -ENOTSUP;
1744 		}
1745 	}
1746 	return ret;
1747 }
1748 
1749 static inline int
1750 dpaa2_dev_verify_actions(const struct rte_flow_action actions[])
1751 {
1752 	unsigned int i, j, is_found = 0;
1753 	int ret = 0;
1754 
1755 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
1756 		for (i = 0; i < RTE_DIM(dpaa2_supported_action_type); i++) {
1757 			if (dpaa2_supported_action_type[i] == actions[j].type) {
1758 				is_found = 1;
1759 				break;
1760 			}
1761 		}
1762 		if (!is_found) {
1763 			ret = -ENOTSUP;
1764 			break;
1765 		}
1766 	}
1767 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
1768 		if ((actions[j].type != RTE_FLOW_ACTION_TYPE_DROP) && (!actions[j].conf))
1769 			ret = -EINVAL;
1770 	}
1771 	return ret;
1772 }
1773 
1774 static
1775 int dpaa2_flow_validate(struct rte_eth_dev *dev,
1776 			const struct rte_flow_attr *flow_attr,
1777 			const struct rte_flow_item pattern[],
1778 			const struct rte_flow_action actions[],
1779 			struct rte_flow_error *error)
1780 {
1781 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1782 	struct dpni_attr dpni_attr;
1783 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1784 	uint16_t token = priv->token;
1785 	int ret = 0;
1786 
1787 	memset(&dpni_attr, 0, sizeof(struct dpni_attr));
1788 	ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr);
1789 	if (ret < 0) {
1790 		DPAA2_PMD_ERR(
1791 			"Failure to get dpni@%p attribute, err code  %d\n",
1792 			dpni, ret);
1793 		rte_flow_error_set(error, EPERM,
1794 			   RTE_FLOW_ERROR_TYPE_ATTR,
1795 			   flow_attr, "invalid");
1796 		return ret;
1797 	}
1798 
1799 	/* Verify input attributes */
1800 	ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr);
1801 	if (ret < 0) {
1802 		DPAA2_PMD_ERR(
1803 			"Invalid attributes are given\n");
1804 		rte_flow_error_set(error, EPERM,
1805 			   RTE_FLOW_ERROR_TYPE_ATTR,
1806 			   flow_attr, "invalid");
1807 		goto not_valid_params;
1808 	}
1809 	/* Verify input pattern list */
1810 	ret = dpaa2_dev_verify_patterns(priv, pattern);
1811 	if (ret < 0) {
1812 		DPAA2_PMD_ERR(
1813 			"Invalid pattern list is given\n");
1814 		rte_flow_error_set(error, EPERM,
1815 			   RTE_FLOW_ERROR_TYPE_ITEM,
1816 			   pattern, "invalid");
1817 		goto not_valid_params;
1818 	}
1819 	/* Verify input action list */
1820 	ret = dpaa2_dev_verify_actions(actions);
1821 	if (ret < 0) {
1822 		DPAA2_PMD_ERR(
1823 			"Invalid action list is given\n");
1824 		rte_flow_error_set(error, EPERM,
1825 			   RTE_FLOW_ERROR_TYPE_ACTION,
1826 			   actions, "invalid");
1827 		goto not_valid_params;
1828 	}
1829 not_valid_params:
1830 	return ret;
1831 }
1832 
1833 static
1834 struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev,
1835 				   const struct rte_flow_attr *attr,
1836 				   const struct rte_flow_item pattern[],
1837 				   const struct rte_flow_action actions[],
1838 				   struct rte_flow_error *error)
1839 {
1840 	struct rte_flow *flow = NULL;
1841 	size_t key_iova = 0, mask_iova = 0;
1842 	int ret;
1843 
1844 	flow = rte_malloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE);
1845 	if (!flow) {
1846 		DPAA2_PMD_ERR("Failure to allocate memory for flow");
1847 		goto mem_failure;
1848 	}
1849 	/* Allocate DMA'ble memory to write the rules */
1850 	key_iova = (size_t)rte_malloc(NULL, 256, 64);
1851 	if (!key_iova) {
1852 		DPAA2_PMD_ERR(
1853 			"Memory allocation failure for rule configuration\n");
1854 		goto mem_failure;
1855 	}
1856 	mask_iova = (size_t)rte_malloc(NULL, 256, 64);
1857 	if (!mask_iova) {
1858 		DPAA2_PMD_ERR(
1859 			"Memory allocation failure for rule configuration\n");
1860 		goto mem_failure;
1861 	}
1862 
1863 	flow->rule.key_iova = key_iova;
1864 	flow->rule.mask_iova = mask_iova;
1865 	flow->rule.key_size = 0;
1866 
1867 	switch (dpaa2_filter_type) {
1868 	case RTE_ETH_FILTER_GENERIC:
1869 		ret = dpaa2_generic_flow_set(flow, dev, attr, pattern,
1870 					     actions, error);
1871 		if (ret < 0) {
1872 			if (error->type > RTE_FLOW_ERROR_TYPE_ACTION)
1873 				rte_flow_error_set(error, EPERM,
1874 						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1875 						attr, "unknown");
1876 			DPAA2_PMD_ERR(
1877 			"Failure to create flow, return code (%d)", ret);
1878 			goto creation_error;
1879 		}
1880 		break;
1881 	default:
1882 		DPAA2_PMD_ERR("Filter type (%d) not supported",
1883 		dpaa2_filter_type);
1884 		break;
1885 	}
1886 
1887 	return flow;
1888 mem_failure:
1889 	rte_flow_error_set(error, EPERM,
1890 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1891 			   NULL, "memory alloc");
1892 creation_error:
1893 	rte_free((void *)flow);
1894 	rte_free((void *)key_iova);
1895 	rte_free((void *)mask_iova);
1896 
1897 	return NULL;
1898 }
1899 
1900 static
1901 int dpaa2_flow_destroy(struct rte_eth_dev *dev,
1902 		       struct rte_flow *flow,
1903 		       struct rte_flow_error *error)
1904 {
1905 	int ret = 0;
1906 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1907 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1908 
1909 	switch (flow->action) {
1910 	case RTE_FLOW_ACTION_TYPE_QUEUE:
1911 		/* Remove entry from QoS table first */
1912 		ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
1913 					   &flow->rule);
1914 		if (ret < 0) {
1915 			DPAA2_PMD_ERR(
1916 				"Error in adding entry to QoS table(%d)", ret);
1917 			goto error;
1918 		}
1919 
1920 		/* Then remove entry from FS table */
1921 		ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token,
1922 					   flow->tc_id, &flow->rule);
1923 		if (ret < 0) {
1924 			DPAA2_PMD_ERR(
1925 				"Error in entry addition in FS table(%d)", ret);
1926 			goto error;
1927 		}
1928 		break;
1929 	case RTE_FLOW_ACTION_TYPE_RSS:
1930 		ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
1931 					   &flow->rule);
1932 		if (ret < 0) {
1933 			DPAA2_PMD_ERR(
1934 			"Error in entry addition in QoS table(%d)", ret);
1935 			goto error;
1936 		}
1937 		break;
1938 	default:
1939 		DPAA2_PMD_ERR(
1940 		"Action type (%d) is not supported", flow->action);
1941 		ret = -ENOTSUP;
1942 		break;
1943 	}
1944 
1945 	LIST_REMOVE(flow, next);
1946 	/* Now free the flow */
1947 	rte_free(flow);
1948 
1949 error:
1950 	if (ret)
1951 		rte_flow_error_set(error, EPERM,
1952 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1953 				   NULL, "unknown");
1954 	return ret;
1955 }
1956 
1957 /**
1958  * Destroy user-configured flow rules.
1959  *
1960  * This function skips internal flows rules.
1961  *
1962  * @see rte_flow_flush()
1963  * @see rte_flow_ops
1964  */
1965 static int
1966 dpaa2_flow_flush(struct rte_eth_dev *dev,
1967 		struct rte_flow_error *error)
1968 {
1969 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
1970 	struct rte_flow *flow = LIST_FIRST(&priv->flows);
1971 
1972 	while (flow) {
1973 		struct rte_flow *next = LIST_NEXT(flow, next);
1974 
1975 		dpaa2_flow_destroy(dev, flow, error);
1976 		flow = next;
1977 	}
1978 	return 0;
1979 }
1980 
/* rte_flow query callback: not implemented for DPAA2. This stub
 * reports success without writing anything through 'data'.
 */
static int
dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused,
		struct rte_flow *flow __rte_unused,
		const struct rte_flow_action *actions __rte_unused,
		void *data __rte_unused,
		struct rte_flow_error *error __rte_unused)
{
	return 0;
}
1990 
1991 /**
1992  * Clean up all flow rules.
1993  *
1994  * Unlike dpaa2_flow_flush(), this function takes care of all remaining flow
1995  * rules regardless of whether they are internal or user-configured.
1996  *
1997  * @param priv
1998  *   Pointer to private structure.
1999  */
2000 void
2001 dpaa2_flow_clean(struct rte_eth_dev *dev)
2002 {
2003 	struct rte_flow *flow;
2004 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
2005 
2006 	while ((flow = LIST_FIRST(&priv->flows)))
2007 		dpaa2_flow_destroy(dev, flow, NULL);
2008 }
2009 
/* rte_flow callbacks exported by the DPAA2 PMD. 'query' is a stub
 * that always reports success; optional callbacks such as isolate
 * are not provided.
 */
const struct rte_flow_ops dpaa2_flow_ops = {
	.create	= dpaa2_flow_create,
	.validate = dpaa2_flow_validate,
	.destroy = dpaa2_flow_destroy,
	.flush	= dpaa2_flow_flush,
	.query	= dpaa2_flow_query,
};
2017