xref: /dpdk/drivers/net/mvpp2/mrvl_flow.c (revision 61938a2d178554a0605f8d7ec2e5b7eeaea20e43)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Marvell International Ltd.
3  * Copyright(c) 2018 Semihalf.
4  * All rights reserved.
5  */
6 
7 #include <rte_flow.h>
8 #include <rte_flow_driver.h>
9 #include <rte_malloc.h>
10 #include <rte_log.h>
11 
12 #include <arpa/inet.h>
13 
14 #include "mrvl_flow.h"
15 #include "mrvl_qos.h"
16 
17 /** Number of rules in the classifier table. */
18 #define MRVL_CLS_MAX_NUM_RULES 20
19 
20 /** Size of the classifier key and mask strings. */
21 #define MRVL_CLS_STR_SIZE_MAX 40
22 
23 #define MRVL_VLAN_ID_MASK 0x0fff
24 #define MRVL_VLAN_PRI_MASK 0x7000
25 #define MRVL_IPV4_DSCP_MASK 0xfc
26 #define MRVL_IPV4_ADDR_MASK 0xffffffff
27 #define MRVL_IPV6_FLOW_MASK 0x0fffff
28 
29 /**
30  * Allocate memory for classifier rule key and mask fields.
31  *
32  * @param field Pointer to the classifier rule.
33  * @returns 0 in case of success, negative value otherwise.
34  */
35 static int
36 mrvl_alloc_key_mask(struct pp2_cls_rule_key_field *field)
37 {
38 	unsigned int id = rte_socket_id();
39 
40 	field->key = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id);
41 	if (!field->key)
42 		goto out;
43 
44 	field->mask = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id);
45 	if (!field->mask)
46 		goto out_mask;
47 
48 	return 0;
49 out_mask:
50 	rte_free(field->key);
51 out:
52 	field->key = NULL;
53 	field->mask = NULL;
54 	return -1;
55 }
56 
57 /**
58  * Free memory allocated for classifier rule key and mask fields.
59  *
60  * @param field Pointer to the classifier rule.
61  */
62 static void
63 mrvl_free_key_mask(struct pp2_cls_rule_key_field *field)
64 {
65 	rte_free(field->key);
66 	rte_free(field->mask);
67 	field->key = NULL;
68 	field->mask = NULL;
69 }
70 
71 /**
72  * Free memory allocated for all classifier rule key and mask fields.
73  *
74  * @param rule Pointer to the classifier table rule.
75  */
76 static void
77 mrvl_free_all_key_mask(struct pp2_cls_tbl_rule *rule)
78 {
79 	int i;
80 
81 	for (i = 0; i < rule->num_fields; i++)
82 		mrvl_free_key_mask(&rule->fields[i]);
83 	rule->num_fields = 0;
84 }
85 
/**
 * Initialize rte flow item parsing.
 *
 * @param item Pointer to the flow item.
 * @param spec_ptr Pointer to the specific item pointer.
 * @param mask_ptr Pointer to the specific item's mask pointer.
 * @param def_mask Pointer to the default mask.
 * @param size Size of the flow item.
 * @param error Pointer to the rte flow error.
 * @returns 0 in case of success, negative value otherwise.
 */
97 static int
98 mrvl_parse_init(const struct rte_flow_item *item,
99 		const void **spec_ptr,
100 		const void **mask_ptr,
101 		const void *def_mask,
102 		unsigned int size,
103 		struct rte_flow_error *error)
104 {
105 	const uint8_t *spec;
106 	const uint8_t *mask;
107 	const uint8_t *last;
108 	uint8_t zeros[size];
109 
110 	memset(zeros, 0, size);
111 
112 	if (item == NULL) {
113 		rte_flow_error_set(error, EINVAL,
114 				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
115 				   "NULL item\n");
116 		return -rte_errno;
117 	}
118 
119 	if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
120 		rte_flow_error_set(error, EINVAL,
121 				   RTE_FLOW_ERROR_TYPE_ITEM, item,
122 				   "Mask or last is set without spec\n");
123 		return -rte_errno;
124 	}
125 
126 	/*
127 	 * If "mask" is not set, default mask is used,
128 	 * but if default mask is NULL, "mask" should be set.
129 	 */
130 	if (item->mask == NULL) {
131 		if (def_mask == NULL) {
132 			rte_flow_error_set(error, EINVAL,
133 					   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
134 					   "Mask should be specified\n");
135 			return -rte_errno;
136 		}
137 
138 		mask = (const uint8_t *)def_mask;
139 	} else {
140 		mask = (const uint8_t *)item->mask;
141 	}
142 
143 	spec = (const uint8_t *)item->spec;
144 	last = (const uint8_t *)item->last;
145 
146 	if (spec == NULL) {
147 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
148 				   NULL, "Spec should be specified\n");
149 		return -rte_errno;
150 	}
151 
152 	/*
153 	 * If field values in "last" are either 0 or equal to the corresponding
154 	 * values in "spec" then they are ignored.
155 	 */
156 	if (last != NULL &&
157 	    !memcmp(last, zeros, size) &&
158 	    memcmp(last, spec, size) != 0) {
159 		rte_flow_error_set(error, ENOTSUP,
160 				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
161 				   "Ranging is not supported\n");
162 		return -rte_errno;
163 	}
164 
165 	*spec_ptr = spec;
166 	*mask_ptr = mask;
167 
168 	return 0;
169 }
170 
171 /**
172  * Parse the eth flow item.
173  *
174  * This will create classifier rule that matches either destination or source
175  * mac.
176  *
177  * @param spec Pointer to the specific flow item.
178  * @param mask Pointer to the specific flow item's mask.
179  * @param parse_dst Parse either destination or source mac address.
180  * @param flow Pointer to the flow.
181  * @return 0 in case of success, negative error value otherwise.
182  */
183 static int
184 mrvl_parse_mac(const struct rte_flow_item_eth *spec,
185 	       const struct rte_flow_item_eth *mask,
186 	       int parse_dst, struct rte_flow *flow)
187 {
188 	struct pp2_cls_rule_key_field *key_field;
189 	const uint8_t *k, *m;
190 
191 	if (parse_dst) {
192 		k = spec->hdr.dst_addr.addr_bytes;
193 		m = mask->hdr.dst_addr.addr_bytes;
194 
195 		flow->table_key.proto_field[flow->rule.num_fields].field.eth =
196 			MV_NET_ETH_F_DA;
197 	} else {
198 		k = spec->hdr.src_addr.addr_bytes;
199 		m = mask->hdr.src_addr.addr_bytes;
200 
201 		flow->table_key.proto_field[flow->rule.num_fields].field.eth =
202 			MV_NET_ETH_F_SA;
203 	}
204 
205 	key_field = &flow->rule.fields[flow->rule.num_fields];
206 	mrvl_alloc_key_mask(key_field);
207 	key_field->size = 6;
208 
209 	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX,
210 		 RTE_ETHER_ADDR_PRT_FMT,
211 		 k[0], k[1], k[2], k[3], k[4], k[5]);
212 
213 	snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX,
214 		 RTE_ETHER_ADDR_PRT_FMT,
215 		 m[0], m[1], m[2], m[3], m[4], m[5]);
216 
217 	flow->table_key.proto_field[flow->rule.num_fields].proto =
218 		MV_NET_PROTO_ETH;
219 	flow->table_key.key_size += key_field->size;
220 
221 	flow->rule.num_fields += 1;
222 
223 	return 0;
224 }
225 
226 /**
227  * Helper for parsing the eth flow item destination mac address.
228  *
229  * @param spec Pointer to the specific flow item.
230  * @param mask Pointer to the specific flow item's mask.
231  * @param flow Pointer to the flow.
232  * @return 0 in case of success, negative error value otherwise.
233  */
static inline int
mrvl_parse_dmac(const struct rte_flow_item_eth *spec,
		const struct rte_flow_item_eth *mask,
		struct rte_flow *flow)
{
	/* Delegate to the generic MAC parser, selecting the destination. */
	const int want_dst = 1;

	return mrvl_parse_mac(spec, mask, want_dst, flow);
}
241 
242 /**
243  * Helper for parsing the eth flow item source mac address.
244  *
245  * @param spec Pointer to the specific flow item.
246  * @param mask Pointer to the specific flow item's mask.
247  * @param flow Pointer to the flow.
248  * @return 0 in case of success, negative error value otherwise.
249  */
static inline int
mrvl_parse_smac(const struct rte_flow_item_eth *spec,
		const struct rte_flow_item_eth *mask,
		struct rte_flow *flow)
{
	/* Delegate to the generic MAC parser, selecting the source. */
	const int want_dst = 0;

	return mrvl_parse_mac(spec, mask, want_dst, flow);
}
257 
258 /**
259  * Parse the ether type field of the eth flow item.
260  *
261  * @param spec Pointer to the specific flow item.
262  * @param mask Pointer to the specific flow item's mask.
263  * @param flow Pointer to the flow.
264  * @return 0 in case of success, negative error value otherwise.
265  */
266 static int
267 mrvl_parse_type(const struct rte_flow_item_eth *spec,
268 		const struct rte_flow_item_eth *mask __rte_unused,
269 		struct rte_flow *flow)
270 {
271 	struct pp2_cls_rule_key_field *key_field;
272 	uint16_t k;
273 
274 	key_field = &flow->rule.fields[flow->rule.num_fields];
275 	mrvl_alloc_key_mask(key_field);
276 	key_field->size = 2;
277 
278 	k = rte_be_to_cpu_16(spec->hdr.ether_type);
279 	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
280 
281 	flow->table_key.proto_field[flow->rule.num_fields].proto =
282 		MV_NET_PROTO_ETH;
283 	flow->table_key.proto_field[flow->rule.num_fields].field.eth =
284 		MV_NET_ETH_F_TYPE;
285 	flow->table_key.key_size += key_field->size;
286 
287 	flow->rule.num_fields += 1;
288 
289 	return 0;
290 }
291 
292 /**
293  * Parse the vid field of the vlan rte flow item.
294  *
295  * This will create classifier rule that matches vid.
296  *
297  * @param spec Pointer to the specific flow item.
298  * @param mask Pointer to the specific flow item's mask.
299  * @param flow Pointer to the flow.
300  * @return 0 in case of success, negative error value otherwise.
301  */
302 static int
303 mrvl_parse_vlan_id(const struct rte_flow_item_vlan *spec,
304 		   const struct rte_flow_item_vlan *mask __rte_unused,
305 		   struct rte_flow *flow)
306 {
307 	struct pp2_cls_rule_key_field *key_field;
308 	uint16_t k;
309 
310 	key_field = &flow->rule.fields[flow->rule.num_fields];
311 	mrvl_alloc_key_mask(key_field);
312 	key_field->size = 2;
313 
314 	k = rte_be_to_cpu_16(spec->hdr.vlan_tci) & MRVL_VLAN_ID_MASK;
315 	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
316 
317 	flow->table_key.proto_field[flow->rule.num_fields].proto =
318 		MV_NET_PROTO_VLAN;
319 	flow->table_key.proto_field[flow->rule.num_fields].field.vlan =
320 		MV_NET_VLAN_F_ID;
321 	flow->table_key.key_size += key_field->size;
322 
323 	flow->rule.num_fields += 1;
324 
325 	return 0;
326 }
327 
328 /**
329  * Parse the pri field of the vlan rte flow item.
330  *
331  * This will create classifier rule that matches pri.
332  *
333  * @param spec Pointer to the specific flow item.
334  * @param mask Pointer to the specific flow item's mask.
335  * @param flow Pointer to the flow.
336  * @return 0 in case of success, negative error value otherwise.
337  */
338 static int
339 mrvl_parse_vlan_pri(const struct rte_flow_item_vlan *spec,
340 		    const struct rte_flow_item_vlan *mask __rte_unused,
341 		    struct rte_flow *flow)
342 {
343 	struct pp2_cls_rule_key_field *key_field;
344 	uint16_t k;
345 
346 	key_field = &flow->rule.fields[flow->rule.num_fields];
347 	mrvl_alloc_key_mask(key_field);
348 	key_field->size = 1;
349 
350 	k = (rte_be_to_cpu_16(spec->hdr.vlan_tci) & MRVL_VLAN_PRI_MASK) >> 13;
351 	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
352 
353 	flow->table_key.proto_field[flow->rule.num_fields].proto =
354 		MV_NET_PROTO_VLAN;
355 	flow->table_key.proto_field[flow->rule.num_fields].field.vlan =
356 		MV_NET_VLAN_F_PRI;
357 	flow->table_key.key_size += key_field->size;
358 
359 	flow->rule.num_fields += 1;
360 
361 	return 0;
362 }
363 
364 /**
365  * Parse the dscp field of the ipv4 rte flow item.
366  *
367  * This will create classifier rule that matches dscp field.
368  *
369  * @param spec Pointer to the specific flow item.
370  * @param mask Pointer to the specific flow item's mask.
371  * @param flow Pointer to the flow.
372  * @return 0 in case of success, negative error value otherwise.
373  */
374 static int
375 mrvl_parse_ip4_dscp(const struct rte_flow_item_ipv4 *spec,
376 		    const struct rte_flow_item_ipv4 *mask,
377 		    struct rte_flow *flow)
378 {
379 	struct pp2_cls_rule_key_field *key_field;
380 	uint8_t k, m;
381 
382 	key_field = &flow->rule.fields[flow->rule.num_fields];
383 	mrvl_alloc_key_mask(key_field);
384 	key_field->size = 1;
385 
386 	k = (spec->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2;
387 	m = (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2;
388 	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
389 	snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m);
390 
391 	flow->table_key.proto_field[flow->rule.num_fields].proto =
392 		MV_NET_PROTO_IP4;
393 	flow->table_key.proto_field[flow->rule.num_fields].field.ipv4 =
394 		MV_NET_IP4_F_DSCP;
395 	flow->table_key.key_size += key_field->size;
396 
397 	flow->rule.num_fields += 1;
398 
399 	return 0;
400 }
401 
402 /**
403  * Parse either source or destination ip addresses of the ipv4 flow item.
404  *
405  * This will create classifier rule that matches either destination
406  * or source ip field.
407  *
408  * @param spec Pointer to the specific flow item.
409  * @param mask Pointer to the specific flow item's mask.
410  * @param parse_dst Parse either destination or source ip address.
411  * @param flow Pointer to the flow.
412  * @return 0 in case of success, negative error value otherwise.
413  */
414 static int
415 mrvl_parse_ip4_addr(const struct rte_flow_item_ipv4 *spec,
416 		    const struct rte_flow_item_ipv4 *mask,
417 		    int parse_dst, struct rte_flow *flow)
418 {
419 	struct pp2_cls_rule_key_field *key_field;
420 	struct in_addr k;
421 	uint32_t m;
422 
423 	memset(&k, 0, sizeof(k));
424 	if (parse_dst) {
425 		k.s_addr = spec->hdr.dst_addr;
426 		m = rte_be_to_cpu_32(mask->hdr.dst_addr);
427 
428 		flow->table_key.proto_field[flow->rule.num_fields].field.ipv4 =
429 			MV_NET_IP4_F_DA;
430 	} else {
431 		k.s_addr = spec->hdr.src_addr;
432 		m = rte_be_to_cpu_32(mask->hdr.src_addr);
433 
434 		flow->table_key.proto_field[flow->rule.num_fields].field.ipv4 =
435 			MV_NET_IP4_F_SA;
436 	}
437 
438 	key_field = &flow->rule.fields[flow->rule.num_fields];
439 	mrvl_alloc_key_mask(key_field);
440 	key_field->size = 4;
441 
442 	inet_ntop(AF_INET, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX);
443 	snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "0x%x", m);
444 
445 	flow->table_key.proto_field[flow->rule.num_fields].proto =
446 		MV_NET_PROTO_IP4;
447 	flow->table_key.key_size += key_field->size;
448 
449 	flow->rule.num_fields += 1;
450 
451 	return 0;
452 }
453 
454 /**
455  * Helper for parsing destination ip of the ipv4 flow item.
456  *
457  * @param spec Pointer to the specific flow item.
458  * @param mask Pointer to the specific flow item's mask.
459  * @param flow Pointer to the flow.
460  * @return 0 in case of success, negative error value otherwise.
461  */
static inline int
mrvl_parse_ip4_dip(const struct rte_flow_item_ipv4 *spec,
		   const struct rte_flow_item_ipv4 *mask,
		   struct rte_flow *flow)
{
	/* Delegate to the generic IPv4 address parser (destination). */
	const int want_dst = 1;

	return mrvl_parse_ip4_addr(spec, mask, want_dst, flow);
}
469 
470 /**
471  * Helper for parsing source ip of the ipv4 flow item.
472  *
473  * @param spec Pointer to the specific flow item.
474  * @param mask Pointer to the specific flow item's mask.
475  * @param flow Pointer to the flow.
476  * @return 0 in case of success, negative error value otherwise.
477  */
static inline int
mrvl_parse_ip4_sip(const struct rte_flow_item_ipv4 *spec,
		   const struct rte_flow_item_ipv4 *mask,
		   struct rte_flow *flow)
{
	/* Delegate to the generic IPv4 address parser (source). */
	const int want_dst = 0;

	return mrvl_parse_ip4_addr(spec, mask, want_dst, flow);
}
485 
486 /**
487  * Parse the proto field of the ipv4 rte flow item.
488  *
489  * This will create classifier rule that matches proto field.
490  *
491  * @param spec Pointer to the specific flow item.
492  * @param mask Pointer to the specific flow item's mask.
493  * @param flow Pointer to the flow.
494  * @return 0 in case of success, negative error value otherwise.
495  */
496 static int
497 mrvl_parse_ip4_proto(const struct rte_flow_item_ipv4 *spec,
498 		     const struct rte_flow_item_ipv4 *mask __rte_unused,
499 		     struct rte_flow *flow)
500 {
501 	struct pp2_cls_rule_key_field *key_field;
502 	uint8_t k = spec->hdr.next_proto_id;
503 
504 	key_field = &flow->rule.fields[flow->rule.num_fields];
505 	mrvl_alloc_key_mask(key_field);
506 	key_field->size = 1;
507 
508 	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
509 
510 	flow->table_key.proto_field[flow->rule.num_fields].proto =
511 		MV_NET_PROTO_IP4;
512 	flow->table_key.proto_field[flow->rule.num_fields].field.ipv4 =
513 		MV_NET_IP4_F_PROTO;
514 	flow->table_key.key_size += key_field->size;
515 
516 	flow->rule.num_fields += 1;
517 
518 	return 0;
519 }
520 
521 /**
522  * Parse either source or destination ip addresses of the ipv6 rte flow item.
523  *
524  * This will create classifier rule that matches either destination
525  * or source ip field.
526  *
527  * @param spec Pointer to the specific flow item.
528  * @param mask Pointer to the specific flow item's mask.
529  * @param parse_dst Parse either destination or source ipv6 address.
530  * @param flow Pointer to the flow.
531  * @return 0 in case of success, negative error value otherwise.
532  */
533 static int
534 mrvl_parse_ip6_addr(const struct rte_flow_item_ipv6 *spec,
535 	       const struct rte_flow_item_ipv6 *mask,
536 	       int parse_dst, struct rte_flow *flow)
537 {
538 	struct pp2_cls_rule_key_field *key_field;
539 	struct rte_ipv6_addr k, m;
540 
541 	if (parse_dst) {
542 		k = spec->hdr.dst_addr;
543 		m = mask->hdr.dst_addr;
544 		flow->table_key.proto_field[flow->rule.num_fields].field.ipv6 =
545 			MV_NET_IP6_F_DA;
546 	} else {
547 		k = spec->hdr.src_addr;
548 		m = mask->hdr.src_addr;
549 		flow->table_key.proto_field[flow->rule.num_fields].field.ipv6 =
550 			MV_NET_IP6_F_SA;
551 	}
552 
553 	key_field = &flow->rule.fields[flow->rule.num_fields];
554 	mrvl_alloc_key_mask(key_field);
555 	key_field->size = RTE_IPV6_ADDR_SIZE;
556 
557 	inet_ntop(AF_INET6, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX);
558 	inet_ntop(AF_INET6, &m, (char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX);
559 
560 	flow->table_key.proto_field[flow->rule.num_fields].proto =
561 		MV_NET_PROTO_IP6;
562 	flow->table_key.key_size += key_field->size;
563 
564 	flow->rule.num_fields += 1;
565 
566 	return 0;
567 }
568 
569 /**
570  * Helper for parsing destination ip of the ipv6 flow item.
571  *
572  * @param spec Pointer to the specific flow item.
573  * @param mask Pointer to the specific flow item's mask.
574  * @param flow Pointer to the flow.
575  * @return 0 in case of success, negative error value otherwise.
576  */
static inline int
mrvl_parse_ip6_dip(const struct rte_flow_item_ipv6 *spec,
		   const struct rte_flow_item_ipv6 *mask,
		   struct rte_flow *flow)
{
	/* Delegate to the generic IPv6 address parser (destination). */
	const int want_dst = 1;

	return mrvl_parse_ip6_addr(spec, mask, want_dst, flow);
}
584 
585 /**
586  * Helper for parsing source ip of the ipv6 flow item.
587  *
588  * @param spec Pointer to the specific flow item.
589  * @param mask Pointer to the specific flow item's mask.
590  * @param flow Pointer to the flow.
591  * @return 0 in case of success, negative error value otherwise.
592  */
static inline int
mrvl_parse_ip6_sip(const struct rte_flow_item_ipv6 *spec,
		   const struct rte_flow_item_ipv6 *mask,
		   struct rte_flow *flow)
{
	/* Delegate to the generic IPv6 address parser (source). */
	const int want_dst = 0;

	return mrvl_parse_ip6_addr(spec, mask, want_dst, flow);
}
600 
601 /**
602  * Parse the flow label of the ipv6 flow item.
603  *
604  * This will create classifier rule that matches flow field.
605  *
606  * @param spec Pointer to the specific flow item.
607  * @param mask Pointer to the specific flow item's mask.
608  * @param flow Pointer to the flow.
609  * @return 0 in case of success, negative error value otherwise.
610  */
611 static int
612 mrvl_parse_ip6_flow(const struct rte_flow_item_ipv6 *spec,
613 		    const struct rte_flow_item_ipv6 *mask,
614 		    struct rte_flow *flow)
615 {
616 	struct pp2_cls_rule_key_field *key_field;
617 	uint32_t k = rte_be_to_cpu_32(spec->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK,
618 		 m = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK;
619 
620 	key_field = &flow->rule.fields[flow->rule.num_fields];
621 	mrvl_alloc_key_mask(key_field);
622 	key_field->size = 3;
623 
624 	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
625 	snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m);
626 
627 	flow->table_key.proto_field[flow->rule.num_fields].proto =
628 		MV_NET_PROTO_IP6;
629 	flow->table_key.proto_field[flow->rule.num_fields].field.ipv6 =
630 		MV_NET_IP6_F_FLOW;
631 	flow->table_key.key_size += key_field->size;
632 
633 	flow->rule.num_fields += 1;
634 
635 	return 0;
636 }
637 
638 /**
639  * Parse the next header of the ipv6 flow item.
640  *
641  * This will create classifier rule that matches next header field.
642  *
643  * @param spec Pointer to the specific flow item.
644  * @param mask Pointer to the specific flow item's mask.
645  * @param flow Pointer to the flow.
646  * @return 0 in case of success, negative error value otherwise.
647  */
648 static int
649 mrvl_parse_ip6_next_hdr(const struct rte_flow_item_ipv6 *spec,
650 			const struct rte_flow_item_ipv6 *mask __rte_unused,
651 			struct rte_flow *flow)
652 {
653 	struct pp2_cls_rule_key_field *key_field;
654 	uint8_t k = spec->hdr.proto;
655 
656 	key_field = &flow->rule.fields[flow->rule.num_fields];
657 	mrvl_alloc_key_mask(key_field);
658 	key_field->size = 1;
659 
660 	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
661 
662 	flow->table_key.proto_field[flow->rule.num_fields].proto =
663 		MV_NET_PROTO_IP6;
664 	flow->table_key.proto_field[flow->rule.num_fields].field.ipv6 =
665 		MV_NET_IP6_F_NEXT_HDR;
666 	flow->table_key.key_size += key_field->size;
667 
668 	flow->rule.num_fields += 1;
669 
670 	return 0;
671 }
672 
673 /**
674  * Parse destination or source port of the tcp flow item.
675  *
676  * This will create classifier rule that matches either destination or
677  * source tcp port.
678  *
679  * @param spec Pointer to the specific flow item.
680  * @param mask Pointer to the specific flow item's mask.
681  * @param parse_dst Parse either destination or source port.
682  * @param flow Pointer to the flow.
683  * @return 0 in case of success, negative error value otherwise.
684  */
685 static int
686 mrvl_parse_tcp_port(const struct rte_flow_item_tcp *spec,
687 		    const struct rte_flow_item_tcp *mask __rte_unused,
688 		    int parse_dst, struct rte_flow *flow)
689 {
690 	struct pp2_cls_rule_key_field *key_field;
691 	uint16_t k;
692 
693 	key_field = &flow->rule.fields[flow->rule.num_fields];
694 	mrvl_alloc_key_mask(key_field);
695 	key_field->size = 2;
696 
697 	if (parse_dst) {
698 		k = rte_be_to_cpu_16(spec->hdr.dst_port);
699 
700 		flow->table_key.proto_field[flow->rule.num_fields].field.tcp =
701 			MV_NET_TCP_F_DP;
702 	} else {
703 		k = rte_be_to_cpu_16(spec->hdr.src_port);
704 
705 		flow->table_key.proto_field[flow->rule.num_fields].field.tcp =
706 			MV_NET_TCP_F_SP;
707 	}
708 
709 	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
710 
711 	flow->table_key.proto_field[flow->rule.num_fields].proto =
712 		MV_NET_PROTO_TCP;
713 	flow->table_key.key_size += key_field->size;
714 
715 	flow->rule.num_fields += 1;
716 
717 	return 0;
718 }
719 
720 /**
721  * Helper for parsing the tcp source port of the tcp flow item.
722  *
723  * @param spec Pointer to the specific flow item.
724  * @param mask Pointer to the specific flow item's mask.
725  * @param flow Pointer to the flow.
726  * @return 0 in case of success, negative error value otherwise.
727  */
static inline int
mrvl_parse_tcp_sport(const struct rte_flow_item_tcp *spec,
		     const struct rte_flow_item_tcp *mask,
		     struct rte_flow *flow)
{
	/* Delegate to the generic TCP port parser (source port). */
	const int want_dst = 0;

	return mrvl_parse_tcp_port(spec, mask, want_dst, flow);
}
735 
736 /**
737  * Helper for parsing the tcp destination port of the tcp flow item.
738  *
739  * @param spec Pointer to the specific flow item.
740  * @param mask Pointer to the specific flow item's mask.
741  * @param flow Pointer to the flow.
742  * @return 0 in case of success, negative error value otherwise.
743  */
static inline int
mrvl_parse_tcp_dport(const struct rte_flow_item_tcp *spec,
		     const struct rte_flow_item_tcp *mask,
		     struct rte_flow *flow)
{
	/* Delegate to the generic TCP port parser (destination port). */
	const int want_dst = 1;

	return mrvl_parse_tcp_port(spec, mask, want_dst, flow);
}
751 
752 /**
753  * Parse destination or source port of the udp flow item.
754  *
755  * This will create classifier rule that matches either destination or
756  * source udp port.
757  *
758  * @param spec Pointer to the specific flow item.
759  * @param mask Pointer to the specific flow item's mask.
760  * @param parse_dst Parse either destination or source port.
761  * @param flow Pointer to the flow.
762  * @return 0 in case of success, negative error value otherwise.
763  */
764 static int
765 mrvl_parse_udp_port(const struct rte_flow_item_udp *spec,
766 		    const struct rte_flow_item_udp *mask __rte_unused,
767 		    int parse_dst, struct rte_flow *flow)
768 {
769 	struct pp2_cls_rule_key_field *key_field;
770 	uint16_t k;
771 
772 	key_field = &flow->rule.fields[flow->rule.num_fields];
773 	mrvl_alloc_key_mask(key_field);
774 	key_field->size = 2;
775 
776 	if (parse_dst) {
777 		k = rte_be_to_cpu_16(spec->hdr.dst_port);
778 
779 		flow->table_key.proto_field[flow->rule.num_fields].field.udp =
780 			MV_NET_UDP_F_DP;
781 	} else {
782 		k = rte_be_to_cpu_16(spec->hdr.src_port);
783 
784 		flow->table_key.proto_field[flow->rule.num_fields].field.udp =
785 			MV_NET_UDP_F_SP;
786 	}
787 
788 	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
789 
790 	flow->table_key.proto_field[flow->rule.num_fields].proto =
791 		MV_NET_PROTO_UDP;
792 	flow->table_key.key_size += key_field->size;
793 
794 	flow->rule.num_fields += 1;
795 
796 	return 0;
797 }
798 
799 /**
800  * Helper for parsing the udp source port of the udp flow item.
801  *
802  * @param spec Pointer to the specific flow item.
803  * @param mask Pointer to the specific flow item's mask.
804  * @param flow Pointer to the flow.
805  * @return 0 in case of success, negative error value otherwise.
806  */
static inline int
mrvl_parse_udp_sport(const struct rte_flow_item_udp *spec,
		     const struct rte_flow_item_udp *mask,
		     struct rte_flow *flow)
{
	/* Delegate to the generic UDP port parser (source port). */
	const int want_dst = 0;

	return mrvl_parse_udp_port(spec, mask, want_dst, flow);
}
814 
815 /**
816  * Helper for parsing the udp destination port of the udp flow item.
817  *
818  * @param spec Pointer to the specific flow item.
819  * @param mask Pointer to the specific flow item's mask.
820  * @param flow Pointer to the flow.
821  * @return 0 in case of success, negative error value otherwise.
822  */
static inline int
mrvl_parse_udp_dport(const struct rte_flow_item_udp *spec,
		     const struct rte_flow_item_udp *mask,
		     struct rte_flow *flow)
{
	/* Delegate to the generic UDP port parser (destination port). */
	const int want_dst = 1;

	return mrvl_parse_udp_port(spec, mask, want_dst, flow);
}
830 
831 /**
832  * Parse eth flow item.
833  *
834  * @param item Pointer to the flow item.
835  * @param flow Pointer to the flow.
836  * @param error Pointer to the flow error.
837  * @returns 0 on success, negative value otherwise.
838  */
839 static int
840 mrvl_parse_eth(const struct rte_flow_item *item, struct rte_flow *flow,
841 	       struct rte_flow_error *error)
842 {
843 	const struct rte_flow_item_eth *spec = NULL, *mask = NULL;
844 	struct rte_ether_addr zero;
845 	int ret;
846 
847 	ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
848 			      &rte_flow_item_eth_mask,
849 			      sizeof(struct rte_flow_item_eth), error);
850 	if (ret)
851 		return ret;
852 
853 	memset(&zero, 0, sizeof(zero));
854 
855 	if (memcmp(&mask->hdr.dst_addr, &zero, sizeof(mask->hdr.dst_addr))) {
856 		ret = mrvl_parse_dmac(spec, mask, flow);
857 		if (ret)
858 			goto out;
859 	}
860 
861 	if (memcmp(&mask->hdr.src_addr, &zero, sizeof(mask->hdr.src_addr))) {
862 		ret = mrvl_parse_smac(spec, mask, flow);
863 		if (ret)
864 			goto out;
865 	}
866 
867 	if (mask->hdr.ether_type) {
868 		MRVL_LOG(WARNING, "eth type mask is ignored");
869 		ret = mrvl_parse_type(spec, mask, flow);
870 		if (ret)
871 			goto out;
872 	}
873 
874 	return 0;
875 out:
876 	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
877 			   "Reached maximum number of fields in cls tbl key\n");
878 	return -rte_errno;
879 }
880 
881 /**
882  * Parse vlan flow item.
883  *
884  * @param item Pointer to the flow item.
885  * @param flow Pointer to the flow.
886  * @param error Pointer to the flow error.
887  * @returns 0 on success, negative value otherwise.
888  */
static int
mrvl_parse_vlan(const struct rte_flow_item *item,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	const struct rte_flow_item_vlan *spec = NULL, *mask = NULL;
	uint16_t m;
	int ret, i;

	/* Resolve spec/mask (falling back to the default vlan mask). */
	ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
			      &rte_flow_item_vlan_mask,
			      sizeof(struct rte_flow_item_vlan), error);
	if (ret)
		return ret;

	/* Inspect the TCI mask in host byte order. */
	m = rte_be_to_cpu_16(mask->hdr.vlan_tci);
	if (m & MRVL_VLAN_ID_MASK) {
		MRVL_LOG(WARNING, "vlan id mask is ignored");
		ret = mrvl_parse_vlan_id(spec, mask, flow);
		if (ret)
			goto out;
	}

	if (m & MRVL_VLAN_PRI_MASK) {
		MRVL_LOG(WARNING, "vlan pri mask is ignored");
		ret = mrvl_parse_vlan_pri(spec, mask, flow);
		if (ret)
			goto out;
	}

	if (mask->hdr.eth_proto) {
		/* Wrap the inner ether type in an eth item so the generic
		 * ether-type parser can be reused.
		 */
		struct rte_flow_item_eth spec_eth = {
			.hdr.ether_type = spec->hdr.eth_proto,
		};
		struct rte_flow_item_eth mask_eth = {
			.hdr.ether_type = mask->hdr.eth_proto,
		};

		/* TPID is not supported so if ETH_TYPE was selected,
		 * error is return. else, classify eth-type with the tpid value
		 */
		for (i = 0; i < flow->rule.num_fields; i++)
			if (flow->table_key.proto_field[i].proto ==
			    MV_NET_PROTO_ETH &&
			    flow->table_key.proto_field[i].field.eth ==
			    MV_NET_ETH_F_TYPE) {
				rte_flow_error_set(error, ENOTSUP,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "VLAN TPID matching is not supported");
				return -rte_errno;
			}

		MRVL_LOG(WARNING, "inner eth type mask is ignored");
		ret = mrvl_parse_type(&spec_eth, &mask_eth, flow);
		if (ret)
			goto out;
	}

	return 0;
out:
	/* All parse helpers fail only when the cls table key is full. */
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			   "Reached maximum number of fields in cls tbl key\n");
	return -rte_errno;
}
954 
955 /**
956  * Parse ipv4 flow item.
957  *
958  * @param item Pointer to the flow item.
959  * @param flow Pointer to the flow.
960  * @param error Pointer to the flow error.
961  * @returns 0 on success, negative value otherwise.
962  */
963 static int
964 mrvl_parse_ip4(const struct rte_flow_item *item,
965 	       struct rte_flow *flow,
966 	       struct rte_flow_error *error)
967 {
968 	const struct rte_flow_item_ipv4 *spec = NULL, *mask = NULL;
969 	int ret;
970 
971 	ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
972 			      &rte_flow_item_ipv4_mask,
973 			      sizeof(struct rte_flow_item_ipv4), error);
974 	if (ret)
975 		return ret;
976 
977 	if (mask->hdr.version_ihl ||
978 	    mask->hdr.total_length ||
979 	    mask->hdr.packet_id ||
980 	    mask->hdr.fragment_offset ||
981 	    mask->hdr.time_to_live ||
982 	    mask->hdr.hdr_checksum) {
983 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
984 				   NULL, "Not supported by classifier\n");
985 		return -rte_errno;
986 	}
987 
988 	if (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) {
989 		ret = mrvl_parse_ip4_dscp(spec, mask, flow);
990 		if (ret)
991 			goto out;
992 	}
993 
994 	if (mask->hdr.src_addr) {
995 		ret = mrvl_parse_ip4_sip(spec, mask, flow);
996 		if (ret)
997 			goto out;
998 	}
999 
1000 	if (mask->hdr.dst_addr) {
1001 		ret = mrvl_parse_ip4_dip(spec, mask, flow);
1002 		if (ret)
1003 			goto out;
1004 	}
1005 
1006 	if (mask->hdr.next_proto_id) {
1007 		MRVL_LOG(WARNING, "next proto id mask is ignored");
1008 		ret = mrvl_parse_ip4_proto(spec, mask, flow);
1009 		if (ret)
1010 			goto out;
1011 	}
1012 
1013 	return 0;
1014 out:
1015 	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1016 			   "Reached maximum number of fields in cls tbl key\n");
1017 	return -rte_errno;
1018 }
1019 
1020 /**
1021  * Parse ipv6 flow item.
1022  *
1023  * @param item Pointer to the flow item.
1024  * @param flow Pointer to the flow.
1025  * @param error Pointer to the flow error.
1026  * @returns 0 on success, negative value otherwise.
1027  */
1028 static int
1029 mrvl_parse_ip6(const struct rte_flow_item *item,
1030 	       struct rte_flow *flow,
1031 	       struct rte_flow_error *error)
1032 {
1033 	const struct rte_flow_item_ipv6 *spec = NULL, *mask = NULL;
1034 	uint32_t flow_mask;
1035 	int ret;
1036 
1037 	ret = mrvl_parse_init(item, (const void **)&spec,
1038 			      (const void **)&mask,
1039 			      &rte_flow_item_ipv6_mask,
1040 			      sizeof(struct rte_flow_item_ipv6),
1041 			      error);
1042 	if (ret)
1043 		return ret;
1044 
1045 	if (mask->hdr.payload_len ||
1046 	    mask->hdr.hop_limits) {
1047 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1048 				   NULL, "Not supported by classifier\n");
1049 		return -rte_errno;
1050 	}
1051 
1052 	if (!rte_ipv6_addr_is_unspec(&mask->hdr.src_addr)) {
1053 		ret = mrvl_parse_ip6_sip(spec, mask, flow);
1054 		if (ret)
1055 			goto out;
1056 	}
1057 
1058 	if (!rte_ipv6_addr_is_unspec(&mask->hdr.dst_addr)) {
1059 		ret = mrvl_parse_ip6_dip(spec, mask, flow);
1060 		if (ret)
1061 			goto out;
1062 	}
1063 
1064 	flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK;
1065 	if (flow_mask) {
1066 		ret = mrvl_parse_ip6_flow(spec, mask, flow);
1067 		if (ret)
1068 			goto out;
1069 	}
1070 
1071 	if (mask->hdr.proto) {
1072 		MRVL_LOG(WARNING, "next header mask is ignored");
1073 		ret = mrvl_parse_ip6_next_hdr(spec, mask, flow);
1074 		if (ret)
1075 			goto out;
1076 	}
1077 
1078 	return 0;
1079 out:
1080 	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1081 			   "Reached maximum number of fields in cls tbl key\n");
1082 	return -rte_errno;
1083 }
1084 
1085 /**
1086  * Parse tcp flow item.
1087  *
1088  * @param item Pointer to the flow item.
1089  * @param flow Pointer to the flow.
1090  * @param error Pointer to the flow error.
1091  * @returns 0 on success, negative value otherwise.
1092  */
1093 static int
1094 mrvl_parse_tcp(const struct rte_flow_item *item,
1095 	       struct rte_flow *flow,
1096 	       struct rte_flow_error *error)
1097 {
1098 	const struct rte_flow_item_tcp *spec = NULL, *mask = NULL;
1099 	int ret;
1100 
1101 	ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1102 			      &rte_flow_item_tcp_mask,
1103 			      sizeof(struct rte_flow_item_tcp), error);
1104 	if (ret)
1105 		return ret;
1106 
1107 	if (mask->hdr.sent_seq ||
1108 	    mask->hdr.recv_ack ||
1109 	    mask->hdr.data_off ||
1110 	    mask->hdr.tcp_flags ||
1111 	    mask->hdr.rx_win ||
1112 	    mask->hdr.cksum ||
1113 	    mask->hdr.tcp_urp) {
1114 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1115 				   NULL, "Not supported by classifier\n");
1116 		return -rte_errno;
1117 	}
1118 
1119 	if (mask->hdr.src_port) {
1120 		MRVL_LOG(WARNING, "tcp sport mask is ignored");
1121 		ret = mrvl_parse_tcp_sport(spec, mask, flow);
1122 		if (ret)
1123 			goto out;
1124 	}
1125 
1126 	if (mask->hdr.dst_port) {
1127 		MRVL_LOG(WARNING, "tcp dport mask is ignored");
1128 		ret = mrvl_parse_tcp_dport(spec, mask, flow);
1129 		if (ret)
1130 			goto out;
1131 	}
1132 
1133 	return 0;
1134 out:
1135 	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1136 			   "Reached maximum number of fields in cls tbl key\n");
1137 	return -rte_errno;
1138 }
1139 
1140 /**
1141  * Parse udp flow item.
1142  *
1143  * @param item Pointer to the flow item.
1144  * @param flow Pointer to the flow.
1145  * @param error Pointer to the flow error.
1146  * @returns 0 on success, negative value otherwise.
1147  */
1148 static int
1149 mrvl_parse_udp(const struct rte_flow_item *item,
1150 	       struct rte_flow *flow,
1151 	       struct rte_flow_error *error)
1152 {
1153 	const struct rte_flow_item_udp *spec = NULL, *mask = NULL;
1154 	int ret;
1155 
1156 	ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1157 			      &rte_flow_item_udp_mask,
1158 			      sizeof(struct rte_flow_item_udp), error);
1159 	if (ret)
1160 		return ret;
1161 
1162 	if (mask->hdr.dgram_len ||
1163 	    mask->hdr.dgram_cksum) {
1164 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1165 				   NULL, "Not supported by classifier\n");
1166 		return -rte_errno;
1167 	}
1168 
1169 	if (mask->hdr.src_port) {
1170 		MRVL_LOG(WARNING, "udp sport mask is ignored");
1171 		ret = mrvl_parse_udp_sport(spec, mask, flow);
1172 		if (ret)
1173 			goto out;
1174 	}
1175 
1176 	if (mask->hdr.dst_port) {
1177 		MRVL_LOG(WARNING, "udp dport mask is ignored");
1178 		ret = mrvl_parse_udp_dport(spec, mask, flow);
1179 		if (ret)
1180 			goto out;
1181 	}
1182 
1183 	return 0;
1184 out:
1185 	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1186 			   "Reached maximum number of fields in cls tbl key\n");
1187 	return -rte_errno;
1188 }
1189 
1190 static int
1191 mrvl_string_to_hex_values(const uint8_t *input_string,
1192 			  uint8_t *hex_key,
1193 			  uint8_t *length)
1194 {
1195 	char tmp_arr[3], tmp_string[MRVL_CLS_STR_SIZE_MAX], *string_iter;
1196 	int i;
1197 
1198 	strcpy(tmp_string, (const char *)input_string);
1199 	string_iter = tmp_string;
1200 
1201 	string_iter += 2; /* skip the '0x' */
1202 	*length = ((*length - 2) + 1) / 2;
1203 
1204 	for (i = 0; i < *length; i++) {
1205 		strncpy(tmp_arr, string_iter, 2);
1206 		tmp_arr[2] = '\0';
1207 		if (get_val_securely8(tmp_arr, 16,
1208 				      &hex_key[*length - 1 - i]) < 0)
1209 			return -1;
1210 		string_iter += 2;
1211 	}
1212 
1213 	return 0;
1214 }
1215 
1216 /**
1217  * Parse raw flow item.
1218  *
1219  * @param item Pointer to the flow item.
1220  * @param flow Pointer to the flow.
1221  * @param error Pointer to the flow error.
1222  * @returns 0 on success, negative value otherwise.
1223  */
1224 static int
1225 mrvl_parse_raw(const struct rte_flow_item *item,
1226 	       struct rte_flow *flow,
1227 	       struct rte_flow_error *error)
1228 {
1229 	const struct rte_flow_item_raw *spec = NULL, *mask = NULL;
1230 	struct pp2_cls_rule_key_field *key_field;
1231 	struct mv_net_udf *udf_params;
1232 	uint8_t length;
1233 	int ret;
1234 
1235 	ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1236 			      &rte_flow_item_raw_mask,
1237 			      sizeof(struct rte_flow_item_raw), error);
1238 	if (ret)
1239 		return ret;
1240 
1241 	if (!spec->pattern) {
1242 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1243 				   NULL, "pattern pointer MUST be given\n");
1244 		return -rte_errno;
1245 	}
1246 
1247 	/* Only hex string is supported; so, it must start with '0x' */
1248 	if (strncmp((const char *)spec->pattern, "0x", 2) != 0)  {
1249 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1250 				   NULL, "'pattern' string must start with '0x'\n");
1251 		return -rte_errno;
1252 	}
1253 
1254 	if (mask->pattern &&
1255 	    strncmp((const char *)mask->pattern, "0x", 2) != 0)  {
1256 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1257 				   NULL, "'mask-pattern' string must start with '0x'\n");
1258 		return -rte_errno;
1259 	}
1260 
1261 	if (mask->search && spec->search) {
1262 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1263 				   NULL, "'search' option must be '0'\n");
1264 		return -rte_errno;
1265 	}
1266 
1267 	if (mask->offset && spec->offset != 0) {
1268 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1269 				   NULL, "'offset' option must be '0'\n");
1270 		return -rte_errno;
1271 	}
1272 
1273 	if (!mask->relative || !spec->relative) {
1274 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1275 				   NULL, "'relative' option must be given and enabled\n");
1276 		return -rte_errno;
1277 	}
1278 
1279 	length = spec->length & mask->length;
1280 	if (!length) {
1281 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1282 				   NULL, "'length' option must be given bigger than '0'\n");
1283 		return -rte_errno;
1284 	}
1285 
1286 	key_field = &flow->rule.fields[flow->rule.num_fields];
1287 	mrvl_alloc_key_mask(key_field);
1288 
1289 	/* pattern and length refer to string bytes. we need to convert it to
1290 	 * values.
1291 	 */
1292 	key_field->size = length;
1293 	ret = mrvl_string_to_hex_values(spec->pattern, key_field->key,
1294 					&key_field->size);
1295 	if (ret) {
1296 		rte_flow_error_set(error, EINVAL,
1297 				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1298 				   NULL,
1299 				   "can't convert pattern from string to hex\n");
1300 		return -rte_errno;
1301 	}
1302 	if (mask->pattern) {
1303 		ret = mrvl_string_to_hex_values(mask->pattern, key_field->mask,
1304 						&length);
1305 		if (ret) {
1306 			rte_flow_error_set(error, EINVAL,
1307 					   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1308 					   NULL,
1309 					   "can't convert mask-pattern from string to hex\n");
1310 			return -rte_errno;
1311 		}
1312 	} else {
1313 		rte_free(key_field->mask);
1314 		key_field->mask = NULL;
1315 	}
1316 
1317 	flow->table_key.proto_field[flow->rule.num_fields].proto =
1318 		MV_NET_UDF;
1319 	udf_params =
1320 		&flow->table_key.proto_field[flow->rule.num_fields].field.udf;
1321 	udf_params->id = flow->next_udf_id++;
1322 	udf_params->size = key_field->size;
1323 	flow->table_key.key_size += key_field->size;
1324 
1325 	flow->rule.num_fields += 1;
1326 
1327 	return 0;
1328 }
1329 
1330 /**
1331  * Structure used to map specific flow pattern to the pattern parse callback
1332  * which will iterate over each pattern item and extract relevant data.
1333  */
static const struct {
	/* Flow item type this entry handles. */
	const enum rte_flow_item_type pattern_type;
	/* Callback extracting classifier key fields from the item. */
	int (*parse)(const struct rte_flow_item *pattern,
		struct rte_flow *flow,
		struct rte_flow_error *error);
} mrvl_patterns[] = {
	{ RTE_FLOW_ITEM_TYPE_ETH, mrvl_parse_eth },
	{ RTE_FLOW_ITEM_TYPE_VLAN, mrvl_parse_vlan },
	{ RTE_FLOW_ITEM_TYPE_IPV4, mrvl_parse_ip4 },
	{ RTE_FLOW_ITEM_TYPE_IPV6, mrvl_parse_ip6 },
	{ RTE_FLOW_ITEM_TYPE_TCP, mrvl_parse_tcp },
	{ RTE_FLOW_ITEM_TYPE_UDP, mrvl_parse_udp },
	{ RTE_FLOW_ITEM_TYPE_RAW, mrvl_parse_raw },
	/* Sentinel: terminates lookups over this table. */
	{ RTE_FLOW_ITEM_TYPE_END, NULL }
};
1349 
1350 /**
1351  * Parse flow attribute.
1352  *
1353  * This will check whether the provided attribute's flags are supported.
1354  *
1355  * @param priv Unused
1356  * @param attr Pointer to the flow attribute.
1357  * @param flow Unused
1358  * @param error Pointer to the flow error.
1359  * @returns 0 in case of success, negative value otherwise.
1360  */
1361 static int
1362 mrvl_flow_parse_attr(struct mrvl_priv *priv __rte_unused,
1363 		     const struct rte_flow_attr *attr,
1364 		     struct rte_flow *flow __rte_unused,
1365 		     struct rte_flow_error *error)
1366 {
1367 	if (!attr) {
1368 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
1369 				   NULL, "NULL attribute");
1370 		return -rte_errno;
1371 	}
1372 
1373 	if (attr->group) {
1374 		rte_flow_error_set(error, ENOTSUP,
1375 				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
1376 				   "Groups are not supported");
1377 		return -rte_errno;
1378 	}
1379 	if (attr->priority) {
1380 		rte_flow_error_set(error, ENOTSUP,
1381 				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, NULL,
1382 				   "Priorities are not supported");
1383 		return -rte_errno;
1384 	}
1385 	if (!attr->ingress) {
1386 		rte_flow_error_set(error, ENOTSUP,
1387 				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, NULL,
1388 				   "Only ingress is supported");
1389 		return -rte_errno;
1390 	}
1391 	if (attr->egress) {
1392 		rte_flow_error_set(error, ENOTSUP,
1393 				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1394 				   "Egress is not supported");
1395 		return -rte_errno;
1396 	}
1397 	if (attr->transfer) {
1398 		rte_flow_error_set(error, ENOTSUP,
1399 				   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL,
1400 				   "Transfer is not supported");
1401 		return -rte_errno;
1402 	}
1403 
1404 	return 0;
1405 }
1406 
1407 /**
1408  * Parse flow pattern.
1409  *
1410  * Specific classifier rule will be created as well.
1411  *
1412  * @param priv Unused
1413  * @param pattern Pointer to the flow pattern.
1414  * @param flow Pointer to the flow.
1415  * @param error Pointer to the flow error.
1416  * @returns 0 in case of success, negative value otherwise.
1417  */
1418 static int
1419 mrvl_flow_parse_pattern(struct mrvl_priv *priv __rte_unused,
1420 			const struct rte_flow_item pattern[],
1421 			struct rte_flow *flow,
1422 			struct rte_flow_error *error)
1423 {
1424 	unsigned int i, j;
1425 	int ret;
1426 
1427 	for (i = 0; pattern[i].type != RTE_FLOW_ITEM_TYPE_END; i++) {
1428 		if (pattern[i].type == RTE_FLOW_ITEM_TYPE_VOID)
1429 			continue;
1430 		for (j = 0; mrvl_patterns[j].pattern_type !=
1431 			RTE_FLOW_ITEM_TYPE_END; j++) {
1432 			if (mrvl_patterns[j].pattern_type != pattern[i].type)
1433 				continue;
1434 
1435 			if (flow->rule.num_fields >=
1436 			    PP2_CLS_TBL_MAX_NUM_FIELDS) {
1437 				rte_flow_error_set(error, ENOSPC,
1438 						   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1439 						   NULL,
1440 						   "too many pattern (max %d)");
1441 				return -rte_errno;
1442 			}
1443 
1444 			ret = mrvl_patterns[j].parse(&pattern[i], flow, error);
1445 			if (ret) {
1446 				mrvl_free_all_key_mask(&flow->rule);
1447 				return ret;
1448 			}
1449 			break;
1450 		}
1451 		if (mrvl_patterns[j].pattern_type == RTE_FLOW_ITEM_TYPE_END) {
1452 			rte_flow_error_set(error, ENOTSUP,
1453 					   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1454 					   "Unsupported pattern");
1455 			return -rte_errno;
1456 		}
1457 	}
1458 
1459 	flow->table_key.num_fields = flow->rule.num_fields;
1460 
1461 	return 0;
1462 }
1463 
1464 /**
1465  * Parse flow actions.
1466  *
1467  * @param priv Pointer to the port's private data.
1468  * @param actions Pointer the action table.
1469  * @param flow Pointer to the flow.
1470  * @param error Pointer to the flow error.
1471  * @returns 0 in case of success, negative value otherwise.
1472  */
1473 static int
1474 mrvl_flow_parse_actions(struct mrvl_priv *priv,
1475 			const struct rte_flow_action actions[],
1476 			struct rte_flow *flow,
1477 			struct rte_flow_error *error)
1478 {
1479 	const struct rte_flow_action *action = actions;
1480 	int specified = 0;
1481 
1482 	for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
1483 		if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
1484 			continue;
1485 
1486 		if (action->type == RTE_FLOW_ACTION_TYPE_DROP) {
1487 			flow->cos.ppio = priv->ppio;
1488 			flow->cos.tc = 0;
1489 			flow->action.type = PP2_CLS_TBL_ACT_DROP;
1490 			flow->action.cos = &flow->cos;
1491 			specified++;
1492 		} else if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1493 			const struct rte_flow_action_queue *q =
1494 				(const struct rte_flow_action_queue *)
1495 				action->conf;
1496 
1497 			if (q->index > priv->nb_rx_queues) {
1498 				rte_flow_error_set(error, EINVAL,
1499 						RTE_FLOW_ERROR_TYPE_ACTION,
1500 						NULL,
1501 						"Queue index out of range");
1502 				return -rte_errno;
1503 			}
1504 
1505 			if (priv->rxq_map[q->index].tc == MRVL_UNKNOWN_TC) {
1506 				/*
1507 				 * Unknown TC mapping, mapping will not have
1508 				 * a correct queue.
1509 				 */
1510 				MRVL_LOG(ERR,
1511 					"Unknown TC mapping for queue %hu eth%hhu",
1512 					q->index, priv->ppio_id);
1513 
1514 				rte_flow_error_set(error, EFAULT,
1515 						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1516 						NULL, NULL);
1517 				return -rte_errno;
1518 			}
1519 
1520 			MRVL_LOG(DEBUG,
1521 				"Action: Assign packets to queue %d, tc:%d, q:%d",
1522 				q->index, priv->rxq_map[q->index].tc,
1523 				priv->rxq_map[q->index].inq);
1524 
1525 			flow->cos.ppio = priv->ppio;
1526 			flow->cos.tc = priv->rxq_map[q->index].tc;
1527 			flow->action.type = PP2_CLS_TBL_ACT_DONE;
1528 			flow->action.cos = &flow->cos;
1529 			specified++;
1530 		} else if (action->type == RTE_FLOW_ACTION_TYPE_METER) {
1531 			const struct rte_flow_action_meter *meter;
1532 			struct mrvl_mtr *mtr;
1533 
1534 			meter = action->conf;
1535 			if (!meter)
1536 				return -rte_flow_error_set(error, EINVAL,
1537 						RTE_FLOW_ERROR_TYPE_ACTION,
1538 						NULL, "Invalid meter\n");
1539 
1540 			LIST_FOREACH(mtr, &priv->mtrs, next)
1541 				if (mtr->mtr_id == meter->mtr_id)
1542 					break;
1543 
1544 			if (!mtr)
1545 				return -rte_flow_error_set(error, EINVAL,
1546 						RTE_FLOW_ERROR_TYPE_ACTION,
1547 						NULL,
1548 						"Meter id does not exist\n");
1549 
1550 			if (!mtr->shared && mtr->refcnt)
1551 				return -rte_flow_error_set(error, EPERM,
1552 						RTE_FLOW_ERROR_TYPE_ACTION,
1553 						NULL,
1554 						"Meter cannot be shared\n");
1555 
1556 			/*
1557 			 * In case cos has already been set
1558 			 * do not modify it.
1559 			 */
1560 			if (!flow->cos.ppio) {
1561 				flow->cos.ppio = priv->ppio;
1562 				flow->cos.tc = 0;
1563 			}
1564 
1565 			flow->action.type = PP2_CLS_TBL_ACT_DONE;
1566 			flow->action.cos = &flow->cos;
1567 			flow->action.plcr = mtr->enabled ? mtr->plcr : NULL;
1568 			flow->mtr = mtr;
1569 			mtr->refcnt++;
1570 			specified++;
1571 		} else {
1572 			rte_flow_error_set(error, ENOTSUP,
1573 					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1574 					   "Action not supported");
1575 			return -rte_errno;
1576 		}
1577 	}
1578 
1579 	if (!specified) {
1580 		rte_flow_error_set(error, EINVAL,
1581 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1582 				   "Action not specified");
1583 		return -rte_errno;
1584 	}
1585 
1586 	return 0;
1587 }
1588 
1589 /**
1590  * Parse flow attribute, pattern and actions.
1591  *
1592  * @param priv Pointer to the port's private data.
1593  * @param attr Pointer to the flow attribute.
1594  * @param pattern Pointer to the flow pattern.
1595  * @param actions Pointer to the flow actions.
1596  * @param flow Pointer to the flow.
1597  * @param error Pointer to the flow error.
1598  * @returns 0 on success, negative value otherwise.
1599  */
1600 static int
1601 mrvl_flow_parse(struct mrvl_priv *priv, const struct rte_flow_attr *attr,
1602 		const struct rte_flow_item pattern[],
1603 		const struct rte_flow_action actions[],
1604 		struct rte_flow *flow,
1605 		struct rte_flow_error *error)
1606 {
1607 	int ret;
1608 
1609 	ret = mrvl_flow_parse_attr(priv, attr, flow, error);
1610 	if (ret)
1611 		return ret;
1612 
1613 	ret = mrvl_flow_parse_pattern(priv, pattern, flow, error);
1614 	if (ret)
1615 		return ret;
1616 
1617 	return mrvl_flow_parse_actions(priv, actions, flow, error);
1618 }
1619 
1620 /**
1621  * Get engine type for the given flow.
1622  *
1623  * @param field Pointer to the flow.
1624  * @returns The type of the engine.
1625  */
1626 static inline enum pp2_cls_tbl_type
1627 mrvl_engine_type(const struct rte_flow *flow)
1628 {
1629 	int i, size = 0;
1630 
1631 	for (i = 0; i < flow->rule.num_fields; i++)
1632 		size += flow->rule.fields[i].size;
1633 
1634 	/*
1635 	 * For maskable engine type the key size must be up to 8 bytes.
1636 	 * For keys with size bigger than 8 bytes, engine type must
1637 	 * be set to exact match.
1638 	 */
1639 	if (size > 8)
1640 		return PP2_CLS_TBL_EXACT_MATCH;
1641 
1642 	return PP2_CLS_TBL_MASKABLE;
1643 }
1644 
1645 /**
1646  * Create classifier table.
1647  *
1648  * @param dev Pointer to the device.
1649  * @param flow Pointer to the very first flow.
1650  * @returns 0 in case of success, negative value otherwise.
1651  */
1652 static int
1653 mrvl_create_cls_table(struct rte_eth_dev *dev, struct rte_flow *first_flow)
1654 {
1655 	struct mrvl_priv *priv = dev->data->dev_private;
1656 	struct pp2_cls_tbl_key *key = &priv->cls_tbl_params.key;
1657 	int ret;
1658 
1659 	if (priv->cls_tbl) {
1660 		pp2_cls_tbl_deinit(priv->cls_tbl);
1661 		priv->cls_tbl = NULL;
1662 	}
1663 
1664 	memset(&priv->cls_tbl_params, 0, sizeof(priv->cls_tbl_params));
1665 
1666 	priv->cls_tbl_params.type = mrvl_engine_type(first_flow);
1667 	MRVL_LOG(INFO, "Setting cls search engine type to %s",
1668 			priv->cls_tbl_params.type == PP2_CLS_TBL_EXACT_MATCH ?
1669 			"exact" : "maskable");
1670 	priv->cls_tbl_params.max_num_rules = MRVL_CLS_MAX_NUM_RULES;
1671 	priv->cls_tbl_params.default_act.type = PP2_CLS_TBL_ACT_DONE;
1672 	priv->cls_tbl_params.default_act.cos = &first_flow->cos;
1673 	memcpy(key, &first_flow->table_key, sizeof(struct pp2_cls_tbl_key));
1674 
1675 	ret = pp2_cls_tbl_init(&priv->cls_tbl_params, &priv->cls_tbl);
1676 
1677 	return ret;
1678 }
1679 
1680 /**
1681  * Check whether new flow can be added to the table
1682  *
1683  * @param priv Pointer to the port's private data.
1684  * @param flow Pointer to the new flow.
1685  * @return 1 in case flow can be added, 0 otherwise.
1686  */
1687 static inline int
1688 mrvl_flow_can_be_added(struct mrvl_priv *priv, const struct rte_flow *flow)
1689 {
1690 	int same = memcmp(&flow->table_key, &priv->cls_tbl_params.key,
1691 			  sizeof(struct pp2_cls_tbl_key)) == 0;
1692 
1693 	return same && mrvl_engine_type(flow) == priv->cls_tbl_params.type;
1694 }
1695 
1696 /**
1697  * DPDK flow create callback called when flow is to be created.
1698  *
1699  * @param dev Pointer to the device.
1700  * @param attr Pointer to the flow attribute.
1701  * @param pattern Pointer to the flow pattern.
1702  * @param actions Pointer to the flow actions.
1703  * @param error Pointer to the flow error.
1704  * @returns Pointer to the created flow in case of success, NULL otherwise.
1705  */
1706 static struct rte_flow *
1707 mrvl_flow_create(struct rte_eth_dev *dev,
1708 		 const struct rte_flow_attr *attr,
1709 		 const struct rte_flow_item pattern[],
1710 		 const struct rte_flow_action actions[],
1711 		 struct rte_flow_error *error)
1712 {
1713 	struct mrvl_priv *priv = dev->data->dev_private;
1714 	struct rte_flow *flow, *first;
1715 	int ret;
1716 
1717 	if (!dev->data->dev_started) {
1718 		rte_flow_error_set(error, EINVAL,
1719 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1720 				   "Port must be started first\n");
1721 		return NULL;
1722 	}
1723 
1724 	flow = rte_zmalloc_socket(NULL, sizeof(*flow), 0, rte_socket_id());
1725 	if (!flow)
1726 		return NULL;
1727 
1728 	ret = mrvl_flow_parse(priv, attr, pattern, actions, flow, error);
1729 	if (ret)
1730 		goto out;
1731 
1732 	/*
1733 	 * Four cases here:
1734 	 *
1735 	 * 1. In case table does not exist - create one.
1736 	 * 2. In case table exists, is empty and new flow cannot be added
1737 	 *    recreate table.
1738 	 * 3. In case table is not empty and new flow matches table format
1739 	 *    add it.
1740 	 * 4. Otherwise flow cannot be added.
1741 	 */
1742 	first = LIST_FIRST(&priv->flows);
1743 	if (!priv->cls_tbl) {
1744 		ret = mrvl_create_cls_table(dev, flow);
1745 	} else if (!first && !mrvl_flow_can_be_added(priv, flow)) {
1746 		ret = mrvl_create_cls_table(dev, flow);
1747 	} else if (mrvl_flow_can_be_added(priv, flow)) {
1748 		ret = 0;
1749 	} else {
1750 		rte_flow_error_set(error, EINVAL,
1751 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1752 				   "Pattern does not match cls table format\n");
1753 		goto out;
1754 	}
1755 
1756 	if (ret) {
1757 		rte_flow_error_set(error, EINVAL,
1758 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1759 				   "Failed to create cls table\n");
1760 		goto out;
1761 	}
1762 
1763 	ret = pp2_cls_tbl_add_rule(priv->cls_tbl, &flow->rule, &flow->action);
1764 	if (ret) {
1765 		rte_flow_error_set(error, EINVAL,
1766 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1767 				   "Failed to add rule\n");
1768 		goto out;
1769 	}
1770 
1771 	LIST_INSERT_HEAD(&priv->flows, flow, next);
1772 
1773 	return flow;
1774 out:
1775 	rte_free(flow);
1776 	return NULL;
1777 }
1778 
1779 /**
1780  * Remove classifier rule associated with given flow.
1781  *
1782  * @param priv Pointer to the port's private data.
1783  * @param flow Pointer to the flow.
1784  * @param error Pointer to the flow error.
1785  * @returns 0 in case of success, negative value otherwise.
1786  */
1787 static int
1788 mrvl_flow_remove(struct mrvl_priv *priv, struct rte_flow *flow,
1789 		 struct rte_flow_error *error)
1790 {
1791 	int ret;
1792 
1793 	if (!priv->cls_tbl) {
1794 		rte_flow_error_set(error, EINVAL,
1795 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1796 				   "Classifier table not initialized");
1797 		return -rte_errno;
1798 	}
1799 
1800 	ret = pp2_cls_tbl_remove_rule(priv->cls_tbl, &flow->rule);
1801 	if (ret) {
1802 		rte_flow_error_set(error, EINVAL,
1803 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1804 				   "Failed to remove rule");
1805 		return -rte_errno;
1806 	}
1807 
1808 	mrvl_free_all_key_mask(&flow->rule);
1809 
1810 	if (flow->mtr) {
1811 		flow->mtr->refcnt--;
1812 		flow->mtr = NULL;
1813 	}
1814 
1815 	return 0;
1816 }
1817 
1818 /**
1819  * DPDK flow destroy callback called when flow is to be removed.
1820  *
1821  * @param dev Pointer to the device.
1822  * @param flow Pointer to the flow.
1823  * @param error Pointer to the flow error.
1824  * @returns 0 in case of success, negative value otherwise.
1825  */
1826 static int
1827 mrvl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1828 		  struct rte_flow_error *error)
1829 {
1830 	struct mrvl_priv *priv = dev->data->dev_private;
1831 	struct rte_flow *f;
1832 	int ret;
1833 
1834 	LIST_FOREACH(f, &priv->flows, next) {
1835 		if (f == flow)
1836 			break;
1837 	}
1838 
1839 	if (!flow) {
1840 		rte_flow_error_set(error, EINVAL,
1841 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1842 				   "Rule was not found");
1843 		return -rte_errno;
1844 	}
1845 
1846 	LIST_REMOVE(f, next);
1847 
1848 	ret = mrvl_flow_remove(priv, flow, error);
1849 	if (ret)
1850 		return ret;
1851 
1852 	rte_free(flow);
1853 
1854 	return 0;
1855 }
1856 
1857 /**
1858  * DPDK flow callback called to verify given attribute, pattern and actions.
1859  *
1860  * @param dev Pointer to the device.
1861  * @param attr Pointer to the flow attribute.
1862  * @param pattern Pointer to the flow pattern.
1863  * @param actions Pointer to the flow actions.
1864  * @param error Pointer to the flow error.
1865  * @returns 0 on success, negative value otherwise.
1866  */
1867 static int
1868 mrvl_flow_validate(struct rte_eth_dev *dev,
1869 		   const struct rte_flow_attr *attr,
1870 		   const struct rte_flow_item pattern[],
1871 		   const struct rte_flow_action actions[],
1872 		   struct rte_flow_error *error)
1873 {
1874 	static struct rte_flow *flow;
1875 
1876 	flow = mrvl_flow_create(dev, attr, pattern, actions, error);
1877 	if (!flow)
1878 		return -rte_errno;
1879 
1880 	mrvl_flow_destroy(dev, flow, error);
1881 
1882 	return 0;
1883 }
1884 
1885 /**
1886  * DPDK flow flush callback called when flows are to be flushed.
1887  *
1888  * @param dev Pointer to the device.
1889  * @param error Pointer to the flow error.
1890  * @returns 0 in case of success, negative value otherwise.
1891  */
1892 static int
1893 mrvl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1894 {
1895 	struct mrvl_priv *priv = dev->data->dev_private;
1896 
1897 	while (!LIST_EMPTY(&priv->flows)) {
1898 		struct rte_flow *flow = LIST_FIRST(&priv->flows);
1899 		int ret = mrvl_flow_remove(priv, flow, error);
1900 		if (ret)
1901 			return ret;
1902 
1903 		LIST_REMOVE(flow, next);
1904 		rte_free(flow);
1905 	}
1906 
1907 	if (priv->cls_tbl) {
1908 		pp2_cls_tbl_deinit(priv->cls_tbl);
1909 		priv->cls_tbl = NULL;
1910 	}
1911 
1912 	return 0;
1913 }
1914 
1915 /**
1916  * DPDK flow isolate callback called to isolate port.
1917  *
1918  * @param dev Pointer to the device.
1919  * @param enable Pass 0/1 to disable/enable port isolation.
1920  * @param error Pointer to the flow error.
1921  * @returns 0 in case of success, negative value otherwise.
1922  */
1923 static int
1924 mrvl_flow_isolate(struct rte_eth_dev *dev, int enable,
1925 		  struct rte_flow_error *error)
1926 {
1927 	struct mrvl_priv *priv = dev->data->dev_private;
1928 
1929 	if (dev->data->dev_started) {
1930 		rte_flow_error_set(error, EBUSY,
1931 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1932 				   NULL, "Port must be stopped first\n");
1933 		return -rte_errno;
1934 	}
1935 
1936 	priv->isolated = enable;
1937 
1938 	return 0;
1939 }
1940 
/* rte_flow driver operations exposed by this PMD. */
const struct rte_flow_ops mrvl_flow_ops = {
	.validate = mrvl_flow_validate,
	.create = mrvl_flow_create,
	.destroy = mrvl_flow_destroy,
	.flush = mrvl_flow_flush,
	.isolate = mrvl_flow_isolate
};
1948 
1949 /**
1950  * Initialize flow resources.
1951  *
1952  * @param dev Pointer to the device.
1953  */
void
mrvl_flow_init(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;

	/* Start with an empty per-port list of flow rules. */
	LIST_INIT(&priv->flows);
}
1961 
1962 /**
1963  * Cleanup flow resources.
1964  *
1965  * @param dev Pointer to the device.
1966  */
void
mrvl_flow_deinit(struct rte_eth_dev *dev)
{
	/* Flushing removes every flow and tears down the classifier table. */
	mrvl_flow_flush(dev, NULL);
}
1972