xref: /dpdk/drivers/net/mvpp2/mrvl_flow.c (revision c7f5dba7d4bb7971fac51755aad09b71b10cef90)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Marvell International Ltd.
3  * Copyright(c) 2018 Semihalf.
4  * All rights reserved.
5  */
6 
7 #include <rte_flow.h>
8 #include <rte_flow_driver.h>
9 #include <rte_malloc.h>
10 #include <rte_log.h>
11 
12 #include <arpa/inet.h>
13 
14 #include "mrvl_flow.h"
15 #include "mrvl_qos.h"
16 
/** Number of rules in the classifier table. */
#define MRVL_CLS_MAX_NUM_RULES 20

/** Size of the classifier key and mask strings. */
#define MRVL_CLS_STR_SIZE_MAX 40

/*
 * Supported flow item sequences.  Each table below lists one exact item
 * chain (terminated with RTE_FLOW_ITEM_TYPE_END) that a user-supplied
 * pattern may be matched against.
 */

/* Patterns anchored at an ETH item. */
static const enum rte_flow_item_type pattern_eth[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END
};

static const enum rte_flow_item_type pattern_eth_vlan[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END
};

static const enum rte_flow_item_type pattern_eth_vlan_ip[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END
};

static const enum rte_flow_item_type pattern_eth_vlan_ip6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END
};

static const enum rte_flow_item_type pattern_eth_ip4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END
};

static const enum rte_flow_item_type pattern_eth_ip4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END
};

static const enum rte_flow_item_type pattern_eth_ip4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END
};

static const enum rte_flow_item_type pattern_eth_ip6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END
};

static const enum rte_flow_item_type pattern_eth_ip6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END
};

static const enum rte_flow_item_type pattern_eth_ip6_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END
};

/* Patterns anchored at a VLAN item (no preceding ETH item). */
static const enum rte_flow_item_type pattern_vlan[] = {
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END
};

static const enum rte_flow_item_type pattern_vlan_ip[] = {
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END
};

static const enum rte_flow_item_type pattern_vlan_ip_tcp[] = {
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END
};

static const enum rte_flow_item_type pattern_vlan_ip_udp[] = {
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END
};

static const enum rte_flow_item_type pattern_vlan_ip6[] = {
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END
};

static const enum rte_flow_item_type pattern_vlan_ip6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END
};

static const enum rte_flow_item_type pattern_vlan_ip6_udp[] = {
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END
};

/* Patterns anchored at an IPv4/IPv6 item. */
static const enum rte_flow_item_type pattern_ip[] = {
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END
};

static const enum rte_flow_item_type pattern_ip6[] = {
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END
};

static const enum rte_flow_item_type pattern_ip_tcp[] = {
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END
};

static const enum rte_flow_item_type pattern_ip6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END
};

static const enum rte_flow_item_type pattern_ip_udp[] = {
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END
};

static const enum rte_flow_item_type pattern_ip6_udp[] = {
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END
};

/* Patterns anchored at an L4 item only. */
static const enum rte_flow_item_type pattern_tcp[] = {
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END
};

static const enum rte_flow_item_type pattern_udp[] = {
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END
};

/* VLAN TCI bits 0-11: VLAN identifier. */
#define MRVL_VLAN_ID_MASK 0x0fff
/*
 * VLAN TCI priority bits used by the classifier.
 * NOTE(review): 802.1Q PCP occupies TCI bits 13-15 (0xe000); 0x7000 covers
 * bits 12-14 (DEI plus two PCP bits) and is later shifted by 13 in
 * mrvl_parse_vlan_pri() — confirm against the PP2 classifier expectations.
 */
#define MRVL_VLAN_PRI_MASK 0x7000
/* IPv4 TOS bits 2-7: DSCP field. */
#define MRVL_IPV4_DSCP_MASK 0xfc
/* Full 32-bit IPv4 address mask. */
#define MRVL_IPV4_ADDR_MASK 0xffffffff
/* IPv6 vtc_flow bits 0-19: flow label. */
#define MRVL_IPV6_FLOW_MASK 0x0fffff
182 
183 /**
184  * Given a flow item, return the next non-void one.
185  *
186  * @param items Pointer to the item in the table.
187  * @returns Next not-void item, NULL otherwise.
188  */
189 static const struct rte_flow_item *
190 mrvl_next_item(const struct rte_flow_item *items)
191 {
192 	const struct rte_flow_item *item = items;
193 
194 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
195 		if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
196 			return item;
197 	}
198 
199 	return NULL;
200 }
201 
202 /**
203  * Allocate memory for classifier rule key and mask fields.
204  *
205  * @param field Pointer to the classifier rule.
206  * @returns 0 in case of success, negative value otherwise.
207  */
208 static int
209 mrvl_alloc_key_mask(struct pp2_cls_rule_key_field *field)
210 {
211 	unsigned int id = rte_socket_id();
212 
213 	field->key = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id);
214 	if (!field->key)
215 		goto out;
216 
217 	field->mask = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id);
218 	if (!field->mask)
219 		goto out_mask;
220 
221 	return 0;
222 out_mask:
223 	rte_free(field->key);
224 out:
225 	field->key = NULL;
226 	field->mask = NULL;
227 	return -1;
228 }
229 
230 /**
231  * Free memory allocated for classifier rule key and mask fields.
232  *
233  * @param field Pointer to the classifier rule.
234  */
235 static void
236 mrvl_free_key_mask(struct pp2_cls_rule_key_field *field)
237 {
238 	rte_free(field->key);
239 	rte_free(field->mask);
240 	field->key = NULL;
241 	field->mask = NULL;
242 }
243 
244 /**
245  * Free memory allocated for all classifier rule key and mask fields.
246  *
247  * @param rule Pointer to the classifier table rule.
248  */
249 static void
250 mrvl_free_all_key_mask(struct pp2_cls_tbl_rule *rule)
251 {
252 	int i;
253 
254 	for (i = 0; i < rule->num_fields; i++)
255 		mrvl_free_key_mask(&rule->fields[i]);
256 	rule->num_fields = 0;
257 }
258 
259 /*
260  * Initialize rte flow item parsing.
261  *
262  * @param item Pointer to the flow item.
263  * @param spec_ptr Pointer to the specific item pointer.
264  * @param mask_ptr Pointer to the specific item's mask pointer.
265  * @def_mask Pointer to the default mask.
266  * @size Size of the flow item.
267  * @error Pointer to the rte flow error.
268  * @returns 0 in case of success, negative value otherwise.
269  */
270 static int
271 mrvl_parse_init(const struct rte_flow_item *item,
272 		const void **spec_ptr,
273 		const void **mask_ptr,
274 		const void *def_mask,
275 		unsigned int size,
276 		struct rte_flow_error *error)
277 {
278 	const uint8_t *spec;
279 	const uint8_t *mask;
280 	const uint8_t *last;
281 	uint8_t zeros[size];
282 
283 	memset(zeros, 0, size);
284 
285 	if (item == NULL) {
286 		rte_flow_error_set(error, EINVAL,
287 				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
288 				   "NULL item\n");
289 		return -rte_errno;
290 	}
291 
292 	if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
293 		rte_flow_error_set(error, EINVAL,
294 				   RTE_FLOW_ERROR_TYPE_ITEM, item,
295 				   "Mask or last is set without spec\n");
296 		return -rte_errno;
297 	}
298 
299 	/*
300 	 * If "mask" is not set, default mask is used,
301 	 * but if default mask is NULL, "mask" should be set.
302 	 */
303 	if (item->mask == NULL) {
304 		if (def_mask == NULL) {
305 			rte_flow_error_set(error, EINVAL,
306 					   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
307 					   "Mask should be specified\n");
308 			return -rte_errno;
309 		}
310 
311 		mask = (const uint8_t *)def_mask;
312 	} else {
313 		mask = (const uint8_t *)item->mask;
314 	}
315 
316 	spec = (const uint8_t *)item->spec;
317 	last = (const uint8_t *)item->last;
318 
319 	if (spec == NULL) {
320 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
321 				   NULL, "Spec should be specified\n");
322 		return -rte_errno;
323 	}
324 
325 	/*
326 	 * If field values in "last" are either 0 or equal to the corresponding
327 	 * values in "spec" then they are ignored.
328 	 */
329 	if (last != NULL &&
330 	    !memcmp(last, zeros, size) &&
331 	    memcmp(last, spec, size) != 0) {
332 		rte_flow_error_set(error, ENOTSUP,
333 				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
334 				   "Ranging is not supported\n");
335 		return -rte_errno;
336 	}
337 
338 	*spec_ptr = spec;
339 	*mask_ptr = mask;
340 
341 	return 0;
342 }
343 
344 /**
345  * Parse the eth flow item.
346  *
347  * This will create classifier rule that matches either destination or source
348  * mac.
349  *
350  * @param spec Pointer to the specific flow item.
351  * @param mask Pointer to the specific flow item's mask.
352  * @param parse_dst Parse either destination or source mac address.
353  * @param flow Pointer to the flow.
354  * @return 0 in case of success, negative error value otherwise.
355  */
356 static int
357 mrvl_parse_mac(const struct rte_flow_item_eth *spec,
358 	       const struct rte_flow_item_eth *mask,
359 	       int parse_dst, struct rte_flow *flow)
360 {
361 	struct pp2_cls_rule_key_field *key_field;
362 	const uint8_t *k, *m;
363 
364 	if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
365 		return -ENOSPC;
366 
367 	if (parse_dst) {
368 		k = spec->dst.addr_bytes;
369 		m = mask->dst.addr_bytes;
370 
371 		flow->pattern |= F_DMAC;
372 	} else {
373 		k = spec->src.addr_bytes;
374 		m = mask->src.addr_bytes;
375 
376 		flow->pattern |= F_SMAC;
377 	}
378 
379 	key_field = &flow->rule.fields[flow->rule.num_fields];
380 	mrvl_alloc_key_mask(key_field);
381 	key_field->size = 6;
382 
383 	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX,
384 		 "%02x:%02x:%02x:%02x:%02x:%02x",
385 		 k[0], k[1], k[2], k[3], k[4], k[5]);
386 
387 	snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX,
388 		 "%02x:%02x:%02x:%02x:%02x:%02x",
389 		 m[0], m[1], m[2], m[3], m[4], m[5]);
390 
391 	flow->rule.num_fields += 1;
392 
393 	return 0;
394 }
395 
/**
 * Helper for parsing the eth flow item destination mac address.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_dmac(const struct rte_flow_item_eth *spec,
		const struct rte_flow_item_eth *mask,
		struct rte_flow *flow)
{
	const int parse_dst = 1;

	return mrvl_parse_mac(spec, mask, parse_dst, flow);
}
411 
/**
 * Helper for parsing the eth flow item source mac address.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_smac(const struct rte_flow_item_eth *spec,
		const struct rte_flow_item_eth *mask,
		struct rte_flow *flow)
{
	const int parse_dst = 0;

	return mrvl_parse_mac(spec, mask, parse_dst, flow);
}
427 
428 /**
429  * Parse the ether type field of the eth flow item.
430  *
431  * @param spec Pointer to the specific flow item.
432  * @param mask Pointer to the specific flow item's mask.
433  * @param flow Pointer to the flow.
434  * @return 0 in case of success, negative error value otherwise.
435  */
436 static int
437 mrvl_parse_type(const struct rte_flow_item_eth *spec,
438 		const struct rte_flow_item_eth *mask __rte_unused,
439 		struct rte_flow *flow)
440 {
441 	struct pp2_cls_rule_key_field *key_field;
442 	uint16_t k;
443 
444 	if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
445 		return -ENOSPC;
446 
447 	key_field = &flow->rule.fields[flow->rule.num_fields];
448 	mrvl_alloc_key_mask(key_field);
449 	key_field->size = 2;
450 
451 	k = rte_be_to_cpu_16(spec->type);
452 	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
453 
454 	flow->pattern |= F_TYPE;
455 	flow->rule.num_fields += 1;
456 
457 	return 0;
458 }
459 
460 /**
461  * Parse the vid field of the vlan rte flow item.
462  *
463  * This will create classifier rule that matches vid.
464  *
465  * @param spec Pointer to the specific flow item.
466  * @param mask Pointer to the specific flow item's mask.
467  * @param flow Pointer to the flow.
468  * @return 0 in case of success, negative error value otherwise.
469  */
470 static int
471 mrvl_parse_vlan_id(const struct rte_flow_item_vlan *spec,
472 		   const struct rte_flow_item_vlan *mask __rte_unused,
473 		   struct rte_flow *flow)
474 {
475 	struct pp2_cls_rule_key_field *key_field;
476 	uint16_t k;
477 
478 	if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
479 		return -ENOSPC;
480 
481 	key_field = &flow->rule.fields[flow->rule.num_fields];
482 	mrvl_alloc_key_mask(key_field);
483 	key_field->size = 2;
484 
485 	k = rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_ID_MASK;
486 	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
487 
488 	flow->pattern |= F_VLAN_ID;
489 	flow->rule.num_fields += 1;
490 
491 	return 0;
492 }
493 
494 /**
495  * Parse the pri field of the vlan rte flow item.
496  *
497  * This will create classifier rule that matches pri.
498  *
499  * @param spec Pointer to the specific flow item.
500  * @param mask Pointer to the specific flow item's mask.
501  * @param flow Pointer to the flow.
502  * @return 0 in case of success, negative error value otherwise.
503  */
504 static int
505 mrvl_parse_vlan_pri(const struct rte_flow_item_vlan *spec,
506 		    const struct rte_flow_item_vlan *mask __rte_unused,
507 		    struct rte_flow *flow)
508 {
509 	struct pp2_cls_rule_key_field *key_field;
510 	uint16_t k;
511 
512 	if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
513 		return -ENOSPC;
514 
515 	key_field = &flow->rule.fields[flow->rule.num_fields];
516 	mrvl_alloc_key_mask(key_field);
517 	key_field->size = 1;
518 
519 	k = (rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_PRI_MASK) >> 13;
520 	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
521 
522 	flow->pattern |= F_VLAN_PRI;
523 	flow->rule.num_fields += 1;
524 
525 	return 0;
526 }
527 
528 /**
529  * Parse the dscp field of the ipv4 rte flow item.
530  *
531  * This will create classifier rule that matches dscp field.
532  *
533  * @param spec Pointer to the specific flow item.
534  * @param mask Pointer to the specific flow item's mask.
535  * @param flow Pointer to the flow.
536  * @return 0 in case of success, negative error value otherwise.
537  */
538 static int
539 mrvl_parse_ip4_dscp(const struct rte_flow_item_ipv4 *spec,
540 		    const struct rte_flow_item_ipv4 *mask,
541 		    struct rte_flow *flow)
542 {
543 	struct pp2_cls_rule_key_field *key_field;
544 	uint8_t k, m;
545 
546 	if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
547 		return -ENOSPC;
548 
549 	key_field = &flow->rule.fields[flow->rule.num_fields];
550 	mrvl_alloc_key_mask(key_field);
551 	key_field->size = 1;
552 
553 	k = (spec->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2;
554 	m = (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2;
555 	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
556 	snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m);
557 
558 	flow->pattern |= F_IP4_TOS;
559 	flow->rule.num_fields += 1;
560 
561 	return 0;
562 }
563 
564 /**
565  * Parse either source or destination ip addresses of the ipv4 flow item.
566  *
567  * This will create classifier rule that matches either destination
568  * or source ip field.
569  *
570  * @param spec Pointer to the specific flow item.
571  * @param mask Pointer to the specific flow item's mask.
572  * @param parse_dst Parse either destination or source ip address.
573  * @param flow Pointer to the flow.
574  * @return 0 in case of success, negative error value otherwise.
575  */
576 static int
577 mrvl_parse_ip4_addr(const struct rte_flow_item_ipv4 *spec,
578 		    const struct rte_flow_item_ipv4 *mask,
579 		    int parse_dst, struct rte_flow *flow)
580 {
581 	struct pp2_cls_rule_key_field *key_field;
582 	struct in_addr k;
583 	uint32_t m;
584 
585 	if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
586 		return -ENOSPC;
587 
588 	memset(&k, 0, sizeof(k));
589 	if (parse_dst) {
590 		k.s_addr = spec->hdr.dst_addr;
591 		m = rte_be_to_cpu_32(mask->hdr.dst_addr);
592 
593 		flow->pattern |= F_IP4_DIP;
594 	} else {
595 		k.s_addr = spec->hdr.src_addr;
596 		m = rte_be_to_cpu_32(mask->hdr.src_addr);
597 
598 		flow->pattern |= F_IP4_SIP;
599 	}
600 
601 	key_field = &flow->rule.fields[flow->rule.num_fields];
602 	mrvl_alloc_key_mask(key_field);
603 	key_field->size = 4;
604 
605 	inet_ntop(AF_INET, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX);
606 	snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "0x%x", m);
607 
608 	flow->rule.num_fields += 1;
609 
610 	return 0;
611 }
612 
/**
 * Helper for parsing destination ip of the ipv4 flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_ip4_dip(const struct rte_flow_item_ipv4 *spec,
		   const struct rte_flow_item_ipv4 *mask,
		   struct rte_flow *flow)
{
	const int parse_dst = 1;

	return mrvl_parse_ip4_addr(spec, mask, parse_dst, flow);
}
628 
/**
 * Helper for parsing source ip of the ipv4 flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_ip4_sip(const struct rte_flow_item_ipv4 *spec,
		   const struct rte_flow_item_ipv4 *mask,
		   struct rte_flow *flow)
{
	const int parse_dst = 0;

	return mrvl_parse_ip4_addr(spec, mask, parse_dst, flow);
}
644 
645 /**
646  * Parse the proto field of the ipv4 rte flow item.
647  *
648  * This will create classifier rule that matches proto field.
649  *
650  * @param spec Pointer to the specific flow item.
651  * @param mask Pointer to the specific flow item's mask.
652  * @param flow Pointer to the flow.
653  * @return 0 in case of success, negative error value otherwise.
654  */
655 static int
656 mrvl_parse_ip4_proto(const struct rte_flow_item_ipv4 *spec,
657 		     const struct rte_flow_item_ipv4 *mask __rte_unused,
658 		     struct rte_flow *flow)
659 {
660 	struct pp2_cls_rule_key_field *key_field;
661 	uint8_t k = spec->hdr.next_proto_id;
662 
663 	if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
664 		return -ENOSPC;
665 
666 	key_field = &flow->rule.fields[flow->rule.num_fields];
667 	mrvl_alloc_key_mask(key_field);
668 	key_field->size = 1;
669 
670 	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
671 
672 	flow->pattern |= F_IP4_PROTO;
673 	flow->rule.num_fields += 1;
674 
675 	return 0;
676 }
677 
678 /**
679  * Parse either source or destination ip addresses of the ipv6 rte flow item.
680  *
681  * This will create classifier rule that matches either destination
682  * or source ip field.
683  *
684  * @param spec Pointer to the specific flow item.
685  * @param mask Pointer to the specific flow item's mask.
686  * @param parse_dst Parse either destination or source ipv6 address.
687  * @param flow Pointer to the flow.
688  * @return 0 in case of success, negative error value otherwise.
689  */
690 static int
691 mrvl_parse_ip6_addr(const struct rte_flow_item_ipv6 *spec,
692 	       const struct rte_flow_item_ipv6 *mask,
693 	       int parse_dst, struct rte_flow *flow)
694 {
695 	struct pp2_cls_rule_key_field *key_field;
696 	int size = sizeof(spec->hdr.dst_addr);
697 	struct in6_addr k, m;
698 
699 	if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
700 		return -ENOSPC;
701 
702 	memset(&k, 0, sizeof(k));
703 	if (parse_dst) {
704 		memcpy(k.s6_addr, spec->hdr.dst_addr, size);
705 		memcpy(m.s6_addr, mask->hdr.dst_addr, size);
706 
707 		flow->pattern |= F_IP6_DIP;
708 	} else {
709 		memcpy(k.s6_addr, spec->hdr.src_addr, size);
710 		memcpy(m.s6_addr, mask->hdr.src_addr, size);
711 
712 		flow->pattern |= F_IP6_SIP;
713 	}
714 
715 	key_field = &flow->rule.fields[flow->rule.num_fields];
716 	mrvl_alloc_key_mask(key_field);
717 	key_field->size = 16;
718 
719 	inet_ntop(AF_INET6, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX);
720 	inet_ntop(AF_INET6, &m, (char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX);
721 
722 	flow->rule.num_fields += 1;
723 
724 	return 0;
725 }
726 
/**
 * Helper for parsing destination ip of the ipv6 flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_ip6_dip(const struct rte_flow_item_ipv6 *spec,
		   const struct rte_flow_item_ipv6 *mask,
		   struct rte_flow *flow)
{
	const int parse_dst = 1;

	return mrvl_parse_ip6_addr(spec, mask, parse_dst, flow);
}
742 
/**
 * Helper for parsing source ip of the ipv6 flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_ip6_sip(const struct rte_flow_item_ipv6 *spec,
		   const struct rte_flow_item_ipv6 *mask,
		   struct rte_flow *flow)
{
	const int parse_dst = 0;

	return mrvl_parse_ip6_addr(spec, mask, parse_dst, flow);
}
758 
759 /**
760  * Parse the flow label of the ipv6 flow item.
761  *
762  * This will create classifier rule that matches flow field.
763  *
764  * @param spec Pointer to the specific flow item.
765  * @param mask Pointer to the specific flow item's mask.
766  * @param flow Pointer to the flow.
767  * @return 0 in case of success, negative error value otherwise.
768  */
769 static int
770 mrvl_parse_ip6_flow(const struct rte_flow_item_ipv6 *spec,
771 		    const struct rte_flow_item_ipv6 *mask,
772 		    struct rte_flow *flow)
773 {
774 	struct pp2_cls_rule_key_field *key_field;
775 	uint32_t k = rte_be_to_cpu_32(spec->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK,
776 		 m = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK;
777 
778 	if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
779 		return -ENOSPC;
780 
781 	key_field = &flow->rule.fields[flow->rule.num_fields];
782 	mrvl_alloc_key_mask(key_field);
783 	key_field->size = 3;
784 
785 	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
786 	snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m);
787 
788 	flow->pattern |= F_IP6_FLOW;
789 	flow->rule.num_fields += 1;
790 
791 	return 0;
792 }
793 
794 /**
795  * Parse the next header of the ipv6 flow item.
796  *
797  * This will create classifier rule that matches next header field.
798  *
799  * @param spec Pointer to the specific flow item.
800  * @param mask Pointer to the specific flow item's mask.
801  * @param flow Pointer to the flow.
802  * @return 0 in case of success, negative error value otherwise.
803  */
804 static int
805 mrvl_parse_ip6_next_hdr(const struct rte_flow_item_ipv6 *spec,
806 			const struct rte_flow_item_ipv6 *mask __rte_unused,
807 			struct rte_flow *flow)
808 {
809 	struct pp2_cls_rule_key_field *key_field;
810 	uint8_t k = spec->hdr.proto;
811 
812 	if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
813 		return -ENOSPC;
814 
815 	key_field = &flow->rule.fields[flow->rule.num_fields];
816 	mrvl_alloc_key_mask(key_field);
817 	key_field->size = 1;
818 
819 	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
820 
821 	flow->pattern |= F_IP6_NEXT_HDR;
822 	flow->rule.num_fields += 1;
823 
824 	return 0;
825 }
826 
827 /**
828  * Parse destination or source port of the tcp flow item.
829  *
830  * This will create classifier rule that matches either destination or
831  * source tcp port.
832  *
833  * @param spec Pointer to the specific flow item.
834  * @param mask Pointer to the specific flow item's mask.
835  * @param parse_dst Parse either destination or source port.
836  * @param flow Pointer to the flow.
837  * @return 0 in case of success, negative error value otherwise.
838  */
839 static int
840 mrvl_parse_tcp_port(const struct rte_flow_item_tcp *spec,
841 		    const struct rte_flow_item_tcp *mask __rte_unused,
842 		    int parse_dst, struct rte_flow *flow)
843 {
844 	struct pp2_cls_rule_key_field *key_field;
845 	uint16_t k;
846 
847 	if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
848 		return -ENOSPC;
849 
850 	key_field = &flow->rule.fields[flow->rule.num_fields];
851 	mrvl_alloc_key_mask(key_field);
852 	key_field->size = 2;
853 
854 	if (parse_dst) {
855 		k = rte_be_to_cpu_16(spec->hdr.dst_port);
856 
857 		flow->pattern |= F_TCP_DPORT;
858 	} else {
859 		k = rte_be_to_cpu_16(spec->hdr.src_port);
860 
861 		flow->pattern |= F_TCP_SPORT;
862 	}
863 
864 	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
865 
866 	flow->rule.num_fields += 1;
867 
868 	return 0;
869 }
870 
/**
 * Helper for parsing the tcp source port of the tcp flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_tcp_sport(const struct rte_flow_item_tcp *spec,
		     const struct rte_flow_item_tcp *mask,
		     struct rte_flow *flow)
{
	const int parse_dst = 0;

	return mrvl_parse_tcp_port(spec, mask, parse_dst, flow);
}
886 
/**
 * Helper for parsing the tcp destination port of the tcp flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_tcp_dport(const struct rte_flow_item_tcp *spec,
		     const struct rte_flow_item_tcp *mask,
		     struct rte_flow *flow)
{
	const int parse_dst = 1;

	return mrvl_parse_tcp_port(spec, mask, parse_dst, flow);
}
902 
903 /**
904  * Parse destination or source port of the udp flow item.
905  *
906  * This will create classifier rule that matches either destination or
907  * source udp port.
908  *
909  * @param spec Pointer to the specific flow item.
910  * @param mask Pointer to the specific flow item's mask.
911  * @param parse_dst Parse either destination or source port.
912  * @param flow Pointer to the flow.
913  * @return 0 in case of success, negative error value otherwise.
914  */
915 static int
916 mrvl_parse_udp_port(const struct rte_flow_item_udp *spec,
917 		    const struct rte_flow_item_udp *mask __rte_unused,
918 		    int parse_dst, struct rte_flow *flow)
919 {
920 	struct pp2_cls_rule_key_field *key_field;
921 	uint16_t k;
922 
923 	if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
924 		return -ENOSPC;
925 
926 	key_field = &flow->rule.fields[flow->rule.num_fields];
927 	mrvl_alloc_key_mask(key_field);
928 	key_field->size = 2;
929 
930 	if (parse_dst) {
931 		k = rte_be_to_cpu_16(spec->hdr.dst_port);
932 
933 		flow->pattern |= F_UDP_DPORT;
934 	} else {
935 		k = rte_be_to_cpu_16(spec->hdr.src_port);
936 
937 		flow->pattern |= F_UDP_SPORT;
938 	}
939 
940 	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
941 
942 	flow->rule.num_fields += 1;
943 
944 	return 0;
945 }
946 
/**
 * Helper for parsing the udp source port of the udp flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_udp_sport(const struct rte_flow_item_udp *spec,
		     const struct rte_flow_item_udp *mask,
		     struct rte_flow *flow)
{
	const int parse_dst = 0;

	return mrvl_parse_udp_port(spec, mask, parse_dst, flow);
}
962 
/**
 * Helper for parsing the udp destination port of the udp flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_udp_dport(const struct rte_flow_item_udp *spec,
		     const struct rte_flow_item_udp *mask,
		     struct rte_flow *flow)
{
	const int parse_dst = 1;

	return mrvl_parse_udp_port(spec, mask, parse_dst, flow);
}
978 
979 /**
980  * Parse eth flow item.
981  *
982  * @param item Pointer to the flow item.
983  * @param flow Pointer to the flow.
984  * @param error Pointer to the flow error.
985  * @returns 0 on success, negative value otherwise.
986  */
987 static int
988 mrvl_parse_eth(const struct rte_flow_item *item, struct rte_flow *flow,
989 	       struct rte_flow_error *error)
990 {
991 	const struct rte_flow_item_eth *spec = NULL, *mask = NULL;
992 	struct ether_addr zero;
993 	int ret;
994 
995 	ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
996 			      &rte_flow_item_eth_mask,
997 			      sizeof(struct rte_flow_item_eth), error);
998 	if (ret)
999 		return ret;
1000 
1001 	memset(&zero, 0, sizeof(zero));
1002 
1003 	if (memcmp(&mask->dst, &zero, sizeof(mask->dst))) {
1004 		ret = mrvl_parse_dmac(spec, mask, flow);
1005 		if (ret)
1006 			goto out;
1007 	}
1008 
1009 	if (memcmp(&mask->src, &zero, sizeof(mask->src))) {
1010 		ret = mrvl_parse_smac(spec, mask, flow);
1011 		if (ret)
1012 			goto out;
1013 	}
1014 
1015 	if (mask->type) {
1016 		MRVL_LOG(WARNING, "eth type mask is ignored");
1017 		ret = mrvl_parse_type(spec, mask, flow);
1018 		if (ret)
1019 			goto out;
1020 	}
1021 
1022 	return 0;
1023 out:
1024 	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1025 			   "Reached maximum number of fields in cls tbl key\n");
1026 	return -rte_errno;
1027 }
1028 
/**
 * Parse vlan flow item.
 *
 * Builds classifier key fields for the vlan id and/or priority (whichever
 * TCI mask bits are set) and optionally the inner ether type.
 *
 * @param item Pointer to the flow item.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 on success, negative value otherwise.
 */
static int
mrvl_parse_vlan(const struct rte_flow_item *item,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	const struct rte_flow_item_vlan *spec = NULL, *mask = NULL;
	uint16_t m;
	int ret;

	ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
			      &rte_flow_item_vlan_mask,
			      sizeof(struct rte_flow_item_vlan), error);
	if (ret)
		return ret;

	m = rte_be_to_cpu_16(mask->tci);
	if (m & MRVL_VLAN_ID_MASK) {
		/* Only exact-match is supported; the partial mask is dropped. */
		MRVL_LOG(WARNING, "vlan id mask is ignored");
		ret = mrvl_parse_vlan_id(spec, mask, flow);
		if (ret)
			goto out;
	}

	if (m & MRVL_VLAN_PRI_MASK) {
		/* Only exact-match is supported; the partial mask is dropped. */
		MRVL_LOG(WARNING, "vlan pri mask is ignored");
		ret = mrvl_parse_vlan_pri(spec, mask, flow);
		if (ret)
			goto out;
	}

	/*
	 * An earlier ETH item must not have requested an ether-type match:
	 * that would amount to matching the VLAN TPID, which the classifier
	 * cannot do.
	 */
	if (flow->pattern & F_TYPE) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN TPID matching is not supported");
		return -rte_errno;
	}
	if (mask->inner_type) {
		/* Re-wrap inner_type as an ETH item so mrvl_parse_type()
		 * can produce the ether-type key field.
		 */
		struct rte_flow_item_eth spec_eth = {
			.type = spec->inner_type,
		};
		struct rte_flow_item_eth mask_eth = {
			.type = mask->inner_type,
		};

		MRVL_LOG(WARNING, "inner eth type mask is ignored");
		ret = mrvl_parse_type(&spec_eth, &mask_eth, flow);
		if (ret)
			goto out;
	}

	return 0;
out:
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			   "Reached maximum number of fields in cls tbl key\n");
	return -rte_errno;
}
1093 
1094 /**
1095  * Parse ipv4 flow item.
1096  *
1097  * @param item Pointer to the flow item.
1098  * @param flow Pointer to the flow.
1099  * @param error Pointer to the flow error.
1100  * @returns 0 on success, negative value otherwise.
1101  */
1102 static int
1103 mrvl_parse_ip4(const struct rte_flow_item *item,
1104 	       struct rte_flow *flow,
1105 	       struct rte_flow_error *error)
1106 {
1107 	const struct rte_flow_item_ipv4 *spec = NULL, *mask = NULL;
1108 	int ret;
1109 
1110 	ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1111 			      &rte_flow_item_ipv4_mask,
1112 			      sizeof(struct rte_flow_item_ipv4), error);
1113 	if (ret)
1114 		return ret;
1115 
1116 	if (mask->hdr.version_ihl ||
1117 	    mask->hdr.total_length ||
1118 	    mask->hdr.packet_id ||
1119 	    mask->hdr.fragment_offset ||
1120 	    mask->hdr.time_to_live ||
1121 	    mask->hdr.hdr_checksum) {
1122 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1123 				   NULL, "Not supported by classifier\n");
1124 		return -rte_errno;
1125 	}
1126 
1127 	if (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) {
1128 		ret = mrvl_parse_ip4_dscp(spec, mask, flow);
1129 		if (ret)
1130 			goto out;
1131 	}
1132 
1133 	if (mask->hdr.src_addr) {
1134 		ret = mrvl_parse_ip4_sip(spec, mask, flow);
1135 		if (ret)
1136 			goto out;
1137 	}
1138 
1139 	if (mask->hdr.dst_addr) {
1140 		ret = mrvl_parse_ip4_dip(spec, mask, flow);
1141 		if (ret)
1142 			goto out;
1143 	}
1144 
1145 	if (mask->hdr.next_proto_id) {
1146 		MRVL_LOG(WARNING, "next proto id mask is ignored");
1147 		ret = mrvl_parse_ip4_proto(spec, mask, flow);
1148 		if (ret)
1149 			goto out;
1150 	}
1151 
1152 	return 0;
1153 out:
1154 	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1155 			   "Reached maximum number of fields in cls tbl key\n");
1156 	return -rte_errno;
1157 }
1158 
1159 /**
1160  * Parse ipv6 flow item.
1161  *
1162  * @param item Pointer to the flow item.
1163  * @param flow Pointer to the flow.
1164  * @param error Pointer to the flow error.
1165  * @returns 0 on success, negative value otherwise.
1166  */
1167 static int
1168 mrvl_parse_ip6(const struct rte_flow_item *item,
1169 	       struct rte_flow *flow,
1170 	       struct rte_flow_error *error)
1171 {
1172 	const struct rte_flow_item_ipv6 *spec = NULL, *mask = NULL;
1173 	struct ipv6_hdr zero;
1174 	uint32_t flow_mask;
1175 	int ret;
1176 
1177 	ret = mrvl_parse_init(item, (const void **)&spec,
1178 			      (const void **)&mask,
1179 			      &rte_flow_item_ipv6_mask,
1180 			      sizeof(struct rte_flow_item_ipv6),
1181 			      error);
1182 	if (ret)
1183 		return ret;
1184 
1185 	memset(&zero, 0, sizeof(zero));
1186 
1187 	if (mask->hdr.payload_len ||
1188 	    mask->hdr.hop_limits) {
1189 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1190 				   NULL, "Not supported by classifier\n");
1191 		return -rte_errno;
1192 	}
1193 
1194 	if (memcmp(mask->hdr.src_addr,
1195 		   zero.src_addr, sizeof(mask->hdr.src_addr))) {
1196 		ret = mrvl_parse_ip6_sip(spec, mask, flow);
1197 		if (ret)
1198 			goto out;
1199 	}
1200 
1201 	if (memcmp(mask->hdr.dst_addr,
1202 		   zero.dst_addr, sizeof(mask->hdr.dst_addr))) {
1203 		ret = mrvl_parse_ip6_dip(spec, mask, flow);
1204 		if (ret)
1205 			goto out;
1206 	}
1207 
1208 	flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK;
1209 	if (flow_mask) {
1210 		ret = mrvl_parse_ip6_flow(spec, mask, flow);
1211 		if (ret)
1212 			goto out;
1213 	}
1214 
1215 	if (mask->hdr.proto) {
1216 		MRVL_LOG(WARNING, "next header mask is ignored");
1217 		ret = mrvl_parse_ip6_next_hdr(spec, mask, flow);
1218 		if (ret)
1219 			goto out;
1220 	}
1221 
1222 	return 0;
1223 out:
1224 	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1225 			   "Reached maximum number of fields in cls tbl key\n");
1226 	return -rte_errno;
1227 }
1228 
1229 /**
1230  * Parse tcp flow item.
1231  *
1232  * @param item Pointer to the flow item.
1233  * @param flow Pointer to the flow.
1234  * @param error Pointer to the flow error.
1235  * @returns 0 on success, negative value otherwise.
1236  */
1237 static int
1238 mrvl_parse_tcp(const struct rte_flow_item *item,
1239 	       struct rte_flow *flow,
1240 	       struct rte_flow_error *error)
1241 {
1242 	const struct rte_flow_item_tcp *spec = NULL, *mask = NULL;
1243 	int ret;
1244 
1245 	ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1246 			      &rte_flow_item_ipv4_mask,
1247 			      sizeof(struct rte_flow_item_ipv4), error);
1248 	if (ret)
1249 		return ret;
1250 
1251 	if (mask->hdr.sent_seq ||
1252 	    mask->hdr.recv_ack ||
1253 	    mask->hdr.data_off ||
1254 	    mask->hdr.tcp_flags ||
1255 	    mask->hdr.rx_win ||
1256 	    mask->hdr.cksum ||
1257 	    mask->hdr.tcp_urp) {
1258 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1259 				   NULL, "Not supported by classifier\n");
1260 		return -rte_errno;
1261 	}
1262 
1263 	if (mask->hdr.src_port) {
1264 		MRVL_LOG(WARNING, "tcp sport mask is ignored");
1265 		ret = mrvl_parse_tcp_sport(spec, mask, flow);
1266 		if (ret)
1267 			goto out;
1268 	}
1269 
1270 	if (mask->hdr.dst_port) {
1271 		MRVL_LOG(WARNING, "tcp dport mask is ignored");
1272 		ret = mrvl_parse_tcp_dport(spec, mask, flow);
1273 		if (ret)
1274 			goto out;
1275 	}
1276 
1277 	return 0;
1278 out:
1279 	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1280 			   "Reached maximum number of fields in cls tbl key\n");
1281 	return -rte_errno;
1282 }
1283 
1284 /**
1285  * Parse udp flow item.
1286  *
1287  * @param item Pointer to the flow item.
1288  * @param flow Pointer to the flow.
1289  * @param error Pointer to the flow error.
1290  * @returns 0 on success, negative value otherwise.
1291  */
1292 static int
1293 mrvl_parse_udp(const struct rte_flow_item *item,
1294 	       struct rte_flow *flow,
1295 	       struct rte_flow_error *error)
1296 {
1297 	const struct rte_flow_item_udp *spec = NULL, *mask = NULL;
1298 	int ret;
1299 
1300 	ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1301 			      &rte_flow_item_ipv4_mask,
1302 			      sizeof(struct rte_flow_item_ipv4), error);
1303 	if (ret)
1304 		return ret;
1305 
1306 	if (mask->hdr.dgram_len ||
1307 	    mask->hdr.dgram_cksum) {
1308 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1309 				   NULL, "Not supported by classifier\n");
1310 		return -rte_errno;
1311 	}
1312 
1313 	if (mask->hdr.src_port) {
1314 		MRVL_LOG(WARNING, "udp sport mask is ignored");
1315 		ret = mrvl_parse_udp_sport(spec, mask, flow);
1316 		if (ret)
1317 			goto out;
1318 	}
1319 
1320 	if (mask->hdr.dst_port) {
1321 		MRVL_LOG(WARNING, "udp dport mask is ignored");
1322 		ret = mrvl_parse_udp_dport(spec, mask, flow);
1323 		if (ret)
1324 			goto out;
1325 	}
1326 
1327 	return 0;
1328 out:
1329 	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1330 			   "Reached maximum number of fields in cls tbl key\n");
1331 	return -rte_errno;
1332 }
1333 
1334 /**
1335  * Parse flow pattern composed of the the eth item.
1336  *
1337  * @param pattern Pointer to the flow pattern table.
1338  * @param flow Pointer to the flow.
1339  * @param error Pointer to the flow error.
1340  * @returns 0 in case of success, negative value otherwise.
1341  */
1342 static int
1343 mrvl_parse_pattern_eth(const struct rte_flow_item pattern[],
1344 		       struct rte_flow *flow,
1345 		       struct rte_flow_error *error)
1346 {
1347 	return mrvl_parse_eth(pattern, flow, error);
1348 }
1349 
1350 /**
1351  * Parse flow pattern composed of the eth and vlan items.
1352  *
1353  * @param pattern Pointer to the flow pattern table.
1354  * @param flow Pointer to the flow.
1355  * @param error Pointer to the flow error.
1356  * @returns 0 in case of success, negative value otherwise.
1357  */
1358 static int
1359 mrvl_parse_pattern_eth_vlan(const struct rte_flow_item pattern[],
1360 			    struct rte_flow *flow,
1361 			    struct rte_flow_error *error)
1362 {
1363 	const struct rte_flow_item *item = mrvl_next_item(pattern);
1364 	int ret;
1365 
1366 	ret = mrvl_parse_eth(item, flow, error);
1367 	if (ret)
1368 		return ret;
1369 
1370 	item = mrvl_next_item(item + 1);
1371 
1372 	return mrvl_parse_vlan(item, flow, error);
1373 }
1374 
1375 /**
1376  * Parse flow pattern composed of the eth, vlan and ip4/ip6 items.
1377  *
1378  * @param pattern Pointer to the flow pattern table.
1379  * @param flow Pointer to the flow.
1380  * @param error Pointer to the flow error.
1381  * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
1382  * @returns 0 in case of success, negative value otherwise.
1383  */
1384 static int
1385 mrvl_parse_pattern_eth_vlan_ip4_ip6(const struct rte_flow_item pattern[],
1386 				    struct rte_flow *flow,
1387 				    struct rte_flow_error *error, int ip6)
1388 {
1389 	const struct rte_flow_item *item = mrvl_next_item(pattern);
1390 	int ret;
1391 
1392 	ret = mrvl_parse_eth(item, flow, error);
1393 	if (ret)
1394 		return ret;
1395 
1396 	item = mrvl_next_item(item + 1);
1397 	ret = mrvl_parse_vlan(item, flow, error);
1398 	if (ret)
1399 		return ret;
1400 
1401 	item = mrvl_next_item(item + 1);
1402 
1403 	return ip6 ? mrvl_parse_ip6(item, flow, error) :
1404 		     mrvl_parse_ip4(item, flow, error);
1405 }
1406 
1407 /**
1408  * Parse flow pattern composed of the eth, vlan and ipv4 items.
1409  *
1410  * @param pattern Pointer to the flow pattern table.
1411  * @param flow Pointer to the flow.
1412  * @param error Pointer to the flow error.
1413  * @returns 0 in case of success, negative value otherwise.
1414  */
1415 static int
1416 mrvl_parse_pattern_eth_vlan_ip4(const struct rte_flow_item pattern[],
1417 				struct rte_flow *flow,
1418 				struct rte_flow_error *error)
1419 {
1420 	return mrvl_parse_pattern_eth_vlan_ip4_ip6(pattern, flow, error, 0);
1421 }
1422 
1423 /**
1424  * Parse flow pattern composed of the eth, vlan and ipv6 items.
1425  *
1426  * @param pattern Pointer to the flow pattern table.
1427  * @param flow Pointer to the flow.
1428  * @param error Pointer to the flow error.
1429  * @returns 0 in case of success, negative value otherwise.
1430  */
1431 static int
1432 mrvl_parse_pattern_eth_vlan_ip6(const struct rte_flow_item pattern[],
1433 				struct rte_flow *flow,
1434 				struct rte_flow_error *error)
1435 {
1436 	return mrvl_parse_pattern_eth_vlan_ip4_ip6(pattern, flow, error, 1);
1437 }
1438 
1439 /**
1440  * Parse flow pattern composed of the eth and ip4/ip6 items.
1441  *
1442  * @param pattern Pointer to the flow pattern table.
1443  * @param flow Pointer to the flow.
1444  * @param error Pointer to the flow error.
1445  * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
1446  * @returns 0 in case of success, negative value otherwise.
1447  */
1448 static int
1449 mrvl_parse_pattern_eth_ip4_ip6(const struct rte_flow_item pattern[],
1450 			       struct rte_flow *flow,
1451 			       struct rte_flow_error *error, int ip6)
1452 {
1453 	const struct rte_flow_item *item = mrvl_next_item(pattern);
1454 	int ret;
1455 
1456 	ret = mrvl_parse_eth(item, flow, error);
1457 	if (ret)
1458 		return ret;
1459 
1460 	item = mrvl_next_item(item + 1);
1461 
1462 	return ip6 ? mrvl_parse_ip6(item, flow, error) :
1463 		     mrvl_parse_ip4(item, flow, error);
1464 }
1465 
1466 /**
1467  * Parse flow pattern composed of the eth and ipv4 items.
1468  *
1469  * @param pattern Pointer to the flow pattern table.
1470  * @param flow Pointer to the flow.
1471  * @param error Pointer to the flow error.
1472  * @returns 0 in case of success, negative value otherwise.
1473  */
1474 static inline int
1475 mrvl_parse_pattern_eth_ip4(const struct rte_flow_item pattern[],
1476 			   struct rte_flow *flow,
1477 			   struct rte_flow_error *error)
1478 {
1479 	return mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 0);
1480 }
1481 
1482 /**
1483  * Parse flow pattern composed of the eth and ipv6 items.
1484  *
1485  * @param pattern Pointer to the flow pattern table.
1486  * @param flow Pointer to the flow.
1487  * @param error Pointer to the flow error.
1488  * @returns 0 in case of success, negative value otherwise.
1489  */
1490 static inline int
1491 mrvl_parse_pattern_eth_ip6(const struct rte_flow_item pattern[],
1492 			   struct rte_flow *flow,
1493 			   struct rte_flow_error *error)
1494 {
1495 	return mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 1);
1496 }
1497 
1498 /**
1499  * Parse flow pattern composed of the eth, ip4 and tcp/udp items.
1500  *
1501  * @param pattern Pointer to the flow pattern table.
1502  * @param flow Pointer to the flow.
1503  * @param error Pointer to the flow error.
1504  * @param tcp 1 to parse tcp item, 0 to parse udp item.
1505  * @returns 0 in case of success, negative value otherwise.
1506  */
1507 static int
1508 mrvl_parse_pattern_eth_ip4_tcp_udp(const struct rte_flow_item pattern[],
1509 				   struct rte_flow *flow,
1510 				   struct rte_flow_error *error, int tcp)
1511 {
1512 	const struct rte_flow_item *item = mrvl_next_item(pattern);
1513 	int ret;
1514 
1515 	ret = mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 0);
1516 	if (ret)
1517 		return ret;
1518 
1519 	item = mrvl_next_item(item + 1);
1520 	item = mrvl_next_item(item + 1);
1521 
1522 	if (tcp)
1523 		return mrvl_parse_tcp(item, flow, error);
1524 
1525 	return mrvl_parse_udp(item, flow, error);
1526 }
1527 
1528 /**
1529  * Parse flow pattern composed of the eth, ipv4 and tcp items.
1530  *
1531  * @param pattern Pointer to the flow pattern table.
1532  * @param flow Pointer to the flow.
1533  * @param error Pointer to the flow error.
1534  * @returns 0 in case of success, negative value otherwise.
1535  */
1536 static inline int
1537 mrvl_parse_pattern_eth_ip4_tcp(const struct rte_flow_item pattern[],
1538 			       struct rte_flow *flow,
1539 			       struct rte_flow_error *error)
1540 {
1541 	return mrvl_parse_pattern_eth_ip4_tcp_udp(pattern, flow, error, 1);
1542 }
1543 
1544 /**
1545  * Parse flow pattern composed of the eth, ipv4 and udp items.
1546  *
1547  * @param pattern Pointer to the flow pattern table.
1548  * @param flow Pointer to the flow.
1549  * @param error Pointer to the flow error.
1550  * @returns 0 in case of success, negative value otherwise.
1551  */
1552 static inline int
1553 mrvl_parse_pattern_eth_ip4_udp(const struct rte_flow_item pattern[],
1554 			       struct rte_flow *flow,
1555 			       struct rte_flow_error *error)
1556 {
1557 	return mrvl_parse_pattern_eth_ip4_tcp_udp(pattern, flow, error, 0);
1558 }
1559 
1560 /**
1561  * Parse flow pattern composed of the eth, ipv6 and tcp/udp items.
1562  *
1563  * @param pattern Pointer to the flow pattern table.
1564  * @param flow Pointer to the flow.
1565  * @param error Pointer to the flow error.
1566  * @param tcp 1 to parse tcp item, 0 to parse udp item.
1567  * @returns 0 in case of success, negative value otherwise.
1568  */
1569 static int
1570 mrvl_parse_pattern_eth_ip6_tcp_udp(const struct rte_flow_item pattern[],
1571 				   struct rte_flow *flow,
1572 				   struct rte_flow_error *error, int tcp)
1573 {
1574 	const struct rte_flow_item *item = mrvl_next_item(pattern);
1575 	int ret;
1576 
1577 	ret = mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 1);
1578 	if (ret)
1579 		return ret;
1580 
1581 	item = mrvl_next_item(item + 1);
1582 	item = mrvl_next_item(item + 1);
1583 
1584 	if (tcp)
1585 		return mrvl_parse_tcp(item, flow, error);
1586 
1587 	return mrvl_parse_udp(item, flow, error);
1588 }
1589 
1590 /**
1591  * Parse flow pattern composed of the eth, ipv6 and tcp items.
1592  *
1593  * @param pattern Pointer to the flow pattern table.
1594  * @param flow Pointer to the flow.
1595  * @param error Pointer to the flow error.
1596  * @returns 0 in case of success, negative value otherwise.
1597  */
1598 static inline int
1599 mrvl_parse_pattern_eth_ip6_tcp(const struct rte_flow_item pattern[],
1600 			       struct rte_flow *flow,
1601 			       struct rte_flow_error *error)
1602 {
1603 	return mrvl_parse_pattern_eth_ip6_tcp_udp(pattern, flow, error, 1);
1604 }
1605 
1606 /**
1607  * Parse flow pattern composed of the eth, ipv6 and udp items.
1608  *
1609  * @param pattern Pointer to the flow pattern table.
1610  * @param flow Pointer to the flow.
1611  * @param error Pointer to the flow error.
1612  * @returns 0 in case of success, negative value otherwise.
1613  */
1614 static inline int
1615 mrvl_parse_pattern_eth_ip6_udp(const struct rte_flow_item pattern[],
1616 			       struct rte_flow *flow,
1617 			       struct rte_flow_error *error)
1618 {
1619 	return mrvl_parse_pattern_eth_ip6_tcp_udp(pattern, flow, error, 0);
1620 }
1621 
1622 /**
1623  * Parse flow pattern composed of the vlan item.
1624  *
1625  * @param pattern Pointer to the flow pattern table.
1626  * @param flow Pointer to the flow.
1627  * @param error Pointer to the flow error.
1628  * @returns 0 in case of success, negative value otherwise.
1629  */
1630 static int
1631 mrvl_parse_pattern_vlan(const struct rte_flow_item pattern[],
1632 			    struct rte_flow *flow,
1633 			    struct rte_flow_error *error)
1634 {
1635 	const struct rte_flow_item *item = mrvl_next_item(pattern);
1636 
1637 	return mrvl_parse_vlan(item, flow, error);
1638 }
1639 
1640 /**
1641  * Parse flow pattern composed of the vlan and ip4/ip6 items.
1642  *
1643  * @param pattern Pointer to the flow pattern table.
1644  * @param flow Pointer to the flow.
1645  * @param error Pointer to the flow error.
1646  * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
1647  * @returns 0 in case of success, negative value otherwise.
1648  */
1649 static int
1650 mrvl_parse_pattern_vlan_ip4_ip6(const struct rte_flow_item pattern[],
1651 				struct rte_flow *flow,
1652 				struct rte_flow_error *error, int ip6)
1653 {
1654 	const struct rte_flow_item *item = mrvl_next_item(pattern);
1655 	int ret;
1656 
1657 	ret = mrvl_parse_vlan(item, flow, error);
1658 	if (ret)
1659 		return ret;
1660 
1661 	item = mrvl_next_item(item + 1);
1662 
1663 	return ip6 ? mrvl_parse_ip6(item, flow, error) :
1664 		     mrvl_parse_ip4(item, flow, error);
1665 }
1666 
1667 /**
1668  * Parse flow pattern composed of the vlan and ipv4 items.
1669  *
1670  * @param pattern Pointer to the flow pattern table.
1671  * @param flow Pointer to the flow.
1672  * @param error Pointer to the flow error.
1673  * @returns 0 in case of success, negative value otherwise.
1674  */
1675 static inline int
1676 mrvl_parse_pattern_vlan_ip4(const struct rte_flow_item pattern[],
1677 			    struct rte_flow *flow,
1678 			    struct rte_flow_error *error)
1679 {
1680 	return mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 0);
1681 }
1682 
1683 /**
1684  * Parse flow pattern composed of the vlan, ipv4 and tcp/udp items.
1685  *
1686  * @param pattern Pointer to the flow pattern table.
1687  * @param flow Pointer to the flow.
1688  * @param error Pointer to the flow error.
1689  * @returns 0 in case of success, negative value otherwise.
1690  */
1691 static int
1692 mrvl_parse_pattern_vlan_ip_tcp_udp(const struct rte_flow_item pattern[],
1693 				   struct rte_flow *flow,
1694 				   struct rte_flow_error *error, int tcp)
1695 {
1696 	const struct rte_flow_item *item = mrvl_next_item(pattern);
1697 	int ret;
1698 
1699 	ret = mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 0);
1700 	if (ret)
1701 		return ret;
1702 
1703 	item = mrvl_next_item(item + 1);
1704 	item = mrvl_next_item(item + 1);
1705 
1706 	if (tcp)
1707 		return mrvl_parse_tcp(item, flow, error);
1708 
1709 	return mrvl_parse_udp(item, flow, error);
1710 }
1711 
1712 /**
1713  * Parse flow pattern composed of the vlan, ipv4 and tcp items.
1714  *
1715  * @param pattern Pointer to the flow pattern table.
1716  * @param flow Pointer to the flow.
1717  * @param error Pointer to the flow error.
1718  * @returns 0 in case of success, negative value otherwise.
1719  */
1720 static inline int
1721 mrvl_parse_pattern_vlan_ip_tcp(const struct rte_flow_item pattern[],
1722 			       struct rte_flow *flow,
1723 			       struct rte_flow_error *error)
1724 {
1725 	return mrvl_parse_pattern_vlan_ip_tcp_udp(pattern, flow, error, 1);
1726 }
1727 
1728 /**
1729  * Parse flow pattern composed of the vlan, ipv4 and udp items.
1730  *
1731  * @param pattern Pointer to the flow pattern table.
1732  * @param flow Pointer to the flow.
1733  * @param error Pointer to the flow error.
1734  * @returns 0 in case of success, negative value otherwise.
1735  */
1736 static inline int
1737 mrvl_parse_pattern_vlan_ip_udp(const struct rte_flow_item pattern[],
1738 			       struct rte_flow *flow,
1739 			       struct rte_flow_error *error)
1740 {
1741 	return mrvl_parse_pattern_vlan_ip_tcp_udp(pattern, flow, error, 0);
1742 }
1743 
1744 /**
1745  * Parse flow pattern composed of the vlan and ipv6 items.
1746  *
1747  * @param pattern Pointer to the flow pattern table.
1748  * @param flow Pointer to the flow.
1749  * @param error Pointer to the flow error.
1750  * @returns 0 in case of success, negative value otherwise.
1751  */
1752 static inline int
1753 mrvl_parse_pattern_vlan_ip6(const struct rte_flow_item pattern[],
1754 			    struct rte_flow *flow,
1755 			    struct rte_flow_error *error)
1756 {
1757 	return mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 1);
1758 }
1759 
1760 /**
1761  * Parse flow pattern composed of the vlan, ipv6 and tcp/udp items.
1762  *
1763  * @param pattern Pointer to the flow pattern table.
1764  * @param flow Pointer to the flow.
1765  * @param error Pointer to the flow error.
1766  * @returns 0 in case of success, negative value otherwise.
1767  */
1768 static int
1769 mrvl_parse_pattern_vlan_ip6_tcp_udp(const struct rte_flow_item pattern[],
1770 				    struct rte_flow *flow,
1771 				    struct rte_flow_error *error, int tcp)
1772 {
1773 	const struct rte_flow_item *item = mrvl_next_item(pattern);
1774 	int ret;
1775 
1776 	ret = mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 1);
1777 	if (ret)
1778 		return ret;
1779 
1780 	item = mrvl_next_item(item + 1);
1781 	item = mrvl_next_item(item + 1);
1782 
1783 	if (tcp)
1784 		return mrvl_parse_tcp(item, flow, error);
1785 
1786 	return mrvl_parse_udp(item, flow, error);
1787 }
1788 
1789 /**
1790  * Parse flow pattern composed of the vlan, ipv6 and tcp items.
1791  *
1792  * @param pattern Pointer to the flow pattern table.
1793  * @param flow Pointer to the flow.
1794  * @param error Pointer to the flow error.
1795  * @returns 0 in case of success, negative value otherwise.
1796  */
1797 static inline int
1798 mrvl_parse_pattern_vlan_ip6_tcp(const struct rte_flow_item pattern[],
1799 				struct rte_flow *flow,
1800 				struct rte_flow_error *error)
1801 {
1802 	return mrvl_parse_pattern_vlan_ip6_tcp_udp(pattern, flow, error, 1);
1803 }
1804 
1805 /**
1806  * Parse flow pattern composed of the vlan, ipv6 and udp items.
1807  *
1808  * @param pattern Pointer to the flow pattern table.
1809  * @param flow Pointer to the flow.
1810  * @param error Pointer to the flow error.
1811  * @returns 0 in case of success, negative value otherwise.
1812  */
1813 static inline int
1814 mrvl_parse_pattern_vlan_ip6_udp(const struct rte_flow_item pattern[],
1815 				struct rte_flow *flow,
1816 				struct rte_flow_error *error)
1817 {
1818 	return mrvl_parse_pattern_vlan_ip6_tcp_udp(pattern, flow, error, 0);
1819 }
1820 
1821 /**
1822  * Parse flow pattern composed of the ip4/ip6 item.
1823  *
1824  * @param pattern Pointer to the flow pattern table.
1825  * @param flow Pointer to the flow.
1826  * @param error Pointer to the flow error.
1827  * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
1828  * @returns 0 in case of success, negative value otherwise.
1829  */
1830 static int
1831 mrvl_parse_pattern_ip4_ip6(const struct rte_flow_item pattern[],
1832 		       struct rte_flow *flow,
1833 		       struct rte_flow_error *error, int ip6)
1834 {
1835 	const struct rte_flow_item *item = mrvl_next_item(pattern);
1836 
1837 	return ip6 ? mrvl_parse_ip6(item, flow, error) :
1838 		     mrvl_parse_ip4(item, flow, error);
1839 }
1840 
1841 /**
1842  * Parse flow pattern composed of the ipv4 item.
1843  *
1844  * @param pattern Pointer to the flow pattern table.
1845  * @param flow Pointer to the flow.
1846  * @param error Pointer to the flow error.
1847  * @returns 0 in case of success, negative value otherwise.
1848  */
1849 static inline int
1850 mrvl_parse_pattern_ip4(const struct rte_flow_item pattern[],
1851 		       struct rte_flow *flow,
1852 		       struct rte_flow_error *error)
1853 {
1854 	return mrvl_parse_pattern_ip4_ip6(pattern, flow, error, 0);
1855 }
1856 
1857 /**
1858  * Parse flow pattern composed of the ipv6 item.
1859  *
1860  * @param pattern Pointer to the flow pattern table.
1861  * @param flow Pointer to the flow.
1862  * @param error Pointer to the flow error.
1863  * @returns 0 in case of success, negative value otherwise.
1864  */
1865 static inline int
1866 mrvl_parse_pattern_ip6(const struct rte_flow_item pattern[],
1867 		       struct rte_flow *flow,
1868 		       struct rte_flow_error *error)
1869 {
1870 	return mrvl_parse_pattern_ip4_ip6(pattern, flow, error, 1);
1871 }
1872 
1873 /**
1874  * Parse flow pattern composed of the ip4/ip6 and tcp items.
1875  *
1876  * @param pattern Pointer to the flow pattern table.
1877  * @param flow Pointer to the flow.
1878  * @param error Pointer to the flow error.
1879  * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
1880  * @returns 0 in case of success, negative value otherwise.
1881  */
1882 static int
1883 mrvl_parse_pattern_ip4_ip6_tcp(const struct rte_flow_item pattern[],
1884 			   struct rte_flow *flow,
1885 			   struct rte_flow_error *error, int ip6)
1886 {
1887 	const struct rte_flow_item *item = mrvl_next_item(pattern);
1888 	int ret;
1889 
1890 	ret = ip6 ? mrvl_parse_ip6(item, flow, error) :
1891 		    mrvl_parse_ip4(item, flow, error);
1892 	if (ret)
1893 		return ret;
1894 
1895 	item = mrvl_next_item(item + 1);
1896 
1897 	return mrvl_parse_tcp(item, flow, error);
1898 }
1899 
1900 /**
1901  * Parse flow pattern composed of the ipv4 and tcp items.
1902  *
1903  * @param pattern Pointer to the flow pattern table.
1904  * @param flow Pointer to the flow.
1905  * @param error Pointer to the flow error.
1906  * @returns 0 in case of success, negative value otherwise.
1907  */
1908 static inline int
1909 mrvl_parse_pattern_ip4_tcp(const struct rte_flow_item pattern[],
1910 			   struct rte_flow *flow,
1911 			   struct rte_flow_error *error)
1912 {
1913 	return mrvl_parse_pattern_ip4_ip6_tcp(pattern, flow, error, 0);
1914 }
1915 
1916 /**
1917  * Parse flow pattern composed of the ipv6 and tcp items.
1918  *
1919  * @param pattern Pointer to the flow pattern table.
1920  * @param flow Pointer to the flow.
1921  * @param error Pointer to the flow error.
1922  * @returns 0 in case of success, negative value otherwise.
1923  */
1924 static inline int
1925 mrvl_parse_pattern_ip6_tcp(const struct rte_flow_item pattern[],
1926 			   struct rte_flow *flow,
1927 			   struct rte_flow_error *error)
1928 {
1929 	return mrvl_parse_pattern_ip4_ip6_tcp(pattern, flow, error, 1);
1930 }
1931 
1932 /**
1933  * Parse flow pattern composed of the ipv4/ipv6 and udp items.
1934  *
1935  * @param pattern Pointer to the flow pattern table.
1936  * @param flow Pointer to the flow.
1937  * @param error Pointer to the flow error.
1938  * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
1939  * @returns 0 in case of success, negative value otherwise.
1940  */
1941 static int
1942 mrvl_parse_pattern_ip4_ip6_udp(const struct rte_flow_item pattern[],
1943 			   struct rte_flow *flow,
1944 			   struct rte_flow_error *error, int ip6)
1945 {
1946 	const struct rte_flow_item *item = mrvl_next_item(pattern);
1947 	int ret;
1948 
1949 	ret = ip6 ? mrvl_parse_ip6(item, flow, error) :
1950 		    mrvl_parse_ip4(item, flow, error);
1951 	if (ret)
1952 		return ret;
1953 
1954 	item = mrvl_next_item(item + 1);
1955 
1956 	return mrvl_parse_udp(item, flow, error);
1957 }
1958 
1959 /**
1960  * Parse flow pattern composed of the ipv4 and udp items.
1961  *
1962  * @param pattern Pointer to the flow pattern table.
1963  * @param flow Pointer to the flow.
1964  * @param error Pointer to the flow error.
1965  * @returns 0 in case of success, negative value otherwise.
1966  */
1967 static inline int
1968 mrvl_parse_pattern_ip4_udp(const struct rte_flow_item pattern[],
1969 			   struct rte_flow *flow,
1970 			   struct rte_flow_error *error)
1971 {
1972 	return mrvl_parse_pattern_ip4_ip6_udp(pattern, flow, error, 0);
1973 }
1974 
1975 /**
1976  * Parse flow pattern composed of the ipv6 and udp items.
1977  *
1978  * @param pattern Pointer to the flow pattern table.
1979  * @param flow Pointer to the flow.
1980  * @param error Pointer to the flow error.
1981  * @returns 0 in case of success, negative value otherwise.
1982  */
1983 static inline int
1984 mrvl_parse_pattern_ip6_udp(const struct rte_flow_item pattern[],
1985 			   struct rte_flow *flow,
1986 			   struct rte_flow_error *error)
1987 {
1988 	return mrvl_parse_pattern_ip4_ip6_udp(pattern, flow, error, 1);
1989 }
1990 
1991 /**
1992  * Parse flow pattern composed of the tcp item.
1993  *
1994  * @param pattern Pointer to the flow pattern table.
1995  * @param flow Pointer to the flow.
1996  * @param error Pointer to the flow error.
1997  * @returns 0 in case of success, negative value otherwise.
1998  */
1999 static int
2000 mrvl_parse_pattern_tcp(const struct rte_flow_item pattern[],
2001 		       struct rte_flow *flow,
2002 		       struct rte_flow_error *error)
2003 {
2004 	const struct rte_flow_item *item = mrvl_next_item(pattern);
2005 
2006 	return mrvl_parse_tcp(item, flow, error);
2007 }
2008 
2009 /**
2010  * Parse flow pattern composed of the udp item.
2011  *
2012  * @param pattern Pointer to the flow pattern table.
2013  * @param flow Pointer to the flow.
2014  * @param error Pointer to the flow error.
2015  * @returns 0 in case of success, negative value otherwise.
2016  */
2017 static int
2018 mrvl_parse_pattern_udp(const struct rte_flow_item pattern[],
2019 		       struct rte_flow *flow,
2020 		       struct rte_flow_error *error)
2021 {
2022 	const struct rte_flow_item *item = mrvl_next_item(pattern);
2023 
2024 	return mrvl_parse_udp(item, flow, error);
2025 }
2026 
2027 /**
2028  * Structure used to map specific flow pattern to the pattern parse callback
2029  * which will iterate over each pattern item and extract relevant data.
2030  */
2031 static const struct {
2032 	const enum rte_flow_item_type *pattern;
2033 	int (*parse)(const struct rte_flow_item pattern[],
2034 		struct rte_flow *flow,
2035 		struct rte_flow_error *error);
2036 } mrvl_patterns[] = {
2037 	{ pattern_eth, mrvl_parse_pattern_eth },
2038 	{ pattern_eth_vlan, mrvl_parse_pattern_eth_vlan },
2039 	{ pattern_eth_vlan_ip, mrvl_parse_pattern_eth_vlan_ip4 },
2040 	{ pattern_eth_vlan_ip6, mrvl_parse_pattern_eth_vlan_ip6 },
2041 	{ pattern_eth_ip4, mrvl_parse_pattern_eth_ip4 },
2042 	{ pattern_eth_ip4_tcp, mrvl_parse_pattern_eth_ip4_tcp },
2043 	{ pattern_eth_ip4_udp, mrvl_parse_pattern_eth_ip4_udp },
2044 	{ pattern_eth_ip6, mrvl_parse_pattern_eth_ip6 },
2045 	{ pattern_eth_ip6_tcp, mrvl_parse_pattern_eth_ip6_tcp },
2046 	{ pattern_eth_ip6_udp, mrvl_parse_pattern_eth_ip6_udp },
2047 	{ pattern_vlan, mrvl_parse_pattern_vlan },
2048 	{ pattern_vlan_ip, mrvl_parse_pattern_vlan_ip4 },
2049 	{ pattern_vlan_ip_tcp, mrvl_parse_pattern_vlan_ip_tcp },
2050 	{ pattern_vlan_ip_udp, mrvl_parse_pattern_vlan_ip_udp },
2051 	{ pattern_vlan_ip6, mrvl_parse_pattern_vlan_ip6 },
2052 	{ pattern_vlan_ip6_tcp, mrvl_parse_pattern_vlan_ip6_tcp },
2053 	{ pattern_vlan_ip6_udp, mrvl_parse_pattern_vlan_ip6_udp },
2054 	{ pattern_ip, mrvl_parse_pattern_ip4 },
2055 	{ pattern_ip_tcp, mrvl_parse_pattern_ip4_tcp },
2056 	{ pattern_ip_udp, mrvl_parse_pattern_ip4_udp },
2057 	{ pattern_ip6, mrvl_parse_pattern_ip6 },
2058 	{ pattern_ip6_tcp, mrvl_parse_pattern_ip6_tcp },
2059 	{ pattern_ip6_udp, mrvl_parse_pattern_ip6_udp },
2060 	{ pattern_tcp, mrvl_parse_pattern_tcp },
2061 	{ pattern_udp, mrvl_parse_pattern_udp }
2062 };
2063 
2064 /**
2065  * Check whether provided pattern matches any of the supported ones.
2066  *
2067  * @param type_pattern Pointer to the pattern type.
2068  * @param item_pattern Pointer to the flow pattern.
2069  * @returns 1 in case of success, 0 value otherwise.
2070  */
2071 static int
2072 mrvl_patterns_match(const enum rte_flow_item_type *type_pattern,
2073 		    const struct rte_flow_item *item_pattern)
2074 {
2075 	const enum rte_flow_item_type *type = type_pattern;
2076 	const struct rte_flow_item *item = item_pattern;
2077 
2078 	for (;;) {
2079 		if (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
2080 			item++;
2081 			continue;
2082 		}
2083 
2084 		if (*type == RTE_FLOW_ITEM_TYPE_END ||
2085 		    item->type == RTE_FLOW_ITEM_TYPE_END)
2086 			break;
2087 
2088 		if (*type != item->type)
2089 			break;
2090 
2091 		item++;
2092 		type++;
2093 	}
2094 
2095 	return *type == item->type;
2096 }
2097 
/**
 * Parse flow attribute.
 *
 * This will check whether the provided attribute's flags are supported:
 * only ingress rules with default group, default priority and without
 * transfer are accepted. Checks are performed in order, so the first
 * unsupported flag determines the reported error.
 *
 * @param priv Unused
 * @param attr Pointer to the flow attribute.
 * @param flow Unused
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
 */
static int
mrvl_flow_parse_attr(struct mrvl_priv *priv __rte_unused,
		     const struct rte_flow_attr *attr,
		     struct rte_flow *flow __rte_unused,
		     struct rte_flow_error *error)
{
	if (!attr) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute");
		return -rte_errno;
	}

	if (attr->group) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
				   "Groups are not supported");
		return -rte_errno;
	}
	if (attr->priority) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, NULL,
				   "Priorities are not supported");
		return -rte_errno;
	}
	if (!attr->ingress) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, NULL,
				   "Only ingress is supported");
		return -rte_errno;
	}
	if (attr->egress) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
				   "Egress is not supported");
		return -rte_errno;
	}
	if (attr->transfer) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL,
				   "Transfer is not supported");
		return -rte_errno;
	}

	return 0;
}
2154 
2155 /**
2156  * Parse flow pattern.
2157  *
2158  * Specific classifier rule will be created as well.
2159  *
2160  * @param priv Unused
2161  * @param pattern Pointer to the flow pattern.
2162  * @param flow Pointer to the flow.
2163  * @param error Pointer to the flow error.
2164  * @returns 0 in case of success, negative value otherwise.
2165  */
2166 static int
2167 mrvl_flow_parse_pattern(struct mrvl_priv *priv __rte_unused,
2168 			const struct rte_flow_item pattern[],
2169 			struct rte_flow *flow,
2170 			struct rte_flow_error *error)
2171 {
2172 	unsigned int i;
2173 	int ret;
2174 
2175 	for (i = 0; i < RTE_DIM(mrvl_patterns); i++) {
2176 		if (!mrvl_patterns_match(mrvl_patterns[i].pattern, pattern))
2177 			continue;
2178 
2179 		ret = mrvl_patterns[i].parse(pattern, flow, error);
2180 		if (ret)
2181 			mrvl_free_all_key_mask(&flow->rule);
2182 
2183 		return ret;
2184 	}
2185 
2186 	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2187 			   "Unsupported pattern");
2188 
2189 	return -rte_errno;
2190 }
2191 
2192 /**
2193  * Parse flow actions.
2194  *
2195  * @param priv Pointer to the port's private data.
2196  * @param actions Pointer the action table.
2197  * @param flow Pointer to the flow.
2198  * @param error Pointer to the flow error.
2199  * @returns 0 in case of success, negative value otherwise.
2200  */
2201 static int
2202 mrvl_flow_parse_actions(struct mrvl_priv *priv,
2203 			const struct rte_flow_action actions[],
2204 			struct rte_flow *flow,
2205 			struct rte_flow_error *error)
2206 {
2207 	const struct rte_flow_action *action = actions;
2208 	int specified = 0;
2209 
2210 	for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
2211 		if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
2212 			continue;
2213 
2214 		if (action->type == RTE_FLOW_ACTION_TYPE_DROP) {
2215 			flow->cos.ppio = priv->ppio;
2216 			flow->cos.tc = 0;
2217 			flow->action.type = PP2_CLS_TBL_ACT_DROP;
2218 			flow->action.cos = &flow->cos;
2219 			specified++;
2220 		} else if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
2221 			const struct rte_flow_action_queue *q =
2222 				(const struct rte_flow_action_queue *)
2223 				action->conf;
2224 
2225 			if (q->index > priv->nb_rx_queues) {
2226 				rte_flow_error_set(error, EINVAL,
2227 						RTE_FLOW_ERROR_TYPE_ACTION,
2228 						NULL,
2229 						"Queue index out of range");
2230 				return -rte_errno;
2231 			}
2232 
2233 			if (priv->rxq_map[q->index].tc == MRVL_UNKNOWN_TC) {
2234 				/*
2235 				 * Unknown TC mapping, mapping will not have
2236 				 * a correct queue.
2237 				 */
2238 				MRVL_LOG(ERR,
2239 					"Unknown TC mapping for queue %hu eth%hhu",
2240 					q->index, priv->ppio_id);
2241 
2242 				rte_flow_error_set(error, EFAULT,
2243 						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2244 						NULL, NULL);
2245 				return -rte_errno;
2246 			}
2247 
2248 			MRVL_LOG(DEBUG,
2249 				"Action: Assign packets to queue %d, tc:%d, q:%d",
2250 				q->index, priv->rxq_map[q->index].tc,
2251 				priv->rxq_map[q->index].inq);
2252 
2253 			flow->cos.ppio = priv->ppio;
2254 			flow->cos.tc = priv->rxq_map[q->index].tc;
2255 			flow->action.type = PP2_CLS_TBL_ACT_DONE;
2256 			flow->action.cos = &flow->cos;
2257 			specified++;
2258 		} else if (action->type == RTE_FLOW_ACTION_TYPE_METER) {
2259 			const struct rte_flow_action_meter *meter;
2260 			struct mrvl_mtr *mtr;
2261 
2262 			meter = action->conf;
2263 			if (!meter)
2264 				return -rte_flow_error_set(error, EINVAL,
2265 						RTE_FLOW_ERROR_TYPE_ACTION,
2266 						NULL, "Invalid meter\n");
2267 
2268 			LIST_FOREACH(mtr, &priv->mtrs, next)
2269 				if (mtr->mtr_id == meter->mtr_id)
2270 					break;
2271 
2272 			if (!mtr)
2273 				return -rte_flow_error_set(error, EINVAL,
2274 						RTE_FLOW_ERROR_TYPE_ACTION,
2275 						NULL,
2276 						"Meter id does not exist\n");
2277 
2278 			if (!mtr->shared && mtr->refcnt)
2279 				return -rte_flow_error_set(error, EPERM,
2280 						RTE_FLOW_ERROR_TYPE_ACTION,
2281 						NULL,
2282 						"Meter cannot be shared\n");
2283 
2284 			/*
2285 			 * In case cos has already been set
2286 			 * do not modify it.
2287 			 */
2288 			if (!flow->cos.ppio) {
2289 				flow->cos.ppio = priv->ppio;
2290 				flow->cos.tc = 0;
2291 			}
2292 
2293 			flow->action.type = PP2_CLS_TBL_ACT_DONE;
2294 			flow->action.cos = &flow->cos;
2295 			flow->action.plcr = mtr->enabled ? mtr->plcr : NULL;
2296 			flow->mtr = mtr;
2297 			mtr->refcnt++;
2298 			specified++;
2299 		} else {
2300 			rte_flow_error_set(error, ENOTSUP,
2301 					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2302 					   "Action not supported");
2303 			return -rte_errno;
2304 		}
2305 	}
2306 
2307 	if (!specified) {
2308 		rte_flow_error_set(error, EINVAL,
2309 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2310 				   "Action not specified");
2311 		return -rte_errno;
2312 	}
2313 
2314 	return 0;
2315 }
2316 
2317 /**
2318  * Parse flow attribute, pattern and actions.
2319  *
2320  * @param priv Pointer to the port's private data.
2321  * @param attr Pointer to the flow attribute.
2322  * @param pattern Pointer to the flow pattern.
2323  * @param actions Pointer to the flow actions.
2324  * @param flow Pointer to the flow.
2325  * @param error Pointer to the flow error.
2326  * @returns 0 on success, negative value otherwise.
2327  */
2328 static int
2329 mrvl_flow_parse(struct mrvl_priv *priv, const struct rte_flow_attr *attr,
2330 		const struct rte_flow_item pattern[],
2331 		const struct rte_flow_action actions[],
2332 		struct rte_flow *flow,
2333 		struct rte_flow_error *error)
2334 {
2335 	int ret;
2336 
2337 	ret = mrvl_flow_parse_attr(priv, attr, flow, error);
2338 	if (ret)
2339 		return ret;
2340 
2341 	ret = mrvl_flow_parse_pattern(priv, pattern, flow, error);
2342 	if (ret)
2343 		return ret;
2344 
2345 	return mrvl_flow_parse_actions(priv, actions, flow, error);
2346 }
2347 
2348 /**
2349  * Get engine type for the given flow.
2350  *
2351  * @param field Pointer to the flow.
2352  * @returns The type of the engine.
2353  */
2354 static inline enum pp2_cls_tbl_type
2355 mrvl_engine_type(const struct rte_flow *flow)
2356 {
2357 	int i, size = 0;
2358 
2359 	for (i = 0; i < flow->rule.num_fields; i++)
2360 		size += flow->rule.fields[i].size;
2361 
2362 	/*
2363 	 * For maskable engine type the key size must be up to 8 bytes.
2364 	 * For keys with size bigger than 8 bytes, engine type must
2365 	 * be set to exact match.
2366 	 */
2367 	if (size > 8)
2368 		return PP2_CLS_TBL_EXACT_MATCH;
2369 
2370 	return PP2_CLS_TBL_MASKABLE;
2371 }
2372 
2373 /**
2374  * Create classifier table.
2375  *
2376  * @param dev Pointer to the device.
2377  * @param flow Pointer to the very first flow.
2378  * @returns 0 in case of success, negative value otherwise.
2379  */
2380 static int
2381 mrvl_create_cls_table(struct rte_eth_dev *dev, struct rte_flow *first_flow)
2382 {
2383 	struct mrvl_priv *priv = dev->data->dev_private;
2384 	struct pp2_cls_tbl_key *key = &priv->cls_tbl_params.key;
2385 	int ret;
2386 
2387 	if (priv->cls_tbl) {
2388 		pp2_cls_tbl_deinit(priv->cls_tbl);
2389 		priv->cls_tbl = NULL;
2390 	}
2391 
2392 	memset(&priv->cls_tbl_params, 0, sizeof(priv->cls_tbl_params));
2393 
2394 	priv->cls_tbl_params.type = mrvl_engine_type(first_flow);
2395 	MRVL_LOG(INFO, "Setting cls search engine type to %s",
2396 			priv->cls_tbl_params.type == PP2_CLS_TBL_EXACT_MATCH ?
2397 			"exact" : "maskable");
2398 	priv->cls_tbl_params.max_num_rules = MRVL_CLS_MAX_NUM_RULES;
2399 	priv->cls_tbl_params.default_act.type = PP2_CLS_TBL_ACT_DONE;
2400 	priv->cls_tbl_params.default_act.cos = &first_flow->cos;
2401 
2402 	if (first_flow->pattern & F_DMAC) {
2403 		key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH;
2404 		key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_DA;
2405 		key->key_size += 6;
2406 		key->num_fields += 1;
2407 	}
2408 
2409 	if (first_flow->pattern & F_SMAC) {
2410 		key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH;
2411 		key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_SA;
2412 		key->key_size += 6;
2413 		key->num_fields += 1;
2414 	}
2415 
2416 	if (first_flow->pattern & F_TYPE) {
2417 		key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH;
2418 		key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_TYPE;
2419 		key->key_size += 2;
2420 		key->num_fields += 1;
2421 	}
2422 
2423 	if (first_flow->pattern & F_VLAN_ID) {
2424 		key->proto_field[key->num_fields].proto = MV_NET_PROTO_VLAN;
2425 		key->proto_field[key->num_fields].field.vlan = MV_NET_VLAN_F_ID;
2426 		key->key_size += 2;
2427 		key->num_fields += 1;
2428 	}
2429 
2430 	if (first_flow->pattern & F_VLAN_PRI) {
2431 		key->proto_field[key->num_fields].proto = MV_NET_PROTO_VLAN;
2432 		key->proto_field[key->num_fields].field.vlan =
2433 			MV_NET_VLAN_F_PRI;
2434 		key->key_size += 1;
2435 		key->num_fields += 1;
2436 	}
2437 
2438 	if (first_flow->pattern & F_IP4_TOS) {
2439 		key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
2440 		key->proto_field[key->num_fields].field.ipv4 =
2441 							MV_NET_IP4_F_DSCP;
2442 		key->key_size += 1;
2443 		key->num_fields += 1;
2444 	}
2445 
2446 	if (first_flow->pattern & F_IP4_SIP) {
2447 		key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
2448 		key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_SA;
2449 		key->key_size += 4;
2450 		key->num_fields += 1;
2451 	}
2452 
2453 	if (first_flow->pattern & F_IP4_DIP) {
2454 		key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
2455 		key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_DA;
2456 		key->key_size += 4;
2457 		key->num_fields += 1;
2458 	}
2459 
2460 	if (first_flow->pattern & F_IP4_PROTO) {
2461 		key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
2462 		key->proto_field[key->num_fields].field.ipv4 =
2463 			MV_NET_IP4_F_PROTO;
2464 		key->key_size += 1;
2465 		key->num_fields += 1;
2466 	}
2467 
2468 	if (first_flow->pattern & F_IP6_SIP) {
2469 		key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
2470 		key->proto_field[key->num_fields].field.ipv6 = MV_NET_IP6_F_SA;
2471 		key->key_size += 16;
2472 		key->num_fields += 1;
2473 	}
2474 
2475 	if (first_flow->pattern & F_IP6_DIP) {
2476 		key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
2477 		key->proto_field[key->num_fields].field.ipv6 = MV_NET_IP6_F_DA;
2478 		key->key_size += 16;
2479 		key->num_fields += 1;
2480 	}
2481 
2482 	if (first_flow->pattern & F_IP6_FLOW) {
2483 		key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
2484 		key->proto_field[key->num_fields].field.ipv6 =
2485 			MV_NET_IP6_F_FLOW;
2486 		key->key_size += 3;
2487 		key->num_fields += 1;
2488 	}
2489 
2490 	if (first_flow->pattern & F_IP6_NEXT_HDR) {
2491 		key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
2492 		key->proto_field[key->num_fields].field.ipv6 =
2493 			MV_NET_IP6_F_NEXT_HDR;
2494 		key->key_size += 1;
2495 		key->num_fields += 1;
2496 	}
2497 
2498 	if (first_flow->pattern & F_TCP_SPORT) {
2499 		key->proto_field[key->num_fields].proto = MV_NET_PROTO_TCP;
2500 		key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_SP;
2501 		key->key_size += 2;
2502 		key->num_fields += 1;
2503 	}
2504 
2505 	if (first_flow->pattern & F_TCP_DPORT) {
2506 		key->proto_field[key->num_fields].proto = MV_NET_PROTO_TCP;
2507 		key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_DP;
2508 		key->key_size += 2;
2509 		key->num_fields += 1;
2510 	}
2511 
2512 	if (first_flow->pattern & F_UDP_SPORT) {
2513 		key->proto_field[key->num_fields].proto = MV_NET_PROTO_UDP;
2514 		key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_SP;
2515 		key->key_size += 2;
2516 		key->num_fields += 1;
2517 	}
2518 
2519 	if (first_flow->pattern & F_UDP_DPORT) {
2520 		key->proto_field[key->num_fields].proto = MV_NET_PROTO_UDP;
2521 		key->proto_field[key->num_fields].field.udp = MV_NET_TCP_F_DP;
2522 		key->key_size += 2;
2523 		key->num_fields += 1;
2524 	}
2525 
2526 	ret = pp2_cls_tbl_init(&priv->cls_tbl_params, &priv->cls_tbl);
2527 	if (!ret)
2528 		priv->cls_tbl_pattern = first_flow->pattern;
2529 
2530 	return ret;
2531 }
2532 
2533 /**
2534  * Check whether new flow can be added to the table
2535  *
2536  * @param priv Pointer to the port's private data.
2537  * @param flow Pointer to the new flow.
2538  * @return 1 in case flow can be added, 0 otherwise.
2539  */
2540 static inline int
2541 mrvl_flow_can_be_added(struct mrvl_priv *priv, const struct rte_flow *flow)
2542 {
2543 	return flow->pattern == priv->cls_tbl_pattern &&
2544 	       mrvl_engine_type(flow) == priv->cls_tbl_params.type;
2545 }
2546 
/**
 * DPDK flow create callback called when flow is to be created.
 *
 * Parses the rule, (re)creates the classifier table when needed and adds
 * the rule to it. The port must already be started.
 *
 * @param dev Pointer to the device.
 * @param attr Pointer to the flow attribute.
 * @param pattern Pointer to the flow pattern.
 * @param actions Pointer to the flow actions.
 * @param error Pointer to the flow error.
 * @returns Pointer to the created flow in case of success, NULL otherwise.
 */
static struct rte_flow *
mrvl_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct rte_flow *flow, *first;
	int ret;

	if (!dev->data->dev_started) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Port must be started first\n");
		return NULL;
	}

	flow = rte_zmalloc_socket(NULL, sizeof(*flow), 0, rte_socket_id());
	if (!flow)
		return NULL;

	ret = mrvl_flow_parse(priv, attr, pattern, actions, flow, error);
	if (ret)
		goto out;

	/*
	 * Four cases here:
	 *
	 * 1. In case table does not exist - create one.
	 * 2. In case table exists, is empty and new flow cannot be added
	 *    recreate table.
	 * 3. In case table is not empty and new flow matches table format
	 *    add it.
	 * 4. Otherwise flow cannot be added.
	 */
	first = LIST_FIRST(&priv->flows);
	if (!priv->cls_tbl) {
		ret = mrvl_create_cls_table(dev, flow);
	} else if (!first && !mrvl_flow_can_be_added(priv, flow)) {
		ret = mrvl_create_cls_table(dev, flow);
	} else if (mrvl_flow_can_be_added(priv, flow)) {
		ret = 0;
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Pattern does not match cls table format\n");
		goto out;
	}

	if (ret) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to create cls table\n");
		goto out;
	}

	ret = pp2_cls_tbl_add_rule(priv->cls_tbl, &flow->rule, &flow->action);
	if (ret) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to add rule\n");
		goto out;
	}

	LIST_INSERT_HEAD(&priv->flows, flow, next);

	return flow;
out:
	/*
	 * NOTE(review): when we land here after mrvl_flow_parse() succeeded,
	 * the rule's key/mask buffers (freed elsewhere via
	 * mrvl_free_all_key_mask) and a meter refcnt taken in
	 * mrvl_flow_parse_actions appear to be leaked — confirm and release
	 * them on this path.
	 */
	rte_free(flow);
	return NULL;
}
2629 
2630 /**
2631  * Remove classifier rule associated with given flow.
2632  *
2633  * @param priv Pointer to the port's private data.
2634  * @param flow Pointer to the flow.
2635  * @param error Pointer to the flow error.
2636  * @returns 0 in case of success, negative value otherwise.
2637  */
2638 static int
2639 mrvl_flow_remove(struct mrvl_priv *priv, struct rte_flow *flow,
2640 		 struct rte_flow_error *error)
2641 {
2642 	int ret;
2643 
2644 	if (!priv->cls_tbl) {
2645 		rte_flow_error_set(error, EINVAL,
2646 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2647 				   "Classifier table not initialized");
2648 		return -rte_errno;
2649 	}
2650 
2651 	ret = pp2_cls_tbl_remove_rule(priv->cls_tbl, &flow->rule);
2652 	if (ret) {
2653 		rte_flow_error_set(error, EINVAL,
2654 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2655 				   "Failed to remove rule");
2656 		return -rte_errno;
2657 	}
2658 
2659 	mrvl_free_all_key_mask(&flow->rule);
2660 
2661 	if (flow->mtr) {
2662 		flow->mtr->refcnt--;
2663 		flow->mtr = NULL;
2664 	}
2665 
2666 	return 0;
2667 }
2668 
2669 /**
2670  * DPDK flow destroy callback called when flow is to be removed.
2671  *
2672  * @param dev Pointer to the device.
2673  * @param flow Pointer to the flow.
2674  * @param error Pointer to the flow error.
2675  * @returns 0 in case of success, negative value otherwise.
2676  */
2677 static int
2678 mrvl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
2679 		  struct rte_flow_error *error)
2680 {
2681 	struct mrvl_priv *priv = dev->data->dev_private;
2682 	struct rte_flow *f;
2683 	int ret;
2684 
2685 	LIST_FOREACH(f, &priv->flows, next) {
2686 		if (f == flow)
2687 			break;
2688 	}
2689 
2690 	if (!flow) {
2691 		rte_flow_error_set(error, EINVAL,
2692 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2693 				   "Rule was not found");
2694 		return -rte_errno;
2695 	}
2696 
2697 	LIST_REMOVE(f, next);
2698 
2699 	ret = mrvl_flow_remove(priv, flow, error);
2700 	if (ret)
2701 		return ret;
2702 
2703 	rte_free(flow);
2704 
2705 	return 0;
2706 }
2707 
2708 /**
2709  * DPDK flow callback called to verify given attribute, pattern and actions.
2710  *
2711  * @param dev Pointer to the device.
2712  * @param attr Pointer to the flow attribute.
2713  * @param pattern Pointer to the flow pattern.
2714  * @param actions Pointer to the flow actions.
2715  * @param error Pointer to the flow error.
2716  * @returns 0 on success, negative value otherwise.
2717  */
2718 static int
2719 mrvl_flow_validate(struct rte_eth_dev *dev,
2720 		   const struct rte_flow_attr *attr,
2721 		   const struct rte_flow_item pattern[],
2722 		   const struct rte_flow_action actions[],
2723 		   struct rte_flow_error *error)
2724 {
2725 	static struct rte_flow *flow;
2726 
2727 	flow = mrvl_flow_create(dev, attr, pattern, actions, error);
2728 	if (!flow)
2729 		return -rte_errno;
2730 
2731 	mrvl_flow_destroy(dev, flow, error);
2732 
2733 	return 0;
2734 }
2735 
2736 /**
2737  * DPDK flow flush callback called when flows are to be flushed.
2738  *
2739  * @param dev Pointer to the device.
2740  * @param error Pointer to the flow error.
2741  * @returns 0 in case of success, negative value otherwise.
2742  */
2743 static int
2744 mrvl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
2745 {
2746 	struct mrvl_priv *priv = dev->data->dev_private;
2747 
2748 	while (!LIST_EMPTY(&priv->flows)) {
2749 		struct rte_flow *flow = LIST_FIRST(&priv->flows);
2750 		int ret = mrvl_flow_remove(priv, flow, error);
2751 		if (ret)
2752 			return ret;
2753 
2754 		LIST_REMOVE(flow, next);
2755 		rte_free(flow);
2756 	}
2757 
2758 	return 0;
2759 }
2760 
2761 /**
2762  * DPDK flow isolate callback called to isolate port.
2763  *
2764  * @param dev Pointer to the device.
2765  * @param enable Pass 0/1 to disable/enable port isolation.
2766  * @param error Pointer to the flow error.
2767  * @returns 0 in case of success, negative value otherwise.
2768  */
2769 static int
2770 mrvl_flow_isolate(struct rte_eth_dev *dev, int enable,
2771 		  struct rte_flow_error *error)
2772 {
2773 	struct mrvl_priv *priv = dev->data->dev_private;
2774 
2775 	if (dev->data->dev_started) {
2776 		rte_flow_error_set(error, EBUSY,
2777 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2778 				   NULL, "Port must be stopped first\n");
2779 		return -rte_errno;
2780 	}
2781 
2782 	priv->isolated = enable;
2783 
2784 	return 0;
2785 }
2786 
/** rte_flow operations exported by this driver. */
const struct rte_flow_ops mrvl_flow_ops = {
	.validate = mrvl_flow_validate,
	.create = mrvl_flow_create,
	.destroy = mrvl_flow_destroy,
	.flush = mrvl_flow_flush,
	.isolate = mrvl_flow_isolate
};
2794 
2795 /**
2796  * Initialize flow resources.
2797  *
2798  * @param dev Pointer to the device.
2799  */
2800 void
2801 mrvl_flow_init(struct rte_eth_dev *dev)
2802 {
2803 	struct mrvl_priv *priv = dev->data->dev_private;
2804 
2805 	LIST_INIT(&priv->flows);
2806 }
2807 
2808 /**
2809  * Cleanup flow resources.
2810  *
2811  * @param dev Pointer to the device.
2812  */
2813 void
2814 mrvl_flow_deinit(struct rte_eth_dev *dev)
2815 {
2816 	struct mrvl_priv *priv = dev->data->dev_private;
2817 
2818 	mrvl_flow_flush(dev, NULL);
2819 
2820 	if (priv->cls_tbl) {
2821 		pp2_cls_tbl_deinit(priv->cls_tbl);
2822 		priv->cls_tbl = NULL;
2823 	}
2824 }
2825