/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Marvell International Ltd.
 * Copyright(c) 2018 Semihalf.
 * All rights reserved.
 */

#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_log.h>

#include <arpa/inet.h>

#include "mrvl_flow.h"
#include "mrvl_qos.h"

/** Number of rules in the classifier table. */
#define MRVL_CLS_MAX_NUM_RULES 20

/** Size of the classifier key and mask strings. */
#define MRVL_CLS_STR_SIZE_MAX 40

#define MRVL_VLAN_ID_MASK 0x0fff
#define MRVL_VLAN_PRI_MASK 0x7000
#define MRVL_IPV4_DSCP_MASK 0xfc
#define MRVL_IPV4_ADDR_MASK 0xffffffff
#define MRVL_IPV6_FLOW_MASK 0x0fffff

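/*
 * Illustrative note (editorial, not part of the MUSDK API): each
 * mrvl_parse_*() helper below consumes one field of an rte_flow item and
 * appends a single pp2_cls_rule_key_field to flow->rule, storing the key
 * and mask as text strings and recording the matched protocol field in
 * flow->table_key.
 */
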
/**
 * Allocate memory for classifier rule key and mask fields.
 *
 * @param field Pointer to the classifier rule key field.
 * @returns 0 in case of success, negative value otherwise.
 */
static int
mrvl_alloc_key_mask(struct pp2_cls_rule_key_field *field)
{
	unsigned int id = rte_socket_id();

	field->key = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id);
	if (!field->key)
		goto out;

	field->mask = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id);
	if (!field->mask)
		goto out_mask;

	return 0;
out_mask:
	rte_free(field->key);
out:
	field->key = NULL;
	field->mask = NULL;
	return -1;
}

/**
 * Free memory allocated for classifier rule key and mask fields.
 *
 * @param field Pointer to the classifier rule key field.
 */
static void
mrvl_free_key_mask(struct pp2_cls_rule_key_field *field)
{
	rte_free(field->key);
	rte_free(field->mask);
	field->key = NULL;
	field->mask = NULL;
}

/**
 * Free memory allocated for all classifier rule key and mask fields.
 *
 * @param rule Pointer to the classifier table rule.
 */
static void
mrvl_free_all_key_mask(struct pp2_cls_tbl_rule *rule)
{
	int i;

	for (i = 0; i < rule->num_fields; i++)
		mrvl_free_key_mask(&rule->fields[i]);
	rule->num_fields = 0;
}

/**
 * Initialize rte flow item parsing.
 *
 * @param item Pointer to the flow item.
 * @param spec_ptr Pointer to be set to the item's spec.
 * @param mask_ptr Pointer to be set to the item's mask (or the default mask).
 * @param def_mask Pointer to the default mask.
 * @param size Size of the flow item.
 * @param error Pointer to the rte flow error.
 * @returns 0 in case of success, negative value otherwise.
 */
static int
mrvl_parse_init(const struct rte_flow_item *item,
		const void **spec_ptr,
		const void **mask_ptr,
		const void *def_mask,
		unsigned int size,
		struct rte_flow_error *error)
{
	const uint8_t *spec;
	const uint8_t *mask;
	const uint8_t *last;
	uint8_t zeros[size];

	memset(zeros, 0, size);

	if (item == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "NULL item\n");
		return -rte_errno;
	}

	if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Mask or last is set without spec\n");
		return -rte_errno;
	}

	/*
	 * If "mask" is not set, the default mask is used; if there is no
	 * default mask either, "mask" must be provided.
	 */
	if (item->mask == NULL) {
		if (def_mask == NULL) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					   "Mask should be specified\n");
			return -rte_errno;
		}

		mask = (const uint8_t *)def_mask;
	} else {
		mask = (const uint8_t *)item->mask;
	}

	spec = (const uint8_t *)item->spec;
	last = (const uint8_t *)item->last;

	if (spec == NULL) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL, "Spec should be specified\n");
		return -rte_errno;
	}

	/*
	 * Field values in "last" that are either 0 or equal to the
	 * corresponding values in "spec" are ignored; any other value would
	 * describe a range, which is not supported.
	 */
	if (last != NULL &&
	    memcmp(last, zeros, size) != 0 &&
	    memcmp(last, spec, size) != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "Ranging is not supported\n");
		return -rte_errno;
	}

	*spec_ptr = spec;
	*mask_ptr = mask;

	return 0;
}

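/*
 * Illustrative example (assumed application usage, not taken from this file):
 * for an item such as
 *   { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth_spec, .mask = NULL }
 * mrvl_parse_init() returns eth_spec through *spec_ptr and the library
 * default rte_flow_item_eth_mask through *mask_ptr.
 */
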
/**
 * Parse the eth flow item.
 *
 * This will create a classifier rule that matches either the destination
 * or the source mac address.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param parse_dst Parse either destination or source mac address.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_mac(const struct rte_flow_item_eth *spec,
	       const struct rte_flow_item_eth *mask,
	       int parse_dst, struct rte_flow *flow)
{
	struct pp2_cls_rule_key_field *key_field;
	const uint8_t *k, *m;

	if (parse_dst) {
		k = spec->hdr.dst_addr.addr_bytes;
		m = mask->hdr.dst_addr.addr_bytes;

		flow->table_key.proto_field[flow->rule.num_fields].field.eth =
			MV_NET_ETH_F_DA;
	} else {
		k = spec->hdr.src_addr.addr_bytes;
		m = mask->hdr.src_addr.addr_bytes;

		flow->table_key.proto_field[flow->rule.num_fields].field.eth =
			MV_NET_ETH_F_SA;
	}

	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
	key_field->size = 6;

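	/*
	 * Keys and masks are stored as text strings; format the MAC address
	 * and its mask into the freshly allocated buffers.
	 */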
	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX,
		 RTE_ETHER_ADDR_PRT_FMT,
		 k[0], k[1], k[2], k[3], k[4], k[5]);

	snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX,
		 RTE_ETHER_ADDR_PRT_FMT,
		 m[0], m[1], m[2], m[3], m[4], m[5]);

	flow->table_key.proto_field[flow->rule.num_fields].proto =
		MV_NET_PROTO_ETH;
	flow->table_key.key_size += key_field->size;

	flow->rule.num_fields += 1;

	return 0;
}

/**
 * Helper for parsing the eth flow item destination mac address.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_dmac(const struct rte_flow_item_eth *spec,
		const struct rte_flow_item_eth *mask,
		struct rte_flow *flow)
{
	return mrvl_parse_mac(spec, mask, 1, flow);
}

/**
 * Helper for parsing the eth flow item source mac address.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_smac(const struct rte_flow_item_eth *spec,
		const struct rte_flow_item_eth *mask,
		struct rte_flow *flow)
{
	return mrvl_parse_mac(spec, mask, 0, flow);
}

/**
 * Parse the ether type field of the eth flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_type(const struct rte_flow_item_eth *spec,
		const struct rte_flow_item_eth *mask __rte_unused,
		struct rte_flow *flow)
{
	struct pp2_cls_rule_key_field *key_field;
	uint16_t k;

	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
	key_field->size = 2;

	k = rte_be_to_cpu_16(spec->hdr.ether_type);
	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);

	flow->table_key.proto_field[flow->rule.num_fields].proto =
		MV_NET_PROTO_ETH;
	flow->table_key.proto_field[flow->rule.num_fields].field.eth =
		MV_NET_ETH_F_TYPE;
	flow->table_key.key_size += key_field->size;

	flow->rule.num_fields += 1;

	return 0;
}

/**
 * Parse the vid field of the vlan rte flow item.
 *
 * This will create a classifier rule that matches the vid.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_vlan_id(const struct rte_flow_item_vlan *spec,
		   const struct rte_flow_item_vlan *mask __rte_unused,
		   struct rte_flow *flow)
{
	struct pp2_cls_rule_key_field *key_field;
	uint16_t k;

	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
	key_field->size = 2;

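	/* The vid is carried in the lower 12 bits of the VLAN TCI. */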
	k = rte_be_to_cpu_16(spec->hdr.vlan_tci) & MRVL_VLAN_ID_MASK;
	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);

	flow->table_key.proto_field[flow->rule.num_fields].proto =
		MV_NET_PROTO_VLAN;
	flow->table_key.proto_field[flow->rule.num_fields].field.vlan =
		MV_NET_VLAN_F_ID;
	flow->table_key.key_size += key_field->size;

	flow->rule.num_fields += 1;

	return 0;
}

/**
 * Parse the pri field of the vlan rte flow item.
 *
 * This will create a classifier rule that matches the pri.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_vlan_pri(const struct rte_flow_item_vlan *spec,
		    const struct rte_flow_item_vlan *mask __rte_unused,
		    struct rte_flow *flow)
{
	struct pp2_cls_rule_key_field *key_field;
	uint16_t k;

	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
	key_field->size = 1;

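	/* Extract the priority bits selected by MRVL_VLAN_PRI_MASK from the TCI. */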
	k = (rte_be_to_cpu_16(spec->hdr.vlan_tci) & MRVL_VLAN_PRI_MASK) >> 13;
	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);

	flow->table_key.proto_field[flow->rule.num_fields].proto =
		MV_NET_PROTO_VLAN;
	flow->table_key.proto_field[flow->rule.num_fields].field.vlan =
		MV_NET_VLAN_F_PRI;
	flow->table_key.key_size += key_field->size;

	flow->rule.num_fields += 1;

	return 0;
}

/**
 * Parse the dscp field of the ipv4 rte flow item.
 *
 * This will create a classifier rule that matches the dscp field.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_ip4_dscp(const struct rte_flow_item_ipv4 *spec,
		    const struct rte_flow_item_ipv4 *mask,
		    struct rte_flow *flow)
{
	struct pp2_cls_rule_key_field *key_field;
	uint8_t k, m;

	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
	key_field->size = 1;

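	/* The dscp occupies the upper six bits of the Type of Service field. */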
	k = (spec->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2;
	m = (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2;
	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
	snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m);

	flow->table_key.proto_field[flow->rule.num_fields].proto =
		MV_NET_PROTO_IP4;
	flow->table_key.proto_field[flow->rule.num_fields].field.ipv4 =
		MV_NET_IP4_F_DSCP;
	flow->table_key.key_size += key_field->size;

	flow->rule.num_fields += 1;

	return 0;
}

/**
 * Parse either the source or the destination ip address of the ipv4 flow item.
 *
 * This will create a classifier rule that matches either the destination
 * or the source ip field.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param parse_dst Parse either destination or source ip address.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_ip4_addr(const struct rte_flow_item_ipv4 *spec,
		    const struct rte_flow_item_ipv4 *mask,
		    int parse_dst, struct rte_flow *flow)
{
	struct pp2_cls_rule_key_field *key_field;
	struct in_addr k;
	uint32_t m;

	memset(&k, 0, sizeof(k));
	if (parse_dst) {
		k.s_addr = spec->hdr.dst_addr;
		m = rte_be_to_cpu_32(mask->hdr.dst_addr);

		flow->table_key.proto_field[flow->rule.num_fields].field.ipv4 =
			MV_NET_IP4_F_DA;
	} else {
		k.s_addr = spec->hdr.src_addr;
		m = rte_be_to_cpu_32(mask->hdr.src_addr);

		flow->table_key.proto_field[flow->rule.num_fields].field.ipv4 =
			MV_NET_IP4_F_SA;
	}

	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
	key_field->size = 4;

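	/* The key is stored in dotted-decimal form, the mask as a hex string. */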
	inet_ntop(AF_INET, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX);
	snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "0x%x", m);

	flow->table_key.proto_field[flow->rule.num_fields].proto =
		MV_NET_PROTO_IP4;
	flow->table_key.key_size += key_field->size;

	flow->rule.num_fields += 1;

	return 0;
}

/**
 * Helper for parsing destination ip of the ipv4 flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_ip4_dip(const struct rte_flow_item_ipv4 *spec,
		   const struct rte_flow_item_ipv4 *mask,
		   struct rte_flow *flow)
{
	return mrvl_parse_ip4_addr(spec, mask, 1, flow);
}

/**
 * Helper for parsing source ip of the ipv4 flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_ip4_sip(const struct rte_flow_item_ipv4 *spec,
		   const struct rte_flow_item_ipv4 *mask,
		   struct rte_flow *flow)
{
	return mrvl_parse_ip4_addr(spec, mask, 0, flow);
}

/**
 * Parse the proto field of the ipv4 rte flow item.
 *
 * This will create a classifier rule that matches the proto field.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_ip4_proto(const struct rte_flow_item_ipv4 *spec,
		     const struct rte_flow_item_ipv4 *mask __rte_unused,
		     struct rte_flow *flow)
{
	struct pp2_cls_rule_key_field *key_field;
	uint8_t k = spec->hdr.next_proto_id;

	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
	key_field->size = 1;

	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);

	flow->table_key.proto_field[flow->rule.num_fields].proto =
		MV_NET_PROTO_IP4;
	flow->table_key.proto_field[flow->rule.num_fields].field.ipv4 =
		MV_NET_IP4_F_PROTO;
	flow->table_key.key_size += key_field->size;

	flow->rule.num_fields += 1;

	return 0;
}

/**
 * Parse either the source or the destination ip address of the ipv6 rte flow
 * item.
 *
 * This will create a classifier rule that matches either the destination
 * or the source ip field.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param parse_dst Parse either destination or source ipv6 address.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_ip6_addr(const struct rte_flow_item_ipv6 *spec,
	       const struct rte_flow_item_ipv6 *mask,
	       int parse_dst, struct rte_flow *flow)
{
	struct pp2_cls_rule_key_field *key_field;
	struct rte_ipv6_addr k, m;

	if (parse_dst) {
		k = spec->hdr.dst_addr;
		m = mask->hdr.dst_addr;
		flow->table_key.proto_field[flow->rule.num_fields].field.ipv6 =
			MV_NET_IP6_F_DA;
	} else {
		k = spec->hdr.src_addr;
		m = mask->hdr.src_addr;
		flow->table_key.proto_field[flow->rule.num_fields].field.ipv6 =
			MV_NET_IP6_F_SA;
	}

	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
	key_field->size = RTE_IPV6_ADDR_SIZE;

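	/* Both the key and the mask are stored in textual IPv6 notation. */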
	inet_ntop(AF_INET6, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX);
	inet_ntop(AF_INET6, &m, (char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX);

	flow->table_key.proto_field[flow->rule.num_fields].proto =
		MV_NET_PROTO_IP6;
	flow->table_key.key_size += key_field->size;

	flow->rule.num_fields += 1;

	return 0;
}

/**
 * Helper for parsing destination ip of the ipv6 flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_ip6_dip(const struct rte_flow_item_ipv6 *spec,
		   const struct rte_flow_item_ipv6 *mask,
		   struct rte_flow *flow)
{
	return mrvl_parse_ip6_addr(spec, mask, 1, flow);
}

/**
 * Helper for parsing source ip of the ipv6 flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_ip6_sip(const struct rte_flow_item_ipv6 *spec,
		   const struct rte_flow_item_ipv6 *mask,
		   struct rte_flow *flow)
{
	return mrvl_parse_ip6_addr(spec, mask, 0, flow);
}

/**
 * Parse the flow label of the ipv6 flow item.
 *
 * This will create a classifier rule that matches the flow label field.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_ip6_flow(const struct rte_flow_item_ipv6 *spec,
		    const struct rte_flow_item_ipv6 *mask,
		    struct rte_flow *flow)
{
	struct pp2_cls_rule_key_field *key_field;
	uint32_t k = rte_be_to_cpu_32(spec->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK,
		 m = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK;

	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
	key_field->size = 3;

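	/* MRVL_IPV6_FLOW_MASK keeps the 20-bit flow label taken from vtc_flow. */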
	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
	snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m);

	flow->table_key.proto_field[flow->rule.num_fields].proto =
		MV_NET_PROTO_IP6;
	flow->table_key.proto_field[flow->rule.num_fields].field.ipv6 =
		MV_NET_IP6_F_FLOW;
	flow->table_key.key_size += key_field->size;

	flow->rule.num_fields += 1;

	return 0;
}

/**
 * Parse the next header of the ipv6 flow item.
 *
 * This will create a classifier rule that matches the next header field.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_ip6_next_hdr(const struct rte_flow_item_ipv6 *spec,
			const struct rte_flow_item_ipv6 *mask __rte_unused,
			struct rte_flow *flow)
{
	struct pp2_cls_rule_key_field *key_field;
	uint8_t k = spec->hdr.proto;

	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
	key_field->size = 1;

	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);

	flow->table_key.proto_field[flow->rule.num_fields].proto =
		MV_NET_PROTO_IP6;
	flow->table_key.proto_field[flow->rule.num_fields].field.ipv6 =
		MV_NET_IP6_F_NEXT_HDR;
	flow->table_key.key_size += key_field->size;

	flow->rule.num_fields += 1;

	return 0;
}

/**
 * Parse the destination or source port of the tcp flow item.
 *
 * This will create a classifier rule that matches either the destination or
 * the source tcp port.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param parse_dst Parse either destination or source port.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_tcp_port(const struct rte_flow_item_tcp *spec,
		    const struct rte_flow_item_tcp *mask __rte_unused,
		    int parse_dst, struct rte_flow *flow)
{
	struct pp2_cls_rule_key_field *key_field;
	uint16_t k;

	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
	key_field->size = 2;

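	/* Ports are converted to host byte order and stored as decimal strings. */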
	if (parse_dst) {
		k = rte_be_to_cpu_16(spec->hdr.dst_port);

		flow->table_key.proto_field[flow->rule.num_fields].field.tcp =
			MV_NET_TCP_F_DP;
	} else {
		k = rte_be_to_cpu_16(spec->hdr.src_port);

		flow->table_key.proto_field[flow->rule.num_fields].field.tcp =
			MV_NET_TCP_F_SP;
	}

	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);

	flow->table_key.proto_field[flow->rule.num_fields].proto =
		MV_NET_PROTO_TCP;
	flow->table_key.key_size += key_field->size;

	flow->rule.num_fields += 1;

	return 0;
}

/**
 * Helper for parsing the tcp source port of the tcp flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_tcp_sport(const struct rte_flow_item_tcp *spec,
		     const struct rte_flow_item_tcp *mask,
		     struct rte_flow *flow)
{
	return mrvl_parse_tcp_port(spec, mask, 0, flow);
}

/**
 * Helper for parsing the tcp destination port of the tcp flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_tcp_dport(const struct rte_flow_item_tcp *spec,
		     const struct rte_flow_item_tcp *mask,
		     struct rte_flow *flow)
{
	return mrvl_parse_tcp_port(spec, mask, 1, flow);
}

/**
 * Parse the destination or source port of the udp flow item.
 *
 * This will create a classifier rule that matches either the destination or
 * the source udp port.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param parse_dst Parse either destination or source port.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_udp_port(const struct rte_flow_item_udp *spec,
		    const struct rte_flow_item_udp *mask __rte_unused,
		    int parse_dst, struct rte_flow *flow)
{
	struct pp2_cls_rule_key_field *key_field;
	uint16_t k;

	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
	key_field->size = 2;

	if (parse_dst) {
		k = rte_be_to_cpu_16(spec->hdr.dst_port);

		flow->table_key.proto_field[flow->rule.num_fields].field.udp =
			MV_NET_UDP_F_DP;
	} else {
		k = rte_be_to_cpu_16(spec->hdr.src_port);

		flow->table_key.proto_field[flow->rule.num_fields].field.udp =
			MV_NET_UDP_F_SP;
	}

	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);

	flow->table_key.proto_field[flow->rule.num_fields].proto =
		MV_NET_PROTO_UDP;
	flow->table_key.key_size += key_field->size;

	flow->rule.num_fields += 1;

	return 0;
}

/**
 * Helper for parsing the udp source port of the udp flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_udp_sport(const struct rte_flow_item_udp *spec,
		     const struct rte_flow_item_udp *mask,
		     struct rte_flow *flow)
{
	return mrvl_parse_udp_port(spec, mask, 0, flow);
}

/**
 * Helper for parsing the udp destination port of the udp flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_udp_dport(const struct rte_flow_item_udp *spec,
		     const struct rte_flow_item_udp *mask,
		     struct rte_flow *flow)
{
	return mrvl_parse_udp_port(spec, mask, 1, flow);
}

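/*
 * Illustrative example (testpmd syntax shown for context only, assumed here):
 *   flow create 0 ingress pattern eth dst is 00:11:22:33:44:55 / end
 *        actions queue index 1 / end
 * arrives as a single ETH item whose mask selects only the destination MAC,
 * so mrvl_parse_eth() adds just one key field via mrvl_parse_dmac().
 */
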
/**
 * Parse eth flow item.
 *
 * @param item Pointer to the flow item.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 on success, negative value otherwise.
 */
static int
mrvl_parse_eth(const struct rte_flow_item *item, struct rte_flow *flow,
	       struct rte_flow_error *error)
{
	const struct rte_flow_item_eth *spec = NULL, *mask = NULL;
	struct rte_ether_addr zero;
	int ret;

	ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
			      &rte_flow_item_eth_mask,
			      sizeof(struct rte_flow_item_eth), error);
	if (ret)
		return ret;

	memset(&zero, 0, sizeof(zero));

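	/* Only fields with a non-zero mask contribute key fields to the rule. */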
	if (memcmp(&mask->hdr.dst_addr, &zero, sizeof(mask->hdr.dst_addr))) {
		ret = mrvl_parse_dmac(spec, mask, flow);
		if (ret)
			goto out;
	}

	if (memcmp(&mask->hdr.src_addr, &zero, sizeof(mask->hdr.src_addr))) {
		ret = mrvl_parse_smac(spec, mask, flow);
		if (ret)
			goto out;
	}

	if (mask->hdr.ether_type) {
		MRVL_LOG(WARNING, "eth type mask is ignored");
		ret = mrvl_parse_type(spec, mask, flow);
		if (ret)
			goto out;
	}

	return 0;
out:
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			   "Reached maximum number of fields in cls tbl key\n");
	return -rte_errno;
}

/**
 * Parse vlan flow item.
 *
 * @param item Pointer to the flow item.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 on success, negative value otherwise.
 */
static int
mrvl_parse_vlan(const struct rte_flow_item *item,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	const struct rte_flow_item_vlan *spec = NULL, *mask = NULL;
	uint16_t m;
	int ret, i;

	ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
			      &rte_flow_item_vlan_mask,
			      sizeof(struct rte_flow_item_vlan), error);
	if (ret)
		return ret;

	m = rte_be_to_cpu_16(mask->hdr.vlan_tci);
	if (m & MRVL_VLAN_ID_MASK) {
		MRVL_LOG(WARNING, "vlan id mask is ignored");
		ret = mrvl_parse_vlan_id(spec, mask, flow);
		if (ret)
			goto out;
	}

	if (m & MRVL_VLAN_PRI_MASK) {
		MRVL_LOG(WARNING, "vlan pri mask is ignored");
		ret = mrvl_parse_vlan_pri(spec, mask, flow);
		if (ret)
			goto out;
	}

	if (mask->hdr.eth_proto) {
		struct rte_flow_item_eth spec_eth = {
			.hdr.ether_type = spec->hdr.eth_proto,
		};
		struct rte_flow_item_eth mask_eth = {
			.hdr.ether_type = mask->hdr.eth_proto,
		};

		/* TPID matching is not supported: if ETH_TYPE was already
		 * selected, return an error; otherwise classify the eth-type
		 * field using the TPID value.
		 */
		for (i = 0; i < flow->rule.num_fields; i++)
			if (flow->table_key.proto_field[i].proto ==
			    MV_NET_PROTO_ETH &&
			    flow->table_key.proto_field[i].field.eth ==
			    MV_NET_ETH_F_TYPE) {
				rte_flow_error_set(error, ENOTSUP,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "VLAN TPID matching is not supported");
				return -rte_errno;
			}

		MRVL_LOG(WARNING, "inner eth type mask is ignored");
		ret = mrvl_parse_type(&spec_eth, &mask_eth, flow);
		if (ret)
			goto out;
	}

	return 0;
out:
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			   "Reached maximum number of fields in cls tbl key\n");
	return -rte_errno;
}

/**
 * Parse ipv4 flow item.
 *
 * @param item Pointer to the flow item.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 on success, negative value otherwise.
 */
static int
mrvl_parse_ip4(const struct rte_flow_item *item,
	       struct rte_flow *flow,
	       struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv4 *spec = NULL, *mask = NULL;
	int ret;

	ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
			      &rte_flow_item_ipv4_mask,
			      sizeof(struct rte_flow_item_ipv4), error);
	if (ret)
		return ret;

	if (mask->hdr.version_ihl ||
	    mask->hdr.total_length ||
	    mask->hdr.packet_id ||
	    mask->hdr.fragment_offset ||
	    mask->hdr.time_to_live ||
	    mask->hdr.hdr_checksum) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL, "Not supported by classifier\n");
		return -rte_errno;
	}

	if (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) {
		ret = mrvl_parse_ip4_dscp(spec, mask, flow);
		if (ret)
			goto out;
	}

	if (mask->hdr.src_addr) {
		ret = mrvl_parse_ip4_sip(spec, mask, flow);
		if (ret)
			goto out;
	}

	if (mask->hdr.dst_addr) {
		ret = mrvl_parse_ip4_dip(spec, mask, flow);
		if (ret)
			goto out;
	}

	if (mask->hdr.next_proto_id) {
		MRVL_LOG(WARNING, "next proto id mask is ignored");
		ret = mrvl_parse_ip4_proto(spec, mask, flow);
		if (ret)
			goto out;
	}

	return 0;
out:
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			   "Reached maximum number of fields in cls tbl key\n");
	return -rte_errno;
}

1020fe939687SNatalie Samsonov /**
1021fe939687SNatalie Samsonov  * Parse ipv6 flow item.
1022fe939687SNatalie Samsonov  *
1023fe939687SNatalie Samsonov  * @param item Pointer to the flow item.
1024fe939687SNatalie Samsonov  * @param flow Pointer to the flow.
1025fe939687SNatalie Samsonov  * @param error Pointer to the flow error.
1026fe939687SNatalie Samsonov  * @returns 0 on success, negative value otherwise.
1027fe939687SNatalie Samsonov  */
1028fe939687SNatalie Samsonov static int
1029fe939687SNatalie Samsonov mrvl_parse_ip6(const struct rte_flow_item *item,
1030fe939687SNatalie Samsonov 	       struct rte_flow *flow,
1031fe939687SNatalie Samsonov 	       struct rte_flow_error *error)
1032fe939687SNatalie Samsonov {
1033fe939687SNatalie Samsonov 	const struct rte_flow_item_ipv6 *spec = NULL, *mask = NULL;
1034fe939687SNatalie Samsonov 	uint32_t flow_mask;
1035fe939687SNatalie Samsonov 	int ret;
1036fe939687SNatalie Samsonov 
1037fe939687SNatalie Samsonov 	ret = mrvl_parse_init(item, (const void **)&spec,
1038fe939687SNatalie Samsonov 			      (const void **)&mask,
1039fe939687SNatalie Samsonov 			      &rte_flow_item_ipv6_mask,
1040fe939687SNatalie Samsonov 			      sizeof(struct rte_flow_item_ipv6),
1041fe939687SNatalie Samsonov 			      error);
1042fe939687SNatalie Samsonov 	if (ret)
1043fe939687SNatalie Samsonov 		return ret;
1044fe939687SNatalie Samsonov 
1045fe939687SNatalie Samsonov 	if (mask->hdr.payload_len ||
1046fe939687SNatalie Samsonov 	    mask->hdr.hop_limits) {
1047fe939687SNatalie Samsonov 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1048fe939687SNatalie Samsonov 				   NULL, "Not supported by classifier\n");
1049fe939687SNatalie Samsonov 		return -rte_errno;
1050fe939687SNatalie Samsonov 	}
1051fe939687SNatalie Samsonov 
1052*61938a2dSThomas Monjalon 	if (!rte_ipv6_addr_is_unspec(&mask->hdr.src_addr)) {
1053fe939687SNatalie Samsonov 		ret = mrvl_parse_ip6_sip(spec, mask, flow);
1054fe939687SNatalie Samsonov 		if (ret)
1055fe939687SNatalie Samsonov 			goto out;
1056fe939687SNatalie Samsonov 	}
1057fe939687SNatalie Samsonov 
1058*61938a2dSThomas Monjalon 	if (!rte_ipv6_addr_is_unspec(&mask->hdr.dst_addr)) {
1059fe939687SNatalie Samsonov 		ret = mrvl_parse_ip6_dip(spec, mask, flow);
1060fe939687SNatalie Samsonov 		if (ret)
1061fe939687SNatalie Samsonov 			goto out;
1062fe939687SNatalie Samsonov 	}
1063fe939687SNatalie Samsonov 
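	/*
	 * Only the 20-bit flow label part of vtc_flow can be matched;
	 * the version and traffic class bits are masked out.
	 */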
1064fe939687SNatalie Samsonov 	flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK;
1065fe939687SNatalie Samsonov 	if (flow_mask) {
1066fe939687SNatalie Samsonov 		ret = mrvl_parse_ip6_flow(spec, mask, flow);
1067fe939687SNatalie Samsonov 		if (ret)
1068fe939687SNatalie Samsonov 			goto out;
1069fe939687SNatalie Samsonov 	}
1070fe939687SNatalie Samsonov 
1071fe939687SNatalie Samsonov 	if (mask->hdr.proto) {
1072acab7d58STomasz Duszynski 		MRVL_LOG(WARNING, "next header mask is ignored");
1073fe939687SNatalie Samsonov 		ret = mrvl_parse_ip6_next_hdr(spec, mask, flow);
1074fe939687SNatalie Samsonov 		if (ret)
1075fe939687SNatalie Samsonov 			goto out;
1076fe939687SNatalie Samsonov 	}
1077fe939687SNatalie Samsonov 
1078fe939687SNatalie Samsonov 	return 0;
1079fe939687SNatalie Samsonov out:
1080fe939687SNatalie Samsonov 	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1081fe939687SNatalie Samsonov 			   "Reached maximum number of fields in cls tbl key\n");
1082fe939687SNatalie Samsonov 	return -rte_errno;
1083fe939687SNatalie Samsonov }
1084fe939687SNatalie Samsonov 
1085fe939687SNatalie Samsonov /**
1086fe939687SNatalie Samsonov  * Parse tcp flow item.
1087fe939687SNatalie Samsonov  *
1088fe939687SNatalie Samsonov  * @param item Pointer to the flow item.
1089fe939687SNatalie Samsonov  * @param flow Pointer to the flow.
1090fe939687SNatalie Samsonov  * @param error Pointer to the flow error.
1091fe939687SNatalie Samsonov  * @returns 0 on success, negative value otherwise.
1092fe939687SNatalie Samsonov  */
1093fe939687SNatalie Samsonov static int
1094fe939687SNatalie Samsonov mrvl_parse_tcp(const struct rte_flow_item *item,
1095fe939687SNatalie Samsonov 	       struct rte_flow *flow,
1096fe939687SNatalie Samsonov 	       struct rte_flow_error *error)
1097fe939687SNatalie Samsonov {
1098fe939687SNatalie Samsonov 	const struct rte_flow_item_tcp *spec = NULL, *mask = NULL;
1099fe939687SNatalie Samsonov 	int ret;
1100fe939687SNatalie Samsonov 
1101fe939687SNatalie Samsonov 	ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1102e0ae4cf6SLiron Himi 			      &rte_flow_item_tcp_mask,
1103e0ae4cf6SLiron Himi 			      sizeof(struct rte_flow_item_tcp), error);
1104fe939687SNatalie Samsonov 	if (ret)
1105fe939687SNatalie Samsonov 		return ret;
1106fe939687SNatalie Samsonov 
1107fe939687SNatalie Samsonov 	if (mask->hdr.sent_seq ||
1108fe939687SNatalie Samsonov 	    mask->hdr.recv_ack ||
1109fe939687SNatalie Samsonov 	    mask->hdr.data_off ||
1110fe939687SNatalie Samsonov 	    mask->hdr.tcp_flags ||
1111fe939687SNatalie Samsonov 	    mask->hdr.rx_win ||
1112fe939687SNatalie Samsonov 	    mask->hdr.cksum ||
1113fe939687SNatalie Samsonov 	    mask->hdr.tcp_urp) {
1114fe939687SNatalie Samsonov 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1115fe939687SNatalie Samsonov 				   NULL, "Not supported by classifier\n");
1116fe939687SNatalie Samsonov 		return -rte_errno;
1117fe939687SNatalie Samsonov 	}
1118fe939687SNatalie Samsonov 
1119fe939687SNatalie Samsonov 	if (mask->hdr.src_port) {
1120acab7d58STomasz Duszynski 		MRVL_LOG(WARNING, "tcp sport mask is ignored");
1121fe939687SNatalie Samsonov 		ret = mrvl_parse_tcp_sport(spec, mask, flow);
1122fe939687SNatalie Samsonov 		if (ret)
1123fe939687SNatalie Samsonov 			goto out;
1124fe939687SNatalie Samsonov 	}
1125fe939687SNatalie Samsonov 
1126fe939687SNatalie Samsonov 	if (mask->hdr.dst_port) {
1127acab7d58STomasz Duszynski 		MRVL_LOG(WARNING, "tcp dport mask is ignored");
1128fe939687SNatalie Samsonov 		ret = mrvl_parse_tcp_dport(spec, mask, flow);
1129fe939687SNatalie Samsonov 		if (ret)
1130fe939687SNatalie Samsonov 			goto out;
1131fe939687SNatalie Samsonov 	}
1132fe939687SNatalie Samsonov 
1133fe939687SNatalie Samsonov 	return 0;
1134fe939687SNatalie Samsonov out:
1135fe939687SNatalie Samsonov 	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1136fe939687SNatalie Samsonov 			   "Reached maximum number of fields in cls tbl key\n");
1137fe939687SNatalie Samsonov 	return -rte_errno;
1138fe939687SNatalie Samsonov }
1139fe939687SNatalie Samsonov 
1140fe939687SNatalie Samsonov /**
1141fe939687SNatalie Samsonov  * Parse udp flow item.
1142fe939687SNatalie Samsonov  *
1143fe939687SNatalie Samsonov  * @param item Pointer to the flow item.
1144fe939687SNatalie Samsonov  * @param flow Pointer to the flow.
1145fe939687SNatalie Samsonov  * @param error Pointer to the flow error.
1146fe939687SNatalie Samsonov  * @returns 0 on success, negative value otherwise.
1147fe939687SNatalie Samsonov  */
1148fe939687SNatalie Samsonov static int
1149fe939687SNatalie Samsonov mrvl_parse_udp(const struct rte_flow_item *item,
1150fe939687SNatalie Samsonov 	       struct rte_flow *flow,
1151fe939687SNatalie Samsonov 	       struct rte_flow_error *error)
1152fe939687SNatalie Samsonov {
1153fe939687SNatalie Samsonov 	const struct rte_flow_item_udp *spec = NULL, *mask = NULL;
1154fe939687SNatalie Samsonov 	int ret;
1155fe939687SNatalie Samsonov 
1156fe939687SNatalie Samsonov 	ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1157e0ae4cf6SLiron Himi 			      &rte_flow_item_udp_mask,
1158e0ae4cf6SLiron Himi 			      sizeof(struct rte_flow_item_udp), error);
1159fe939687SNatalie Samsonov 	if (ret)
1160fe939687SNatalie Samsonov 		return ret;
1161fe939687SNatalie Samsonov 
1162fe939687SNatalie Samsonov 	if (mask->hdr.dgram_len ||
1163fe939687SNatalie Samsonov 	    mask->hdr.dgram_cksum) {
1164fe939687SNatalie Samsonov 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1165fe939687SNatalie Samsonov 				   NULL, "Not supported by classifier\n");
1166fe939687SNatalie Samsonov 		return -rte_errno;
1167fe939687SNatalie Samsonov 	}
1168fe939687SNatalie Samsonov 
1169fe939687SNatalie Samsonov 	if (mask->hdr.src_port) {
1170acab7d58STomasz Duszynski 		MRVL_LOG(WARNING, "udp sport mask is ignored");
1171fe939687SNatalie Samsonov 		ret = mrvl_parse_udp_sport(spec, mask, flow);
1172fe939687SNatalie Samsonov 		if (ret)
1173fe939687SNatalie Samsonov 			goto out;
1174fe939687SNatalie Samsonov 	}
1175fe939687SNatalie Samsonov 
1176fe939687SNatalie Samsonov 	if (mask->hdr.dst_port) {
1177acab7d58STomasz Duszynski 		MRVL_LOG(WARNING, "udp dport mask is ignored");
1178fe939687SNatalie Samsonov 		ret = mrvl_parse_udp_dport(spec, mask, flow);
1179fe939687SNatalie Samsonov 		if (ret)
1180fe939687SNatalie Samsonov 			goto out;
1181fe939687SNatalie Samsonov 	}
1182fe939687SNatalie Samsonov 
1183fe939687SNatalie Samsonov 	return 0;
1184fe939687SNatalie Samsonov out:
1185fe939687SNatalie Samsonov 	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1186fe939687SNatalie Samsonov 			   "Reached maximum number of fields in cls tbl key\n");
1187fe939687SNatalie Samsonov 	return -rte_errno;
1188fe939687SNatalie Samsonov }
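
/*
 * Illustrative only (not part of the driver): a UDP item, as accepted by
 * mrvl_parse_udp() above, matching destination port 53:
 *
 *	struct rte_flow_item_udp spec = { .hdr.dst_port = RTE_BE16(53) };
 *	struct rte_flow_item_udp mask = { .hdr.dst_port = RTE_BE16(0xffff) };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_UDP,
 *		.spec = &spec,
 *		.mask = &mask,
 *	};
 */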
1189fe939687SNatalie Samsonov 
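/**
 * Convert a "0x..." pattern string into an array of byte values.
 *
 * Bytes are stored in reverse order, i.e. hex_key[0] receives the last
 * (least significant) digit pair of the string.
 *
 * @param input_string Pattern string, e.g. "0x1234".
 * @param hex_key Buffer receiving the converted bytes.
 * @param length On input the string length, on output the number of bytes.
 * @returns 0 in case of success, negative value otherwise.
 */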
1190cb932ddbSLiron Himi static int
1191cb932ddbSLiron Himi mrvl_string_to_hex_values(const uint8_t *input_string,
1192cb932ddbSLiron Himi 			  uint8_t *hex_key,
1193cb932ddbSLiron Himi 			  uint8_t *length)
1194cb932ddbSLiron Himi {
1195cb932ddbSLiron Himi 	char tmp_arr[3], tmp_string[MRVL_CLS_STR_SIZE_MAX], *string_iter;
1196cb932ddbSLiron Himi 	int i;
1197cb932ddbSLiron Himi 
1198cb932ddbSLiron Himi 	strncpy(tmp_string, (const char *)input_string, sizeof(tmp_string) - 1);
	tmp_string[sizeof(tmp_string) - 1] = '\0';
1199cb932ddbSLiron Himi 	string_iter = tmp_string;
1200cb932ddbSLiron Himi 
1201cb932ddbSLiron Himi 	string_iter += 2; /* skip the '0x' */
1202cb932ddbSLiron Himi 	*length = ((*length - 2) + 1) / 2;
1203cb932ddbSLiron Himi 
1204cb932ddbSLiron Himi 	for (i = 0; i < *length; i++) {
1205cb932ddbSLiron Himi 		strncpy(tmp_arr, string_iter, 2);
1206cb932ddbSLiron Himi 		tmp_arr[2] = '\0';
1207cb932ddbSLiron Himi 		if (get_val_securely8(tmp_arr, 16,
1208cb932ddbSLiron Himi 				      &hex_key[*length - 1 - i]) < 0)
1209cb932ddbSLiron Himi 			return -1;
1210cb932ddbSLiron Himi 		string_iter += 2;
1211cb932ddbSLiron Himi 	}
1212cb932ddbSLiron Himi 
1213cb932ddbSLiron Himi 	return 0;
1214cb932ddbSLiron Himi }
1215cb932ddbSLiron Himi 
1216cb932ddbSLiron Himi /**
1217cb932ddbSLiron Himi  * Parse raw flow item.
1218cb932ddbSLiron Himi  *
1219cb932ddbSLiron Himi  * @param item Pointer to the flow item.
1220cb932ddbSLiron Himi  * @param flow Pointer to the flow.
1221cb932ddbSLiron Himi  * @param error Pointer to the flow error.
1222cb932ddbSLiron Himi  * @returns 0 on success, negative value otherwise.
1223cb932ddbSLiron Himi  */
1224cb932ddbSLiron Himi static int
1225cb932ddbSLiron Himi mrvl_parse_raw(const struct rte_flow_item *item,
1226cb932ddbSLiron Himi 	       struct rte_flow *flow,
1227cb932ddbSLiron Himi 	       struct rte_flow_error *error)
1228cb932ddbSLiron Himi {
1229cb932ddbSLiron Himi 	const struct rte_flow_item_raw *spec = NULL, *mask = NULL;
1230cb932ddbSLiron Himi 	struct pp2_cls_rule_key_field *key_field;
1231cb932ddbSLiron Himi 	struct mv_net_udf *udf_params;
1232cb932ddbSLiron Himi 	uint8_t length;
1233cb932ddbSLiron Himi 	int ret;
1234cb932ddbSLiron Himi 
1235cb932ddbSLiron Himi 	ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1236cb932ddbSLiron Himi 			      &rte_flow_item_raw_mask,
1237cb932ddbSLiron Himi 			      sizeof(struct rte_flow_item_raw), error);
1238cb932ddbSLiron Himi 	if (ret)
1239cb932ddbSLiron Himi 		return ret;
1240cb932ddbSLiron Himi 
1241cb932ddbSLiron Himi 	if (!spec->pattern) {
1242cb932ddbSLiron Himi 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1243cb932ddbSLiron Himi 				   NULL, "pattern pointer MUST be given\n");
1244cb932ddbSLiron Himi 		return -rte_errno;
1245cb932ddbSLiron Himi 	}
1246cb932ddbSLiron Himi 
1247cb932ddbSLiron Himi 	/* Only hex string is supported; so, it must start with '0x' */
1248cb932ddbSLiron Himi 	if (strncmp((const char *)spec->pattern, "0x", 2) != 0)  {
1249cb932ddbSLiron Himi 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1250cb932ddbSLiron Himi 				   NULL, "'pattern' string must start with '0x'\n");
1251cb932ddbSLiron Himi 		return -rte_errno;
1252cb932ddbSLiron Himi 	}
1253cb932ddbSLiron Himi 
1254cb932ddbSLiron Himi 	if (mask->pattern &&
1255cb932ddbSLiron Himi 	    strncmp((const char *)mask->pattern, "0x", 2) != 0)  {
1256cb932ddbSLiron Himi 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1257cb932ddbSLiron Himi 				   NULL, "'mask-pattern' string must start with '0x'\n");
1258cb932ddbSLiron Himi 		return -rte_errno;
1259cb932ddbSLiron Himi 	}
1260cb932ddbSLiron Himi 
1261cb932ddbSLiron Himi 	if (mask->search && spec->search) {
1262cb932ddbSLiron Himi 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1263cb932ddbSLiron Himi 				   NULL, "'search' option must be '0'\n");
1264cb932ddbSLiron Himi 		return -rte_errno;
1265cb932ddbSLiron Himi 	}
1266cb932ddbSLiron Himi 
1267cb932ddbSLiron Himi 	if (mask->offset && spec->offset != 0) {
1268cb932ddbSLiron Himi 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1269cb932ddbSLiron Himi 				   NULL, "'offset' option must be '0'\n");
1270cb932ddbSLiron Himi 		return -rte_errno;
1271cb932ddbSLiron Himi 	}
1272cb932ddbSLiron Himi 
1273cb932ddbSLiron Himi 	if (!mask->relative || !spec->relative) {
1274cb932ddbSLiron Himi 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1275cb932ddbSLiron Himi 				   NULL, "'relative' option must be given and enabled\n");
1276cb932ddbSLiron Himi 		return -rte_errno;
1277cb932ddbSLiron Himi 	}
1278cb932ddbSLiron Himi 
1279cb932ddbSLiron Himi 	length = spec->length & mask->length;
1280cb932ddbSLiron Himi 	if (!length) {
1281cb932ddbSLiron Himi 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1282cb932ddbSLiron Himi 				   NULL, "'length' option must be bigger than '0'\n");
1283cb932ddbSLiron Himi 		return -rte_errno;
1284cb932ddbSLiron Himi 	}
	/* The converted pattern must fit in the preallocated key/mask strings. */
	if (length >= MRVL_CLS_STR_SIZE_MAX) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   NULL, "'length' option exceeds the maximum supported size\n");
		return -rte_errno;
	}
1285cb932ddbSLiron Himi 
1286cb932ddbSLiron Himi 	key_field = &flow->rule.fields[flow->rule.num_fields];
1287cb932ddbSLiron Himi 	ret = mrvl_alloc_key_mask(key_field);
	if (ret) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to allocate memory for the key/mask\n");
		return -rte_errno;
	}
1288cb932ddbSLiron Himi 
1289cb932ddbSLiron Himi 	/* pattern and length refer to string bytes. we need to convert it to
1290cb932ddbSLiron Himi 	 * values.
1291cb932ddbSLiron Himi 	 */
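	/* E.g. a "0x1234" pattern of declared length 6 becomes a 2-byte key. */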
1292cb932ddbSLiron Himi 	key_field->size = length;
1293cb932ddbSLiron Himi 	ret = mrvl_string_to_hex_values(spec->pattern, key_field->key,
1294cb932ddbSLiron Himi 					&key_field->size);
1295cb932ddbSLiron Himi 	if (ret) {
1296cb932ddbSLiron Himi 		rte_flow_error_set(error, EINVAL,
1297cb932ddbSLiron Himi 				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1298cb932ddbSLiron Himi 				   NULL,
1299cb932ddbSLiron Himi 				   "can't convert pattern from string to hex\n");
1300cb932ddbSLiron Himi 		return -rte_errno;
1301cb932ddbSLiron Himi 	}
1302cb932ddbSLiron Himi 	if (mask->pattern) {
1303cb932ddbSLiron Himi 		ret = mrvl_string_to_hex_values(mask->pattern, key_field->mask,
1304cb932ddbSLiron Himi 						&length);
1305cb932ddbSLiron Himi 		if (ret) {
1306cb932ddbSLiron Himi 			rte_flow_error_set(error, EINVAL,
1307cb932ddbSLiron Himi 					   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1308cb932ddbSLiron Himi 					   NULL,
1309cb932ddbSLiron Himi 					   "can't convert mask-pattern from string to hex\n");
1310cb932ddbSLiron Himi 			return -rte_errno;
1311cb932ddbSLiron Himi 		}
1312cb932ddbSLiron Himi 	} else {
1313cb932ddbSLiron Himi 		rte_free(key_field->mask);
1314cb932ddbSLiron Himi 		key_field->mask = NULL;
1315cb932ddbSLiron Himi 	}
1316cb932ddbSLiron Himi 
1317cb932ddbSLiron Himi 	flow->table_key.proto_field[flow->rule.num_fields].proto =
1318cb932ddbSLiron Himi 		MV_NET_UDF;
1319cb932ddbSLiron Himi 	udf_params =
1320cb932ddbSLiron Himi 		&flow->table_key.proto_field[flow->rule.num_fields].field.udf;
1321cb932ddbSLiron Himi 	udf_params->id = flow->next_udf_id++;
1322cb932ddbSLiron Himi 	udf_params->size = key_field->size;
1323cb932ddbSLiron Himi 	flow->table_key.key_size += key_field->size;
1324cb932ddbSLiron Himi 
1325cb932ddbSLiron Himi 	flow->rule.num_fields += 1;
1326cb932ddbSLiron Himi 
1327cb932ddbSLiron Himi 	return 0;
1328cb932ddbSLiron Himi }
1329cb932ddbSLiron Himi 
1330fe939687SNatalie Samsonov /**
1331fe939687SNatalie Samsonov  * Structure used to map specific flow pattern to the pattern parse callback
1332fe939687SNatalie Samsonov  * which will iterate over each pattern item and extract relevant data.
1333fe939687SNatalie Samsonov  */
1334fe939687SNatalie Samsonov static const struct {
1335e0ae4cf6SLiron Himi 	const enum rte_flow_item_type pattern_type;
1336e0ae4cf6SLiron Himi 	int (*parse)(const struct rte_flow_item *pattern,
1337fe939687SNatalie Samsonov 		struct rte_flow *flow,
1338fe939687SNatalie Samsonov 		struct rte_flow_error *error);
1339fe939687SNatalie Samsonov } mrvl_patterns[] = {
1340e0ae4cf6SLiron Himi 	{ RTE_FLOW_ITEM_TYPE_ETH, mrvl_parse_eth },
1341e0ae4cf6SLiron Himi 	{ RTE_FLOW_ITEM_TYPE_VLAN, mrvl_parse_vlan },
1342e0ae4cf6SLiron Himi 	{ RTE_FLOW_ITEM_TYPE_IPV4, mrvl_parse_ip4 },
1343e0ae4cf6SLiron Himi 	{ RTE_FLOW_ITEM_TYPE_IPV6, mrvl_parse_ip6 },
1344e0ae4cf6SLiron Himi 	{ RTE_FLOW_ITEM_TYPE_TCP, mrvl_parse_tcp },
1345e0ae4cf6SLiron Himi 	{ RTE_FLOW_ITEM_TYPE_UDP, mrvl_parse_udp },
1346cb932ddbSLiron Himi 	{ RTE_FLOW_ITEM_TYPE_RAW, mrvl_parse_raw },
1347e0ae4cf6SLiron Himi 	{ RTE_FLOW_ITEM_TYPE_END, NULL }
1348fe939687SNatalie Samsonov };
1349fe939687SNatalie Samsonov 
1350fe939687SNatalie Samsonov /**
1351fe939687SNatalie Samsonov  * Parse flow attribute.
1352fe939687SNatalie Samsonov  *
1353fe939687SNatalie Samsonov  * This will check whether the provided attribute's flags are supported.
1354fe939687SNatalie Samsonov  *
1355fe939687SNatalie Samsonov  * @param priv Unused
1356fe939687SNatalie Samsonov  * @param attr Pointer to the flow attribute.
1357fe939687SNatalie Samsonov  * @param flow Unused
1358fe939687SNatalie Samsonov  * @param error Pointer to the flow error.
1359fe939687SNatalie Samsonov  * @returns 0 in case of success, negative value otherwise.
1360fe939687SNatalie Samsonov  */
1361fe939687SNatalie Samsonov static int
1362fe939687SNatalie Samsonov mrvl_flow_parse_attr(struct mrvl_priv *priv __rte_unused,
1363fe939687SNatalie Samsonov 		     const struct rte_flow_attr *attr,
1364fe939687SNatalie Samsonov 		     struct rte_flow *flow __rte_unused,
1365fe939687SNatalie Samsonov 		     struct rte_flow_error *error)
1366fe939687SNatalie Samsonov {
1367fe939687SNatalie Samsonov 	if (!attr) {
1368fe939687SNatalie Samsonov 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
1369fe939687SNatalie Samsonov 				   NULL, "NULL attribute");
1370fe939687SNatalie Samsonov 		return -rte_errno;
1371fe939687SNatalie Samsonov 	}
1372fe939687SNatalie Samsonov 
1373fe939687SNatalie Samsonov 	if (attr->group) {
1374fe939687SNatalie Samsonov 		rte_flow_error_set(error, ENOTSUP,
1375fe939687SNatalie Samsonov 				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
1376fe939687SNatalie Samsonov 				   "Groups are not supported");
1377fe939687SNatalie Samsonov 		return -rte_errno;
1378fe939687SNatalie Samsonov 	}
1379fe939687SNatalie Samsonov 	if (attr->priority) {
1380fe939687SNatalie Samsonov 		rte_flow_error_set(error, ENOTSUP,
1381fe939687SNatalie Samsonov 				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, NULL,
1382fe939687SNatalie Samsonov 				   "Priorities are not supported");
1383fe939687SNatalie Samsonov 		return -rte_errno;
1384fe939687SNatalie Samsonov 	}
1385fe939687SNatalie Samsonov 	if (!attr->ingress) {
1386fe939687SNatalie Samsonov 		rte_flow_error_set(error, ENOTSUP,
1387fe939687SNatalie Samsonov 				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, NULL,
1388fe939687SNatalie Samsonov 				   "Only ingress is supported");
1389fe939687SNatalie Samsonov 		return -rte_errno;
1390fe939687SNatalie Samsonov 	}
1391fe939687SNatalie Samsonov 	if (attr->egress) {
1392fe939687SNatalie Samsonov 		rte_flow_error_set(error, ENOTSUP,
1393fe939687SNatalie Samsonov 				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1394fe939687SNatalie Samsonov 				   "Egress is not supported");
1395fe939687SNatalie Samsonov 		return -rte_errno;
1396fe939687SNatalie Samsonov 	}
139776e9a55bSAdrien Mazarguil 	if (attr->transfer) {
139876e9a55bSAdrien Mazarguil 		rte_flow_error_set(error, ENOTSUP,
139976e9a55bSAdrien Mazarguil 				   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL,
140076e9a55bSAdrien Mazarguil 				   "Transfer is not supported");
140176e9a55bSAdrien Mazarguil 		return -rte_errno;
140276e9a55bSAdrien Mazarguil 	}
1403fe939687SNatalie Samsonov 
1404fe939687SNatalie Samsonov 	return 0;
1405fe939687SNatalie Samsonov }
1406fe939687SNatalie Samsonov 
1407fe939687SNatalie Samsonov /**
1408fe939687SNatalie Samsonov  * Parse flow pattern.
1409fe939687SNatalie Samsonov  *
1410fe939687SNatalie Samsonov  * Specific classifier rule will be created as well.
1411fe939687SNatalie Samsonov  *
1412fe939687SNatalie Samsonov  * @param priv Unused
1413fe939687SNatalie Samsonov  * @param pattern Pointer to the flow pattern.
1414fe939687SNatalie Samsonov  * @param flow Pointer to the flow.
1415fe939687SNatalie Samsonov  * @param error Pointer to the flow error.
1416fe939687SNatalie Samsonov  * @returns 0 in case of success, negative value otherwise.
1417fe939687SNatalie Samsonov  */
1418fe939687SNatalie Samsonov static int
1419fe939687SNatalie Samsonov mrvl_flow_parse_pattern(struct mrvl_priv *priv __rte_unused,
1420fe939687SNatalie Samsonov 			const struct rte_flow_item pattern[],
1421fe939687SNatalie Samsonov 			struct rte_flow *flow,
1422fe939687SNatalie Samsonov 			struct rte_flow_error *error)
1423fe939687SNatalie Samsonov {
1424e0ae4cf6SLiron Himi 	unsigned int i, j;
1425fe939687SNatalie Samsonov 	int ret;
1426fe939687SNatalie Samsonov 
1427e0ae4cf6SLiron Himi 	for (i = 0; pattern[i].type != RTE_FLOW_ITEM_TYPE_END; i++) {
1428e0ae4cf6SLiron Himi 		if (pattern[i].type == RTE_FLOW_ITEM_TYPE_VOID)
1429e0ae4cf6SLiron Himi 			continue;
1430e0ae4cf6SLiron Himi 		for (j = 0; mrvl_patterns[j].pattern_type !=
1431e0ae4cf6SLiron Himi 			RTE_FLOW_ITEM_TYPE_END; j++) {
1432e0ae4cf6SLiron Himi 			if (mrvl_patterns[j].pattern_type != pattern[i].type)
1433fe939687SNatalie Samsonov 				continue;
1434fe939687SNatalie Samsonov 
1435e0ae4cf6SLiron Himi 			if (flow->rule.num_fields >=
1436e0ae4cf6SLiron Himi 			    PP2_CLS_TBL_MAX_NUM_FIELDS) {
1437e0ae4cf6SLiron Himi 				rte_flow_error_set(error, ENOSPC,
1438e0ae4cf6SLiron Himi 						   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1439e0ae4cf6SLiron Himi 						   NULL,
1440e0ae4cf6SLiron Himi 						   "Too many pattern items");
1441e0ae4cf6SLiron Himi 				return -rte_errno;
1442fe939687SNatalie Samsonov 			}
1443fe939687SNatalie Samsonov 
1444e0ae4cf6SLiron Himi 			ret = mrvl_patterns[j].parse(&pattern[i], flow, error);
1445e0ae4cf6SLiron Himi 			if (ret) {
1446e0ae4cf6SLiron Himi 				mrvl_free_all_key_mask(&flow->rule);
1447e0ae4cf6SLiron Himi 				return ret;
1448e0ae4cf6SLiron Himi 			}
1449e0ae4cf6SLiron Himi 			break;
1450e0ae4cf6SLiron Himi 		}
1451e0ae4cf6SLiron Himi 		if (mrvl_patterns[j].pattern_type == RTE_FLOW_ITEM_TYPE_END) {
1452e0ae4cf6SLiron Himi 			rte_flow_error_set(error, ENOTSUP,
1453e0ae4cf6SLiron Himi 					   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1454fe939687SNatalie Samsonov 					   "Unsupported pattern");
1455fe939687SNatalie Samsonov 			return -rte_errno;
1456fe939687SNatalie Samsonov 		}
1457e0ae4cf6SLiron Himi 	}
1458e0ae4cf6SLiron Himi 
1459b57d1a83SLiron Himi 	flow->table_key.num_fields = flow->rule.num_fields;
1460b57d1a83SLiron Himi 
1461e0ae4cf6SLiron Himi 	return 0;
1462e0ae4cf6SLiron Himi }
1463fe939687SNatalie Samsonov 
1464fe939687SNatalie Samsonov /**
1465fe939687SNatalie Samsonov  * Parse flow actions.
1466fe939687SNatalie Samsonov  *
1467fe939687SNatalie Samsonov  * @param priv Pointer to the port's private data.
1468fe939687SNatalie Samsonov  * @param actions Pointer to the action table.
1469fe939687SNatalie Samsonov  * @param flow Pointer to the flow.
1470fe939687SNatalie Samsonov  * @param error Pointer to the flow error.
1471fe939687SNatalie Samsonov  * @returns 0 in case of success, negative value otherwise.
1472fe939687SNatalie Samsonov  */
1473fe939687SNatalie Samsonov static int
1474fe939687SNatalie Samsonov mrvl_flow_parse_actions(struct mrvl_priv *priv,
1475fe939687SNatalie Samsonov 			const struct rte_flow_action actions[],
1476fe939687SNatalie Samsonov 			struct rte_flow *flow,
1477fe939687SNatalie Samsonov 			struct rte_flow_error *error)
1478fe939687SNatalie Samsonov {
1479fe939687SNatalie Samsonov 	const struct rte_flow_action *action = actions;
1480fe939687SNatalie Samsonov 	int specified = 0;
1481fe939687SNatalie Samsonov 
1482fe939687SNatalie Samsonov 	for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
1483fe939687SNatalie Samsonov 		if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
1484fe939687SNatalie Samsonov 			continue;
1485fe939687SNatalie Samsonov 
1486fe939687SNatalie Samsonov 		if (action->type == RTE_FLOW_ACTION_TYPE_DROP) {
1487fe939687SNatalie Samsonov 			flow->cos.ppio = priv->ppio;
1488fe939687SNatalie Samsonov 			flow->cos.tc = 0;
1489fe939687SNatalie Samsonov 			flow->action.type = PP2_CLS_TBL_ACT_DROP;
1490fe939687SNatalie Samsonov 			flow->action.cos = &flow->cos;
1491fe939687SNatalie Samsonov 			specified++;
1492fe939687SNatalie Samsonov 		} else if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1493fe939687SNatalie Samsonov 			const struct rte_flow_action_queue *q =
1494fe939687SNatalie Samsonov 				(const struct rte_flow_action_queue *)
1495fe939687SNatalie Samsonov 				action->conf;
1496fe939687SNatalie Samsonov 
1497fe939687SNatalie Samsonov 			if (q->index >= priv->nb_rx_queues) {
1498fe939687SNatalie Samsonov 				rte_flow_error_set(error, EINVAL,
1499fe939687SNatalie Samsonov 						RTE_FLOW_ERROR_TYPE_ACTION,
1500fe939687SNatalie Samsonov 						NULL,
1501fe939687SNatalie Samsonov 						"Queue index out of range");
1502fe939687SNatalie Samsonov 				return -rte_errno;
1503fe939687SNatalie Samsonov 			}
1504fe939687SNatalie Samsonov 
1505fe939687SNatalie Samsonov 			if (priv->rxq_map[q->index].tc == MRVL_UNKNOWN_TC) {
1506fe939687SNatalie Samsonov 				/*
1507fe939687SNatalie Samsonov 				 * No traffic class has been assigned to this
1508fe939687SNatalie Samsonov 				 * queue, so the action cannot be programmed.
1509fe939687SNatalie Samsonov 				 */
1510acab7d58STomasz Duszynski 				MRVL_LOG(ERR,
1511acab7d58STomasz Duszynski 					"Unknown TC mapping for queue %hu eth%hhu",
1512fe939687SNatalie Samsonov 					q->index, priv->ppio_id);
1513fe939687SNatalie Samsonov 
1514fe939687SNatalie Samsonov 				rte_flow_error_set(error, EFAULT,
1515fe939687SNatalie Samsonov 						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1516fe939687SNatalie Samsonov 						NULL, NULL);
1517fe939687SNatalie Samsonov 				return -rte_errno;
1518fe939687SNatalie Samsonov 			}
1519fe939687SNatalie Samsonov 
1520acab7d58STomasz Duszynski 			MRVL_LOG(DEBUG,
1521acab7d58STomasz Duszynski 				"Action: Assign packets to queue %d, tc:%d, q:%d",
1522fe939687SNatalie Samsonov 				q->index, priv->rxq_map[q->index].tc,
1523fe939687SNatalie Samsonov 				priv->rxq_map[q->index].inq);
1524fe939687SNatalie Samsonov 
1525fe939687SNatalie Samsonov 			flow->cos.ppio = priv->ppio;
1526fe939687SNatalie Samsonov 			flow->cos.tc = priv->rxq_map[q->index].tc;
1527fe939687SNatalie Samsonov 			flow->action.type = PP2_CLS_TBL_ACT_DONE;
1528fe939687SNatalie Samsonov 			flow->action.cos = &flow->cos;
1529fe939687SNatalie Samsonov 			specified++;
1530cdb53f8dSTomasz Duszynski 		} else if (action->type == RTE_FLOW_ACTION_TYPE_METER) {
1531cdb53f8dSTomasz Duszynski 			const struct rte_flow_action_meter *meter;
1532cdb53f8dSTomasz Duszynski 			struct mrvl_mtr *mtr;
1533cdb53f8dSTomasz Duszynski 
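			/*
			 * The meter itself is created beforehand through the
			 * rte_mtr API; here it is only looked up by id and
			 * attached to the classifier action.
			 */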
1534cdb53f8dSTomasz Duszynski 			meter = action->conf;
1535cdb53f8dSTomasz Duszynski 			if (!meter)
1536cdb53f8dSTomasz Duszynski 				return rte_flow_error_set(error, EINVAL,
1537cdb53f8dSTomasz Duszynski 						RTE_FLOW_ERROR_TYPE_ACTION,
1538cdb53f8dSTomasz Duszynski 						NULL, "Invalid meter\n");
1539cdb53f8dSTomasz Duszynski 
1540cdb53f8dSTomasz Duszynski 			LIST_FOREACH(mtr, &priv->mtrs, next)
1541cdb53f8dSTomasz Duszynski 				if (mtr->mtr_id == meter->mtr_id)
1542cdb53f8dSTomasz Duszynski 					break;
1543cdb53f8dSTomasz Duszynski 
1544cdb53f8dSTomasz Duszynski 			if (!mtr)
1545cdb53f8dSTomasz Duszynski 				return rte_flow_error_set(error, EINVAL,
1546cdb53f8dSTomasz Duszynski 						RTE_FLOW_ERROR_TYPE_ACTION,
1547cdb53f8dSTomasz Duszynski 						NULL,
1548cdb53f8dSTomasz Duszynski 						"Meter id does not exist\n");
1549cdb53f8dSTomasz Duszynski 
1550cdb53f8dSTomasz Duszynski 			if (!mtr->shared && mtr->refcnt)
1551cdb53f8dSTomasz Duszynski 				return rte_flow_error_set(error, EPERM,
1552cdb53f8dSTomasz Duszynski 						RTE_FLOW_ERROR_TYPE_ACTION,
1553cdb53f8dSTomasz Duszynski 						NULL,
1554cdb53f8dSTomasz Duszynski 						"Meter cannot be shared\n");
1555cdb53f8dSTomasz Duszynski 
1556cdb53f8dSTomasz Duszynski 			/*
1557cdb53f8dSTomasz Duszynski 			 * In case cos has already been set
1558cdb53f8dSTomasz Duszynski 			 * do not modify it.
1559cdb53f8dSTomasz Duszynski 			 */
1560cdb53f8dSTomasz Duszynski 			if (!flow->cos.ppio) {
1561cdb53f8dSTomasz Duszynski 				flow->cos.ppio = priv->ppio;
1562cdb53f8dSTomasz Duszynski 				flow->cos.tc = 0;
1563cdb53f8dSTomasz Duszynski 			}
1564cdb53f8dSTomasz Duszynski 
1565cdb53f8dSTomasz Duszynski 			flow->action.type = PP2_CLS_TBL_ACT_DONE;
1566cdb53f8dSTomasz Duszynski 			flow->action.cos = &flow->cos;
1567cdb53f8dSTomasz Duszynski 			flow->action.plcr = mtr->enabled ? mtr->plcr : NULL;
1568cdb53f8dSTomasz Duszynski 			flow->mtr = mtr;
1569cdb53f8dSTomasz Duszynski 			mtr->refcnt++;
1570cdb53f8dSTomasz Duszynski 			specified++;
1571fe939687SNatalie Samsonov 		} else {
1572fe939687SNatalie Samsonov 			rte_flow_error_set(error, ENOTSUP,
1573fe939687SNatalie Samsonov 					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1574fe939687SNatalie Samsonov 					   "Action not supported");
1575fe939687SNatalie Samsonov 			return -rte_errno;
1576fe939687SNatalie Samsonov 		}
1577fe939687SNatalie Samsonov 	}
1578fe939687SNatalie Samsonov 
1579fe939687SNatalie Samsonov 	if (!specified) {
1580fe939687SNatalie Samsonov 		rte_flow_error_set(error, EINVAL,
1581cdb53f8dSTomasz Duszynski 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1582cdb53f8dSTomasz Duszynski 				   "Action not specified");
1583fe939687SNatalie Samsonov 		return -rte_errno;
1584fe939687SNatalie Samsonov 	}
1585fe939687SNatalie Samsonov 
1586fe939687SNatalie Samsonov 	return 0;
1587fe939687SNatalie Samsonov }
1588fe939687SNatalie Samsonov 
1589fe939687SNatalie Samsonov /**
1590fe939687SNatalie Samsonov  * Parse flow attribute, pattern and actions.
1591fe939687SNatalie Samsonov  *
1592fe939687SNatalie Samsonov  * @param priv Pointer to the port's private data.
1593fe939687SNatalie Samsonov  * @param attr Pointer to the flow attribute.
1594fe939687SNatalie Samsonov  * @param pattern Pointer to the flow pattern.
1595fe939687SNatalie Samsonov  * @param actions Pointer to the flow actions.
1596fe939687SNatalie Samsonov  * @param flow Pointer to the flow.
1597fe939687SNatalie Samsonov  * @param error Pointer to the flow error.
1598fe939687SNatalie Samsonov  * @returns 0 on success, negative value otherwise.
1599fe939687SNatalie Samsonov  */
1600fe939687SNatalie Samsonov static int
1601fe939687SNatalie Samsonov mrvl_flow_parse(struct mrvl_priv *priv, const struct rte_flow_attr *attr,
1602fe939687SNatalie Samsonov 		const struct rte_flow_item pattern[],
1603fe939687SNatalie Samsonov 		const struct rte_flow_action actions[],
1604fe939687SNatalie Samsonov 		struct rte_flow *flow,
1605fe939687SNatalie Samsonov 		struct rte_flow_error *error)
1606fe939687SNatalie Samsonov {
1607fe939687SNatalie Samsonov 	int ret;
1608fe939687SNatalie Samsonov 
1609fe939687SNatalie Samsonov 	ret = mrvl_flow_parse_attr(priv, attr, flow, error);
1610fe939687SNatalie Samsonov 	if (ret)
1611fe939687SNatalie Samsonov 		return ret;
1612fe939687SNatalie Samsonov 
1613fe939687SNatalie Samsonov 	ret = mrvl_flow_parse_pattern(priv, pattern, flow, error);
1614fe939687SNatalie Samsonov 	if (ret)
1615fe939687SNatalie Samsonov 		return ret;
1616fe939687SNatalie Samsonov 
1617fe939687SNatalie Samsonov 	return mrvl_flow_parse_actions(priv, actions, flow, error);
1618fe939687SNatalie Samsonov }
1619fe939687SNatalie Samsonov 
162063e0f017SNatalie Samsonov /**
162163e0f017SNatalie Samsonov  * Get engine type for the given flow.
162263e0f017SNatalie Samsonov  *
162363e0f017SNatalie Samsonov  * @param flow Pointer to the flow.
162463e0f017SNatalie Samsonov  * @returns The type of the engine.
162563e0f017SNatalie Samsonov  */
1626fe939687SNatalie Samsonov static inline enum pp2_cls_tbl_type
1627fe939687SNatalie Samsonov mrvl_engine_type(const struct rte_flow *flow)
1628fe939687SNatalie Samsonov {
1629fe939687SNatalie Samsonov 	int i, size = 0;
1630fe939687SNatalie Samsonov 
1631fe939687SNatalie Samsonov 	for (i = 0; i < flow->rule.num_fields; i++)
1632fe939687SNatalie Samsonov 		size += flow->rule.fields[i].size;
1633fe939687SNatalie Samsonov 
1634fe939687SNatalie Samsonov 	/*
1635fe939687SNatalie Samsonov 	 * For maskable engine type the key size must be up to 8 bytes.
1636fe939687SNatalie Samsonov 	 * For keys with size bigger than 8 bytes, engine type must
1637fe939687SNatalie Samsonov 	 * be set to exact match.
1638fe939687SNatalie Samsonov 	 */
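	/*
	 * For example, a key made of IPv4 source and destination addresses
	 * (4 + 4 bytes) still fits the maskable engine, while adding a 2-byte
	 * L4 port on top of it forces exact match.
	 */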
1639fe939687SNatalie Samsonov 	if (size > 8)
1640fe939687SNatalie Samsonov 		return PP2_CLS_TBL_EXACT_MATCH;
1641fe939687SNatalie Samsonov 
1642fe939687SNatalie Samsonov 	return PP2_CLS_TBL_MASKABLE;
1643fe939687SNatalie Samsonov }
1644fe939687SNatalie Samsonov 
164563e0f017SNatalie Samsonov /**
164663e0f017SNatalie Samsonov  * Create classifier table.
164763e0f017SNatalie Samsonov  *
164863e0f017SNatalie Samsonov  * @param dev Pointer to the device.
164963e0f017SNatalie Samsonov  * @param first_flow Pointer to the very first flow.
165063e0f017SNatalie Samsonov  * @returns 0 in case of success, negative value otherwise.
165163e0f017SNatalie Samsonov  */
1652fe939687SNatalie Samsonov static int
1653fe939687SNatalie Samsonov mrvl_create_cls_table(struct rte_eth_dev *dev, struct rte_flow *first_flow)
1654fe939687SNatalie Samsonov {
1655fe939687SNatalie Samsonov 	struct mrvl_priv *priv = dev->data->dev_private;
1656fe939687SNatalie Samsonov 	struct pp2_cls_tbl_key *key = &priv->cls_tbl_params.key;
1657fe939687SNatalie Samsonov 	int ret;
1658fe939687SNatalie Samsonov 
1659fe939687SNatalie Samsonov 	if (priv->cls_tbl) {
1660fe939687SNatalie Samsonov 		pp2_cls_tbl_deinit(priv->cls_tbl);
1661fe939687SNatalie Samsonov 		priv->cls_tbl = NULL;
1662fe939687SNatalie Samsonov 	}
1663fe939687SNatalie Samsonov 
1664fe939687SNatalie Samsonov 	memset(&priv->cls_tbl_params, 0, sizeof(priv->cls_tbl_params));
1665fe939687SNatalie Samsonov 
1666fe939687SNatalie Samsonov 	priv->cls_tbl_params.type = mrvl_engine_type(first_flow);
1667acab7d58STomasz Duszynski 	MRVL_LOG(INFO, "Setting cls search engine type to %s",
1668fe939687SNatalie Samsonov 			priv->cls_tbl_params.type == PP2_CLS_TBL_EXACT_MATCH ?
1669fe939687SNatalie Samsonov 			"exact" : "maskable");
1670fe939687SNatalie Samsonov 	priv->cls_tbl_params.max_num_rules = MRVL_CLS_MAX_NUM_RULES;
1671fe939687SNatalie Samsonov 	priv->cls_tbl_params.default_act.type = PP2_CLS_TBL_ACT_DONE;
1672fe939687SNatalie Samsonov 	priv->cls_tbl_params.default_act.cos = &first_flow->cos;
1673b57d1a83SLiron Himi 	memcpy(key, &first_flow->table_key, sizeof(struct pp2_cls_tbl_key));
1674fe939687SNatalie Samsonov 
1675fe939687SNatalie Samsonov 	ret = pp2_cls_tbl_init(&priv->cls_tbl_params, &priv->cls_tbl);
1676fe939687SNatalie Samsonov 
1677fe939687SNatalie Samsonov 	return ret;
1678fe939687SNatalie Samsonov }
1679fe939687SNatalie Samsonov 
1680fe939687SNatalie Samsonov /**
1681fe939687SNatalie Samsonov  * Check whether new flow can be added to the table
1682fe939687SNatalie Samsonov  *
1683fe939687SNatalie Samsonov  * @param priv Pointer to the port's private data.
1684fe939687SNatalie Samsonov  * @param flow Pointer to the new flow.
1685fe939687SNatalie Samsonov  * @return 1 in case flow can be added, 0 otherwise.
1686fe939687SNatalie Samsonov  */
1687fe939687SNatalie Samsonov static inline int
1688fe939687SNatalie Samsonov mrvl_flow_can_be_added(struct mrvl_priv *priv, const struct rte_flow *flow)
1689fe939687SNatalie Samsonov {
1690b57d1a83SLiron Himi 	int same = memcmp(&flow->table_key, &priv->cls_tbl_params.key,
1691b57d1a83SLiron Himi 			  sizeof(struct pp2_cls_tbl_key)) == 0;
1692b57d1a83SLiron Himi 
1693b57d1a83SLiron Himi 	return same && mrvl_engine_type(flow) == priv->cls_tbl_params.type;
1694fe939687SNatalie Samsonov }
1695fe939687SNatalie Samsonov 
1696fe939687SNatalie Samsonov /**
1697fe939687SNatalie Samsonov  * DPDK flow create callback called when flow is to be created.
1698fe939687SNatalie Samsonov  *
1699fe939687SNatalie Samsonov  * @param dev Pointer to the device.
1700fe939687SNatalie Samsonov  * @param attr Pointer to the flow attribute.
1701fe939687SNatalie Samsonov  * @param pattern Pointer to the flow pattern.
1702fe939687SNatalie Samsonov  * @param actions Pointer to the flow actions.
1703fe939687SNatalie Samsonov  * @param error Pointer to the flow error.
1704fe939687SNatalie Samsonov  * @returns Pointer to the created flow in case of success, NULL otherwise.
1705fe939687SNatalie Samsonov  */
1706fe939687SNatalie Samsonov static struct rte_flow *
1707fe939687SNatalie Samsonov mrvl_flow_create(struct rte_eth_dev *dev,
1708fe939687SNatalie Samsonov 		 const struct rte_flow_attr *attr,
1709fe939687SNatalie Samsonov 		 const struct rte_flow_item pattern[],
1710fe939687SNatalie Samsonov 		 const struct rte_flow_action actions[],
1711fe939687SNatalie Samsonov 		 struct rte_flow_error *error)
1712fe939687SNatalie Samsonov {
1713fe939687SNatalie Samsonov 	struct mrvl_priv *priv = dev->data->dev_private;
1714fe939687SNatalie Samsonov 	struct rte_flow *flow, *first;
1715fe939687SNatalie Samsonov 	int ret;
1716fe939687SNatalie Samsonov 
1717fe939687SNatalie Samsonov 	if (!dev->data->dev_started) {
1718fe939687SNatalie Samsonov 		rte_flow_error_set(error, EINVAL,
1719fe939687SNatalie Samsonov 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1720fe939687SNatalie Samsonov 				   "Port must be started first\n");
1721fe939687SNatalie Samsonov 		return NULL;
1722fe939687SNatalie Samsonov 	}
1723fe939687SNatalie Samsonov 
1724fe939687SNatalie Samsonov 	flow = rte_zmalloc_socket(NULL, sizeof(*flow), 0, rte_socket_id());
1725fe939687SNatalie Samsonov 	if (!flow)
1726fe939687SNatalie Samsonov 		return NULL;
1727fe939687SNatalie Samsonov 
1728fe939687SNatalie Samsonov 	ret = mrvl_flow_parse(priv, attr, pattern, actions, flow, error);
1729fe939687SNatalie Samsonov 	if (ret)
1730fe939687SNatalie Samsonov 		goto out;
1731fe939687SNatalie Samsonov 
1732fe939687SNatalie Samsonov 	/*
1733fe939687SNatalie Samsonov 	 * Four cases here:
1734fe939687SNatalie Samsonov 	 *
1735fe939687SNatalie Samsonov 	 * 1. In case table does not exist - create one.
1736fe939687SNatalie Samsonov 	 * 2. In case table exists, is empty and new flow cannot be added
1737fe939687SNatalie Samsonov 	 *    recreate table.
1738fe939687SNatalie Samsonov 	 * 3. In case table is not empty and new flow matches table format
1739fe939687SNatalie Samsonov 	 *    add it.
1740fe939687SNatalie Samsonov 	 * 4. Otherwise flow cannot be added.
1741fe939687SNatalie Samsonov 	 */
1742fe939687SNatalie Samsonov 	first = LIST_FIRST(&priv->flows);
1743fe939687SNatalie Samsonov 	if (!priv->cls_tbl) {
1744fe939687SNatalie Samsonov 		ret = mrvl_create_cls_table(dev, flow);
1745fe939687SNatalie Samsonov 	} else if (!first && !mrvl_flow_can_be_added(priv, flow)) {
1746fe939687SNatalie Samsonov 		ret = mrvl_create_cls_table(dev, flow);
1747fe939687SNatalie Samsonov 	} else if (mrvl_flow_can_be_added(priv, flow)) {
1748fe939687SNatalie Samsonov 		ret = 0;
1749fe939687SNatalie Samsonov 	} else {
1750fe939687SNatalie Samsonov 		rte_flow_error_set(error, EINVAL,
1751fe939687SNatalie Samsonov 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1752fe939687SNatalie Samsonov 				   "Pattern does not match cls table format\n");
1753fe939687SNatalie Samsonov 		goto out;
1754fe939687SNatalie Samsonov 	}
1755fe939687SNatalie Samsonov 
1756fe939687SNatalie Samsonov 	if (ret) {
1757fe939687SNatalie Samsonov 		rte_flow_error_set(error, EINVAL,
1758fe939687SNatalie Samsonov 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1759fe939687SNatalie Samsonov 				   "Failed to create cls table\n");
1760fe939687SNatalie Samsonov 		goto out;
1761fe939687SNatalie Samsonov 	}
1762fe939687SNatalie Samsonov 
1763fe939687SNatalie Samsonov 	ret = pp2_cls_tbl_add_rule(priv->cls_tbl, &flow->rule, &flow->action);
1764fe939687SNatalie Samsonov 	if (ret) {
1765fe939687SNatalie Samsonov 		rte_flow_error_set(error, EINVAL,
1766fe939687SNatalie Samsonov 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1767fe939687SNatalie Samsonov 				   "Failed to add rule\n");
1768fe939687SNatalie Samsonov 		goto out;
1769fe939687SNatalie Samsonov 	}
1770fe939687SNatalie Samsonov 
1771fe939687SNatalie Samsonov 	LIST_INSERT_HEAD(&priv->flows, flow, next);
1772fe939687SNatalie Samsonov 
1773fe939687SNatalie Samsonov 	return flow;
1774fe939687SNatalie Samsonov out:
	/* Release any key/mask buffers allocated while parsing the pattern. */
	mrvl_free_all_key_mask(&flow->rule);
1775fe939687SNatalie Samsonov 	rte_free(flow);
1776fe939687SNatalie Samsonov 	return NULL;
1777fe939687SNatalie Samsonov }
1778fe939687SNatalie Samsonov 
1779fe939687SNatalie Samsonov /**
1780fe939687SNatalie Samsonov  * Remove classifier rule associated with given flow.
1781fe939687SNatalie Samsonov  *
1782fe939687SNatalie Samsonov  * @param priv Pointer to the port's private data.
1783fe939687SNatalie Samsonov  * @param flow Pointer to the flow.
1784fe939687SNatalie Samsonov  * @param error Pointer to the flow error.
1785fe939687SNatalie Samsonov  * @returns 0 in case of success, negative value otherwise.
1786fe939687SNatalie Samsonov  */
1787fe939687SNatalie Samsonov static int
1788fe939687SNatalie Samsonov mrvl_flow_remove(struct mrvl_priv *priv, struct rte_flow *flow,
1789fe939687SNatalie Samsonov 		 struct rte_flow_error *error)
1790fe939687SNatalie Samsonov {
1791fe939687SNatalie Samsonov 	int ret;
1792fe939687SNatalie Samsonov 
1793fe939687SNatalie Samsonov 	if (!priv->cls_tbl) {
1794fe939687SNatalie Samsonov 		rte_flow_error_set(error, EINVAL,
1795fe939687SNatalie Samsonov 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1796fe939687SNatalie Samsonov 				   "Classifier table not initialized");
1797fe939687SNatalie Samsonov 		return -rte_errno;
1798fe939687SNatalie Samsonov 	}
1799fe939687SNatalie Samsonov 
1800fe939687SNatalie Samsonov 	ret = pp2_cls_tbl_remove_rule(priv->cls_tbl, &flow->rule);
1801fe939687SNatalie Samsonov 	if (ret) {
1802fe939687SNatalie Samsonov 		rte_flow_error_set(error, EINVAL,
1803fe939687SNatalie Samsonov 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1804fe939687SNatalie Samsonov 				   "Failed to remove rule");
1805fe939687SNatalie Samsonov 		return -rte_errno;
1806fe939687SNatalie Samsonov 	}
1807fe939687SNatalie Samsonov 
1808fe939687SNatalie Samsonov 	mrvl_free_all_key_mask(&flow->rule);
1809fe939687SNatalie Samsonov 
1810cdb53f8dSTomasz Duszynski 	if (flow->mtr) {
1811cdb53f8dSTomasz Duszynski 		flow->mtr->refcnt--;
1812cdb53f8dSTomasz Duszynski 		flow->mtr = NULL;
1813cdb53f8dSTomasz Duszynski 	}
1814cdb53f8dSTomasz Duszynski 
1815fe939687SNatalie Samsonov 	return 0;
1816fe939687SNatalie Samsonov }
1817fe939687SNatalie Samsonov 
1818fe939687SNatalie Samsonov /**
1819fe939687SNatalie Samsonov  * DPDK flow destroy callback called when flow is to be removed.
1820fe939687SNatalie Samsonov  *
182163e0f017SNatalie Samsonov  * @param dev Pointer to the device.
1822fe939687SNatalie Samsonov  * @param flow Pointer to the flow.
1823fe939687SNatalie Samsonov  * @param error Pointer to the flow error.
1824fe939687SNatalie Samsonov  * @returns 0 in case of success, negative value otherwise.
1825fe939687SNatalie Samsonov  */
1826fe939687SNatalie Samsonov static int
1827fe939687SNatalie Samsonov mrvl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1828fe939687SNatalie Samsonov 		  struct rte_flow_error *error)
1829fe939687SNatalie Samsonov {
1830fe939687SNatalie Samsonov 	struct mrvl_priv *priv = dev->data->dev_private;
1831fe939687SNatalie Samsonov 	struct rte_flow *f;
1832fe939687SNatalie Samsonov 	int ret;
1833fe939687SNatalie Samsonov 
1834fe939687SNatalie Samsonov 	LIST_FOREACH(f, &priv->flows, next) {
1835fe939687SNatalie Samsonov 		if (f == flow)
1836fe939687SNatalie Samsonov 			break;
1837fe939687SNatalie Samsonov 	}
1838fe939687SNatalie Samsonov 
1839fe939687SNatalie Samsonov 	if (!f) {
1840fe939687SNatalie Samsonov 		rte_flow_error_set(error, EINVAL,
1841fe939687SNatalie Samsonov 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1842fe939687SNatalie Samsonov 				   "Rule was not found");
1843fe939687SNatalie Samsonov 		return -rte_errno;
1844fe939687SNatalie Samsonov 	}
1845fe939687SNatalie Samsonov 
1846fe939687SNatalie Samsonov 	LIST_REMOVE(f, next);
1847fe939687SNatalie Samsonov 
1848fe939687SNatalie Samsonov 	ret = mrvl_flow_remove(priv, flow, error);
1849fe939687SNatalie Samsonov 	if (ret)
1850fe939687SNatalie Samsonov 		return ret;
1851fe939687SNatalie Samsonov 
1852fe939687SNatalie Samsonov 	rte_free(flow);
1853fe939687SNatalie Samsonov 
1854fe939687SNatalie Samsonov 	return 0;
1855fe939687SNatalie Samsonov }
1856fe939687SNatalie Samsonov 
1857fe939687SNatalie Samsonov /**
1858fe939687SNatalie Samsonov  * DPDK flow callback called to verify given attribute, pattern and actions.
1859fe939687SNatalie Samsonov  *
1860fe939687SNatalie Samsonov  * @param dev Pointer to the device.
1861fe939687SNatalie Samsonov  * @param attr Pointer to the flow attribute.
1862fe939687SNatalie Samsonov  * @param pattern Pointer to the flow pattern.
1863fe939687SNatalie Samsonov  * @param actions Pointer to the flow actions.
1864fe939687SNatalie Samsonov  * @param error Pointer to the flow error.
1865fe939687SNatalie Samsonov  * @returns 0 on success, negative value otherwise.
1866fe939687SNatalie Samsonov  */
1867fe939687SNatalie Samsonov static int
1868fe939687SNatalie Samsonov mrvl_flow_validate(struct rte_eth_dev *dev,
1869fe939687SNatalie Samsonov 		   const struct rte_flow_attr *attr,
1870fe939687SNatalie Samsonov 		   const struct rte_flow_item pattern[],
1871fe939687SNatalie Samsonov 		   const struct rte_flow_action actions[],
1872fe939687SNatalie Samsonov 		   struct rte_flow_error *error)
1873fe939687SNatalie Samsonov {
1874fe939687SNatalie Samsonov 	struct rte_flow *flow;
1875fe939687SNatalie Samsonov 
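	/*
	 * Validation is implemented by transiently creating the flow and
	 * destroying it right away, so it exercises the same parsing and
	 * table-compatibility checks as a real rte_flow_create() call.
	 */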
1876fe939687SNatalie Samsonov 	flow = mrvl_flow_create(dev, attr, pattern, actions, error);
1877fe939687SNatalie Samsonov 	if (!flow)
1878fe939687SNatalie Samsonov 		return -rte_errno;
1879fe939687SNatalie Samsonov 
1880fe939687SNatalie Samsonov 	mrvl_flow_destroy(dev, flow, error);
1881fe939687SNatalie Samsonov 
1882fe939687SNatalie Samsonov 	return 0;
1883fe939687SNatalie Samsonov }
1884fe939687SNatalie Samsonov 
1885fe939687SNatalie Samsonov /**
1886fe939687SNatalie Samsonov  * DPDK flow flush callback called when flows are to be flushed.
1887fe939687SNatalie Samsonov  *
1888fe939687SNatalie Samsonov  * @param dev Pointer to the device.
1889fe939687SNatalie Samsonov  * @param error Pointer to the flow error.
1890fe939687SNatalie Samsonov  * @returns 0 in case of success, negative value otherwise.
1891fe939687SNatalie Samsonov  */
1892fe939687SNatalie Samsonov static int
1893fe939687SNatalie Samsonov mrvl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1894fe939687SNatalie Samsonov {
1895fe939687SNatalie Samsonov 	struct mrvl_priv *priv = dev->data->dev_private;
1896fe939687SNatalie Samsonov 
1897fe939687SNatalie Samsonov 	while (!LIST_EMPTY(&priv->flows)) {
1898fe939687SNatalie Samsonov 		struct rte_flow *flow = LIST_FIRST(&priv->flows);
1899fe939687SNatalie Samsonov 		int ret = mrvl_flow_remove(priv, flow, error);
1900fe939687SNatalie Samsonov 		if (ret)
1901fe939687SNatalie Samsonov 			return ret;
1902fe939687SNatalie Samsonov 
1903fe939687SNatalie Samsonov 		LIST_REMOVE(flow, next);
1904fe939687SNatalie Samsonov 		rte_free(flow);
1905fe939687SNatalie Samsonov 	}
1906fe939687SNatalie Samsonov 
1907e0ae4cf6SLiron Himi 	if (priv->cls_tbl) {
1908e0ae4cf6SLiron Himi 		pp2_cls_tbl_deinit(priv->cls_tbl);
1909e0ae4cf6SLiron Himi 		priv->cls_tbl = NULL;
1910e0ae4cf6SLiron Himi 	}
1911e0ae4cf6SLiron Himi 
1912fe939687SNatalie Samsonov 	return 0;
1913fe939687SNatalie Samsonov }
1914fe939687SNatalie Samsonov 
1915fe939687SNatalie Samsonov /**
1916fe939687SNatalie Samsonov  * DPDK flow isolate callback called to isolate port.
1917fe939687SNatalie Samsonov  *
1918fe939687SNatalie Samsonov  * @param dev Pointer to the device.
1919fe939687SNatalie Samsonov  * @param enable Pass 0/1 to disable/enable port isolation.
1920fe939687SNatalie Samsonov  * @param error Pointer to the flow error.
1921fe939687SNatalie Samsonov  * @returns 0 in case of success, negative value otherwise.
1922fe939687SNatalie Samsonov  */
1923fe939687SNatalie Samsonov static int
1924fe939687SNatalie Samsonov mrvl_flow_isolate(struct rte_eth_dev *dev, int enable,
1925fe939687SNatalie Samsonov 		  struct rte_flow_error *error)
1926fe939687SNatalie Samsonov {
1927fe939687SNatalie Samsonov 	struct mrvl_priv *priv = dev->data->dev_private;
1928fe939687SNatalie Samsonov 
1929fe939687SNatalie Samsonov 	if (dev->data->dev_started) {
1930fe939687SNatalie Samsonov 		rte_flow_error_set(error, EBUSY,
1931fe939687SNatalie Samsonov 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1932fe939687SNatalie Samsonov 				   NULL, "Port must be stopped first\n");
1933fe939687SNatalie Samsonov 		return -rte_errno;
1934fe939687SNatalie Samsonov 	}
1935fe939687SNatalie Samsonov 
1936fe939687SNatalie Samsonov 	priv->isolated = enable;
1937fe939687SNatalie Samsonov 
1938fe939687SNatalie Samsonov 	return 0;
1939fe939687SNatalie Samsonov }
1940fe939687SNatalie Samsonov 
1941fe939687SNatalie Samsonov const struct rte_flow_ops mrvl_flow_ops = {
1942fe939687SNatalie Samsonov 	.validate = mrvl_flow_validate,
1943fe939687SNatalie Samsonov 	.create = mrvl_flow_create,
1944fe939687SNatalie Samsonov 	.destroy = mrvl_flow_destroy,
1945fe939687SNatalie Samsonov 	.flush = mrvl_flow_flush,
1946fe939687SNatalie Samsonov 	.isolate = mrvl_flow_isolate
1947fe939687SNatalie Samsonov };
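
/*
 * Illustrative usage (not part of the driver): an application reaches the
 * callbacks above through the generic rte_flow API, e.g.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error error;
 *	struct rte_flow *f;
 *
 *	f = rte_flow_create(port_id, &attr, pattern, actions, &error);
 *
 * where "pattern" is an RTE_FLOW_ITEM_TYPE_END-terminated item array built
 * from the types listed in mrvl_patterns[], port_id identifies this device
 * and the port has already been started.
 */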
1948a1f83becSTomasz Duszynski 
1949a1f83becSTomasz Duszynski /**
1950a1f83becSTomasz Duszynski  * Initialize flow resources.
1951a1f83becSTomasz Duszynski  *
1952a1f83becSTomasz Duszynski  * @param dev Pointer to the device.
1953a1f83becSTomasz Duszynski  */
1954a1f83becSTomasz Duszynski void
1955a1f83becSTomasz Duszynski mrvl_flow_init(struct rte_eth_dev *dev)
1956a1f83becSTomasz Duszynski {
1957a1f83becSTomasz Duszynski 	struct mrvl_priv *priv = dev->data->dev_private;
1958a1f83becSTomasz Duszynski 
1959a1f83becSTomasz Duszynski 	LIST_INIT(&priv->flows);
1960a1f83becSTomasz Duszynski }
1961a1f83becSTomasz Duszynski 
1962a1f83becSTomasz Duszynski /**
1963a1f83becSTomasz Duszynski  * Cleanup flow resources.
1964a1f83becSTomasz Duszynski  *
1965a1f83becSTomasz Duszynski  * @param dev Pointer to the device.
1966a1f83becSTomasz Duszynski  */
1967a1f83becSTomasz Duszynski void
1968a1f83becSTomasz Duszynski mrvl_flow_deinit(struct rte_eth_dev *dev)
1969a1f83becSTomasz Duszynski {
1970a1f83becSTomasz Duszynski 	mrvl_flow_flush(dev, NULL);
1971a1f83becSTomasz Duszynski }
1972