xref: /dpdk/drivers/net/cxgbe/cxgbe_flow.c (revision cc13675026303f1da82551deee89027cda3d7aef)
1ee61f511SShagun Agrawal /* SPDX-License-Identifier: BSD-3-Clause
2ee61f511SShagun Agrawal  * Copyright(c) 2018 Chelsio Communications.
3ee61f511SShagun Agrawal  * All rights reserved.
4ee61f511SShagun Agrawal  */
589c8bd95SRahul Lakkireddy #include "base/common.h"
6ee61f511SShagun Agrawal #include "cxgbe_flow.h"
7ee61f511SShagun Agrawal 
/*
 * Fill one match field (val + mask) in the filter spec, rejecting a
 * conflicting redefinition: if the field is already set (mask non-zero)
 * to a different value, bail out of the *enclosing* function with an
 * EINVAL rte_flow error.  Note the hidden 'return' — only usable inside
 * functions that return int and have an rte_flow_error pointer.
 */
#define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \
do { \
	if ((fs)->mask.elem && ((fs)->val.elem != (__v))) \
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, \
					  NULL, "Redefined match item with" \
					  " different values found"); \
	(fs)->val.elem = (__v); \
	(fs)->mask.elem = (__m); \
} while (0)
17ee61f511SShagun Agrawal 
/*
 * Copy a multi-byte match field (e.g. MAC or IP address) value/mask pair
 * into the filter spec.  Unlike __CXGBE_FILL_FS, this does not detect
 * conflicting redefinitions.
 */
#define __CXGBE_FILL_FS_MEMCPY(__v, __m, fs, elem) \
do { \
	memcpy(&(fs)->val.elem, &(__v), sizeof(__v)); \
	memcpy(&(fs)->mask.elem, &(__m), sizeof(__m)); \
} while (0)
23ee61f511SShagun Agrawal 
/*
 * Shorthands that assume locals named 'fs' (filter spec) and 'e'
 * (rte_flow_error pointer) are in scope, as in the parse helpers below.
 */
#define CXGBE_FILL_FS(v, m, elem) \
	__CXGBE_FILL_FS(v, m, fs, elem, e)

#define CXGBE_FILL_FS_MEMCPY(v, m, elem) \
	__CXGBE_FILL_FS_MEMCPY(v, m, fs, elem)
29ee61f511SShagun Agrawal 
30ee61f511SShagun Agrawal static int
31ee61f511SShagun Agrawal cxgbe_validate_item(const struct rte_flow_item *i, struct rte_flow_error *e)
32ee61f511SShagun Agrawal {
33ee61f511SShagun Agrawal 	/* rte_flow specification does not allow it. */
34ee61f511SShagun Agrawal 	if (!i->spec && (i->mask ||  i->last))
35ee61f511SShagun Agrawal 		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
36ee61f511SShagun Agrawal 				   i, "last or mask given without spec");
37ee61f511SShagun Agrawal 	/*
38ee61f511SShagun Agrawal 	 * We don't support it.
39ee61f511SShagun Agrawal 	 * Although, we can support values in last as 0's or last == spec.
40ee61f511SShagun Agrawal 	 * But this will not provide user with any additional functionality
41ee61f511SShagun Agrawal 	 * and will only increase the complexity for us.
42ee61f511SShagun Agrawal 	 */
43ee61f511SShagun Agrawal 	if (i->last)
44ee61f511SShagun Agrawal 		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
45ee61f511SShagun Agrawal 				   i, "last is not supported by chelsio pmd");
46ee61f511SShagun Agrawal 	return 0;
47ee61f511SShagun Agrawal }
48ee61f511SShagun Agrawal 
49ed709206SRahul Lakkireddy /**
50ed709206SRahul Lakkireddy  * Apart from the 4-tuple IPv4/IPv6 - TCP/UDP information,
51ed709206SRahul Lakkireddy  * there's only 40-bits available to store match fields.
52ed709206SRahul Lakkireddy  * So, to save space, optimize filter spec for some common
53ed709206SRahul Lakkireddy  * known fields that hardware can parse against incoming
54ed709206SRahul Lakkireddy  * packets automatically.
55ed709206SRahul Lakkireddy  */
56ed709206SRahul Lakkireddy static void
57ed709206SRahul Lakkireddy cxgbe_tweak_filter_spec(struct adapter *adap,
58ed709206SRahul Lakkireddy 			struct ch_filter_specification *fs)
59ed709206SRahul Lakkireddy {
60ed709206SRahul Lakkireddy 	/* Save 16-bit ethertype field space, by setting corresponding
61ed709206SRahul Lakkireddy 	 * 1-bit flags in the filter spec for common known ethertypes.
62ed709206SRahul Lakkireddy 	 * When hardware sees these flags, it automatically infers and
63ed709206SRahul Lakkireddy 	 * matches incoming packets against the corresponding ethertype.
64ed709206SRahul Lakkireddy 	 */
65ed709206SRahul Lakkireddy 	if (fs->mask.ethtype == 0xffff) {
66ed709206SRahul Lakkireddy 		switch (fs->val.ethtype) {
67ed709206SRahul Lakkireddy 		case RTE_ETHER_TYPE_IPV4:
68ed709206SRahul Lakkireddy 			if (adap->params.tp.ethertype_shift < 0) {
69ed709206SRahul Lakkireddy 				fs->type = FILTER_TYPE_IPV4;
70ed709206SRahul Lakkireddy 				fs->val.ethtype = 0;
71ed709206SRahul Lakkireddy 				fs->mask.ethtype = 0;
72ed709206SRahul Lakkireddy 			}
73ed709206SRahul Lakkireddy 			break;
74ed709206SRahul Lakkireddy 		case RTE_ETHER_TYPE_IPV6:
75ed709206SRahul Lakkireddy 			if (adap->params.tp.ethertype_shift < 0) {
76ed709206SRahul Lakkireddy 				fs->type = FILTER_TYPE_IPV6;
77ed709206SRahul Lakkireddy 				fs->val.ethtype = 0;
78ed709206SRahul Lakkireddy 				fs->mask.ethtype = 0;
79ed709206SRahul Lakkireddy 			}
80ed709206SRahul Lakkireddy 			break;
81ed709206SRahul Lakkireddy 		case RTE_ETHER_TYPE_VLAN:
82ed709206SRahul Lakkireddy 			if (adap->params.tp.ethertype_shift < 0 &&
83ed709206SRahul Lakkireddy 			    adap->params.tp.vlan_shift >= 0) {
84ed709206SRahul Lakkireddy 				fs->val.ivlan_vld = 1;
85ed709206SRahul Lakkireddy 				fs->mask.ivlan_vld = 1;
86ed709206SRahul Lakkireddy 				fs->val.ethtype = 0;
87ed709206SRahul Lakkireddy 				fs->mask.ethtype = 0;
88ed709206SRahul Lakkireddy 			}
89ed709206SRahul Lakkireddy 			break;
9055f003d8SKarra Satwik 		case RTE_ETHER_TYPE_QINQ:
9155f003d8SKarra Satwik 			if (adap->params.tp.ethertype_shift < 0 &&
9255f003d8SKarra Satwik 			    adap->params.tp.vnic_shift >= 0) {
9355f003d8SKarra Satwik 				fs->val.ovlan_vld = 1;
9455f003d8SKarra Satwik 				fs->mask.ovlan_vld = 1;
9555f003d8SKarra Satwik 				fs->val.ethtype = 0;
9655f003d8SKarra Satwik 				fs->mask.ethtype = 0;
9755f003d8SKarra Satwik 			}
9855f003d8SKarra Satwik 			break;
99ed709206SRahul Lakkireddy 		default:
100ed709206SRahul Lakkireddy 			break;
101ed709206SRahul Lakkireddy 		}
102ed709206SRahul Lakkireddy 	}
103ed709206SRahul Lakkireddy }
104ed709206SRahul Lakkireddy 
/**
 * Decide whether this filter can be placed in the hash (exact-match)
 * region instead of the LE-TCAM region, and set fs->cap accordingly.
 *
 * Hash filters require a fully specified 4-tuple and a combined ntuple
 * mask that exactly equals the hardware's configured hash filter mask.
 */
static void
cxgbe_fill_filter_region(struct adapter *adap,
			 struct ch_filter_specification *fs)
{
	struct tp_params *tp = &adap->params.tp;
	u64 hash_filter_mask = tp->hash_filter_mask;
	u64 ntuple_mask = 0;

	/* Default to the TCAM region. */
	fs->cap = 0;

	if (!is_hashfilter(adap))
		return;

	if (fs->type) {
		/* IPv6: both addresses must be non-zero and exact-matched. */
		uint8_t biton[16] = {0xff, 0xff, 0xff, 0xff,
				     0xff, 0xff, 0xff, 0xff,
				     0xff, 0xff, 0xff, 0xff,
				     0xff, 0xff, 0xff, 0xff};
		uint8_t bitoff[16] = {0};

		if (!memcmp(fs->val.lip, bitoff, sizeof(bitoff)) ||
		    !memcmp(fs->val.fip, bitoff, sizeof(bitoff)) ||
		    memcmp(fs->mask.lip, biton, sizeof(biton)) ||
		    memcmp(fs->mask.fip, biton, sizeof(biton)))
			return;
	} else {
		/* IPv4: same requirement on the 32-bit addresses. */
		uint32_t biton  = 0xffffffff;
		uint32_t bitoff = 0x0U;

		if (!memcmp(fs->val.lip, &bitoff, sizeof(bitoff)) ||
		    !memcmp(fs->val.fip, &bitoff, sizeof(bitoff)) ||
		    memcmp(fs->mask.lip, &biton, sizeof(biton)) ||
		    memcmp(fs->mask.fip, &biton, sizeof(biton)))
			return;
	}

	/* Both L4 ports must be non-zero and exact-matched. */
	if (!fs->val.lport || fs->mask.lport != 0xffff)
		return;
	if (!fs->val.fport || fs->mask.fport != 0xffff)
		return;

	/* Accumulate the combined ntuple mask from every compressed
	 * filter field the hardware is configured to extract (a negative
	 * shift means the field is not extracted).
	 */
	if (tp->protocol_shift >= 0)
		ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
	if (tp->ethertype_shift >= 0)
		ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
	if (tp->port_shift >= 0)
		ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
	if (tp->macmatch_shift >= 0)
		ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;
	if (tp->vlan_shift >= 0 && fs->mask.ivlan_vld)
		ntuple_mask |= (u64)(F_FT_VLAN_VLD | fs->mask.ivlan) <<
			       tp->vlan_shift;
	if (tp->vnic_shift >= 0) {
		/* The vnic field carries either outer-VLAN or PF/VF data. */
		if (fs->mask.ovlan_vld)
			ntuple_mask |= (u64)(fs->val.ovlan_vld << 16 |
					     fs->mask.ovlan) << tp->vnic_shift;
		else if (fs->mask.pfvf_vld)
			ntuple_mask |= (u64)(fs->mask.pfvf_vld << 16 |
					     fs->mask.pf << 13 |
					     fs->mask.vf) << tp->vnic_shift;
	}
	if (tp->tos_shift >= 0)
		ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift;

	/* Hash region is usable only on an exact hash-mask match. */
	if (ntuple_mask != hash_filter_mask)
		return;

	fs->cap = 1;	/* use hash region */
}
17478192b38SShagun Agrawal 
/**
 * Parse an ETH flow item into the Chelsio filter spec.
 *
 * Supports destination MAC and ethertype matching only; source MAC
 * filtering is rejected.  When the user supplies no mask, the Chelsio
 * default mask (dmask) is used.
 *
 * Return: 0 on success, negative rte_flow error otherwise.
 */
static int
ch_rte_parsetype_eth(const void *dmask, const struct rte_flow_item *item,
		     struct ch_filter_specification *fs,
		     struct rte_flow_error *e)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *umask = item->mask;
	const struct rte_flow_item_eth *mask;

	/* If user has not given any mask, then use chelsio supported mask. */
	mask = umask ? umask : (const struct rte_flow_item_eth *)dmask;

	if (!spec)
		return 0;

	/* we don't support SRC_MAC filtering*/
	if (!rte_is_zero_ether_addr(&spec->hdr.src_addr) ||
	    (umask && !rte_is_zero_ether_addr(&umask->hdr.src_addr)))
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "src mac filtering not supported");

	if (!rte_is_zero_ether_addr(&spec->hdr.dst_addr) ||
	    (umask && !rte_is_zero_ether_addr(&umask->hdr.dst_addr))) {
		/* 0x1ff mask on macidx enables exact-match index matching. */
		CXGBE_FILL_FS(0, 0x1ff, macidx);
		CXGBE_FILL_FS_MEMCPY(spec->hdr.dst_addr.addr_bytes, mask->hdr.dst_addr.addr_bytes,
				     dmac);
	}

	if (spec->hdr.ether_type || (umask && umask->hdr.ether_type))
		CXGBE_FILL_FS(be16_to_cpu(spec->hdr.ether_type),
			      be16_to_cpu(mask->hdr.ether_type), ethtype);

	return 0;
}
210fefee7a6SShagun Agrawal 
/**
 * Parse a VLAN flow item into the Chelsio filter spec.
 *
 * Depending on whether the previously parsed ethertype was QinQ or
 * VLAN, this fills the outer (ovlan) or inner (ivlan) VLAN match and
 * sets the corresponding *vlan_vld bit, letting hardware infer the
 * VLAN/QinQ ethertype itself.
 *
 * Return: 0 on success, negative rte_flow error otherwise.
 */
static int
ch_rte_parsetype_vlan(const void *dmask, const struct rte_flow_item *item,
		      struct ch_filter_specification *fs,
		      struct rte_flow_error *e)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *umask = item->mask;
	const struct rte_flow_item_vlan *mask;

	/* If user has not given any mask, then use chelsio supported mask. */
	mask = umask ? umask : (const struct rte_flow_item_vlan *)dmask;

	/* If ethertype is already set and is not VLAN (0x8100) or
	 * QINQ(0x88A8), then don't proceed further. Otherwise,
	 * reset the outer ethertype, so that it can be replaced by
	 * innermost ethertype. Note that hardware will automatically
	 * match against VLAN or QINQ packets, based on 'ivlan_vld' or
	 * 'ovlan_vld' bit set in Chelsio filter spec, respectively.
	 */
	if (fs->mask.ethtype) {
		if (fs->val.ethtype != RTE_ETHER_TYPE_VLAN &&
		    fs->val.ethtype != RTE_ETHER_TYPE_QINQ)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "Ethertype must be 0x8100 or 0x88a8");
	}

	if (fs->val.ethtype == RTE_ETHER_TYPE_QINQ) {
		/* Outer tag of a QinQ frame. */
		CXGBE_FILL_FS(1, 1, ovlan_vld);
		if (spec) {
			if (spec->hdr.vlan_tci || (umask && umask->hdr.vlan_tci))
				CXGBE_FILL_FS(be16_to_cpu(spec->hdr.vlan_tci),
					      be16_to_cpu(mask->hdr.vlan_tci), ovlan);
			fs->mask.ethtype = 0;
			fs->val.ethtype = 0;
		}
	} else {
		/* Inner (or only) VLAN tag. */
		CXGBE_FILL_FS(1, 1, ivlan_vld);
		if (spec) {
			if (spec->hdr.vlan_tci || (umask && umask->hdr.vlan_tci))
				CXGBE_FILL_FS(be16_to_cpu(spec->hdr.vlan_tci),
					      be16_to_cpu(mask->hdr.vlan_tci), ivlan);
			fs->mask.ethtype = 0;
			fs->val.ethtype = 0;
		}
	}

	/* The encapsulated ethertype becomes the ethertype to match. */
	if (spec && (spec->hdr.eth_proto || (umask && umask->hdr.eth_proto)))
		CXGBE_FILL_FS(be16_to_cpu(spec->hdr.eth_proto),
			      be16_to_cpu(mask->hdr.eth_proto), ethtype);

	return 0;
}
265ed709206SRahul Lakkireddy 
266ed709206SRahul Lakkireddy static int
267ee61f511SShagun Agrawal ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
268ee61f511SShagun Agrawal 		     struct ch_filter_specification *fs,
269ee61f511SShagun Agrawal 		     struct rte_flow_error *e)
270ee61f511SShagun Agrawal {
271ee61f511SShagun Agrawal 	const struct rte_flow_item_udp *val = item->spec;
272ee61f511SShagun Agrawal 	const struct rte_flow_item_udp *umask = item->mask;
273ee61f511SShagun Agrawal 	const struct rte_flow_item_udp *mask;
274ee61f511SShagun Agrawal 
275ee61f511SShagun Agrawal 	mask = umask ? umask : (const struct rte_flow_item_udp *)dmask;
276ee61f511SShagun Agrawal 
277ee61f511SShagun Agrawal 	if (mask->hdr.dgram_len || mask->hdr.dgram_cksum)
278ee61f511SShagun Agrawal 		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
279ee61f511SShagun Agrawal 					  item,
280ee61f511SShagun Agrawal 					  "udp: only src/dst port supported");
281ee61f511SShagun Agrawal 
282ee61f511SShagun Agrawal 	CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto);
283ee61f511SShagun Agrawal 	if (!val)
284ee61f511SShagun Agrawal 		return 0;
285ff7079a3SRahul Lakkireddy 
286ff7079a3SRahul Lakkireddy 	if (val->hdr.src_port || (umask && umask->hdr.src_port))
287ee61f511SShagun Agrawal 		CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
288ee61f511SShagun Agrawal 			      be16_to_cpu(mask->hdr.src_port), fport);
289ff7079a3SRahul Lakkireddy 
290ff7079a3SRahul Lakkireddy 	if (val->hdr.dst_port || (umask && umask->hdr.dst_port))
291ee61f511SShagun Agrawal 		CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
292ee61f511SShagun Agrawal 			      be16_to_cpu(mask->hdr.dst_port), lport);
293ff7079a3SRahul Lakkireddy 
294ee61f511SShagun Agrawal 	return 0;
295ee61f511SShagun Agrawal }
296ee61f511SShagun Agrawal 
297ee61f511SShagun Agrawal static int
298ee61f511SShagun Agrawal ch_rte_parsetype_tcp(const void *dmask, const struct rte_flow_item *item,
299ee61f511SShagun Agrawal 		     struct ch_filter_specification *fs,
300ee61f511SShagun Agrawal 		     struct rte_flow_error *e)
301ee61f511SShagun Agrawal {
302ee61f511SShagun Agrawal 	const struct rte_flow_item_tcp *val = item->spec;
303ee61f511SShagun Agrawal 	const struct rte_flow_item_tcp *umask = item->mask;
304ee61f511SShagun Agrawal 	const struct rte_flow_item_tcp *mask;
305ee61f511SShagun Agrawal 
306ee61f511SShagun Agrawal 	mask = umask ? umask : (const struct rte_flow_item_tcp *)dmask;
307ee61f511SShagun Agrawal 
308ee61f511SShagun Agrawal 	if (mask->hdr.sent_seq || mask->hdr.recv_ack || mask->hdr.data_off ||
309ee61f511SShagun Agrawal 	    mask->hdr.tcp_flags || mask->hdr.rx_win || mask->hdr.cksum ||
310ee61f511SShagun Agrawal 	    mask->hdr.tcp_urp)
311ee61f511SShagun Agrawal 		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
312ee61f511SShagun Agrawal 					  item,
313ee61f511SShagun Agrawal 					  "tcp: only src/dst port supported");
314ee61f511SShagun Agrawal 
315ee61f511SShagun Agrawal 	CXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto);
316ee61f511SShagun Agrawal 	if (!val)
317ee61f511SShagun Agrawal 		return 0;
318ff7079a3SRahul Lakkireddy 
319ff7079a3SRahul Lakkireddy 	if (val->hdr.src_port || (umask && umask->hdr.src_port))
320ee61f511SShagun Agrawal 		CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
321ee61f511SShagun Agrawal 			      be16_to_cpu(mask->hdr.src_port), fport);
322ff7079a3SRahul Lakkireddy 
323ff7079a3SRahul Lakkireddy 	if (val->hdr.dst_port || (umask && umask->hdr.dst_port))
324ee61f511SShagun Agrawal 		CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
325ee61f511SShagun Agrawal 			      be16_to_cpu(mask->hdr.dst_port), lport);
326ff7079a3SRahul Lakkireddy 
327ee61f511SShagun Agrawal 	return 0;
328ee61f511SShagun Agrawal }
329ee61f511SShagun Agrawal 
/**
 * Parse an IPV4 flow item into the Chelsio filter spec.
 *
 * Supports matching on next protocol, src/dst address and TOS; TTL
 * matching is rejected.  Also verifies a previously parsed ethertype
 * (if any) is IPv4.
 *
 * Return: 0 on success, negative rte_flow error otherwise.
 */
static int
ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,
		      struct ch_filter_specification *fs,
		      struct rte_flow_error *e)
{
	const struct rte_flow_item_ipv4 *val = item->spec;
	const struct rte_flow_item_ipv4 *umask = item->mask;
	const struct rte_flow_item_ipv4 *mask;

	/* If user has not given any mask, then use chelsio supported mask. */
	mask = umask ? umask : (const struct rte_flow_item_ipv4 *)dmask;

	if (mask->hdr.time_to_live)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item, "ttl is not supported");

	if (fs->mask.ethtype &&
	    (fs->val.ethtype != RTE_ETHER_TYPE_IPV4))
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "Couldn't find IPv4 ethertype");
	fs->type = FILTER_TYPE_IPV4;
	if (!val)
		return 0; /* ipv4 wild card */

	if (val->hdr.next_proto_id || (umask && umask->hdr.next_proto_id))
		CXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id,
			      proto);

	if (val->hdr.dst_addr || (umask && umask->hdr.dst_addr))
		CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr,
				     lip);

	if (val->hdr.src_addr || (umask && umask->hdr.src_addr))
		CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr,
				     fip);

	if (val->hdr.type_of_service || (umask && umask->hdr.type_of_service))
		CXGBE_FILL_FS(val->hdr.type_of_service,
			      mask->hdr.type_of_service, tos);

	return 0;
}
372ee61f511SShagun Agrawal 
/**
 * Parse an IPV6 flow item into the Chelsio filter spec.
 *
 * Supports matching on next protocol, traffic class (stored as tos)
 * and src/dst addresses.  Flow label, payload length and hop limit
 * matching are rejected.  Also verifies a previously parsed ethertype
 * (if any) is IPv6.
 *
 * Return: 0 on success, negative rte_flow error otherwise.
 */
static int
ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
		      struct ch_filter_specification *fs,
		      struct rte_flow_error *e)
{
	const struct rte_flow_item_ipv6 *val = item->spec;
	const struct rte_flow_item_ipv6 *umask = item->mask;
	const struct rte_flow_item_ipv6 *mask;
	u32 vtc_flow, vtc_flow_mask;
	u8 z[16] = { 0 };

	/* If user has not given any mask, then use chelsio supported mask. */
	mask = umask ? umask : (const struct rte_flow_item_ipv6 *)dmask;

	vtc_flow_mask = be32_to_cpu(mask->hdr.vtc_flow);

	if (vtc_flow_mask & RTE_IPV6_HDR_FL_MASK ||
	    mask->hdr.payload_len || mask->hdr.hop_limits)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "flow/hop are not supported");

	if (fs->mask.ethtype &&
	    (fs->val.ethtype != RTE_ETHER_TYPE_IPV6))
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "Couldn't find IPv6 ethertype");
	fs->type = FILTER_TYPE_IPV6;
	if (!val)
		return 0; /* ipv6 wild card */

	if (val->hdr.proto || (umask && umask->hdr.proto))
		CXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto);

	/* Extract the traffic-class bits out of vtc_flow into tos. */
	vtc_flow = be32_to_cpu(val->hdr.vtc_flow);
	if (val->hdr.vtc_flow || (umask && umask->hdr.vtc_flow))
		CXGBE_FILL_FS((vtc_flow & RTE_IPV6_HDR_TC_MASK) >>
			      RTE_IPV6_HDR_TC_SHIFT,
			      (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
			      RTE_IPV6_HDR_TC_SHIFT,
			      tos);

	if (memcmp(&val->hdr.dst_addr, z, sizeof(val->hdr.dst_addr)) ||
	    (umask &&
	     memcmp(&umask->hdr.dst_addr, z, sizeof(umask->hdr.dst_addr))))
		CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr,
				     lip);

	if (memcmp(&val->hdr.src_addr, z, sizeof(val->hdr.src_addr)) ||
	    (umask &&
	     memcmp(&umask->hdr.src_addr, z, sizeof(umask->hdr.src_addr))))
		CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr,
				     fip);

	return 0;
}
428ee61f511SShagun Agrawal 
429ee61f511SShagun Agrawal static int
430ee61f511SShagun Agrawal cxgbe_rtef_parse_attr(struct rte_flow *flow, const struct rte_flow_attr *attr,
431ee61f511SShagun Agrawal 		      struct rte_flow_error *e)
432ee61f511SShagun Agrawal {
433ee61f511SShagun Agrawal 	if (attr->egress)
434ee61f511SShagun Agrawal 		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
435ee61f511SShagun Agrawal 					  attr, "attribute:<egress> is"
436ee61f511SShagun Agrawal 					  " not supported !");
437ee61f511SShagun Agrawal 	if (attr->group > 0)
438ee61f511SShagun Agrawal 		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
439ee61f511SShagun Agrawal 					  attr, "group parameter is"
440ee61f511SShagun Agrawal 					  " not supported.");
441ee61f511SShagun Agrawal 
442ee61f511SShagun Agrawal 	flow->fidx = attr->priority ? attr->priority - 1 : FILTER_ID_MAX;
443ee61f511SShagun Agrawal 
444ee61f511SShagun Agrawal 	return 0;
445ee61f511SShagun Agrawal }
446ee61f511SShagun Agrawal 
447ee61f511SShagun Agrawal static inline int check_rxq(struct rte_eth_dev *dev, uint16_t rxq)
448ee61f511SShagun Agrawal {
449ee61f511SShagun Agrawal 	struct port_info *pi = ethdev2pinfo(dev);
450ee61f511SShagun Agrawal 
451ee61f511SShagun Agrawal 	if (rxq > pi->n_rx_qsets)
452ee61f511SShagun Agrawal 		return -EINVAL;
453ee61f511SShagun Agrawal 	return 0;
454ee61f511SShagun Agrawal }
455ee61f511SShagun Agrawal 
456ee61f511SShagun Agrawal static int cxgbe_validate_fidxondel(struct filter_entry *f, unsigned int fidx)
457ee61f511SShagun Agrawal {
458ee61f511SShagun Agrawal 	struct adapter *adap = ethdev2adap(f->dev);
459ee61f511SShagun Agrawal 	struct ch_filter_specification fs = f->fs;
46031d4d233SRahul Lakkireddy 	u8 nentries;
461ee61f511SShagun Agrawal 
462ee61f511SShagun Agrawal 	if (fidx >= adap->tids.nftids) {
463ee61f511SShagun Agrawal 		dev_err(adap, "invalid flow index %d.\n", fidx);
464ee61f511SShagun Agrawal 		return -EINVAL;
465ee61f511SShagun Agrawal 	}
46631d4d233SRahul Lakkireddy 
46731d4d233SRahul Lakkireddy 	nentries = cxgbe_filter_slots(adap, fs.type);
46831d4d233SRahul Lakkireddy 	if (!cxgbe_is_filter_set(&adap->tids, fidx, nentries)) {
469ee61f511SShagun Agrawal 		dev_err(adap, "Already free fidx:%d f:%p\n", fidx, f);
470ee61f511SShagun Agrawal 		return -EINVAL;
471ee61f511SShagun Agrawal 	}
472ee61f511SShagun Agrawal 
473ee61f511SShagun Agrawal 	return 0;
474ee61f511SShagun Agrawal }
475ee61f511SShagun Agrawal 
476ee61f511SShagun Agrawal static int
477ee61f511SShagun Agrawal cxgbe_validate_fidxonadd(struct ch_filter_specification *fs,
478ee61f511SShagun Agrawal 			 struct adapter *adap, unsigned int fidx)
479ee61f511SShagun Agrawal {
48031d4d233SRahul Lakkireddy 	u8 nentries;
48131d4d233SRahul Lakkireddy 
48231d4d233SRahul Lakkireddy 	nentries = cxgbe_filter_slots(adap, fs->type);
48331d4d233SRahul Lakkireddy 	if (cxgbe_is_filter_set(&adap->tids, fidx, nentries)) {
484ee61f511SShagun Agrawal 		dev_err(adap, "filter index: %d is busy.\n", fidx);
485ee61f511SShagun Agrawal 		return -EBUSY;
486ee61f511SShagun Agrawal 	}
48731d4d233SRahul Lakkireddy 
488ee61f511SShagun Agrawal 	if (fidx >= adap->tids.nftids) {
489ee61f511SShagun Agrawal 		dev_err(adap, "filter index (%u) >= max(%u)\n",
490ee61f511SShagun Agrawal 			fidx, adap->tids.nftids);
491ee61f511SShagun Agrawal 		return -ERANGE;
492ee61f511SShagun Agrawal 	}
493ee61f511SShagun Agrawal 
494ee61f511SShagun Agrawal 	return 0;
495ee61f511SShagun Agrawal }
496ee61f511SShagun Agrawal 
497ee61f511SShagun Agrawal static int
498ee61f511SShagun Agrawal cxgbe_verify_fidx(struct rte_flow *flow, unsigned int fidx, uint8_t del)
499ee61f511SShagun Agrawal {
50078192b38SShagun Agrawal 	if (flow->fs.cap)
50178192b38SShagun Agrawal 		return 0; /* Hash filters */
502ee61f511SShagun Agrawal 	return del ? cxgbe_validate_fidxondel(flow->f, fidx) :
503ee61f511SShagun Agrawal 		cxgbe_validate_fidxonadd(&flow->fs,
504ee61f511SShagun Agrawal 					 ethdev2adap(flow->dev), fidx);
505ee61f511SShagun Agrawal }
506ee61f511SShagun Agrawal 
507ee61f511SShagun Agrawal static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx)
508ee61f511SShagun Agrawal {
509ee61f511SShagun Agrawal 	struct ch_filter_specification *fs = &flow->fs;
510ee61f511SShagun Agrawal 	struct adapter *adap = ethdev2adap(flow->dev);
511ee61f511SShagun Agrawal 
512ee61f511SShagun Agrawal 	/* For tcam get the next available slot, if default value specified */
513ee61f511SShagun Agrawal 	if (flow->fidx == FILTER_ID_MAX) {
51431d4d233SRahul Lakkireddy 		u8 nentries;
515ee61f511SShagun Agrawal 		int idx;
516ee61f511SShagun Agrawal 
51731d4d233SRahul Lakkireddy 		nentries = cxgbe_filter_slots(adap, fs->type);
51831d4d233SRahul Lakkireddy 		idx = cxgbe_alloc_ftid(adap, nentries);
519ee61f511SShagun Agrawal 		if (idx < 0) {
520ee61f511SShagun Agrawal 			dev_err(adap, "unable to get a filter index in tcam\n");
521ee61f511SShagun Agrawal 			return -ENOMEM;
522ee61f511SShagun Agrawal 		}
523ee61f511SShagun Agrawal 		*fidx = (unsigned int)idx;
524ee61f511SShagun Agrawal 	} else {
525ee61f511SShagun Agrawal 		*fidx = flow->fidx;
526ee61f511SShagun Agrawal 	}
527ee61f511SShagun Agrawal 
528ee61f511SShagun Agrawal 	return 0;
529ee61f511SShagun Agrawal }
530ee61f511SShagun Agrawal 
531ee61f511SShagun Agrawal static int
53248f523f6SRahul Lakkireddy cxgbe_get_flow_item_index(const struct rte_flow_item items[], u32 type)
53348f523f6SRahul Lakkireddy {
53448f523f6SRahul Lakkireddy 	const struct rte_flow_item *i;
53548f523f6SRahul Lakkireddy 	int j, index = -ENOENT;
53648f523f6SRahul Lakkireddy 
53748f523f6SRahul Lakkireddy 	for (i = items, j = 0; i->type != RTE_FLOW_ITEM_TYPE_END; i++, j++) {
53848f523f6SRahul Lakkireddy 		if (i->type == type) {
53948f523f6SRahul Lakkireddy 			index = j;
54048f523f6SRahul Lakkireddy 			break;
54148f523f6SRahul Lakkireddy 		}
54248f523f6SRahul Lakkireddy 	}
54348f523f6SRahul Lakkireddy 
54448f523f6SRahul Lakkireddy 	return index;
54548f523f6SRahul Lakkireddy }
54648f523f6SRahul Lakkireddy 
54748f523f6SRahul Lakkireddy static int
54848f523f6SRahul Lakkireddy ch_rte_parse_nat(uint8_t nmode, struct ch_filter_specification *fs)
54948f523f6SRahul Lakkireddy {
55048f523f6SRahul Lakkireddy 	/* nmode:
55148f523f6SRahul Lakkireddy 	 * BIT_0 = [src_ip],   BIT_1 = [dst_ip]
55248f523f6SRahul Lakkireddy 	 * BIT_2 = [src_port], BIT_3 = [dst_port]
55348f523f6SRahul Lakkireddy 	 *
55448f523f6SRahul Lakkireddy 	 * Only below cases are supported as per our spec.
55548f523f6SRahul Lakkireddy 	 */
55648f523f6SRahul Lakkireddy 	switch (nmode) {
55748f523f6SRahul Lakkireddy 	case 0:  /* 0000b */
55848f523f6SRahul Lakkireddy 		fs->nat_mode = NAT_MODE_NONE;
55948f523f6SRahul Lakkireddy 		break;
56048f523f6SRahul Lakkireddy 	case 2:  /* 0010b */
56148f523f6SRahul Lakkireddy 		fs->nat_mode = NAT_MODE_DIP;
56248f523f6SRahul Lakkireddy 		break;
56348f523f6SRahul Lakkireddy 	case 5:  /* 0101b */
56448f523f6SRahul Lakkireddy 		fs->nat_mode = NAT_MODE_SIP_SP;
56548f523f6SRahul Lakkireddy 		break;
56648f523f6SRahul Lakkireddy 	case 7:  /* 0111b */
56748f523f6SRahul Lakkireddy 		fs->nat_mode = NAT_MODE_DIP_SIP_SP;
56848f523f6SRahul Lakkireddy 		break;
56948f523f6SRahul Lakkireddy 	case 10: /* 1010b */
57048f523f6SRahul Lakkireddy 		fs->nat_mode = NAT_MODE_DIP_DP;
57148f523f6SRahul Lakkireddy 		break;
57248f523f6SRahul Lakkireddy 	case 11: /* 1011b */
57348f523f6SRahul Lakkireddy 		fs->nat_mode = NAT_MODE_DIP_DP_SIP;
57448f523f6SRahul Lakkireddy 		break;
57548f523f6SRahul Lakkireddy 	case 14: /* 1110b */
57648f523f6SRahul Lakkireddy 		fs->nat_mode = NAT_MODE_DIP_DP_SP;
57748f523f6SRahul Lakkireddy 		break;
57848f523f6SRahul Lakkireddy 	case 15: /* 1111b */
57948f523f6SRahul Lakkireddy 		fs->nat_mode = NAT_MODE_ALL;
58048f523f6SRahul Lakkireddy 		break;
58148f523f6SRahul Lakkireddy 	default:
58248f523f6SRahul Lakkireddy 		return -EINVAL;
58348f523f6SRahul Lakkireddy 	}
58448f523f6SRahul Lakkireddy 
58548f523f6SRahul Lakkireddy 	return 0;
58648f523f6SRahul Lakkireddy }
58748f523f6SRahul Lakkireddy 
/* Parse a single switch-type rte_flow action @a into the filter
 * specification @fs.
 *
 * @a:     the action to translate (must be one of the switch actions
 *         handled below; anything else returns an error)
 * @items: the rule's match item array; used to verify that header
 *         rewrite actions are only requested for headers the rule
 *         actually matches on
 * @nmode: in/out NAT rewrite bitmask accumulated across actions
 *         (BIT_0 src ip, BIT_1 dst ip, BIT_2 src port, BIT_3 dst port);
 *         later validated by ch_rte_parse_nat()
 * @fs:    filter specification being filled in
 * @e:     rte_flow error info filled on failure
 *
 * Returns 0 on success, or the (negative-wrapped) result of
 * rte_flow_error_set() on failure.
 */
static int
ch_rte_parse_atype_switch(const struct rte_flow_action *a,
			  const struct rte_flow_item items[],
			  uint8_t *nmode,
			  struct ch_filter_specification *fs,
			  struct rte_flow_error *e)
{
	const struct rte_flow_action_of_set_vlan_vid *vlanid;
	const struct rte_flow_action_of_set_vlan_pcp *vlanpcp;
	const struct rte_flow_action_of_push_vlan *pushvlan;
	const struct rte_flow_action_set_ipv4 *ipv4;
	const struct rte_flow_action_set_ipv6 *ipv6;
	const struct rte_flow_action_set_tp *tp_port;
	const struct rte_flow_action_set_mac *mac;
	int item_index;
	u16 tmp_vlan;

	switch (a->type) {
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
		vlanid = (const struct rte_flow_action_of_set_vlan_vid *)
			  a->conf;
		/* If explicitly asked to push a new VLAN header,
		 * then don't set rewrite mode. Otherwise, the
		 * incoming VLAN packets will get their VLAN fields
		 * rewritten, instead of adding an additional outer
		 * VLAN header.
		 */
		if (fs->newvlan != VLAN_INSERT)
			fs->newvlan = VLAN_REWRITE;
		/* Keep PCP/DEI bits (top 4), replace the 12-bit VID. */
		tmp_vlan = fs->vlan & 0xe000;
		fs->vlan = (be16_to_cpu(vlanid->vlan_vid) & 0xfff) | tmp_vlan;
		break;
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
		vlanpcp = (const struct rte_flow_action_of_set_vlan_pcp *)
			  a->conf;
		/* If explicitly asked to push a new VLAN header,
		 * then don't set rewrite mode. Otherwise, the
		 * incoming VLAN packets will get their VLAN fields
		 * rewritten, instead of adding an additional outer
		 * VLAN header.
		 */
		if (fs->newvlan != VLAN_INSERT)
			fs->newvlan = VLAN_REWRITE;
		/* Keep the 12-bit VID, replace the 3-bit PCP (bits 15:13). */
		tmp_vlan = fs->vlan & 0xfff;
		fs->vlan = (vlanpcp->vlan_pcp << 13) | tmp_vlan;
		break;
	case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
		pushvlan = (const struct rte_flow_action_of_push_vlan *)
			    a->conf;
		/* Hardware only inserts 802.1Q (0x8100) tags. */
		if (be16_to_cpu(pushvlan->ethertype) != RTE_ETHER_TYPE_VLAN)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "only ethertype 0x8100 "
						  "supported for push vlan.");
		fs->newvlan = VLAN_INSERT;
		break;
	case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
		fs->newvlan = VLAN_REMOVE;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
		/* NAT rewrite of a header field requires the rule to
		 * actually match on that header type.
		 */
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_IPV4);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_IPV4 "
						  "found.");

		ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
		memcpy(fs->nat_fip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
		*nmode |= 1 << 0;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_IPV4);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_IPV4 "
						  "found.");

		ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
		memcpy(fs->nat_lip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
		*nmode |= 1 << 1;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_IPV6);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_IPV6 "
						  "found.");

		ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
		memcpy(fs->nat_fip, &ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
		*nmode |= 1 << 0;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_IPV6);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_IPV6 "
						  "found.");

		ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
		memcpy(fs->nat_lip, &ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
		*nmode |= 1 << 1;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
		/* Port rewrite applies to both TCP and UDP; accept either. */
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_TCP);
		if (item_index < 0) {
			item_index =
				cxgbe_get_flow_item_index(items,
						RTE_FLOW_ITEM_TYPE_UDP);
			if (item_index < 0)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"No RTE_FLOW_ITEM_TYPE_TCP or "
						"RTE_FLOW_ITEM_TYPE_UDP found");
		}

		tp_port = (const struct rte_flow_action_set_tp *)a->conf;
		fs->nat_fport = be16_to_cpu(tp_port->port);
		*nmode |= 1 << 2;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_TCP);
		if (item_index < 0) {
			item_index =
				cxgbe_get_flow_item_index(items,
						RTE_FLOW_ITEM_TYPE_UDP);
			if (item_index < 0)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"No RTE_FLOW_ITEM_TYPE_TCP or "
						"RTE_FLOW_ITEM_TYPE_UDP found");
		}

		tp_port = (const struct rte_flow_action_set_tp *)a->conf;
		fs->nat_lport = be16_to_cpu(tp_port->port);
		*nmode |= 1 << 3;
		break;
	case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_ETH);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_ETH "
						  "found");
		fs->swapmac = 1;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_ETH);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_ETH "
						  "found");
		mac = (const struct rte_flow_action_set_mac *)a->conf;

		fs->newsmac = 1;
		memcpy(fs->smac, mac->mac_addr, sizeof(fs->smac));
		break;
	case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_ETH);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_ETH found");
		mac = (const struct rte_flow_action_set_mac *)a->conf;

		fs->newdmac = 1;
		memcpy(fs->dmac, mac->mac_addr, sizeof(fs->dmac));
		break;
	default:
		/* We are not supposed to come here */
		return rte_flow_error_set(e, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, a,
					  "Action not supported");
	}

	return 0;
}
779281c6e7bSShagun Agrawal 
780281c6e7bSShagun Agrawal static int
781ee61f511SShagun Agrawal cxgbe_rtef_parse_actions(struct rte_flow *flow,
78248f523f6SRahul Lakkireddy 			 const struct rte_flow_item items[],
783ee61f511SShagun Agrawal 			 const struct rte_flow_action action[],
784ee61f511SShagun Agrawal 			 struct rte_flow_error *e)
785ee61f511SShagun Agrawal {
786ee61f511SShagun Agrawal 	struct ch_filter_specification *fs = &flow->fs;
78748f523f6SRahul Lakkireddy 	uint8_t nmode = 0, nat_ipv4 = 0, nat_ipv6 = 0;
788ea949955SRahul Lakkireddy 	uint8_t vlan_set_vid = 0, vlan_set_pcp = 0;
789ee61f511SShagun Agrawal 	const struct rte_flow_action_queue *q;
790ee61f511SShagun Agrawal 	const struct rte_flow_action *a;
791ee61f511SShagun Agrawal 	char abit = 0;
792281c6e7bSShagun Agrawal 	int ret;
793ee61f511SShagun Agrawal 
794ee61f511SShagun Agrawal 	for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
795ee61f511SShagun Agrawal 		switch (a->type) {
796ee61f511SShagun Agrawal 		case RTE_FLOW_ACTION_TYPE_VOID:
797ee61f511SShagun Agrawal 			continue;
798ee61f511SShagun Agrawal 		case RTE_FLOW_ACTION_TYPE_DROP:
799ee61f511SShagun Agrawal 			if (abit++)
800ee61f511SShagun Agrawal 				return rte_flow_error_set(e, EINVAL,
801ee61f511SShagun Agrawal 						RTE_FLOW_ERROR_TYPE_ACTION, a,
802ee61f511SShagun Agrawal 						"specify only 1 pass/drop");
803ee61f511SShagun Agrawal 			fs->action = FILTER_DROP;
804ee61f511SShagun Agrawal 			break;
805ee61f511SShagun Agrawal 		case RTE_FLOW_ACTION_TYPE_QUEUE:
806ee61f511SShagun Agrawal 			q = (const struct rte_flow_action_queue *)a->conf;
807ee61f511SShagun Agrawal 			if (!q)
808ee61f511SShagun Agrawal 				return rte_flow_error_set(e, EINVAL,
809ee61f511SShagun Agrawal 						RTE_FLOW_ERROR_TYPE_ACTION, q,
810ee61f511SShagun Agrawal 						"specify rx queue index");
811ee61f511SShagun Agrawal 			if (check_rxq(flow->dev, q->index))
812ee61f511SShagun Agrawal 				return rte_flow_error_set(e, EINVAL,
813ee61f511SShagun Agrawal 						RTE_FLOW_ERROR_TYPE_ACTION, q,
814ee61f511SShagun Agrawal 						"Invalid rx queue");
815ee61f511SShagun Agrawal 			if (abit++)
816ee61f511SShagun Agrawal 				return rte_flow_error_set(e, EINVAL,
817ee61f511SShagun Agrawal 						RTE_FLOW_ERROR_TYPE_ACTION, a,
818ee61f511SShagun Agrawal 						"specify only 1 pass/drop");
819ee61f511SShagun Agrawal 			fs->action = FILTER_PASS;
820ee61f511SShagun Agrawal 			fs->dirsteer = 1;
821ee61f511SShagun Agrawal 			fs->iq = q->index;
822ee61f511SShagun Agrawal 			break;
823ee61f511SShagun Agrawal 		case RTE_FLOW_ACTION_TYPE_COUNT:
824ee61f511SShagun Agrawal 			fs->hitcnts = 1;
825ee61f511SShagun Agrawal 			break;
8261decc62bSShagun Agrawal 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
827ea949955SRahul Lakkireddy 			vlan_set_vid++;
828ea949955SRahul Lakkireddy 			goto action_switch;
829ea949955SRahul Lakkireddy 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
830ea949955SRahul Lakkireddy 			vlan_set_pcp++;
831ea949955SRahul Lakkireddy 			goto action_switch;
8321decc62bSShagun Agrawal 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
8331decc62bSShagun Agrawal 		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
834f683a520SRahul Lakkireddy 		case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
83548f523f6SRahul Lakkireddy 		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
83648f523f6SRahul Lakkireddy 		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
83748f523f6SRahul Lakkireddy 			nat_ipv4++;
83848f523f6SRahul Lakkireddy 			goto action_switch;
83948f523f6SRahul Lakkireddy 		case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
84048f523f6SRahul Lakkireddy 		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
84148f523f6SRahul Lakkireddy 			nat_ipv6++;
84248f523f6SRahul Lakkireddy 			goto action_switch;
84348f523f6SRahul Lakkireddy 		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
84448f523f6SRahul Lakkireddy 		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
845993541b2SKarra Satwik 		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
8467d71ba5dSKarra Satwik 		case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
84748f523f6SRahul Lakkireddy action_switch:
848281c6e7bSShagun Agrawal 			/* We allow multiple switch actions, but switch is
849281c6e7bSShagun Agrawal 			 * not compatible with either queue or drop
850281c6e7bSShagun Agrawal 			 */
851281c6e7bSShagun Agrawal 			if (abit++ && fs->action != FILTER_SWITCH)
852281c6e7bSShagun Agrawal 				return rte_flow_error_set(e, EINVAL,
853281c6e7bSShagun Agrawal 						RTE_FLOW_ERROR_TYPE_ACTION, a,
854281c6e7bSShagun Agrawal 						"overlapping action specified");
85548f523f6SRahul Lakkireddy 			if (nat_ipv4 && nat_ipv6)
85648f523f6SRahul Lakkireddy 				return rte_flow_error_set(e, EINVAL,
85748f523f6SRahul Lakkireddy 					RTE_FLOW_ERROR_TYPE_ACTION, a,
85848f523f6SRahul Lakkireddy 					"Can't have one address ipv4 and the"
85948f523f6SRahul Lakkireddy 					" other ipv6");
86048f523f6SRahul Lakkireddy 
86148f523f6SRahul Lakkireddy 			ret = ch_rte_parse_atype_switch(a, items, &nmode, fs,
86248f523f6SRahul Lakkireddy 							e);
863281c6e7bSShagun Agrawal 			if (ret)
864281c6e7bSShagun Agrawal 				return ret;
865281c6e7bSShagun Agrawal 			fs->action = FILTER_SWITCH;
866281c6e7bSShagun Agrawal 			break;
867ee61f511SShagun Agrawal 		default:
868ee61f511SShagun Agrawal 			/* Not supported action : return error */
869ee61f511SShagun Agrawal 			return rte_flow_error_set(e, ENOTSUP,
870ee61f511SShagun Agrawal 						  RTE_FLOW_ERROR_TYPE_ACTION,
871ee61f511SShagun Agrawal 						  a, "Action not supported");
872ee61f511SShagun Agrawal 		}
873ee61f511SShagun Agrawal 	}
874ee61f511SShagun Agrawal 
875ea949955SRahul Lakkireddy 	if (fs->newvlan == VLAN_REWRITE && (!vlan_set_vid || !vlan_set_pcp))
876ea949955SRahul Lakkireddy 		return rte_flow_error_set(e, EINVAL,
877ea949955SRahul Lakkireddy 					  RTE_FLOW_ERROR_TYPE_ACTION, a,
878ea949955SRahul Lakkireddy 					  "Both OF_SET_VLAN_VID and "
879ea949955SRahul Lakkireddy 					  "OF_SET_VLAN_PCP must be specified");
880ea949955SRahul Lakkireddy 
88148f523f6SRahul Lakkireddy 	if (ch_rte_parse_nat(nmode, fs))
88248f523f6SRahul Lakkireddy 		return rte_flow_error_set(e, EINVAL,
88348f523f6SRahul Lakkireddy 					  RTE_FLOW_ERROR_TYPE_ACTION, a,
88448f523f6SRahul Lakkireddy 					  "invalid settings for swich action");
885ee61f511SShagun Agrawal 	return 0;
886ee61f511SShagun Agrawal }
887ee61f511SShagun Agrawal 
/* Per-item parser dispatch table, indexed by RTE_FLOW_ITEM_TYPE_*.
 * Each entry pairs the parse callback (.fptr) with the default match
 * mask (.dmask) applied when the rule supplies no mask of its own.
 * Entries not listed here are zero-initialized and rejected by
 * cxgbe_rtef_parse_items() as unsupported.
 */
static struct chrte_fparse parseitem[] = {
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.fptr  = ch_rte_parsetype_eth,
		/* Default: exact-match dst MAC and ethertype; ignore src MAC. */
		.dmask = &(const struct rte_flow_item_eth){
			.hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
			.hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
			.hdr.ether_type = 0xffff,
		}
	},

	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.fptr = ch_rte_parsetype_vlan,
		.dmask = &(const struct rte_flow_item_vlan){
			.hdr.vlan_tci = 0xffff,
			.hdr.eth_proto = 0xffff,
		}
	},

	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.fptr  = ch_rte_parsetype_ipv4,
		.dmask = &(const struct rte_flow_item_ipv4) {
			.hdr = {
				.src_addr = RTE_BE32(0xffffffff),
				.dst_addr = RTE_BE32(0xffffffff),
				.type_of_service = 0xff,
			},
		},
	},

	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.fptr  = ch_rte_parsetype_ipv6,
		.dmask = &(const struct rte_flow_item_ipv6) {
			.hdr = {
				.src_addr = RTE_IPV6_MASK_FULL,
				.dst_addr = RTE_IPV6_MASK_FULL,
				/* Match only the traffic class bits of vtc_flow. */
				.vtc_flow = RTE_BE32(0xff000000),
			},
		},
	},

	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.fptr  = ch_rte_parsetype_udp,
		.dmask = &rte_flow_item_udp_mask,
	},

	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.fptr  = ch_rte_parsetype_tcp,
		.dmask = &rte_flow_item_tcp_mask,
	},
};
938ee61f511SShagun Agrawal 
939ee61f511SShagun Agrawal static int
940ee61f511SShagun Agrawal cxgbe_rtef_parse_items(struct rte_flow *flow,
941ee61f511SShagun Agrawal 		       const struct rte_flow_item items[],
942ee61f511SShagun Agrawal 		       struct rte_flow_error *e)
943ee61f511SShagun Agrawal {
94478192b38SShagun Agrawal 	struct adapter *adap = ethdev2adap(flow->dev);
945ee61f511SShagun Agrawal 	const struct rte_flow_item *i;
946ee61f511SShagun Agrawal 	char repeat[ARRAY_SIZE(parseitem)] = {0};
947ee61f511SShagun Agrawal 
948ee61f511SShagun Agrawal 	for (i = items; i->type != RTE_FLOW_ITEM_TYPE_END; i++) {
949450f99ceSShagun Agrawal 		struct chrte_fparse *idx;
950ee61f511SShagun Agrawal 		int ret;
951ee61f511SShagun Agrawal 
952450f99ceSShagun Agrawal 		if (i->type >= ARRAY_SIZE(parseitem))
953ee61f511SShagun Agrawal 			return rte_flow_error_set(e, ENOTSUP,
954ee61f511SShagun Agrawal 						  RTE_FLOW_ERROR_TYPE_ITEM,
955ee61f511SShagun Agrawal 						  i, "Item not supported");
956ee61f511SShagun Agrawal 
957ee61f511SShagun Agrawal 		switch (i->type) {
958ee61f511SShagun Agrawal 		case RTE_FLOW_ITEM_TYPE_VOID:
959ee61f511SShagun Agrawal 			continue;
960ee61f511SShagun Agrawal 		default:
961ee61f511SShagun Agrawal 			/* check if item is repeated */
96255f003d8SKarra Satwik 			if (repeat[i->type] &&
96355f003d8SKarra Satwik 			    i->type != RTE_FLOW_ITEM_TYPE_VLAN)
964ee61f511SShagun Agrawal 				return rte_flow_error_set(e, ENOTSUP,
965ee61f511SShagun Agrawal 						RTE_FLOW_ERROR_TYPE_ITEM, i,
96655f003d8SKarra Satwik 						"parse items cannot be repeated(except void/vlan)");
96755f003d8SKarra Satwik 
968ee61f511SShagun Agrawal 			repeat[i->type] = 1;
969ee61f511SShagun Agrawal 
970ee61f511SShagun Agrawal 			/* validate the item */
971ee61f511SShagun Agrawal 			ret = cxgbe_validate_item(i, e);
972ee61f511SShagun Agrawal 			if (ret)
973ee61f511SShagun Agrawal 				return ret;
974ee61f511SShagun Agrawal 
975450f99ceSShagun Agrawal 			idx = &flow->item_parser[i->type];
976ee61f511SShagun Agrawal 			if (!idx || !idx->fptr) {
977ee61f511SShagun Agrawal 				return rte_flow_error_set(e, ENOTSUP,
978ee61f511SShagun Agrawal 						RTE_FLOW_ERROR_TYPE_ITEM, i,
979ee61f511SShagun Agrawal 						"Item not supported");
980ee61f511SShagun Agrawal 			} else {
981ee61f511SShagun Agrawal 				ret = idx->fptr(idx->dmask, i, &flow->fs, e);
982ee61f511SShagun Agrawal 				if (ret)
983ee61f511SShagun Agrawal 					return ret;
984ee61f511SShagun Agrawal 			}
985ee61f511SShagun Agrawal 		}
986ee61f511SShagun Agrawal 	}
987ee61f511SShagun Agrawal 
988ed709206SRahul Lakkireddy 	cxgbe_tweak_filter_spec(adap, &flow->fs);
989ff7079a3SRahul Lakkireddy 	cxgbe_fill_filter_region(adap, &flow->fs);
99078192b38SShagun Agrawal 
991ee61f511SShagun Agrawal 	return 0;
992ee61f511SShagun Agrawal }
993ee61f511SShagun Agrawal 
994ee61f511SShagun Agrawal static int
995ee61f511SShagun Agrawal cxgbe_flow_parse(struct rte_flow *flow,
996ee61f511SShagun Agrawal 		 const struct rte_flow_attr *attr,
997ee61f511SShagun Agrawal 		 const struct rte_flow_item item[],
998ee61f511SShagun Agrawal 		 const struct rte_flow_action action[],
999ee61f511SShagun Agrawal 		 struct rte_flow_error *e)
1000ee61f511SShagun Agrawal {
1001ee61f511SShagun Agrawal 	int ret;
1002ee61f511SShagun Agrawal 	/* parse user request into ch_filter_specification */
1003ee61f511SShagun Agrawal 	ret = cxgbe_rtef_parse_attr(flow, attr, e);
1004ee61f511SShagun Agrawal 	if (ret)
1005ee61f511SShagun Agrawal 		return ret;
1006ee61f511SShagun Agrawal 	ret = cxgbe_rtef_parse_items(flow, item, e);
1007ee61f511SShagun Agrawal 	if (ret)
1008ee61f511SShagun Agrawal 		return ret;
100948f523f6SRahul Lakkireddy 	return cxgbe_rtef_parse_actions(flow, item, action, e);
1010ee61f511SShagun Agrawal }
1011ee61f511SShagun Agrawal 
/* Program the parsed filter spec in @flow into the hardware.
 *
 * Picks a free filter index, issues the set-filter request to firmware,
 * then polls the firmware event queue for the completion and checks
 * the hardware result. On success records the filter index and the
 * filter_entry pointer in @flow so it can later be destroyed/queried.
 *
 * Caller must hold adap->flow_lock (taken by cxgbe_flow_create()).
 *
 * Returns 0 on success, -ENOMEM if no filter index is available, -1 on
 * index verification failure, or a firmware/hardware error code.
 */
static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct ch_filter_specification *fs = &flow->fs;
	struct adapter *adap = ethdev2adap(dev);
	struct tid_info *t = &adap->tids;
	struct filter_ctx ctx;
	unsigned int fidx;
	int err;

	if (cxgbe_get_fidx(flow, &fidx))
		return -ENOMEM;
	if (cxgbe_verify_fidx(flow, fidx, 0))
		return -1;

	t4_init_completion(&ctx.completion);
	/* go create the filter */
	err = cxgbe_set_filter(dev, fidx, fs, &ctx);
	if (err) {
		dev_err(adap, "Error %d while creating filter.\n", err);
		return err;
	}

	/* Poll the FW for reply */
	err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
					CXGBE_FLOW_POLL_MS,
					CXGBE_FLOW_POLL_CNT,
					&ctx.completion);
	if (err) {
		dev_err(adap, "Filter set operation timed out (%d)\n", err);
		return err;
	}
	/* FW replied; ctx.result carries the hardware status. */
	if (ctx.result) {
		dev_err(adap, "Hardware error %d while creating the filter.\n",
			ctx.result);
		return ctx.result;
	}

	if (fs->cap) { /* to destroy the filter */
		/* fs->cap set: the filter id is the firmware-assigned tid
		 * and the entry is found via the tid table.
		 */
		flow->fidx = ctx.tid;
		flow->f = lookup_tid(t, ctx.tid);
	} else {
		/* Otherwise the chosen index addresses the ftid table. */
		flow->fidx = fidx;
		flow->f = &adap->tids.ftid_tab[fidx];
	}

	return 0;
}
10599eb2c9a4SShagun Agrawal 
/* rte_flow_ops .create handler: allocate a flow, parse the request
 * into a filter spec, and program it into hardware under flow_lock.
 *
 * On any failure the flow object is freed and NULL is returned with
 * @e filled in; on success the returned flow owns its hardware filter
 * until cxgbe_flow_destroy() is called.
 */
static struct rte_flow *
cxgbe_flow_create(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item item[],
		  const struct rte_flow_action action[],
		  struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(dev);
	struct rte_flow *flow;
	int ret;

	flow = t4_os_alloc(sizeof(struct rte_flow));
	if (!flow) {
		rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Unable to allocate memory for"
				   " filter_entry");
		return NULL;
	}

	/* Hook up the item dispatch table and back-references used by
	 * the parsers.
	 */
	flow->item_parser = parseitem;
	flow->dev = dev;
	flow->fs.private = (void *)flow;

	if (cxgbe_flow_parse(flow, attr, item, action, e)) {
		/* Parse stage already set @e; just release the flow. */
		t4_os_free(flow);
		return NULL;
	}

	t4_os_lock(&adap->flow_lock);
	/* go, interact with cxgbe_filter */
	ret = __cxgbe_flow_create(dev, flow);
	t4_os_unlock(&adap->flow_lock);
	if (ret) {
		rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Unable to create flow rule");
		t4_os_free(flow);
		return NULL;
	}

	flow->f->private = flow; /* Will be used during flush */

	return flow;
}
11039eb2c9a4SShagun Agrawal 
/* Remove @flow's filter from the hardware.
 *
 * Verifies the recorded filter index, issues the delete-filter request
 * to firmware, then polls the firmware event queue for the completion
 * and checks the hardware result.
 *
 * Caller must hold adap->flow_lock (taken by cxgbe_flow_destroy()).
 *
 * Returns 0 on success, -1 on index verification failure, or a
 * firmware/hardware error code.
 */
static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct adapter *adap = ethdev2adap(dev);
	struct filter_entry *f = flow->f;
	struct ch_filter_specification *fs;
	struct filter_ctx ctx;
	int err;

	fs = &f->fs;
	if (cxgbe_verify_fidx(flow, flow->fidx, 1))
		return -1;

	t4_init_completion(&ctx.completion);
	err = cxgbe_del_filter(dev, flow->fidx, fs, &ctx);
	if (err) {
		dev_err(adap, "Error %d while deleting filter.\n", err);
		return err;
	}

	/* Poll the FW for reply */
	err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
					CXGBE_FLOW_POLL_MS,
					CXGBE_FLOW_POLL_CNT,
					&ctx.completion);
	if (err) {
		dev_err(adap, "Filter delete operation timed out (%d)\n", err);
		return err;
	}
	/* FW replied; ctx.result carries the hardware status. */
	if (ctx.result) {
		dev_err(adap, "Hardware error %d while deleting the filter.\n",
			ctx.result);
		return ctx.result;
	}

	return 0;
}
1140da23bc9dSShagun Agrawal 
1141da23bc9dSShagun Agrawal static int
1142da23bc9dSShagun Agrawal cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1143da23bc9dSShagun Agrawal 		   struct rte_flow_error *e)
1144da23bc9dSShagun Agrawal {
114597e02581SRahul Lakkireddy 	struct adapter *adap = ethdev2adap(dev);
1146da23bc9dSShagun Agrawal 	int ret;
1147da23bc9dSShagun Agrawal 
114897e02581SRahul Lakkireddy 	t4_os_lock(&adap->flow_lock);
1149da23bc9dSShagun Agrawal 	ret = __cxgbe_flow_destroy(dev, flow);
115097e02581SRahul Lakkireddy 	t4_os_unlock(&adap->flow_lock);
1151da23bc9dSShagun Agrawal 	if (ret)
1152da23bc9dSShagun Agrawal 		return rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
1153da23bc9dSShagun Agrawal 					  flow, "error destroying filter.");
1154da23bc9dSShagun Agrawal 	t4_os_free(flow);
1155da23bc9dSShagun Agrawal 	return 0;
1156da23bc9dSShagun Agrawal }
1157da23bc9dSShagun Agrawal 
11588d3c12e1SShagun Agrawal static int __cxgbe_flow_query(struct rte_flow *flow, u64 *count,
11598d3c12e1SShagun Agrawal 			      u64 *byte_count)
11608d3c12e1SShagun Agrawal {
11618d3c12e1SShagun Agrawal 	struct adapter *adap = ethdev2adap(flow->dev);
1162ceaea6d8SShagun Agrawal 	struct ch_filter_specification fs = flow->f->fs;
11638d3c12e1SShagun Agrawal 	unsigned int fidx = flow->fidx;
11648d3c12e1SShagun Agrawal 	int ret = 0;
11658d3c12e1SShagun Agrawal 
1166ceaea6d8SShagun Agrawal 	ret = cxgbe_get_filter_count(adap, fidx, count, fs.cap, 0);
11678d3c12e1SShagun Agrawal 	if (ret)
11688d3c12e1SShagun Agrawal 		return ret;
1169ceaea6d8SShagun Agrawal 	return cxgbe_get_filter_count(adap, fidx, byte_count, fs.cap, 1);
11708d3c12e1SShagun Agrawal }
11718d3c12e1SShagun Agrawal 
11728d3c12e1SShagun Agrawal static int
11738d3c12e1SShagun Agrawal cxgbe_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
11748d3c12e1SShagun Agrawal 		 const struct rte_flow_action *action, void *data,
11758d3c12e1SShagun Agrawal 		 struct rte_flow_error *e)
11768d3c12e1SShagun Agrawal {
117720c014b1SRahul Lakkireddy 	struct adapter *adap = ethdev2adap(flow->dev);
11788d3c12e1SShagun Agrawal 	struct ch_filter_specification fs;
11798d3c12e1SShagun Agrawal 	struct rte_flow_query_count *c;
11808d3c12e1SShagun Agrawal 	struct filter_entry *f;
11818d3c12e1SShagun Agrawal 	int ret;
11828d3c12e1SShagun Agrawal 
11838d3c12e1SShagun Agrawal 	RTE_SET_USED(dev);
11848d3c12e1SShagun Agrawal 
11858d3c12e1SShagun Agrawal 	f = flow->f;
11868d3c12e1SShagun Agrawal 	fs = f->fs;
11878d3c12e1SShagun Agrawal 
11888d3c12e1SShagun Agrawal 	if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
11898d3c12e1SShagun Agrawal 		return rte_flow_error_set(e, ENOTSUP,
11908d3c12e1SShagun Agrawal 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11918d3c12e1SShagun Agrawal 					  "only count supported for query");
11928d3c12e1SShagun Agrawal 
11938d3c12e1SShagun Agrawal 	/*
11948d3c12e1SShagun Agrawal 	 * This is a valid operation, Since we are allowed to do chelsio
11958d3c12e1SShagun Agrawal 	 * specific operations in rte side of our code but not vise-versa
11968d3c12e1SShagun Agrawal 	 *
11978d3c12e1SShagun Agrawal 	 * So, fs can be queried/modified here BUT rte_flow_query_count
11988d3c12e1SShagun Agrawal 	 * cannot be worked on by the lower layer since we want to maintain
11998d3c12e1SShagun Agrawal 	 * it as rte_flow agnostic.
12008d3c12e1SShagun Agrawal 	 */
12018d3c12e1SShagun Agrawal 	if (!fs.hitcnts)
12028d3c12e1SShagun Agrawal 		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
12038d3c12e1SShagun Agrawal 					  &fs, "filter hit counters were not"
12048d3c12e1SShagun Agrawal 					  " enabled during filter creation");
12058d3c12e1SShagun Agrawal 
12068d3c12e1SShagun Agrawal 	c = (struct rte_flow_query_count *)data;
120797e02581SRahul Lakkireddy 
120897e02581SRahul Lakkireddy 	t4_os_lock(&adap->flow_lock);
12098d3c12e1SShagun Agrawal 	ret = __cxgbe_flow_query(flow, &c->hits, &c->bytes);
121097e02581SRahul Lakkireddy 	if (ret) {
121197e02581SRahul Lakkireddy 		rte_flow_error_set(e, -ret, RTE_FLOW_ERROR_TYPE_ACTION,
121297e02581SRahul Lakkireddy 				   f, "cxgbe pmd failed to perform query");
121397e02581SRahul Lakkireddy 		goto out;
121497e02581SRahul Lakkireddy 	}
12158d3c12e1SShagun Agrawal 
12168d3c12e1SShagun Agrawal 	/* Query was successful */
12178d3c12e1SShagun Agrawal 	c->bytes_set = 1;
12188d3c12e1SShagun Agrawal 	c->hits_set = 1;
121920c014b1SRahul Lakkireddy 	if (c->reset)
122020c014b1SRahul Lakkireddy 		cxgbe_clear_filter_count(adap, flow->fidx, f->fs.cap, true);
12218d3c12e1SShagun Agrawal 
122297e02581SRahul Lakkireddy out:
122397e02581SRahul Lakkireddy 	t4_os_unlock(&adap->flow_lock);
122497e02581SRahul Lakkireddy 	return ret;
12258d3c12e1SShagun Agrawal }
12268d3c12e1SShagun Agrawal 
1227ee61f511SShagun Agrawal static int
1228ee61f511SShagun Agrawal cxgbe_flow_validate(struct rte_eth_dev *dev,
1229ee61f511SShagun Agrawal 		    const struct rte_flow_attr *attr,
1230ee61f511SShagun Agrawal 		    const struct rte_flow_item item[],
1231ee61f511SShagun Agrawal 		    const struct rte_flow_action action[],
1232ee61f511SShagun Agrawal 		    struct rte_flow_error *e)
1233ee61f511SShagun Agrawal {
1234ee61f511SShagun Agrawal 	struct adapter *adap = ethdev2adap(dev);
1235ee61f511SShagun Agrawal 	struct rte_flow *flow;
1236ee61f511SShagun Agrawal 	unsigned int fidx;
123797e02581SRahul Lakkireddy 	int ret = 0;
1238ee61f511SShagun Agrawal 
1239ee61f511SShagun Agrawal 	flow = t4_os_alloc(sizeof(struct rte_flow));
1240ee61f511SShagun Agrawal 	if (!flow)
1241ee61f511SShagun Agrawal 		return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1242ee61f511SShagun Agrawal 				NULL,
1243ee61f511SShagun Agrawal 				"Unable to allocate memory for filter_entry");
1244ee61f511SShagun Agrawal 
1245ee61f511SShagun Agrawal 	flow->item_parser = parseitem;
1246ee61f511SShagun Agrawal 	flow->dev = dev;
1247ff7079a3SRahul Lakkireddy 	flow->fs.private = (void *)flow;
1248ee61f511SShagun Agrawal 
1249ee61f511SShagun Agrawal 	ret = cxgbe_flow_parse(flow, attr, item, action, e);
1250ee61f511SShagun Agrawal 	if (ret) {
1251ee61f511SShagun Agrawal 		t4_os_free(flow);
1252ee61f511SShagun Agrawal 		return ret;
1253ee61f511SShagun Agrawal 	}
1254ee61f511SShagun Agrawal 
125571e9b334SRahul Lakkireddy 	if (cxgbe_validate_filter(adap, &flow->fs)) {
1256ee61f511SShagun Agrawal 		t4_os_free(flow);
1257ee61f511SShagun Agrawal 		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
1258ee61f511SShagun Agrawal 				NULL,
1259ee61f511SShagun Agrawal 				"validation failed. Check f/w config file.");
1260ee61f511SShagun Agrawal 	}
1261ee61f511SShagun Agrawal 
126297e02581SRahul Lakkireddy 	t4_os_lock(&adap->flow_lock);
1263ee61f511SShagun Agrawal 	if (cxgbe_get_fidx(flow, &fidx)) {
126497e02581SRahul Lakkireddy 		ret = rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1265ee61f511SShagun Agrawal 					 NULL, "no memory in tcam.");
126697e02581SRahul Lakkireddy 		goto out;
1267ee61f511SShagun Agrawal 	}
1268ee61f511SShagun Agrawal 
1269ee61f511SShagun Agrawal 	if (cxgbe_verify_fidx(flow, fidx, 0)) {
127097e02581SRahul Lakkireddy 		ret = rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
1271ee61f511SShagun Agrawal 					 NULL, "validation failed");
127297e02581SRahul Lakkireddy 		goto out;
1273ee61f511SShagun Agrawal 	}
1274ee61f511SShagun Agrawal 
127597e02581SRahul Lakkireddy out:
127697e02581SRahul Lakkireddy 	t4_os_unlock(&adap->flow_lock);
1277ee61f511SShagun Agrawal 	t4_os_free(flow);
127897e02581SRahul Lakkireddy 	return ret;
1279ee61f511SShagun Agrawal }
1280ee61f511SShagun Agrawal 
128186910379SShagun Agrawal /*
12827be78d02SJosh Soref  * @ret : > 0 filter destroyed successfully
128386910379SShagun Agrawal  *        < 0 error destroying filter
128486910379SShagun Agrawal  *        == 1 filter not active / not found
128586910379SShagun Agrawal  */
128686910379SShagun Agrawal static int
128797e02581SRahul Lakkireddy cxgbe_check_n_destroy(struct filter_entry *f, struct rte_eth_dev *dev)
128886910379SShagun Agrawal {
128986910379SShagun Agrawal 	if (f && (f->valid || f->pending) &&
129086910379SShagun Agrawal 	    f->dev == dev && /* Only if user has asked for this port */
129186910379SShagun Agrawal 	     f->private) /* We (rte_flow) created this filter */
129297e02581SRahul Lakkireddy 		return __cxgbe_flow_destroy(dev, (struct rte_flow *)f->private);
129386910379SShagun Agrawal 	return 1;
129486910379SShagun Agrawal }
129586910379SShagun Agrawal 
129686910379SShagun Agrawal static int cxgbe_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *e)
129786910379SShagun Agrawal {
129886910379SShagun Agrawal 	struct adapter *adap = ethdev2adap(dev);
129986910379SShagun Agrawal 	unsigned int i;
130086910379SShagun Agrawal 	int ret = 0;
130186910379SShagun Agrawal 
130297e02581SRahul Lakkireddy 	t4_os_lock(&adap->flow_lock);
130386910379SShagun Agrawal 	if (adap->tids.ftid_tab) {
130486910379SShagun Agrawal 		struct filter_entry *f = &adap->tids.ftid_tab[0];
130586910379SShagun Agrawal 
130686910379SShagun Agrawal 		for (i = 0; i < adap->tids.nftids; i++, f++) {
130797e02581SRahul Lakkireddy 			ret = cxgbe_check_n_destroy(f, dev);
130897e02581SRahul Lakkireddy 			if (ret < 0) {
130997e02581SRahul Lakkireddy 				rte_flow_error_set(e, ret,
131097e02581SRahul Lakkireddy 						   RTE_FLOW_ERROR_TYPE_HANDLE,
131197e02581SRahul Lakkireddy 						   f->private,
131297e02581SRahul Lakkireddy 						   "error destroying TCAM "
131397e02581SRahul Lakkireddy 						   "filter.");
131486910379SShagun Agrawal 				goto out;
131586910379SShagun Agrawal 			}
131686910379SShagun Agrawal 		}
131797e02581SRahul Lakkireddy 	}
1318a4279771SShagun Agrawal 
1319a4279771SShagun Agrawal 	if (is_hashfilter(adap) && adap->tids.tid_tab) {
1320a4279771SShagun Agrawal 		struct filter_entry *f;
1321a4279771SShagun Agrawal 
1322a4279771SShagun Agrawal 		for (i = adap->tids.hash_base; i <= adap->tids.ntids; i++) {
1323a4279771SShagun Agrawal 			f = (struct filter_entry *)adap->tids.tid_tab[i];
1324a4279771SShagun Agrawal 
132597e02581SRahul Lakkireddy 			ret = cxgbe_check_n_destroy(f, dev);
132697e02581SRahul Lakkireddy 			if (ret < 0) {
132797e02581SRahul Lakkireddy 				rte_flow_error_set(e, ret,
132897e02581SRahul Lakkireddy 						   RTE_FLOW_ERROR_TYPE_HANDLE,
132997e02581SRahul Lakkireddy 						   f->private,
133097e02581SRahul Lakkireddy 						   "error destroying HASH "
133197e02581SRahul Lakkireddy 						   "filter.");
1332a4279771SShagun Agrawal 				goto out;
1333a4279771SShagun Agrawal 			}
1334a4279771SShagun Agrawal 		}
133597e02581SRahul Lakkireddy 	}
1336a4279771SShagun Agrawal 
133786910379SShagun Agrawal out:
133897e02581SRahul Lakkireddy 	t4_os_unlock(&adap->flow_lock);
133986910379SShagun Agrawal 	return ret >= 0 ? 0 : ret;
134086910379SShagun Agrawal }
134186910379SShagun Agrawal 
/* rte_flow driver operations exposed through cxgbe_dev_flow_ops_get().
 * Flow isolation is not supported by this PMD.
 */
static const struct rte_flow_ops cxgbe_flow_ops = {
	.validate	= cxgbe_flow_validate,
	.create		= cxgbe_flow_create,
	.destroy	= cxgbe_flow_destroy,
	.flush		= cxgbe_flow_flush,
	.query		= cxgbe_flow_query,
	.isolate	= NULL,
};
1350ee61f511SShagun Agrawal 
/*
 * ethdev callback returning the rte_flow operations table for this
 * driver. The same table is shared by all cxgbe ports, so @dev is
 * unused. Always succeeds.
 */
int
cxgbe_dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
		       const struct rte_flow_ops **ops)
{
	*ops = &cxgbe_flow_ops;
	return 0;
}
1358