xref: /dpdk/drivers/net/cxgbe/cxgbe_flow.c (revision 4aa10e5dc1b0fd6cc5b1b18770ac603e2c33a66c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Chelsio Communications.
3  * All rights reserved.
4  */
5 #include "base/common.h"
6 #include "cxgbe_flow.h"
7 
8 #define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \
9 do { \
10 	if ((fs)->mask.elem && ((fs)->val.elem != (__v))) \
11 		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, \
12 					  NULL, "Redefined match item with" \
13 					  " different values found"); \
14 	(fs)->val.elem = (__v); \
15 	(fs)->mask.elem = (__m); \
16 } while (0)
17 
18 #define __CXGBE_FILL_FS_MEMCPY(__v, __m, fs, elem) \
19 do { \
20 	memcpy(&(fs)->val.elem, &(__v), sizeof(__v)); \
21 	memcpy(&(fs)->mask.elem, &(__m), sizeof(__m)); \
22 } while (0)
23 
24 #define CXGBE_FILL_FS(v, m, elem) \
25 	__CXGBE_FILL_FS(v, m, fs, elem, e)
26 
27 #define CXGBE_FILL_FS_MEMCPY(v, m, elem) \
28 	__CXGBE_FILL_FS_MEMCPY(v, m, fs, elem)
29 
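/*
 * Illustrative sketch (not part of the driver): inside a parser that has
 * 'fs' and 'e' in scope, a call such as
 *
 *	CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto);
 *
 * expands to roughly
 *
 *	if (fs->mask.proto && fs->val.proto != IPPROTO_UDP)
 *		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
 *					  NULL, "Redefined match item with"
 *					  " different values found");
 *	fs->val.proto = IPPROTO_UDP;
 *	fs->mask.proto = 0xff;
 *
 * i.e. it records the match value/mask pair and rejects conflicting
 * redefinitions of the same field across pattern items.
 */
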
30 static int
31 cxgbe_validate_item(const struct rte_flow_item *i, struct rte_flow_error *e)
32 {
33 	/* The rte_flow specification does not allow 'mask' or 'last' without 'spec'. */
34 	if (!i->spec && (i->mask ||  i->last))
35 		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
36 				   i, "last or mask given without spec");
37 	/*
38 	 * We don't support 'last'.
39 	 * We could accept 'last' as all zeros or as a copy of 'spec', but
40 	 * that would only add complexity for us without providing the user
41 	 * with any additional functionality.
42 	 */
43 	if (i->last)
44 		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
45 				   i, "last is not supported by chelsio pmd");
46 	return 0;
47 }
48 
49 /**
50  * Apart from the 4-tuple IPv4/IPv6 - TCP/UDP information,
51  * there are only 40 bits available to store match fields.
52  * So, to save space, optimize the filter spec for some common,
53  * well-known fields that the hardware can parse against incoming
54  * packets automatically.
55  */
56 static void
57 cxgbe_tweak_filter_spec(struct adapter *adap,
58 			struct ch_filter_specification *fs)
59 {
60 	/* Save 16-bit ethertype field space, by setting corresponding
61 	 * 1-bit flags in the filter spec for common known ethertypes.
62 	 * When hardware sees these flags, it automatically infers and
63 	 * matches incoming packets against the corresponding ethertype.
64 	 */
65 	if (fs->mask.ethtype == 0xffff) {
66 		switch (fs->val.ethtype) {
67 		case RTE_ETHER_TYPE_IPV4:
68 			if (adap->params.tp.ethertype_shift < 0) {
69 				fs->type = FILTER_TYPE_IPV4;
70 				fs->val.ethtype = 0;
71 				fs->mask.ethtype = 0;
72 			}
73 			break;
74 		case RTE_ETHER_TYPE_IPV6:
75 			if (adap->params.tp.ethertype_shift < 0) {
76 				fs->type = FILTER_TYPE_IPV6;
77 				fs->val.ethtype = 0;
78 				fs->mask.ethtype = 0;
79 			}
80 			break;
81 		case RTE_ETHER_TYPE_VLAN:
82 			if (adap->params.tp.ethertype_shift < 0 &&
83 			    adap->params.tp.vlan_shift >= 0) {
84 				fs->val.ivlan_vld = 1;
85 				fs->mask.ivlan_vld = 1;
86 				fs->val.ethtype = 0;
87 				fs->mask.ethtype = 0;
88 			}
89 			break;
90 		case RTE_ETHER_TYPE_QINQ:
91 			if (adap->params.tp.ethertype_shift < 0 &&
92 			    adap->params.tp.vnic_shift >= 0) {
93 				fs->val.ovlan_vld = 1;
94 				fs->mask.ovlan_vld = 1;
95 				fs->val.ethtype = 0;
96 				fs->mask.ethtype = 0;
97 			}
98 			break;
99 		default:
100 			break;
101 		}
102 	}
103 }
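
/*
 * Worked example (illustrative): with an exact ethertype match on
 * RTE_ETHER_TYPE_IPV4 (0x0800) and no ethertype field in the compressed
 * filter tuple (ethertype_shift < 0), the 16-bit match
 *
 *	fs->val.ethtype = 0x0800; fs->mask.ethtype = 0xffff;
 *
 * is folded into the single bit
 *
 *	fs->type = FILTER_TYPE_IPV4;
 *
 * freeing those 16 bits of the 40-bit match-field budget described above.
 */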
104 
105 static void
106 cxgbe_fill_filter_region(struct adapter *adap,
107 			 struct ch_filter_specification *fs)
108 {
109 	struct tp_params *tp = &adap->params.tp;
110 	u64 hash_filter_mask = tp->hash_filter_mask;
111 	u64 ntuple_mask = 0;
112 
113 	fs->cap = 0;
114 
115 	if (!is_hashfilter(adap))
116 		return;
117 
118 	if (fs->type) {
119 		uint8_t biton[16] = {0xff, 0xff, 0xff, 0xff,
120 				     0xff, 0xff, 0xff, 0xff,
121 				     0xff, 0xff, 0xff, 0xff,
122 				     0xff, 0xff, 0xff, 0xff};
123 		uint8_t bitoff[16] = {0};
124 
125 		if (!memcmp(fs->val.lip, bitoff, sizeof(bitoff)) ||
126 		    !memcmp(fs->val.fip, bitoff, sizeof(bitoff)) ||
127 		    memcmp(fs->mask.lip, biton, sizeof(biton)) ||
128 		    memcmp(fs->mask.fip, biton, sizeof(biton)))
129 			return;
130 	} else {
131 		uint32_t biton  = 0xffffffff;
132 		uint32_t bitoff = 0x0U;
133 
134 		if (!memcmp(fs->val.lip, &bitoff, sizeof(bitoff)) ||
135 		    !memcmp(fs->val.fip, &bitoff, sizeof(bitoff)) ||
136 		    memcmp(fs->mask.lip, &biton, sizeof(biton)) ||
137 		    memcmp(fs->mask.fip, &biton, sizeof(biton)))
138 			return;
139 	}
140 
141 	if (!fs->val.lport || fs->mask.lport != 0xffff)
142 		return;
143 	if (!fs->val.fport || fs->mask.fport != 0xffff)
144 		return;
145 
146 	if (tp->protocol_shift >= 0)
147 		ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
148 	if (tp->ethertype_shift >= 0)
149 		ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
150 	if (tp->port_shift >= 0)
151 		ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
152 	if (tp->macmatch_shift >= 0)
153 		ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;
154 	if (tp->vlan_shift >= 0 && fs->mask.ivlan_vld)
155 		ntuple_mask |= (u64)(F_FT_VLAN_VLD | fs->mask.ivlan) <<
156 			       tp->vlan_shift;
157 	if (tp->vnic_shift >= 0) {
158 		if (fs->mask.ovlan_vld)
159 			ntuple_mask |= (u64)(fs->val.ovlan_vld << 16 |
160 					     fs->mask.ovlan) << tp->vnic_shift;
161 		else if (fs->mask.pfvf_vld)
162 			ntuple_mask |= (u64)(fs->mask.pfvf_vld << 16 |
163 					     fs->mask.pf << 13 |
164 					     fs->mask.vf) << tp->vnic_shift;
165 	}
166 	if (tp->tos_shift >= 0)
167 		ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift;
168 
169 	if (ntuple_mask != hash_filter_mask)
170 		return;
171 
172 	fs->cap = 1;	/* use hash region */
173 }
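
/*
 * Illustrative example (the shifts are firmware-configuration dependent):
 * if the compressed filter tuple carries only protocol and ethertype,
 * say protocol_shift = 0 and ethertype_shift = 8, a flow matching both
 * fields exactly builds
 *
 *	ntuple_mask = ((u64)0xff << 0) | ((u64)0xffff << 8);
 *
 * The flow is placed in the hash (exact-match) region only when this
 * equals tp->hash_filter_mask and the 4-tuple is fully specified;
 * otherwise it stays in the maskful LE-TCAM region (fs->cap == 0).
 */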
174 
175 static int
176 ch_rte_parsetype_eth(const void *dmask, const struct rte_flow_item *item,
177 		     struct ch_filter_specification *fs,
178 		     struct rte_flow_error *e)
179 {
180 	const struct rte_flow_item_eth *spec = item->spec;
181 	const struct rte_flow_item_eth *umask = item->mask;
182 	const struct rte_flow_item_eth *mask;
183 
184 	/* If the user has not given any mask, then use the Chelsio-supported mask. */
185 	mask = umask ? umask : (const struct rte_flow_item_eth *)dmask;
186 
187 	if (!spec)
188 		return 0;
189 
190 	/* We don't support SRC_MAC filtering. */
191 	if (!rte_is_zero_ether_addr(&spec->hdr.src_addr) ||
192 	    (umask && !rte_is_zero_ether_addr(&umask->hdr.src_addr)))
193 		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
194 					  item,
195 					  "src mac filtering not supported");
196 
197 	if (!rte_is_zero_ether_addr(&spec->hdr.dst_addr) ||
198 	    (umask && !rte_is_zero_ether_addr(&umask->hdr.dst_addr))) {
199 		CXGBE_FILL_FS(0, 0x1ff, macidx);
200 		CXGBE_FILL_FS_MEMCPY(spec->hdr.dst_addr.addr_bytes, mask->hdr.dst_addr.addr_bytes,
201 				     dmac);
202 	}
203 
204 	if (spec->hdr.ether_type || (umask && umask->hdr.ether_type))
205 		CXGBE_FILL_FS(be16_to_cpu(spec->hdr.ether_type),
206 			      be16_to_cpu(mask->hdr.ether_type), ethtype);
207 
208 	return 0;
209 }
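
/*
 * Illustrative example: an ETH item carrying only a spec (mask == NULL),
 * e.g. spec->hdr.ether_type = RTE_BE16(0x0800), falls back to the default
 * mask from parseitem[RTE_FLOW_ITEM_TYPE_ETH].dmask below, so the parser
 * effectively performs
 *
 *	CXGBE_FILL_FS(0x0800, 0xffff, ethtype);
 *
 * while the all-zero dst_addr in the spec leaves macidx/dmac untouched.
 */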
210 
211 static int
212 ch_rte_parsetype_vlan(const void *dmask, const struct rte_flow_item *item,
213 		      struct ch_filter_specification *fs,
214 		      struct rte_flow_error *e)
215 {
216 	const struct rte_flow_item_vlan *spec = item->spec;
217 	const struct rte_flow_item_vlan *umask = item->mask;
218 	const struct rte_flow_item_vlan *mask;
219 
220 	/* If the user has not given any mask, then use the Chelsio-supported mask. */
221 	mask = umask ? umask : (const struct rte_flow_item_vlan *)dmask;
222 
223 	/* If ethertype is already set and is not VLAN (0x8100) or
224 	 * QINQ (0x88A8), then don't proceed further. Otherwise,
225 	 * reset the outer ethertype, so that it can be replaced by
226 	 * innermost ethertype. Note that hardware will automatically
227 	 * match against VLAN or QINQ packets, based on 'ivlan_vld' or
228 	 * 'ovlan_vld' bit set in Chelsio filter spec, respectively.
229 	 */
230 	if (fs->mask.ethtype) {
231 		if (fs->val.ethtype != RTE_ETHER_TYPE_VLAN &&
232 		    fs->val.ethtype != RTE_ETHER_TYPE_QINQ)
233 			return rte_flow_error_set(e, EINVAL,
234 						  RTE_FLOW_ERROR_TYPE_ITEM,
235 						  item,
236 						  "Ethertype must be 0x8100 or 0x88a8");
237 	}
238 
239 	if (fs->val.ethtype == RTE_ETHER_TYPE_QINQ) {
240 		CXGBE_FILL_FS(1, 1, ovlan_vld);
241 		if (spec) {
242 			if (spec->hdr.vlan_tci || (umask && umask->hdr.vlan_tci))
243 				CXGBE_FILL_FS(be16_to_cpu(spec->hdr.vlan_tci),
244 					      be16_to_cpu(mask->hdr.vlan_tci), ovlan);
245 			fs->mask.ethtype = 0;
246 			fs->val.ethtype = 0;
247 		}
248 	} else {
249 		CXGBE_FILL_FS(1, 1, ivlan_vld);
250 		if (spec) {
251 			if (spec->hdr.vlan_tci || (umask && umask->hdr.vlan_tci))
252 				CXGBE_FILL_FS(be16_to_cpu(spec->hdr.vlan_tci),
253 					      be16_to_cpu(mask->hdr.vlan_tci), ivlan);
254 			fs->mask.ethtype = 0;
255 			fs->val.ethtype = 0;
256 		}
257 	}
258 
259 	if (spec && (spec->hdr.eth_proto || (umask && umask->hdr.eth_proto)))
260 		CXGBE_FILL_FS(be16_to_cpu(spec->hdr.eth_proto),
261 			      be16_to_cpu(mask->hdr.eth_proto), ethtype);
262 
263 	return 0;
264 }
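
/*
 * Illustrative example: a pattern carrying ETH with ether_type 0x88A8
 * followed by a VLAN item is translated into
 *
 *	fs->val.ovlan_vld = 1;	(outer tag present, i.e. QinQ)
 *	fs->val.ovlan     = <outer TCI from the VLAN item>;
 *	fs->val.ethtype   = 0;	(the 0x88A8 ethertype match is dropped)
 *
 * whereas ETH with 0x8100 followed by VLAN lands in ivlan/ivlan_vld
 * instead; hardware then matches tag presence implicitly via the *_vld
 * bits, as described above.
 */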
265 
266 static int
267 ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
268 		     struct ch_filter_specification *fs,
269 		     struct rte_flow_error *e)
270 {
271 	const struct rte_flow_item_udp *val = item->spec;
272 	const struct rte_flow_item_udp *umask = item->mask;
273 	const struct rte_flow_item_udp *mask;
274 
275 	mask = umask ? umask : (const struct rte_flow_item_udp *)dmask;
276 
277 	if (mask->hdr.dgram_len || mask->hdr.dgram_cksum)
278 		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
279 					  item,
280 					  "udp: only src/dst port supported");
281 
282 	CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto);
283 	if (!val)
284 		return 0;
285 
286 	if (val->hdr.src_port || (umask && umask->hdr.src_port))
287 		CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
288 			      be16_to_cpu(mask->hdr.src_port), fport);
289 
290 	if (val->hdr.dst_port || (umask && umask->hdr.dst_port))
291 		CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
292 			      be16_to_cpu(mask->hdr.dst_port), lport);
293 
294 	return 0;
295 }
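
/*
 * Note on naming (illustrative): the filter spec is written from the
 * adapter's point of view, so a packet's source port maps to fport
 * ("foreign") and its destination port to lport ("local"); e.g. a UDP
 * item with dst_port 4789 and the default full mask results in
 *
 *	CXGBE_FILL_FS(4789, 0xffff, lport);
 */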
296 
297 static int
298 ch_rte_parsetype_tcp(const void *dmask, const struct rte_flow_item *item,
299 		     struct ch_filter_specification *fs,
300 		     struct rte_flow_error *e)
301 {
302 	const struct rte_flow_item_tcp *val = item->spec;
303 	const struct rte_flow_item_tcp *umask = item->mask;
304 	const struct rte_flow_item_tcp *mask;
305 
306 	mask = umask ? umask : (const struct rte_flow_item_tcp *)dmask;
307 
308 	if (mask->hdr.sent_seq || mask->hdr.recv_ack || mask->hdr.data_off ||
309 	    mask->hdr.tcp_flags || mask->hdr.rx_win || mask->hdr.cksum ||
310 	    mask->hdr.tcp_urp)
311 		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
312 					  item,
313 					  "tcp: only src/dst port supported");
314 
315 	CXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto);
316 	if (!val)
317 		return 0;
318 
319 	if (val->hdr.src_port || (umask && umask->hdr.src_port))
320 		CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
321 			      be16_to_cpu(mask->hdr.src_port), fport);
322 
323 	if (val->hdr.dst_port || (umask && umask->hdr.dst_port))
324 		CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
325 			      be16_to_cpu(mask->hdr.dst_port), lport);
326 
327 	return 0;
328 }
329 
330 static int
331 ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,
332 		      struct ch_filter_specification *fs,
333 		      struct rte_flow_error *e)
334 {
335 	const struct rte_flow_item_ipv4 *val = item->spec;
336 	const struct rte_flow_item_ipv4 *umask = item->mask;
337 	const struct rte_flow_item_ipv4 *mask;
338 
339 	mask = umask ? umask : (const struct rte_flow_item_ipv4 *)dmask;
340 
341 	if (mask->hdr.time_to_live)
342 		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
343 					  item, "ttl is not supported");
344 
345 	if (fs->mask.ethtype &&
346 	    (fs->val.ethtype != RTE_ETHER_TYPE_IPV4))
347 		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
348 					  item,
349 					  "Couldn't find IPv4 ethertype");
350 	fs->type = FILTER_TYPE_IPV4;
351 	if (!val)
352 		return 0; /* ipv4 wild card */
353 
354 	if (val->hdr.next_proto_id || (umask && umask->hdr.next_proto_id))
355 		CXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id,
356 			      proto);
357 
358 	if (val->hdr.dst_addr || (umask && umask->hdr.dst_addr))
359 		CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr,
360 				     lip);
361 
362 	if (val->hdr.src_addr || (umask && umask->hdr.src_addr))
363 		CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr,
364 				     fip);
365 
366 	if (val->hdr.type_of_service || (umask && umask->hdr.type_of_service))
367 		CXGBE_FILL_FS(val->hdr.type_of_service,
368 			      mask->hdr.type_of_service, tos);
369 
370 	return 0;
371 }
372 
373 static int
374 ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
375 		      struct ch_filter_specification *fs,
376 		      struct rte_flow_error *e)
377 {
378 	const struct rte_flow_item_ipv6 *val = item->spec;
379 	const struct rte_flow_item_ipv6 *umask = item->mask;
380 	const struct rte_flow_item_ipv6 *mask;
381 	u32 vtc_flow, vtc_flow_mask;
382 	u8 z[16] = { 0 };
383 
384 	mask = umask ? umask : (const struct rte_flow_item_ipv6 *)dmask;
385 
386 	vtc_flow_mask = be32_to_cpu(mask->hdr.vtc_flow);
387 
388 	if (vtc_flow_mask & RTE_IPV6_HDR_FL_MASK ||
389 	    mask->hdr.payload_len || mask->hdr.hop_limits)
390 		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
391 					  item,
392 					  "flow/hop are not supported");
393 
394 	if (fs->mask.ethtype &&
395 	    (fs->val.ethtype != RTE_ETHER_TYPE_IPV6))
396 		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
397 					  item,
398 					  "Couldn't find IPv6 ethertype");
399 	fs->type = FILTER_TYPE_IPV6;
400 	if (!val)
401 		return 0; /* ipv6 wild card */
402 
403 	if (val->hdr.proto || (umask && umask->hdr.proto))
404 		CXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto);
405 
406 	vtc_flow = be32_to_cpu(val->hdr.vtc_flow);
407 	if (val->hdr.vtc_flow || (umask && umask->hdr.vtc_flow))
408 		CXGBE_FILL_FS((vtc_flow & RTE_IPV6_HDR_TC_MASK) >>
409 			      RTE_IPV6_HDR_TC_SHIFT,
410 			      (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
411 			      RTE_IPV6_HDR_TC_SHIFT,
412 			      tos);
413 
414 	if (memcmp(val->hdr.dst_addr, z, sizeof(val->hdr.dst_addr)) ||
415 	    (umask &&
416 	     memcmp(umask->hdr.dst_addr, z, sizeof(umask->hdr.dst_addr))))
417 		CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr,
418 				     lip);
419 
420 	if (memcmp(val->hdr.src_addr, z, sizeof(val->hdr.src_addr)) ||
421 	    (umask &&
422 	     memcmp(umask->hdr.src_addr, z, sizeof(umask->hdr.src_addr))))
423 		CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr,
424 				     fip);
425 
426 	return 0;
427 }
428 
429 static int
430 cxgbe_rtef_parse_attr(struct rte_flow *flow, const struct rte_flow_attr *attr,
431 		      struct rte_flow_error *e)
432 {
433 	if (attr->egress)
434 		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
435 					  attr, "attribute:<egress> is"
436 					  " not supported!");
437 	if (attr->group > 0)
438 		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
439 					  attr, "group parameter is"
440 					  " not supported.");
441 
442 	flow->fidx = attr->priority ? attr->priority - 1 : FILTER_ID_MAX;
443 
444 	return 0;
445 }
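
/*
 * Illustrative mapping: attr->priority doubles as an explicit LE-TCAM
 * filter index, e.g.
 *
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 8 };
 *
 * requests fidx 7, while priority 0 leaves fidx at FILTER_ID_MAX so that
 * cxgbe_get_fidx() below allocates the next free slot instead.
 */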
446 
447 static inline int check_rxq(struct rte_eth_dev *dev, uint16_t rxq)
448 {
449 	struct port_info *pi = ethdev2pinfo(dev);
450 
451 	if (rxq >= pi->n_rx_qsets)
452 		return -EINVAL;
453 	return 0;
454 }
455 
456 static int cxgbe_validate_fidxondel(struct filter_entry *f, unsigned int fidx)
457 {
458 	struct adapter *adap = ethdev2adap(f->dev);
459 	struct ch_filter_specification fs = f->fs;
460 	u8 nentries;
461 
462 	if (fidx >= adap->tids.nftids) {
463 		dev_err(adap, "invalid flow index %d.\n", fidx);
464 		return -EINVAL;
465 	}
466 
467 	nentries = cxgbe_filter_slots(adap, fs.type);
468 	if (!cxgbe_is_filter_set(&adap->tids, fidx, nentries)) {
469 		dev_err(adap, "Already free fidx:%d f:%p\n", fidx, f);
470 		return -EINVAL;
471 	}
472 
473 	return 0;
474 }
475 
476 static int
477 cxgbe_validate_fidxonadd(struct ch_filter_specification *fs,
478 			 struct adapter *adap, unsigned int fidx)
479 {
480 	u8 nentries;
481 
482 	nentries = cxgbe_filter_slots(adap, fs->type);
483 	if (cxgbe_is_filter_set(&adap->tids, fidx, nentries)) {
484 		dev_err(adap, "filter index: %d is busy.\n", fidx);
485 		return -EBUSY;
486 	}
487 
488 	if (fidx >= adap->tids.nftids) {
489 		dev_err(adap, "filter index (%u) >= max(%u)\n",
490 			fidx, adap->tids.nftids);
491 		return -ERANGE;
492 	}
493 
494 	return 0;
495 }
496 
497 static int
498 cxgbe_verify_fidx(struct rte_flow *flow, unsigned int fidx, uint8_t del)
499 {
500 	if (flow->fs.cap)
501 		return 0; /* Hash filters */
502 	return del ? cxgbe_validate_fidxondel(flow->f, fidx) :
503 		cxgbe_validate_fidxonadd(&flow->fs,
504 					 ethdev2adap(flow->dev), fidx);
505 }
506 
507 static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx)
508 {
509 	struct ch_filter_specification *fs = &flow->fs;
510 	struct adapter *adap = ethdev2adap(flow->dev);
511 
512 	/* For TCAM, get the next available slot if the default value is specified */
513 	if (flow->fidx == FILTER_ID_MAX) {
514 		u8 nentries;
515 		int idx;
516 
517 		nentries = cxgbe_filter_slots(adap, fs->type);
518 		idx = cxgbe_alloc_ftid(adap, nentries);
519 		if (idx < 0) {
520 			dev_err(adap, "unable to get a filter index in tcam\n");
521 			return -ENOMEM;
522 		}
523 		*fidx = (unsigned int)idx;
524 	} else {
525 		*fidx = flow->fidx;
526 	}
527 
528 	return 0;
529 }
530 
531 static int
532 cxgbe_get_flow_item_index(const struct rte_flow_item items[], u32 type)
533 {
534 	const struct rte_flow_item *i;
535 	int j, index = -ENOENT;
536 
537 	for (i = items, j = 0; i->type != RTE_FLOW_ITEM_TYPE_END; i++, j++) {
538 		if (i->type == type) {
539 			index = j;
540 			break;
541 		}
542 	}
543 
544 	return index;
545 }
546 
547 static int
548 ch_rte_parse_nat(uint8_t nmode, struct ch_filter_specification *fs)
549 {
550 	/* nmode:
551 	 * BIT_0 = [src_ip],   BIT_1 = [dst_ip]
552 	 * BIT_2 = [src_port], BIT_3 = [dst_port]
553 	 *
554 	 * Only the cases below are supported, as per our spec.
555 	 */
556 	switch (nmode) {
557 	case 0:  /* 0000b */
558 		fs->nat_mode = NAT_MODE_NONE;
559 		break;
560 	case 2:  /* 0010b */
561 		fs->nat_mode = NAT_MODE_DIP;
562 		break;
563 	case 5:  /* 0101b */
564 		fs->nat_mode = NAT_MODE_SIP_SP;
565 		break;
566 	case 7:  /* 0111b */
567 		fs->nat_mode = NAT_MODE_DIP_SIP_SP;
568 		break;
569 	case 10: /* 1010b */
570 		fs->nat_mode = NAT_MODE_DIP_DP;
571 		break;
572 	case 11: /* 1011b */
573 		fs->nat_mode = NAT_MODE_DIP_DP_SIP;
574 		break;
575 	case 14: /* 1110b */
576 		fs->nat_mode = NAT_MODE_DIP_DP_SP;
577 		break;
578 	case 15: /* 1111b */
579 		fs->nat_mode = NAT_MODE_ALL;
580 		break;
581 	default:
582 		return -EINVAL;
583 	}
584 
585 	return 0;
586 }
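
/*
 * Worked example (illustrative): rewriting the source IP and source port
 * (SET_IPV4_SRC/SET_IPV6_SRC plus SET_TP_SRC) yields
 *
 *	nmode = (1 << 0) | (1 << 2);	(binary 0101 = 5)
 *
 * which the switch above maps to NAT_MODE_SIP_SP. Combinations not listed,
 * e.g. rewriting only the source port (binary 0100), are rejected with
 * -EINVAL.
 */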
587 
588 static int
589 ch_rte_parse_atype_switch(const struct rte_flow_action *a,
590 			  const struct rte_flow_item items[],
591 			  uint8_t *nmode,
592 			  struct ch_filter_specification *fs,
593 			  struct rte_flow_error *e)
594 {
595 	const struct rte_flow_action_of_set_vlan_vid *vlanid;
596 	const struct rte_flow_action_of_set_vlan_pcp *vlanpcp;
597 	const struct rte_flow_action_of_push_vlan *pushvlan;
598 	const struct rte_flow_action_set_ipv4 *ipv4;
599 	const struct rte_flow_action_set_ipv6 *ipv6;
600 	const struct rte_flow_action_set_tp *tp_port;
601 	const struct rte_flow_action_set_mac *mac;
602 	int item_index;
603 	u16 tmp_vlan;
604 
605 	switch (a->type) {
606 	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
607 		vlanid = (const struct rte_flow_action_of_set_vlan_vid *)
608 			  a->conf;
609 		/* If explicitly asked to push a new VLAN header,
610 		 * then don't set rewrite mode. Otherwise, the
611 		 * incoming VLAN packets will get their VLAN fields
612 		 * rewritten, instead of adding an additional outer
613 		 * VLAN header.
614 		 */
615 		if (fs->newvlan != VLAN_INSERT)
616 			fs->newvlan = VLAN_REWRITE;
617 		tmp_vlan = fs->vlan & 0xe000;
618 		fs->vlan = (be16_to_cpu(vlanid->vlan_vid) & 0xfff) | tmp_vlan;
619 		break;
620 	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
621 		vlanpcp = (const struct rte_flow_action_of_set_vlan_pcp *)
622 			  a->conf;
623 		/* If explicitly asked to push a new VLAN header,
624 		 * then don't set rewrite mode. Otherwise, the
625 		 * incoming VLAN packets will get their VLAN fields
626 		 * rewritten, instead of adding an additional outer
627 		 * VLAN header.
628 		 */
629 		if (fs->newvlan != VLAN_INSERT)
630 			fs->newvlan = VLAN_REWRITE;
631 		tmp_vlan = fs->vlan & 0xfff;
632 		fs->vlan = (vlanpcp->vlan_pcp << 13) | tmp_vlan;
633 		break;
634 	case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
635 		pushvlan = (const struct rte_flow_action_of_push_vlan *)
636 			    a->conf;
637 		if (be16_to_cpu(pushvlan->ethertype) != RTE_ETHER_TYPE_VLAN)
638 			return rte_flow_error_set(e, EINVAL,
639 						  RTE_FLOW_ERROR_TYPE_ACTION, a,
640 						  "only ethertype 0x8100 "
641 						  "supported for push vlan.");
642 		fs->newvlan = VLAN_INSERT;
643 		break;
644 	case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
645 		fs->newvlan = VLAN_REMOVE;
646 		break;
647 	case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
648 		item_index = cxgbe_get_flow_item_index(items,
649 						       RTE_FLOW_ITEM_TYPE_IPV4);
650 		if (item_index < 0)
651 			return rte_flow_error_set(e, EINVAL,
652 						  RTE_FLOW_ERROR_TYPE_ACTION, a,
653 						  "No RTE_FLOW_ITEM_TYPE_IPV4 "
654 						  "found.");
655 
656 		ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
657 		memcpy(fs->nat_fip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
658 		*nmode |= 1 << 0;
659 		break;
660 	case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
661 		item_index = cxgbe_get_flow_item_index(items,
662 						       RTE_FLOW_ITEM_TYPE_IPV4);
663 		if (item_index < 0)
664 			return rte_flow_error_set(e, EINVAL,
665 						  RTE_FLOW_ERROR_TYPE_ACTION, a,
666 						  "No RTE_FLOW_ITEM_TYPE_IPV4 "
667 						  "found.");
668 
669 		ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
670 		memcpy(fs->nat_lip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
671 		*nmode |= 1 << 1;
672 		break;
673 	case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
674 		item_index = cxgbe_get_flow_item_index(items,
675 						       RTE_FLOW_ITEM_TYPE_IPV6);
676 		if (item_index < 0)
677 			return rte_flow_error_set(e, EINVAL,
678 						  RTE_FLOW_ERROR_TYPE_ACTION, a,
679 						  "No RTE_FLOW_ITEM_TYPE_IPV6 "
680 						  "found.");
681 
682 		ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
683 		memcpy(fs->nat_fip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
684 		*nmode |= 1 << 0;
685 		break;
686 	case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
687 		item_index = cxgbe_get_flow_item_index(items,
688 						       RTE_FLOW_ITEM_TYPE_IPV6);
689 		if (item_index < 0)
690 			return rte_flow_error_set(e, EINVAL,
691 						  RTE_FLOW_ERROR_TYPE_ACTION, a,
692 						  "No RTE_FLOW_ITEM_TYPE_IPV6 "
693 						  "found.");
694 
695 		ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
696 		memcpy(fs->nat_lip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
697 		*nmode |= 1 << 1;
698 		break;
699 	case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
700 		item_index = cxgbe_get_flow_item_index(items,
701 						       RTE_FLOW_ITEM_TYPE_TCP);
702 		if (item_index < 0) {
703 			item_index =
704 				cxgbe_get_flow_item_index(items,
705 						RTE_FLOW_ITEM_TYPE_UDP);
706 			if (item_index < 0)
707 				return rte_flow_error_set(e, EINVAL,
708 						RTE_FLOW_ERROR_TYPE_ACTION, a,
709 						"No RTE_FLOW_ITEM_TYPE_TCP or "
710 						"RTE_FLOW_ITEM_TYPE_UDP found");
711 		}
712 
713 		tp_port = (const struct rte_flow_action_set_tp *)a->conf;
714 		fs->nat_fport = be16_to_cpu(tp_port->port);
715 		*nmode |= 1 << 2;
716 		break;
717 	case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
718 		item_index = cxgbe_get_flow_item_index(items,
719 						       RTE_FLOW_ITEM_TYPE_TCP);
720 		if (item_index < 0) {
721 			item_index =
722 				cxgbe_get_flow_item_index(items,
723 						RTE_FLOW_ITEM_TYPE_UDP);
724 			if (item_index < 0)
725 				return rte_flow_error_set(e, EINVAL,
726 						RTE_FLOW_ERROR_TYPE_ACTION, a,
727 						"No RTE_FLOW_ITEM_TYPE_TCP or "
728 						"RTE_FLOW_ITEM_TYPE_UDP found");
729 		}
730 
731 		tp_port = (const struct rte_flow_action_set_tp *)a->conf;
732 		fs->nat_lport = be16_to_cpu(tp_port->port);
733 		*nmode |= 1 << 3;
734 		break;
735 	case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
736 		item_index = cxgbe_get_flow_item_index(items,
737 						       RTE_FLOW_ITEM_TYPE_ETH);
738 		if (item_index < 0)
739 			return rte_flow_error_set(e, EINVAL,
740 						  RTE_FLOW_ERROR_TYPE_ACTION, a,
741 						  "No RTE_FLOW_ITEM_TYPE_ETH "
742 						  "found");
743 		fs->swapmac = 1;
744 		break;
745 	case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
746 		item_index = cxgbe_get_flow_item_index(items,
747 						       RTE_FLOW_ITEM_TYPE_ETH);
748 		if (item_index < 0)
749 			return rte_flow_error_set(e, EINVAL,
750 						  RTE_FLOW_ERROR_TYPE_ACTION, a,
751 						  "No RTE_FLOW_ITEM_TYPE_ETH "
752 						  "found");
753 		mac = (const struct rte_flow_action_set_mac *)a->conf;
754 
755 		fs->newsmac = 1;
756 		memcpy(fs->smac, mac->mac_addr, sizeof(fs->smac));
757 		break;
758 	case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
759 		item_index = cxgbe_get_flow_item_index(items,
760 						       RTE_FLOW_ITEM_TYPE_ETH);
761 		if (item_index < 0)
762 			return rte_flow_error_set(e, EINVAL,
763 						  RTE_FLOW_ERROR_TYPE_ACTION, a,
764 						  "No RTE_FLOW_ITEM_TYPE_ETH found");
765 		mac = (const struct rte_flow_action_set_mac *)a->conf;
766 
767 		fs->newdmac = 1;
768 		memcpy(fs->dmac, mac->mac_addr, sizeof(fs->dmac));
769 		break;
770 	default:
771 		/* We are not supposed to come here */
772 		return rte_flow_error_set(e, EINVAL,
773 					  RTE_FLOW_ERROR_TYPE_ACTION, a,
774 					  "Action not supported");
775 	}
776 
777 	return 0;
778 }
779 
780 static int
781 cxgbe_rtef_parse_actions(struct rte_flow *flow,
782 			 const struct rte_flow_item items[],
783 			 const struct rte_flow_action action[],
784 			 struct rte_flow_error *e)
785 {
786 	struct ch_filter_specification *fs = &flow->fs;
787 	uint8_t nmode = 0, nat_ipv4 = 0, nat_ipv6 = 0;
788 	uint8_t vlan_set_vid = 0, vlan_set_pcp = 0;
789 	const struct rte_flow_action_queue *q;
790 	const struct rte_flow_action *a;
791 	char abit = 0;
792 	int ret;
793 
794 	for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
795 		switch (a->type) {
796 		case RTE_FLOW_ACTION_TYPE_VOID:
797 			continue;
798 		case RTE_FLOW_ACTION_TYPE_DROP:
799 			if (abit++)
800 				return rte_flow_error_set(e, EINVAL,
801 						RTE_FLOW_ERROR_TYPE_ACTION, a,
802 						"specify only 1 pass/drop");
803 			fs->action = FILTER_DROP;
804 			break;
805 		case RTE_FLOW_ACTION_TYPE_QUEUE:
806 			q = (const struct rte_flow_action_queue *)a->conf;
807 			if (!q)
808 				return rte_flow_error_set(e, EINVAL,
809 						RTE_FLOW_ERROR_TYPE_ACTION, q,
810 						"specify rx queue index");
811 			if (check_rxq(flow->dev, q->index))
812 				return rte_flow_error_set(e, EINVAL,
813 						RTE_FLOW_ERROR_TYPE_ACTION, q,
814 						"Invalid rx queue");
815 			if (abit++)
816 				return rte_flow_error_set(e, EINVAL,
817 						RTE_FLOW_ERROR_TYPE_ACTION, a,
818 						"specify only 1 pass/drop");
819 			fs->action = FILTER_PASS;
820 			fs->dirsteer = 1;
821 			fs->iq = q->index;
822 			break;
823 		case RTE_FLOW_ACTION_TYPE_COUNT:
824 			fs->hitcnts = 1;
825 			break;
826 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
827 			vlan_set_vid++;
828 			goto action_switch;
829 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
830 			vlan_set_pcp++;
831 			goto action_switch;
832 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
833 		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
834 		case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
835 		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
836 		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
837 			nat_ipv4++;
838 			goto action_switch;
839 		case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
840 		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
841 			nat_ipv6++;
842 			goto action_switch;
843 		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
844 		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
845 		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
846 		case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
847 action_switch:
848 			/* We allow multiple switch actions, but switch is
849 			 * not compatible with either queue or drop
850 			 */
851 			if (abit++ && fs->action != FILTER_SWITCH)
852 				return rte_flow_error_set(e, EINVAL,
853 						RTE_FLOW_ERROR_TYPE_ACTION, a,
854 						"overlapping action specified");
855 			if (nat_ipv4 && nat_ipv6)
856 				return rte_flow_error_set(e, EINVAL,
857 					RTE_FLOW_ERROR_TYPE_ACTION, a,
858 					"Can't have one address ipv4 and the"
859 					" other ipv6");
860 
861 			ret = ch_rte_parse_atype_switch(a, items, &nmode, fs,
862 							e);
863 			if (ret)
864 				return ret;
865 			fs->action = FILTER_SWITCH;
866 			break;
867 		default:
868 			/* Unsupported action: return error */
869 			return rte_flow_error_set(e, ENOTSUP,
870 						  RTE_FLOW_ERROR_TYPE_ACTION,
871 						  a, "Action not supported");
872 		}
873 	}
874 
875 	if (fs->newvlan == VLAN_REWRITE && (!vlan_set_vid || !vlan_set_pcp))
876 		return rte_flow_error_set(e, EINVAL,
877 					  RTE_FLOW_ERROR_TYPE_ACTION, a,
878 					  "Both OF_SET_VLAN_VID and "
879 					  "OF_SET_VLAN_PCP must be specified");
880 
881 	if (ch_rte_parse_nat(nmode, fs))
882 		return rte_flow_error_set(e, EINVAL,
883 					  RTE_FLOW_ERROR_TYPE_ACTION, a,
884 					  "invalid settings for switch action");
885 	return 0;
886 }
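
/*
 * Illustrative action list accepted by the parser above: steer matching
 * packets to RX queue 3 and keep hit counters, e.g.
 *
 *	struct rte_flow_action_queue q = { .index = 3 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q },
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 * QUEUE/DROP are mutually exclusive with each other and with the rewrite
 * ("switch") actions, while COUNT may be combined with any of them.
 */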
887 
888 static struct chrte_fparse parseitem[] = {
889 	[RTE_FLOW_ITEM_TYPE_ETH] = {
890 		.fptr  = ch_rte_parsetype_eth,
891 		.dmask = &(const struct rte_flow_item_eth){
892 			.hdr.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
893 			.hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
894 			.hdr.ether_type = 0xffff,
895 		}
896 	},
897 
898 	[RTE_FLOW_ITEM_TYPE_VLAN] = {
899 		.fptr = ch_rte_parsetype_vlan,
900 		.dmask = &(const struct rte_flow_item_vlan){
901 			.hdr.vlan_tci = 0xffff,
902 			.hdr.eth_proto = 0xffff,
903 		}
904 	},
905 
906 	[RTE_FLOW_ITEM_TYPE_IPV4] = {
907 		.fptr  = ch_rte_parsetype_ipv4,
908 		.dmask = &(const struct rte_flow_item_ipv4) {
909 			.hdr = {
910 				.src_addr = RTE_BE32(0xffffffff),
911 				.dst_addr = RTE_BE32(0xffffffff),
912 				.type_of_service = 0xff,
913 			},
914 		},
915 	},
916 
917 	[RTE_FLOW_ITEM_TYPE_IPV6] = {
918 		.fptr  = ch_rte_parsetype_ipv6,
919 		.dmask = &(const struct rte_flow_item_ipv6) {
920 			.hdr = {
921 				.src_addr =
922 					"\xff\xff\xff\xff\xff\xff\xff\xff"
923 					"\xff\xff\xff\xff\xff\xff\xff\xff",
924 				.dst_addr =
925 					"\xff\xff\xff\xff\xff\xff\xff\xff"
926 					"\xff\xff\xff\xff\xff\xff\xff\xff",
927 				.vtc_flow = RTE_BE32(0xff000000),
928 			},
929 		},
930 	},
931 
932 	[RTE_FLOW_ITEM_TYPE_UDP] = {
933 		.fptr  = ch_rte_parsetype_udp,
934 		.dmask = &rte_flow_item_udp_mask,
935 	},
936 
937 	[RTE_FLOW_ITEM_TYPE_TCP] = {
938 		.fptr  = ch_rte_parsetype_tcp,
939 		.dmask = &rte_flow_item_tcp_mask,
940 	},
941 };
942 
943 static int
944 cxgbe_rtef_parse_items(struct rte_flow *flow,
945 		       const struct rte_flow_item items[],
946 		       struct rte_flow_error *e)
947 {
948 	struct adapter *adap = ethdev2adap(flow->dev);
949 	const struct rte_flow_item *i;
950 	char repeat[ARRAY_SIZE(parseitem)] = {0};
951 
952 	for (i = items; i->type != RTE_FLOW_ITEM_TYPE_END; i++) {
953 		struct chrte_fparse *idx;
954 		int ret;
955 
956 		if (i->type >= ARRAY_SIZE(parseitem))
957 			return rte_flow_error_set(e, ENOTSUP,
958 						  RTE_FLOW_ERROR_TYPE_ITEM,
959 						  i, "Item not supported");
960 
961 		switch (i->type) {
962 		case RTE_FLOW_ITEM_TYPE_VOID:
963 			continue;
964 		default:
965 			/* check if item is repeated */
966 			if (repeat[i->type] &&
967 			    i->type != RTE_FLOW_ITEM_TYPE_VLAN)
968 				return rte_flow_error_set(e, ENOTSUP,
969 						RTE_FLOW_ERROR_TYPE_ITEM, i,
970 						"parse items cannot be repeated(except void/vlan)");
971 
972 			repeat[i->type] = 1;
973 
974 			/* validate the item */
975 			ret = cxgbe_validate_item(i, e);
976 			if (ret)
977 				return ret;
978 
979 			idx = &flow->item_parser[i->type];
980 			if (!idx || !idx->fptr) {
981 				return rte_flow_error_set(e, ENOTSUP,
982 						RTE_FLOW_ERROR_TYPE_ITEM, i,
983 						"Item not supported");
984 			} else {
985 				ret = idx->fptr(idx->dmask, i, &flow->fs, e);
986 				if (ret)
987 					return ret;
988 			}
989 		}
990 	}
991 
992 	cxgbe_tweak_filter_spec(adap, &flow->fs);
993 	cxgbe_fill_filter_region(adap, &flow->fs);
994 
995 	return 0;
996 }
997 
998 static int
999 cxgbe_flow_parse(struct rte_flow *flow,
1000 		 const struct rte_flow_attr *attr,
1001 		 const struct rte_flow_item item[],
1002 		 const struct rte_flow_action action[],
1003 		 struct rte_flow_error *e)
1004 {
1005 	int ret;
1006 	/* parse user request into ch_filter_specification */
1007 	ret = cxgbe_rtef_parse_attr(flow, attr, e);
1008 	if (ret)
1009 		return ret;
1010 	ret = cxgbe_rtef_parse_items(flow, item, e);
1011 	if (ret)
1012 		return ret;
1013 	return cxgbe_rtef_parse_actions(flow, item, action, e);
1014 }
1015 
1016 static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
1017 {
1018 	struct ch_filter_specification *fs = &flow->fs;
1019 	struct adapter *adap = ethdev2adap(dev);
1020 	struct tid_info *t = &adap->tids;
1021 	struct filter_ctx ctx;
1022 	unsigned int fidx;
1023 	int err;
1024 
1025 	if (cxgbe_get_fidx(flow, &fidx))
1026 		return -ENOMEM;
1027 	if (cxgbe_verify_fidx(flow, fidx, 0))
1028 		return -1;
1029 
1030 	t4_init_completion(&ctx.completion);
1031 	/* go create the filter */
1032 	err = cxgbe_set_filter(dev, fidx, fs, &ctx);
1033 	if (err) {
1034 		dev_err(adap, "Error %d while creating filter.\n", err);
1035 		return err;
1036 	}
1037 
1038 	/* Poll the FW for reply */
1039 	err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
1040 					CXGBE_FLOW_POLL_MS,
1041 					CXGBE_FLOW_POLL_CNT,
1042 					&ctx.completion);
1043 	if (err) {
1044 		dev_err(adap, "Filter set operation timed out (%d)\n", err);
1045 		return err;
1046 	}
1047 	if (ctx.result) {
1048 		dev_err(adap, "Hardware error %d while creating the filter.\n",
1049 			ctx.result);
1050 		return ctx.result;
1051 	}
1052 
1053 	if (fs->cap) { /* to destroy the filter */
1054 		flow->fidx = ctx.tid;
1055 		flow->f = lookup_tid(t, ctx.tid);
1056 	} else {
1057 		flow->fidx = fidx;
1058 		flow->f = &adap->tids.ftid_tab[fidx];
1059 	}
1060 
1061 	return 0;
1062 }
1063 
1064 static struct rte_flow *
1065 cxgbe_flow_create(struct rte_eth_dev *dev,
1066 		  const struct rte_flow_attr *attr,
1067 		  const struct rte_flow_item item[],
1068 		  const struct rte_flow_action action[],
1069 		  struct rte_flow_error *e)
1070 {
1071 	struct adapter *adap = ethdev2adap(dev);
1072 	struct rte_flow *flow;
1073 	int ret;
1074 
1075 	flow = t4_os_alloc(sizeof(struct rte_flow));
1076 	if (!flow) {
1077 		rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1078 				   NULL, "Unable to allocate memory for"
1079 				   " filter_entry");
1080 		return NULL;
1081 	}
1082 
1083 	flow->item_parser = parseitem;
1084 	flow->dev = dev;
1085 	flow->fs.private = (void *)flow;
1086 
1087 	if (cxgbe_flow_parse(flow, attr, item, action, e)) {
1088 		t4_os_free(flow);
1089 		return NULL;
1090 	}
1091 
1092 	t4_os_lock(&adap->flow_lock);
1093 	/* go, interact with cxgbe_filter */
1094 	ret = __cxgbe_flow_create(dev, flow);
1095 	t4_os_unlock(&adap->flow_lock);
1096 	if (ret) {
1097 		rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
1098 				   NULL, "Unable to create flow rule");
1099 		t4_os_free(flow);
1100 		return NULL;
1101 	}
1102 
1103 	flow->f->private = flow; /* Will be used during flush */
1104 
1105 	return flow;
1106 }
1107 
1108 static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
1109 {
1110 	struct adapter *adap = ethdev2adap(dev);
1111 	struct filter_entry *f = flow->f;
1112 	struct ch_filter_specification *fs;
1113 	struct filter_ctx ctx;
1114 	int err;
1115 
1116 	fs = &f->fs;
1117 	if (cxgbe_verify_fidx(flow, flow->fidx, 1))
1118 		return -1;
1119 
1120 	t4_init_completion(&ctx.completion);
1121 	err = cxgbe_del_filter(dev, flow->fidx, fs, &ctx);
1122 	if (err) {
1123 		dev_err(adap, "Error %d while deleting filter.\n", err);
1124 		return err;
1125 	}
1126 
1127 	/* Poll the FW for reply */
1128 	err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
1129 					CXGBE_FLOW_POLL_MS,
1130 					CXGBE_FLOW_POLL_CNT,
1131 					&ctx.completion);
1132 	if (err) {
1133 		dev_err(adap, "Filter delete operation timed out (%d)\n", err);
1134 		return err;
1135 	}
1136 	if (ctx.result) {
1137 		dev_err(adap, "Hardware error %d while deleting the filter.\n",
1138 			ctx.result);
1139 		return ctx.result;
1140 	}
1141 
1142 	return 0;
1143 }
1144 
1145 static int
1146 cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1147 		   struct rte_flow_error *e)
1148 {
1149 	struct adapter *adap = ethdev2adap(dev);
1150 	int ret;
1151 
1152 	t4_os_lock(&adap->flow_lock);
1153 	ret = __cxgbe_flow_destroy(dev, flow);
1154 	t4_os_unlock(&adap->flow_lock);
1155 	if (ret)
1156 		return rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
1157 					  flow, "error destroying filter.");
1158 	t4_os_free(flow);
1159 	return 0;
1160 }
1161 
1162 static int __cxgbe_flow_query(struct rte_flow *flow, u64 *count,
1163 			      u64 *byte_count)
1164 {
1165 	struct adapter *adap = ethdev2adap(flow->dev);
1166 	struct ch_filter_specification fs = flow->f->fs;
1167 	unsigned int fidx = flow->fidx;
1168 	int ret = 0;
1169 
1170 	ret = cxgbe_get_filter_count(adap, fidx, count, fs.cap, 0);
1171 	if (ret)
1172 		return ret;
1173 	return cxgbe_get_filter_count(adap, fidx, byte_count, fs.cap, 1);
1174 }
1175 
1176 static int
1177 cxgbe_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
1178 		 const struct rte_flow_action *action, void *data,
1179 		 struct rte_flow_error *e)
1180 {
1181 	struct adapter *adap = ethdev2adap(flow->dev);
1182 	struct ch_filter_specification fs;
1183 	struct rte_flow_query_count *c;
1184 	struct filter_entry *f;
1185 	int ret;
1186 
1187 	RTE_SET_USED(dev);
1188 
1189 	f = flow->f;
1190 	fs = f->fs;
1191 
1192 	if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
1193 		return rte_flow_error_set(e, ENOTSUP,
1194 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1195 					  "only count supported for query");
1196 
1197 	/*
1198 	 * This is a valid operation, since we are allowed to do Chelsio-
1199 	 * specific operations on the rte_flow side of our code, but not vice versa.
1200 	 *
1201 	 * So, fs can be queried/modified here, BUT rte_flow_query_count
1202 	 * cannot be handed to the lower layer, since we want to keep it
1203 	 * rte_flow agnostic.
1204 	 */
1205 	if (!fs.hitcnts)
1206 		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1207 					  &fs, "filter hit counters were not"
1208 					  " enabled during filter creation");
1209 
1210 	c = (struct rte_flow_query_count *)data;
1211 
1212 	t4_os_lock(&adap->flow_lock);
1213 	ret = __cxgbe_flow_query(flow, &c->hits, &c->bytes);
1214 	if (ret) {
1215 		rte_flow_error_set(e, -ret, RTE_FLOW_ERROR_TYPE_ACTION,
1216 				   f, "cxgbe pmd failed to perform query");
1217 		goto out;
1218 	}
1219 
1220 	/* Query was successful */
1221 	c->bytes_set = 1;
1222 	c->hits_set = 1;
1223 	if (c->reset)
1224 		cxgbe_clear_filter_count(adap, flow->fidx, f->fs.cap, true);
1225 
1226 out:
1227 	t4_os_unlock(&adap->flow_lock);
1228 	return ret;
1229 }
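
/*
 * Application-side sketch (illustrative): a flow created with a COUNT
 * action can be queried, and optionally reset, through the generic API:
 *
 *	struct rte_flow_action count = { .type = RTE_FLOW_ACTION_TYPE_COUNT };
 *	struct rte_flow_query_count qc = { .reset = 1 };
 *	struct rte_flow_error err;
 *
 *	if (!rte_flow_query(port_id, flow, &count, &qc, &err))
 *		printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *		       qc.hits, qc.bytes);
 *
 * which lands in cxgbe_flow_query() above; hits_set/bytes_set are set on
 * success and the counters are cleared when qc.reset is non-zero.
 */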
1230 
1231 static int
1232 cxgbe_flow_validate(struct rte_eth_dev *dev,
1233 		    const struct rte_flow_attr *attr,
1234 		    const struct rte_flow_item item[],
1235 		    const struct rte_flow_action action[],
1236 		    struct rte_flow_error *e)
1237 {
1238 	struct adapter *adap = ethdev2adap(dev);
1239 	struct rte_flow *flow;
1240 	unsigned int fidx;
1241 	int ret = 0;
1242 
1243 	flow = t4_os_alloc(sizeof(struct rte_flow));
1244 	if (!flow)
1245 		return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1246 				NULL,
1247 				"Unable to allocate memory for filter_entry");
1248 
1249 	flow->item_parser = parseitem;
1250 	flow->dev = dev;
1251 	flow->fs.private = (void *)flow;
1252 
1253 	ret = cxgbe_flow_parse(flow, attr, item, action, e);
1254 	if (ret) {
1255 		t4_os_free(flow);
1256 		return ret;
1257 	}
1258 
1259 	if (cxgbe_validate_filter(adap, &flow->fs)) {
1260 		t4_os_free(flow);
1261 		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
1262 				NULL,
1263 				"validation failed. Check f/w config file.");
1264 	}
1265 
1266 	t4_os_lock(&adap->flow_lock);
1267 	if (cxgbe_get_fidx(flow, &fidx)) {
1268 		ret = rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1269 					 NULL, "no memory in tcam.");
1270 		goto out;
1271 	}
1272 
1273 	if (cxgbe_verify_fidx(flow, fidx, 0)) {
1274 		ret = rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
1275 					 NULL, "validation failed");
1276 		goto out;
1277 	}
1278 
1279 out:
1280 	t4_os_unlock(&adap->flow_lock);
1281 	t4_os_free(flow);
1282 	return ret;
1283 }
1284 
1285 /*
1286  * @ret : == 0 filter destroyed successfully
1287  *        < 0 error destroying filter
1288  *        == 1 filter not active / not found
1289  */
1290 static int
1291 cxgbe_check_n_destroy(struct filter_entry *f, struct rte_eth_dev *dev)
1292 {
1293 	if (f && (f->valid || f->pending) &&
1294 	    f->dev == dev && /* Only if user has asked for this port */
1295 	     f->private) /* We (rte_flow) created this filter */
1296 		return __cxgbe_flow_destroy(dev, (struct rte_flow *)f->private);
1297 	return 1;
1298 }
1299 
1300 static int cxgbe_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *e)
1301 {
1302 	struct adapter *adap = ethdev2adap(dev);
1303 	unsigned int i;
1304 	int ret = 0;
1305 
1306 	t4_os_lock(&adap->flow_lock);
1307 	if (adap->tids.ftid_tab) {
1308 		struct filter_entry *f = &adap->tids.ftid_tab[0];
1309 
1310 		for (i = 0; i < adap->tids.nftids; i++, f++) {
1311 			ret = cxgbe_check_n_destroy(f, dev);
1312 			if (ret < 0) {
1313 				rte_flow_error_set(e, ret,
1314 						   RTE_FLOW_ERROR_TYPE_HANDLE,
1315 						   f->private,
1316 						   "error destroying TCAM "
1317 						   "filter.");
1318 				goto out;
1319 			}
1320 		}
1321 	}
1322 
1323 	if (is_hashfilter(adap) && adap->tids.tid_tab) {
1324 		struct filter_entry *f;
1325 
1326 		for (i = adap->tids.hash_base; i < adap->tids.ntids; i++) {
1327 			f = (struct filter_entry *)adap->tids.tid_tab[i];
1328 
1329 			ret = cxgbe_check_n_destroy(f, dev);
1330 			if (ret < 0) {
1331 				rte_flow_error_set(e, ret,
1332 						   RTE_FLOW_ERROR_TYPE_HANDLE,
1333 						   f->private,
1334 						   "error destroying HASH "
1335 						   "filter.");
1336 				goto out;
1337 			}
1338 		}
1339 	}
1340 
1341 out:
1342 	t4_os_unlock(&adap->flow_lock);
1343 	return ret >= 0 ? 0 : ret;
1344 }
1345 
1346 static const struct rte_flow_ops cxgbe_flow_ops = {
1347 	.validate	= cxgbe_flow_validate,
1348 	.create		= cxgbe_flow_create,
1349 	.destroy	= cxgbe_flow_destroy,
1350 	.flush		= cxgbe_flow_flush,
1351 	.query		= cxgbe_flow_query,
1352 	.isolate	= NULL,
1353 };
1354 
1355 int
1356 cxgbe_dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
1357 		       const struct rte_flow_ops **ops)
1358 {
1359 	*ops = &cxgbe_flow_ops;
1360 	return 0;
1361 }
1362