/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Chelsio Communications.
 * All rights reserved.
 */
#include "base/common.h"
#include "cxgbe_flow.h"

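/*
 * Helpers to fill Chelsio filter specification (fs) value/mask pairs
 * from rte_flow items. __CXGBE_FILL_FS rejects an item that redefines
 * an already-set field with a different value, so two pattern items
 * cannot request conflicting values for the same match field.
 */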
#define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \
do { \
	if ((fs)->mask.elem && ((fs)->val.elem != (__v))) \
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, \
					  NULL, "Redefined match item with" \
					  " different values found"); \
	(fs)->val.elem = (__v); \
	(fs)->mask.elem = (__m); \
} while (0)

#define __CXGBE_FILL_FS_MEMCPY(__v, __m, fs, elem) \
do { \
	memcpy(&(fs)->val.elem, &(__v), sizeof(__v)); \
	memcpy(&(fs)->mask.elem, &(__m), sizeof(__m)); \
} while (0)

#define CXGBE_FILL_FS(v, m, elem) \
	__CXGBE_FILL_FS(v, m, fs, elem, e)

#define CXGBE_FILL_FS_MEMCPY(v, m, elem) \
	__CXGBE_FILL_FS_MEMCPY(v, m, fs, elem)

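/*
 * Sanity-check a single pattern item: the rte_flow API requires a spec
 * whenever mask or last is given, and this PMD additionally rejects
 * ranged matching (last) altogether.
 */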
static int
cxgbe_validate_item(const struct rte_flow_item *i, struct rte_flow_error *e)
{
	/* The rte_flow specification does not allow mask or last without spec. */
	if (!i->spec && (i->mask || i->last))
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				   i, "last or mask given without spec");
	/*
	 * Ranges (last) are not supported. We could accept last values of
	 * all 0's or last == spec, but that would only add complexity for
	 * us without providing the user any additional functionality.
	 */
	if (i->last)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				   i, "last is not supported by Chelsio PMD");
	return 0;
}

/**
 * Apart from the 4-tuple IPv4/IPv6 - TCP/UDP information,
 * there are only 40 bits available to store match fields.
 * So, to save space, optimize the filter spec for some common
 * known fields that hardware can parse against incoming
 * packets automatically.
 */
static void
cxgbe_tweak_filter_spec(struct adapter *adap,
			struct ch_filter_specification *fs)
{
	/* Save the 16-bit ethertype field space by setting corresponding
	 * 1-bit flags in the filter spec for common known ethertypes.
	 * When hardware sees these flags, it automatically infers and
	 * matches incoming packets against the corresponding ethertype.
	 */
	if (fs->mask.ethtype == 0xffff) {
		switch (fs->val.ethtype) {
		case RTE_ETHER_TYPE_IPV4:
			if (adap->params.tp.ethertype_shift < 0) {
				fs->type = FILTER_TYPE_IPV4;
				fs->val.ethtype = 0;
				fs->mask.ethtype = 0;
			}
			break;
		case RTE_ETHER_TYPE_IPV6:
			if (adap->params.tp.ethertype_shift < 0) {
				fs->type = FILTER_TYPE_IPV6;
				fs->val.ethtype = 0;
				fs->mask.ethtype = 0;
			}
			break;
		case RTE_ETHER_TYPE_VLAN:
			if (adap->params.tp.ethertype_shift < 0 &&
			    adap->params.tp.vlan_shift >= 0) {
				fs->val.ivlan_vld = 1;
				fs->mask.ivlan_vld = 1;
				fs->val.ethtype = 0;
				fs->mask.ethtype = 0;
			}
			break;
		case RTE_ETHER_TYPE_QINQ:
			if (adap->params.tp.ethertype_shift < 0 &&
			    adap->params.tp.vnic_shift >= 0) {
				fs->val.ovlan_vld = 1;
				fs->mask.ovlan_vld = 1;
				fs->val.ethtype = 0;
				fs->mask.ethtype = 0;
			}
			break;
		default:
			break;
		}
	}
}

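/*
 * Decide whether the rule can be placed in the hash (exact-match)
 * filter region. Hash filters need fully-specified local/foreign IP
 * addresses and ports, and the remaining field masks, packed at the
 * per-field shifts advertised in tp_params, must exactly equal the
 * global hash_filter_mask programmed into the hardware. Otherwise the
 * rule stays in the LE-TCAM region (fs->cap remains 0).
 */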
static void
cxgbe_fill_filter_region(struct adapter *adap,
			 struct ch_filter_specification *fs)
{
	struct tp_params *tp = &adap->params.tp;
	u64 hash_filter_mask = tp->hash_filter_mask;
	u64 ntuple_mask = 0;

	fs->cap = 0;

	if (!is_hashfilter(adap))
		return;

	if (fs->type) {
		uint8_t biton[16] = {0xff, 0xff, 0xff, 0xff,
				     0xff, 0xff, 0xff, 0xff,
				     0xff, 0xff, 0xff, 0xff,
				     0xff, 0xff, 0xff, 0xff};
		uint8_t bitoff[16] = {0};

		if (!memcmp(fs->val.lip, bitoff, sizeof(bitoff)) ||
		    !memcmp(fs->val.fip, bitoff, sizeof(bitoff)) ||
		    memcmp(fs->mask.lip, biton, sizeof(biton)) ||
		    memcmp(fs->mask.fip, biton, sizeof(biton)))
			return;
	} else {
		uint32_t biton  = 0xffffffff;
		uint32_t bitoff = 0x0U;

		if (!memcmp(fs->val.lip, &bitoff, sizeof(bitoff)) ||
		    !memcmp(fs->val.fip, &bitoff, sizeof(bitoff)) ||
		    memcmp(fs->mask.lip, &biton, sizeof(biton)) ||
		    memcmp(fs->mask.fip, &biton, sizeof(biton)))
			return;
	}

	if (!fs->val.lport || fs->mask.lport != 0xffff)
		return;
	if (!fs->val.fport || fs->mask.fport != 0xffff)
		return;

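	/*
	 * Pack each field's mask at its hardware shift. The shifts depend
	 * on the firmware's TP filter mode; as an illustrative example, if
	 * protocol sits at shift 0 and ethertype at shift 8, an exact-match
	 * rule would contribute 0xffULL | (0xffffULL << 8) below.
	 */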
	if (tp->protocol_shift >= 0)
		ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
	if (tp->ethertype_shift >= 0)
		ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
	if (tp->port_shift >= 0)
		ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
	if (tp->macmatch_shift >= 0)
		ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;
	if (tp->vlan_shift >= 0 && fs->mask.ivlan_vld)
		ntuple_mask |= (u64)(F_FT_VLAN_VLD | fs->mask.ivlan) <<
			       tp->vlan_shift;
	if (tp->vnic_shift >= 0) {
		if (fs->mask.ovlan_vld)
			ntuple_mask |= (u64)(fs->val.ovlan_vld << 16 |
					     fs->mask.ovlan) << tp->vnic_shift;
		else if (fs->mask.pfvf_vld)
			ntuple_mask |= (u64)(fs->mask.pfvf_vld << 16 |
					     fs->mask.pf << 13 |
					     fs->mask.vf) << tp->vnic_shift;
	}
	if (tp->tos_shift >= 0)
		ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift;

	if (ntuple_mask != hash_filter_mask)
		return;

	fs->cap = 1;	/* use hash region */
}

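/*
 * Pattern item parsers. Each follows the same shape: pick the
 * user-supplied mask if one was given, else fall back to the PMD
 * default mask (the dmask entries from the parseitem[] table further
 * below), then fill the corresponding value/mask fields of the Chelsio
 * filter specification.
 */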
static int
ch_rte_parsetype_eth(const void *dmask, const struct rte_flow_item *item,
		     struct ch_filter_specification *fs,
		     struct rte_flow_error *e)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *umask = item->mask;
	const struct rte_flow_item_eth *mask;

	/* If the user has not given any mask, use the Chelsio supported mask. */
	mask = umask ? umask : (const struct rte_flow_item_eth *)dmask;

	if (!spec)
		return 0;

	/* We don't support SRC_MAC filtering. */
	if (!rte_is_zero_ether_addr(&spec->src) ||
	    (umask && !rte_is_zero_ether_addr(&umask->src)))
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "src mac filtering not supported");

	if (!rte_is_zero_ether_addr(&spec->dst) ||
	    (umask && !rte_is_zero_ether_addr(&umask->dst))) {
		CXGBE_FILL_FS(0, 0x1ff, macidx);
		CXGBE_FILL_FS_MEMCPY(spec->dst.addr_bytes, mask->dst.addr_bytes,
				     dmac);
	}

	if (spec->type || (umask && umask->type))
		CXGBE_FILL_FS(be16_to_cpu(spec->type),
			      be16_to_cpu(mask->type), ethtype);

	return 0;
}

static int
ch_rte_parsetype_port(const void *dmask, const struct rte_flow_item *item,
		      struct ch_filter_specification *fs,
		      struct rte_flow_error *e)
{
	const struct rte_flow_item_phy_port *val = item->spec;
	const struct rte_flow_item_phy_port *umask = item->mask;
	const struct rte_flow_item_phy_port *mask;

	mask = umask ? umask : (const struct rte_flow_item_phy_port *)dmask;

	if (!val)
		return 0; /* Wildcard, match all physical ports */

	if (val->index > 0x7)
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "port index up to 0x7 is supported");

	if (val->index || (umask && umask->index))
		CXGBE_FILL_FS(val->index, mask->index, iport);

	return 0;
}

static int
ch_rte_parsetype_vlan(const void *dmask, const struct rte_flow_item *item,
		      struct ch_filter_specification *fs,
		      struct rte_flow_error *e)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *umask = item->mask;
	const struct rte_flow_item_vlan *mask;

	/* If the user has not given any mask, use the Chelsio supported mask. */
	mask = umask ? umask : (const struct rte_flow_item_vlan *)dmask;

	if (!fs->mask.ethtype)
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "Can't parse VLAN item without knowing ethertype");

	/* If the ethertype is already set and is not VLAN (0x8100) or
	 * QinQ (0x88A8), then don't proceed further. Otherwise, reset
	 * the outer ethertype, so that it can be replaced by the
	 * innermost ethertype. Note that hardware will automatically
	 * match against VLAN or QinQ packets, based on the 'ivlan_vld'
	 * or 'ovlan_vld' bit set in the Chelsio filter spec,
	 * respectively.
	 */
	if (fs->mask.ethtype) {
		if (fs->val.ethtype != RTE_ETHER_TYPE_VLAN &&
		    fs->val.ethtype != RTE_ETHER_TYPE_QINQ)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "Ethertype must be 0x8100 or 0x88a8");
	}

	if (fs->val.ethtype == RTE_ETHER_TYPE_QINQ) {
		CXGBE_FILL_FS(1, 1, ovlan_vld);
		if (spec) {
			if (spec->tci || (umask && umask->tci))
				CXGBE_FILL_FS(be16_to_cpu(spec->tci),
					      be16_to_cpu(mask->tci), ovlan);
			fs->mask.ethtype = 0;
			fs->val.ethtype = 0;
		}
	} else if (fs->val.ethtype == RTE_ETHER_TYPE_VLAN) {
		CXGBE_FILL_FS(1, 1, ivlan_vld);
		if (spec) {
			if (spec->tci || (umask && umask->tci))
				CXGBE_FILL_FS(be16_to_cpu(spec->tci),
					      be16_to_cpu(mask->tci), ivlan);
			fs->mask.ethtype = 0;
			fs->val.ethtype = 0;
		}
	}

	if (spec && (spec->inner_type || (umask && umask->inner_type)))
		CXGBE_FILL_FS(be16_to_cpu(spec->inner_type),
			      be16_to_cpu(mask->inner_type), ethtype);

	return 0;
}

static int
ch_rte_parsetype_pf(const void *dmask __rte_unused,
		    const struct rte_flow_item *item __rte_unused,
		    struct ch_filter_specification *fs,
		    struct rte_flow_error *e __rte_unused)
{
	struct rte_flow *flow = (struct rte_flow *)fs->private;
	struct rte_eth_dev *dev = flow->dev;
	struct adapter *adap = ethdev2adap(dev);

	CXGBE_FILL_FS(1, 1, pfvf_vld);

	CXGBE_FILL_FS(adap->pf, 0x7, pf);
	return 0;
}

static int
ch_rte_parsetype_vf(const void *dmask, const struct rte_flow_item *item,
		    struct ch_filter_specification *fs,
		    struct rte_flow_error *e)
{
	const struct rte_flow_item_vf *umask = item->mask;
	const struct rte_flow_item_vf *val = item->spec;
	const struct rte_flow_item_vf *mask;

	/* If the user has not given any mask, use the Chelsio supported mask. */
	mask = umask ? umask : (const struct rte_flow_item_vf *)dmask;

	CXGBE_FILL_FS(1, 1, pfvf_vld);

	if (!val)
		return 0; /* Wildcard, match all VFs */

	if (val->id > UCHAR_MAX)
		return rte_flow_error_set(e, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "VF ID > MAX(255)");

	if (val->id || (umask && umask->id))
		CXGBE_FILL_FS(val->id, mask->id, vf);

	return 0;
}

static int
ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
		     struct ch_filter_specification *fs,
		     struct rte_flow_error *e)
{
	const struct rte_flow_item_udp *val = item->spec;
	const struct rte_flow_item_udp *umask = item->mask;
	const struct rte_flow_item_udp *mask;

	mask = umask ? umask : (const struct rte_flow_item_udp *)dmask;

	if (mask->hdr.dgram_len || mask->hdr.dgram_cksum)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "udp: only src/dst port supported");

	CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto);
	if (!val)
		return 0;

	if (val->hdr.src_port || (umask && umask->hdr.src_port))
		CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
			      be16_to_cpu(mask->hdr.src_port), fport);

	if (val->hdr.dst_port || (umask && umask->hdr.dst_port))
		CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
			      be16_to_cpu(mask->hdr.dst_port), lport);

	return 0;
}

static int
ch_rte_parsetype_tcp(const void *dmask, const struct rte_flow_item *item,
		     struct ch_filter_specification *fs,
		     struct rte_flow_error *e)
{
	const struct rte_flow_item_tcp *val = item->spec;
	const struct rte_flow_item_tcp *umask = item->mask;
	const struct rte_flow_item_tcp *mask;

	mask = umask ? umask : (const struct rte_flow_item_tcp *)dmask;

	if (mask->hdr.sent_seq || mask->hdr.recv_ack || mask->hdr.data_off ||
	    mask->hdr.tcp_flags || mask->hdr.rx_win || mask->hdr.cksum ||
	    mask->hdr.tcp_urp)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "tcp: only src/dst port supported");

	CXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto);
	if (!val)
		return 0;

	if (val->hdr.src_port || (umask && umask->hdr.src_port))
		CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
			      be16_to_cpu(mask->hdr.src_port), fport);

	if (val->hdr.dst_port || (umask && umask->hdr.dst_port))
		CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
			      be16_to_cpu(mask->hdr.dst_port), lport);

	return 0;
}

static int
ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,
		      struct ch_filter_specification *fs,
		      struct rte_flow_error *e)
{
	const struct rte_flow_item_ipv4 *val = item->spec;
	const struct rte_flow_item_ipv4 *umask = item->mask;
	const struct rte_flow_item_ipv4 *mask;

	mask = umask ? umask : (const struct rte_flow_item_ipv4 *)dmask;

	if (mask->hdr.time_to_live)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item, "ttl is not supported");

	if (fs->mask.ethtype &&
	    (fs->val.ethtype != RTE_ETHER_TYPE_IPV4))
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "Couldn't find IPv4 ethertype");
	fs->type = FILTER_TYPE_IPV4;
	if (!val)
		return 0; /* ipv4 wild card */

	if (val->hdr.next_proto_id || (umask && umask->hdr.next_proto_id))
		CXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id,
			      proto);

	if (val->hdr.dst_addr || (umask && umask->hdr.dst_addr))
		CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr,
				     lip);

	if (val->hdr.src_addr || (umask && umask->hdr.src_addr))
		CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr,
				     fip);

	if (val->hdr.type_of_service || (umask && umask->hdr.type_of_service))
		CXGBE_FILL_FS(val->hdr.type_of_service,
			      mask->hdr.type_of_service, tos);

	return 0;
}

static int
ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
		      struct ch_filter_specification *fs,
		      struct rte_flow_error *e)
{
	const struct rte_flow_item_ipv6 *val = item->spec;
	const struct rte_flow_item_ipv6 *umask = item->mask;
	const struct rte_flow_item_ipv6 *mask;
	u32 vtc_flow, vtc_flow_mask;
	u8 z[16] = { 0 };

	mask = umask ? umask : (const struct rte_flow_item_ipv6 *)dmask;

	vtc_flow_mask = be32_to_cpu(mask->hdr.vtc_flow);

	if (vtc_flow_mask & RTE_IPV6_HDR_FL_MASK ||
	    mask->hdr.payload_len || mask->hdr.hop_limits)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "flow/hop are not supported");

	if (fs->mask.ethtype &&
	    (fs->val.ethtype != RTE_ETHER_TYPE_IPV6))
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "Couldn't find IPv6 ethertype");
	fs->type = FILTER_TYPE_IPV6;
	if (!val)
		return 0; /* ipv6 wild card */

	if (val->hdr.proto || (umask && umask->hdr.proto))
		CXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto);

	vtc_flow = be32_to_cpu(val->hdr.vtc_flow);
	if (val->hdr.vtc_flow || (umask && umask->hdr.vtc_flow))
		CXGBE_FILL_FS((vtc_flow & RTE_IPV6_HDR_TC_MASK) >>
			      RTE_IPV6_HDR_TC_SHIFT,
			      (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
			      RTE_IPV6_HDR_TC_SHIFT,
			      tos);

	if (memcmp(val->hdr.dst_addr, z, sizeof(val->hdr.dst_addr)) ||
	    (umask &&
	     memcmp(umask->hdr.dst_addr, z, sizeof(umask->hdr.dst_addr))))
		CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr,
				     lip);

	if (memcmp(val->hdr.src_addr, z, sizeof(val->hdr.src_addr)) ||
	    (umask &&
	     memcmp(umask->hdr.src_addr, z, sizeof(umask->hdr.src_addr))))
		CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr,
				     fip);

	return 0;
}

static int
cxgbe_rtef_parse_attr(struct rte_flow *flow, const struct rte_flow_attr *attr,
		      struct rte_flow_error *e)
{
	if (attr->egress)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
					  attr, "attribute:<egress> is"
					  " not supported!");
	if (attr->group > 0)
		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
					  attr, "group parameter is"
					  " not supported.");

	flow->fidx = attr->priority ? attr->priority - 1 : FILTER_ID_MAX;

	return 0;
}

static inline int check_rxq(struct rte_eth_dev *dev, uint16_t rxq)
{
	struct port_info *pi = ethdev2pinfo(dev);

	if (rxq > pi->n_rx_qsets)
		return -EINVAL;
	return 0;
}

static int cxgbe_validate_fidxondel(struct filter_entry *f, unsigned int fidx)
{
	struct adapter *adap = ethdev2adap(f->dev);
	struct ch_filter_specification fs = f->fs;
	u8 nentries;

	if (fidx >= adap->tids.nftids) {
		dev_err(adap, "invalid flow index %d.\n", fidx);
		return -EINVAL;
	}

	nentries = cxgbe_filter_slots(adap, fs.type);
	if (!cxgbe_is_filter_set(&adap->tids, fidx, nentries)) {
		dev_err(adap, "Already free fidx:%d f:%p\n", fidx, f);
		return -EINVAL;
	}

	return 0;
}

static int
cxgbe_validate_fidxonadd(struct ch_filter_specification *fs,
			 struct adapter *adap, unsigned int fidx)
{
	u8 nentries;

	nentries = cxgbe_filter_slots(adap, fs->type);
	if (cxgbe_is_filter_set(&adap->tids, fidx, nentries)) {
		dev_err(adap, "filter index: %d is busy.\n", fidx);
		return -EBUSY;
	}

	if (fidx >= adap->tids.nftids) {
		dev_err(adap, "filter index (%u) >= max(%u)\n",
			fidx, adap->tids.nftids);
		return -ERANGE;
	}

	return 0;
}

static int
cxgbe_verify_fidx(struct rte_flow *flow, unsigned int fidx, uint8_t del)
{
	if (flow->fs.cap)
		return 0; /* Hash filters */
	return del ? cxgbe_validate_fidxondel(flow->f, fidx) :
		cxgbe_validate_fidxonadd(&flow->fs,
					 ethdev2adap(flow->dev), fidx);
}

static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx)
{
	struct ch_filter_specification *fs = &flow->fs;
	struct adapter *adap = ethdev2adap(flow->dev);

	/* For TCAM, get the next available slot if the default value was
	 * specified.
	 */
	if (flow->fidx == FILTER_ID_MAX) {
		u8 nentries;
		int idx;

		nentries = cxgbe_filter_slots(adap, fs->type);
		idx = cxgbe_alloc_ftid(adap, nentries);
		if (idx < 0) {
			dev_err(adap, "unable to get a filter index in tcam\n");
			return -ENOMEM;
		}
		*fidx = (unsigned int)idx;
	} else {
		*fidx = flow->fidx;
	}

	return 0;
}

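/* Return the array index of the first item of @type, or -ENOENT. */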
static int
cxgbe_get_flow_item_index(const struct rte_flow_item items[], u32 type)
{
	const struct rte_flow_item *i;
	int j, index = -ENOENT;

	for (i = items, j = 0; i->type != RTE_FLOW_ITEM_TYPE_END; i++, j++) {
		if (i->type == type) {
			index = j;
			break;
		}
	}

	return index;
}

static int
ch_rte_parse_nat(uint8_t nmode, struct ch_filter_specification *fs)
{
	/* nmode:
	 * BIT_0 = [src_ip],   BIT_1 = [dst_ip]
	 * BIT_2 = [src_port], BIT_3 = [dst_port]
	 *
	 * Only the cases below are supported as per our spec.
	 */
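	/*
	 * For example, a rule carrying SET_IPV4_SRC, SET_IPV4_DST and
	 * SET_TP_DST actions arrives here with nmode = 1011b = 11 and
	 * maps to NAT_MODE_DIP_DP_SIP.
	 */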
	switch (nmode) {
	case 0:  /* 0000b */
		fs->nat_mode = NAT_MODE_NONE;
		break;
	case 2:  /* 0010b */
		fs->nat_mode = NAT_MODE_DIP;
		break;
	case 5:  /* 0101b */
		fs->nat_mode = NAT_MODE_SIP_SP;
		break;
	case 7:  /* 0111b */
		fs->nat_mode = NAT_MODE_DIP_SIP_SP;
		break;
	case 10: /* 1010b */
		fs->nat_mode = NAT_MODE_DIP_DP;
		break;
	case 11: /* 1011b */
		fs->nat_mode = NAT_MODE_DIP_DP_SIP;
		break;
	case 14: /* 1110b */
		fs->nat_mode = NAT_MODE_DIP_DP_SP;
		break;
	case 15: /* 1111b */
		fs->nat_mode = NAT_MODE_ALL;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int
ch_rte_parse_atype_switch(const struct rte_flow_action *a,
			  const struct rte_flow_item items[],
			  uint8_t *nmode,
			  struct ch_filter_specification *fs,
			  struct rte_flow_error *e)
{
	const struct rte_flow_action_of_set_vlan_vid *vlanid;
	const struct rte_flow_action_of_set_vlan_pcp *vlanpcp;
	const struct rte_flow_action_of_push_vlan *pushvlan;
	const struct rte_flow_action_set_ipv4 *ipv4;
	const struct rte_flow_action_set_ipv6 *ipv6;
	const struct rte_flow_action_set_tp *tp_port;
	const struct rte_flow_action_phy_port *port;
	const struct rte_flow_action_set_mac *mac;
	int item_index;
	u16 tmp_vlan;

	switch (a->type) {
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
		vlanid = (const struct rte_flow_action_of_set_vlan_vid *)
			  a->conf;
		/* If explicitly asked to push a new VLAN header,
		 * then don't set rewrite mode. Otherwise, the
		 * incoming VLAN packets will get their VLAN fields
		 * rewritten, instead of adding an additional outer
		 * VLAN header.
		 */
		if (fs->newvlan != VLAN_INSERT)
			fs->newvlan = VLAN_REWRITE;
		tmp_vlan = fs->vlan & 0xe000;
		fs->vlan = (be16_to_cpu(vlanid->vlan_vid) & 0xfff) | tmp_vlan;
		break;
	case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
		vlanpcp = (const struct rte_flow_action_of_set_vlan_pcp *)
			  a->conf;
		/* If explicitly asked to push a new VLAN header,
		 * then don't set rewrite mode. Otherwise, the
		 * incoming VLAN packets will get their VLAN fields
		 * rewritten, instead of adding an additional outer
		 * VLAN header.
		 */
		if (fs->newvlan != VLAN_INSERT)
			fs->newvlan = VLAN_REWRITE;
		tmp_vlan = fs->vlan & 0xfff;
		fs->vlan = (vlanpcp->vlan_pcp << 13) | tmp_vlan;
		break;
	case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
		pushvlan = (const struct rte_flow_action_of_push_vlan *)
			    a->conf;
		if (be16_to_cpu(pushvlan->ethertype) != RTE_ETHER_TYPE_VLAN)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "only ethertype 0x8100 "
						  "supported for push vlan.");
		fs->newvlan = VLAN_INSERT;
		break;
	case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
		fs->newvlan = VLAN_REMOVE;
		break;
	case RTE_FLOW_ACTION_TYPE_PHY_PORT:
		port = (const struct rte_flow_action_phy_port *)a->conf;
		fs->eport = port->index;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_IPV4);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_IPV4 "
						  "found.");

		ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
		memcpy(fs->nat_fip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
		*nmode |= 1 << 0;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_IPV4);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_IPV4 "
						  "found.");

		ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
		memcpy(fs->nat_lip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
		*nmode |= 1 << 1;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_IPV6);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_IPV6 "
						  "found.");

		ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
		memcpy(fs->nat_fip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
		*nmode |= 1 << 0;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_IPV6);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_IPV6 "
						  "found.");

		ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
		memcpy(fs->nat_lip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
		*nmode |= 1 << 1;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_TCP);
		if (item_index < 0) {
			item_index =
				cxgbe_get_flow_item_index(items,
						RTE_FLOW_ITEM_TYPE_UDP);
			if (item_index < 0)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"No RTE_FLOW_ITEM_TYPE_TCP or "
						"RTE_FLOW_ITEM_TYPE_UDP found");
		}

		tp_port = (const struct rte_flow_action_set_tp *)a->conf;
		fs->nat_fport = be16_to_cpu(tp_port->port);
		*nmode |= 1 << 2;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_TCP);
		if (item_index < 0) {
			item_index =
				cxgbe_get_flow_item_index(items,
						RTE_FLOW_ITEM_TYPE_UDP);
			if (item_index < 0)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"No RTE_FLOW_ITEM_TYPE_TCP or "
						"RTE_FLOW_ITEM_TYPE_UDP found");
		}

		tp_port = (const struct rte_flow_action_set_tp *)a->conf;
		fs->nat_lport = be16_to_cpu(tp_port->port);
		*nmode |= 1 << 3;
		break;
	case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_ETH);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_ETH "
						  "found");
		fs->swapmac = 1;
		break;
	case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_ETH);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_ETH "
						  "found");
		mac = (const struct rte_flow_action_set_mac *)a->conf;

		fs->newsmac = 1;
		memcpy(fs->smac, mac->mac_addr, sizeof(fs->smac));
		break;
	case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
		item_index = cxgbe_get_flow_item_index(items,
						       RTE_FLOW_ITEM_TYPE_ETH);
		if (item_index < 0)
			return rte_flow_error_set(e, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, a,
						  "No RTE_FLOW_ITEM_TYPE_ETH found");
		mac = (const struct rte_flow_action_set_mac *)a->conf;

		fs->newdmac = 1;
		memcpy(fs->dmac, mac->mac_addr, sizeof(fs->dmac));
		break;
	default:
		/* We are not supposed to come here */
		return rte_flow_error_set(e, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, a,
					  "Action not supported");
	}

	return 0;
}

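/*
 * Translate the action list into the filter spec. At most one fate
 * action (pass-to-queue or drop) is accepted; any number of
 * switch-type rewrite actions may be combined, but they force the
 * FILTER_SWITCH fate and cannot be mixed with queue/drop.
 */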
static int
cxgbe_rtef_parse_actions(struct rte_flow *flow,
			 const struct rte_flow_item items[],
			 const struct rte_flow_action action[],
			 struct rte_flow_error *e)
{
	struct ch_filter_specification *fs = &flow->fs;
	uint8_t nmode = 0, nat_ipv4 = 0, nat_ipv6 = 0;
	uint8_t vlan_set_vid = 0, vlan_set_pcp = 0;
	const struct rte_flow_action_queue *q;
	const struct rte_flow_action *a;
	char abit = 0;
	int ret;

	for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
		switch (a->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			continue;
		case RTE_FLOW_ACTION_TYPE_DROP:
			if (abit++)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"specify only 1 pass/drop");
			fs->action = FILTER_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			q = (const struct rte_flow_action_queue *)a->conf;
			if (!q)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, q,
						"specify rx queue index");
			if (check_rxq(flow->dev, q->index))
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, q,
						"Invalid rx queue");
			if (abit++)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"specify only 1 pass/drop");
			fs->action = FILTER_PASS;
			fs->dirsteer = 1;
			fs->iq = q->index;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			fs->hitcnts = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
			vlan_set_vid++;
			goto action_switch;
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
			vlan_set_pcp++;
			goto action_switch;
		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
		case RTE_FLOW_ACTION_TYPE_PHY_PORT:
		case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
			nat_ipv4++;
			goto action_switch;
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
			nat_ipv6++;
			goto action_switch;
		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
action_switch:
			/* We allow multiple switch actions, but switch is
			 * not compatible with either queue or drop
			 */
			if (abit++ && fs->action != FILTER_SWITCH)
				return rte_flow_error_set(e, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION, a,
						"overlapping action specified");
			if (nat_ipv4 && nat_ipv6)
				return rte_flow_error_set(e, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, a,
					"Can't have one address ipv4 and the"
					" other ipv6");

			ret = ch_rte_parse_atype_switch(a, items, &nmode, fs,
							e);
			if (ret)
				return ret;
			fs->action = FILTER_SWITCH;
			break;
		default:
			/* Unsupported action: return error */
			return rte_flow_error_set(e, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  a, "Action not supported");
		}
	}

	if (fs->newvlan == VLAN_REWRITE && (!vlan_set_vid || !vlan_set_pcp))
		return rte_flow_error_set(e, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, a,
					  "Both OF_SET_VLAN_VID and "
					  "OF_SET_VLAN_PCP must be specified");

	if (ch_rte_parse_nat(nmode, fs))
		return rte_flow_error_set(e, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, a,
					  "invalid settings for switch action");
	return 0;
}

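/*
 * Supported pattern items and their default masks, indexed by
 * RTE_FLOW_ITEM_TYPE_*. Items without an entry (or with a NULL .fptr)
 * are rejected during parsing.
 */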
static struct chrte_fparse parseitem[] = {
	[RTE_FLOW_ITEM_TYPE_ETH] = {
		.fptr  = ch_rte_parsetype_eth,
		.dmask = &(const struct rte_flow_item_eth){
			.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0xffff,
		}
	},

	[RTE_FLOW_ITEM_TYPE_PHY_PORT] = {
		.fptr = ch_rte_parsetype_port,
		.dmask = &(const struct rte_flow_item_phy_port){
			.index = 0x7,
		}
	},

	[RTE_FLOW_ITEM_TYPE_VLAN] = {
		.fptr = ch_rte_parsetype_vlan,
		.dmask = &(const struct rte_flow_item_vlan){
			.tci = 0xffff,
			.inner_type = 0xffff,
		}
	},

	[RTE_FLOW_ITEM_TYPE_IPV4] = {
		.fptr  = ch_rte_parsetype_ipv4,
		.dmask = &(const struct rte_flow_item_ipv4) {
			.hdr = {
				.src_addr = RTE_BE32(0xffffffff),
				.dst_addr = RTE_BE32(0xffffffff),
				.type_of_service = 0xff,
			},
		},
	},

	[RTE_FLOW_ITEM_TYPE_IPV6] = {
		.fptr  = ch_rte_parsetype_ipv6,
		.dmask = &(const struct rte_flow_item_ipv6) {
			.hdr = {
				.src_addr =
					"\xff\xff\xff\xff\xff\xff\xff\xff"
					"\xff\xff\xff\xff\xff\xff\xff\xff",
				.dst_addr =
					"\xff\xff\xff\xff\xff\xff\xff\xff"
					"\xff\xff\xff\xff\xff\xff\xff\xff",
				.vtc_flow = RTE_BE32(0xff000000),
			},
		},
	},

	[RTE_FLOW_ITEM_TYPE_UDP] = {
		.fptr  = ch_rte_parsetype_udp,
		.dmask = &rte_flow_item_udp_mask,
	},

	[RTE_FLOW_ITEM_TYPE_TCP] = {
		.fptr  = ch_rte_parsetype_tcp,
		.dmask = &rte_flow_item_tcp_mask,
	},

	[RTE_FLOW_ITEM_TYPE_PF] = {
		.fptr = ch_rte_parsetype_pf,
		.dmask = NULL,
	},

	[RTE_FLOW_ITEM_TYPE_VF] = {
		.fptr = ch_rte_parsetype_vf,
		.dmask = &(const struct rte_flow_item_vf){
			.id = 0xffffffff,
		}
	},
};

static int
cxgbe_rtef_parse_items(struct rte_flow *flow,
		       const struct rte_flow_item items[],
		       struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(flow->dev);
	const struct rte_flow_item *i;
	char repeat[ARRAY_SIZE(parseitem)] = {0};

	for (i = items; i->type != RTE_FLOW_ITEM_TYPE_END; i++) {
		struct chrte_fparse *idx;
		int ret;

		if (i->type >= ARRAY_SIZE(parseitem))
			return rte_flow_error_set(e, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  i, "Item not supported");

		switch (i->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			continue;
		default:
			/* check if item is repeated */
			if (repeat[i->type] &&
			    i->type != RTE_FLOW_ITEM_TYPE_VLAN)
				return rte_flow_error_set(e, ENOTSUP,
						RTE_FLOW_ERROR_TYPE_ITEM, i,
						"parse items cannot be repeated (except void/vlan)");

			repeat[i->type] = 1;

			/* validate the item */
			ret = cxgbe_validate_item(i, e);
			if (ret)
				return ret;

			idx = &flow->item_parser[i->type];
			if (!idx || !idx->fptr) {
				return rte_flow_error_set(e, ENOTSUP,
						RTE_FLOW_ERROR_TYPE_ITEM, i,
						"Item not supported");
			} else {
				ret = idx->fptr(idx->dmask, i, &flow->fs, e);
				if (ret)
					return ret;
			}
		}
	}

	cxgbe_tweak_filter_spec(adap, &flow->fs);
	cxgbe_fill_filter_region(adap, &flow->fs);

	return 0;
}

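/*
 * Parse a complete rule (attributes, pattern, actions) into the
 * Chelsio filter specification. For instance, a rule created through
 * testpmd such as (illustrative only):
 *
 *   flow create 0 ingress pattern eth / ipv4 dst is 10.0.0.1 /
 *        tcp dst is 80 / end actions queue index 1 / end
 *
 * arrives here as item[] and action[] arrays terminated by the
 * respective *_TYPE_END markers.
 */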
static int
cxgbe_flow_parse(struct rte_flow *flow,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item item[],
		 const struct rte_flow_action action[],
		 struct rte_flow_error *e)
{
	int ret;
	/* parse user request into ch_filter_specification */
	ret = cxgbe_rtef_parse_attr(flow, attr, e);
	if (ret)
		return ret;
	ret = cxgbe_rtef_parse_items(flow, item, e);
	if (ret)
		return ret;
	return cxgbe_rtef_parse_actions(flow, item, action, e);
}

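/*
 * Program the parsed rule into hardware. Filter work requests complete
 * asynchronously through the firmware event queue, so after issuing
 * cxgbe_set_filter() we poll the completion up to CXGBE_FLOW_POLL_CNT
 * times at CXGBE_FLOW_POLL_MS intervals before giving up.
 */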
static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct ch_filter_specification *fs = &flow->fs;
	struct adapter *adap = ethdev2adap(dev);
	struct tid_info *t = &adap->tids;
	struct filter_ctx ctx;
	unsigned int fidx;
	int err;

	if (cxgbe_get_fidx(flow, &fidx))
		return -ENOMEM;
	if (cxgbe_verify_fidx(flow, fidx, 0))
		return -1;

	t4_init_completion(&ctx.completion);
	/* go create the filter */
	err = cxgbe_set_filter(dev, fidx, fs, &ctx);
	if (err) {
		dev_err(adap, "Error %d while creating filter.\n", err);
		return err;
	}

	/* Poll the FW for reply */
	err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
					CXGBE_FLOW_POLL_MS,
					CXGBE_FLOW_POLL_CNT,
					&ctx.completion);
	if (err) {
		dev_err(adap, "Filter set operation timed out (%d)\n", err);
		return err;
	}
	if (ctx.result) {
		dev_err(adap, "Hardware error %d while creating the filter.\n",
			ctx.result);
		return ctx.result;
	}

	if (fs->cap) { /* to destroy the filter */
		flow->fidx = ctx.tid;
		flow->f = lookup_tid(t, ctx.tid);
	} else {
		flow->fidx = fidx;
		flow->f = &adap->tids.ftid_tab[fidx];
	}

	return 0;
}

static struct rte_flow *
cxgbe_flow_create(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item item[],
		  const struct rte_flow_action action[],
		  struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(dev);
	struct rte_flow *flow;
	int ret;

	flow = t4_os_alloc(sizeof(struct rte_flow));
	if (!flow) {
		rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Unable to allocate memory for"
				   " filter_entry");
		return NULL;
	}

	flow->item_parser = parseitem;
	flow->dev = dev;
	flow->fs.private = (void *)flow;

	if (cxgbe_flow_parse(flow, attr, item, action, e)) {
		t4_os_free(flow);
		return NULL;
	}

	t4_os_lock(&adap->flow_lock);
	/* go, interact with cxgbe_filter */
	ret = __cxgbe_flow_create(dev, flow);
	t4_os_unlock(&adap->flow_lock);
	if (ret) {
		rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Unable to create flow rule");
		t4_os_free(flow);
		return NULL;
	}

	flow->f->private = flow; /* Will be used during flush */

	return flow;
}

static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct adapter *adap = ethdev2adap(dev);
	struct filter_entry *f = flow->f;
	struct ch_filter_specification *fs;
	struct filter_ctx ctx;
	int err;

	fs = &f->fs;
	if (cxgbe_verify_fidx(flow, flow->fidx, 1))
		return -1;

	t4_init_completion(&ctx.completion);
	err = cxgbe_del_filter(dev, flow->fidx, fs, &ctx);
	if (err) {
		dev_err(adap, "Error %d while deleting filter.\n", err);
		return err;
	}

	/* Poll the FW for reply */
	err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
					CXGBE_FLOW_POLL_MS,
					CXGBE_FLOW_POLL_CNT,
					&ctx.completion);
	if (err) {
		dev_err(adap, "Filter delete operation timed out (%d)\n", err);
		return err;
	}
	if (ctx.result) {
		dev_err(adap, "Hardware error %d while deleting the filter.\n",
			ctx.result);
		return ctx.result;
	}

	return 0;
}

static int
cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
		   struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(dev);
	int ret;

	t4_os_lock(&adap->flow_lock);
	ret = __cxgbe_flow_destroy(dev, flow);
	t4_os_unlock(&adap->flow_lock);
	if (ret)
		return rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
					  flow, "error destroying filter.");
	t4_os_free(flow);
	return 0;
}

static int __cxgbe_flow_query(struct rte_flow *flow, u64 *count,
			      u64 *byte_count)
{
	struct adapter *adap = ethdev2adap(flow->dev);
	struct ch_filter_specification fs = flow->f->fs;
	unsigned int fidx = flow->fidx;
	int ret = 0;

	ret = cxgbe_get_filter_count(adap, fidx, count, fs.cap, 0);
	if (ret)
		return ret;
	return cxgbe_get_filter_count(adap, fidx, byte_count, fs.cap, 1);
}

static int
cxgbe_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
		 const struct rte_flow_action *action, void *data,
		 struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(flow->dev);
	struct ch_filter_specification fs;
	struct rte_flow_query_count *c;
	struct filter_entry *f;
	int ret;

	RTE_SET_USED(dev);

	f = flow->f;
	fs = f->fs;

	if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
		return rte_flow_error_set(e, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "only count supported for query");

	/*
	 * This is a valid operation, since we are allowed to do
	 * Chelsio-specific operations on the rte side of our code, but not
	 * vice-versa.
	 *
	 * So, fs can be queried/modified here, BUT rte_flow_query_count
	 * cannot be worked on by the lower layer, since we want to keep it
	 * rte_flow agnostic.
	 */
	if (!fs.hitcnts)
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
					  &fs, "filter hit counters were not"
					  " enabled during filter creation");

	c = (struct rte_flow_query_count *)data;

	t4_os_lock(&adap->flow_lock);
	ret = __cxgbe_flow_query(flow, &c->hits, &c->bytes);
	if (ret) {
		rte_flow_error_set(e, -ret, RTE_FLOW_ERROR_TYPE_ACTION,
				   f, "cxgbe pmd failed to perform query");
		goto out;
	}

	/* Query was successful */
	c->bytes_set = 1;
	c->hits_set = 1;
	if (c->reset)
		cxgbe_clear_filter_count(adap, flow->fidx, f->fs.cap, true);

out:
	t4_os_unlock(&adap->flow_lock);
	return ret;
}

static int
cxgbe_flow_validate(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item item[],
		    const struct rte_flow_action action[],
		    struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(dev);
	struct rte_flow *flow;
	unsigned int fidx;
	int ret = 0;

	flow = t4_os_alloc(sizeof(struct rte_flow));
	if (!flow)
		return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL,
				"Unable to allocate memory for filter_entry");

	flow->item_parser = parseitem;
	flow->dev = dev;
	flow->fs.private = (void *)flow;

	ret = cxgbe_flow_parse(flow, attr, item, action, e);
	if (ret) {
		t4_os_free(flow);
		return ret;
	}

	if (cxgbe_validate_filter(adap, &flow->fs)) {
		t4_os_free(flow);
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL,
				"validation failed. Check f/w config file.");
	}

	t4_os_lock(&adap->flow_lock);
	if (cxgbe_get_fidx(flow, &fidx)) {
		ret = rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
					 NULL, "no memory in tcam.");
		goto out;
	}

	if (cxgbe_verify_fidx(flow, fidx, 0)) {
		ret = rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					 NULL, "validation failed");
		goto out;
	}

out:
	t4_os_unlock(&adap->flow_lock);
	t4_os_free(flow);
	return ret;
}

/*
 * @ret : == 0 filter destroyed successfully
 *        < 0 error destroying filter
 *        == 1 filter not active / not found
 */
static int
cxgbe_check_n_destroy(struct filter_entry *f, struct rte_eth_dev *dev)
{
	if (f && (f->valid || f->pending) &&
	    f->dev == dev && /* Only if user has asked for this port */
	     f->private) /* We (rte_flow) created this filter */
		return __cxgbe_flow_destroy(dev, (struct rte_flow *)f->private);
	return 1;
}

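/*
 * Destroy every rule owned by this port: first the LE-TCAM region
 * (ftid_tab), then, on hash-filter capable adapters, the hash region
 * (tid_tab). A positive return from cxgbe_check_n_destroy() means the
 * slot was not ours to free and is not treated as an error.
 */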
static int cxgbe_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *e)
{
	struct adapter *adap = ethdev2adap(dev);
	unsigned int i;
	int ret = 0;

	t4_os_lock(&adap->flow_lock);
	if (adap->tids.ftid_tab) {
		struct filter_entry *f = &adap->tids.ftid_tab[0];

		for (i = 0; i < adap->tids.nftids; i++, f++) {
			ret = cxgbe_check_n_destroy(f, dev);
			if (ret < 0) {
				rte_flow_error_set(e, ret,
						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   f->private,
						   "error destroying TCAM "
						   "filter.");
				goto out;
			}
		}
	}

	if (is_hashfilter(adap) && adap->tids.tid_tab) {
		struct filter_entry *f;

		for (i = adap->tids.hash_base; i <= adap->tids.ntids; i++) {
			f = (struct filter_entry *)adap->tids.tid_tab[i];

			ret = cxgbe_check_n_destroy(f, dev);
			if (ret < 0) {
				rte_flow_error_set(e, ret,
						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   f->private,
						   "error destroying HASH "
						   "filter.");
				goto out;
			}
		}
	}

out:
	t4_os_unlock(&adap->flow_lock);
	return ret >= 0 ? 0 : ret;
}

static const struct rte_flow_ops cxgbe_flow_ops = {
	.validate	= cxgbe_flow_validate,
	.create		= cxgbe_flow_create,
	.destroy	= cxgbe_flow_destroy,
	.flush		= cxgbe_flow_flush,
	.query		= cxgbe_flow_query,
	.isolate	= NULL,
};

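/*
 * Generic filter ctrl entry point; the only supported operation is
 * handing out the rte_flow ops table above.
 */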
int
cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
		      enum rte_filter_type filter_type,
		      enum rte_filter_op filter_op,
		      void *arg)
{
	int ret = 0;

	RTE_SET_USED(dev);
	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		*(const void **)arg = &cxgbe_flow_ops;
		break;
	default:
		ret = -ENOTSUP;
		break;
	}
	return ret;
}