xref: /dpdk/drivers/net/sfc/sfc_flow.c (revision 2e2e5bdf908ef7ce6ba7a33be5bec6f42f4a39fe)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright (c) 2017-2018 Solarflare Communications Inc.
4  * All rights reserved.
5  *
6  * This software was jointly developed between OKTET Labs (under contract
7  * for Solarflare) and Solarflare Communications, Inc.
8  */
9 
10 #include <rte_byteorder.h>
11 #include <rte_tailq.h>
12 #include <rte_common.h>
13 #include <rte_ethdev_driver.h>
14 #include <rte_ether.h>
15 #include <rte_flow.h>
16 #include <rte_flow_driver.h>
17 
18 #include "efx.h"
19 
20 #include "sfc.h"
21 #include "sfc_rx.h"
22 #include "sfc_filter.h"
23 #include "sfc_flow.h"
24 #include "sfc_log.h"
25 #include "sfc_dp_rx.h"
26 
27 struct sfc_flow_ops_by_spec {
28 	sfc_flow_parse_cb_t	*parse;
29 	sfc_flow_insert_cb_t	*insert;
30 	sfc_flow_remove_cb_t	*remove;
31 };
32 
33 static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter;
34 static sfc_flow_insert_cb_t sfc_flow_filter_insert;
35 static sfc_flow_remove_cb_t sfc_flow_filter_remove;
36 
37 static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = {
38 	.parse = sfc_flow_parse_rte_to_filter,
39 	.insert = sfc_flow_filter_insert,
40 	.remove = sfc_flow_filter_remove,
41 };
42 
43 static const struct sfc_flow_ops_by_spec *
44 sfc_flow_get_ops_by_spec(struct rte_flow *flow)
45 {
46 	struct sfc_flow_spec *spec = &flow->spec;
47 	const struct sfc_flow_ops_by_spec *ops = NULL;
48 
49 	switch (spec->type) {
50 	case SFC_FLOW_SPEC_FILTER:
51 		ops = &sfc_flow_ops_filter;
52 		break;
53 	default:
54 		SFC_ASSERT(false);
55 		break;
56 	}
57 
58 	return ops;
59 }
60 
61 /*
62  * Currently, the filter-based (VNIC) flow API is implemented so that
63  * each flow rule is converted to one or more hardware filters.
64  * All elements of a flow rule (attributes, pattern items, actions)
65  * correspond to one or more fields in the efx_filter_spec_s structure
66  * that describes the hardware filter.
67  * If some required field is left unset in the flow rule, several
68  * copies of the filter are created to cover all possible values
69  * of that field.
70  */
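
/*
 * Illustrative sketch (an assumption about one possible expansion, not
 * driver code): a rule that matches on EtherType only, leaving the
 * destination MAC unset, may be expanded into two copies that differ
 * only in the unknown-destination match flag:
 *
 *	filters[0].efs_match_flags |= EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
 *	filters[1].efs_match_flags |= EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
 *
 * Both copies share all other fields, so together they behave like a
 * single filter with the destination type left unspecified.
 */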
71 
72 static sfc_flow_item_parse sfc_flow_parse_void;
73 static sfc_flow_item_parse sfc_flow_parse_eth;
74 static sfc_flow_item_parse sfc_flow_parse_vlan;
75 static sfc_flow_item_parse sfc_flow_parse_ipv4;
76 static sfc_flow_item_parse sfc_flow_parse_ipv6;
77 static sfc_flow_item_parse sfc_flow_parse_tcp;
78 static sfc_flow_item_parse sfc_flow_parse_udp;
79 static sfc_flow_item_parse sfc_flow_parse_vxlan;
80 static sfc_flow_item_parse sfc_flow_parse_geneve;
81 static sfc_flow_item_parse sfc_flow_parse_nvgre;
82 
83 typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
84 				     unsigned int filters_count_for_one_val,
85 				     struct rte_flow_error *error);
86 
87 typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
88 					efx_filter_spec_t *spec,
89 					struct sfc_filter *filter);
90 
91 struct sfc_flow_copy_flag {
92 	/* EFX filter specification match flag */
93 	efx_filter_match_flags_t flag;
94 	/* Number of values of corresponding field */
95 	unsigned int vals_count;
96 	/* Function to set values in specifications */
97 	sfc_flow_spec_set_vals *set_vals;
98 	/*
99 	 * Function to check that the specification is suitable
100 	 * for adding this match flag
101 	 */
102 	sfc_flow_spec_check *spec_check;
103 };
104 
105 static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
106 static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
107 static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
108 static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
109 static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
110 static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag;
111 static sfc_flow_spec_check sfc_flow_check_outer_vid_flag;
112 
113 static boolean_t
114 sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
115 {
116 	uint8_t sum = 0;
117 	unsigned int i;
118 
119 	for (i = 0; i < size; i++)
120 		sum |= buf[i];
121 
122 	return (sum == 0) ? B_TRUE : B_FALSE;
123 }
124 
125 /*
126  * Validate the item and prepare the spec and mask structures for parsing
127  */
128 int
129 sfc_flow_parse_init(const struct rte_flow_item *item,
130 		    const void **spec_ptr,
131 		    const void **mask_ptr,
132 		    const void *supp_mask,
133 		    const void *def_mask,
134 		    unsigned int size,
135 		    struct rte_flow_error *error)
136 {
137 	const uint8_t *spec;
138 	const uint8_t *mask;
139 	const uint8_t *last;
140 	uint8_t supp;
141 	unsigned int i;
142 
143 	if (item == NULL) {
144 		rte_flow_error_set(error, EINVAL,
145 				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
146 				   "NULL item");
147 		return -rte_errno;
148 	}
149 
150 	if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
151 		rte_flow_error_set(error, EINVAL,
152 				   RTE_FLOW_ERROR_TYPE_ITEM, item,
153 				   "Mask or last is set without spec");
154 		return -rte_errno;
155 	}
156 
157 	/*
158 	 * If "mask" is not set, the default mask is used,
159 	 * but if the default mask is NULL, "mask" must be set
160 	 */
161 	if (item->mask == NULL) {
162 		if (def_mask == NULL) {
163 			rte_flow_error_set(error, EINVAL,
164 				RTE_FLOW_ERROR_TYPE_ITEM, NULL,
165 				"Mask should be specified");
166 			return -rte_errno;
167 		}
168 
169 		mask = def_mask;
170 	} else {
171 		mask = item->mask;
172 	}
173 
174 	spec = item->spec;
175 	last = item->last;
176 
177 	if (spec == NULL)
178 		goto exit;
179 
180 	/*
181 	 * If the field values in "last" are either 0 or equal to the
182 	 * corresponding values in "spec", then they are ignored
183 	 */
184 	if (last != NULL &&
185 	    !sfc_flow_is_zero(last, size) &&
186 	    memcmp(last, spec, size) != 0) {
187 		rte_flow_error_set(error, ENOTSUP,
188 				   RTE_FLOW_ERROR_TYPE_ITEM, item,
189 				   "Ranging is not supported");
190 		return -rte_errno;
191 	}
192 
193 	if (supp_mask == NULL) {
194 		rte_flow_error_set(error, EINVAL,
195 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
196 			"Supported mask for item should be specified");
197 		return -rte_errno;
198 	}
199 
200 	/* Check that mask does not ask for more match than supp_mask */
201 	for (i = 0; i < size; i++) {
202 		supp = ((const uint8_t *)supp_mask)[i];
203 
204 		if (~supp & mask[i]) {
205 			rte_flow_error_set(error, ENOTSUP,
206 					   RTE_FLOW_ERROR_TYPE_ITEM, item,
207 					   "Item's field is not supported");
208 			return -rte_errno;
209 		}
210 	}
211 
212 exit:
213 	*spec_ptr = spec;
214 	*mask_ptr = mask;
215 	return 0;
216 }
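
/*
 * Usage sketch for sfc_flow_parse_init() (hypothetical caller that
 * mirrors the protocol parsers below; the UDP masks and the in-scope
 * "item"/"error" variables are assumptions):
 *
 *	const struct rte_flow_item_udp *spec = NULL;
 *	const struct rte_flow_item_udp *mask = NULL;
 *	const struct rte_flow_item_udp supp_mask = {
 *		.hdr = { .src_port = 0xffff, .dst_port = 0xffff },
 *	};
 *	int rc;
 *
 *	rc = sfc_flow_parse_init(item, (const void **)&spec,
 *				 (const void **)&mask, &supp_mask,
 *				 &rte_flow_item_udp_mask,
 *				 sizeof(struct rte_flow_item_udp), error);
 *	if (rc != 0)
 *		return rc;
 *	if (spec == NULL)
 *		return 0; // item matches any UDP header
 */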
217 
218 /*
219  * Protocol parsers.
220  * Partial masking is not supported, so each field mask in an item
221  * must be either full or empty (zeroed), and only fields specified
222  * in the corresponding supp_mask may be set.
223  */
224 
225 static int
226 sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
227 		    __rte_unused struct sfc_flow_parse_ctx *parse_ctx,
228 		    __rte_unused struct rte_flow_error *error)
229 {
230 	return 0;
231 }
232 
233 /**
234  * Convert Ethernet item to EFX filter specification.
235  *
236  * @param[in] item
237  *   Item specification. The outer frame specification may comprise only
238  *   source/destination addresses and the EtherType field.
239  *   The inner frame specification may contain the destination address only.
240  *   Individual/group masks are supported, as well as empty and full ones.
241  *   If the mask is NULL, the default mask is used. Ranging is not supported.
242  * @param[in, out] parse_ctx
243  *   Parse context, carrying the EFX filter specification to update.
244  * @param[out] error
245  *   Perform verbose error reporting if not NULL.
246  */
247 static int
248 sfc_flow_parse_eth(const struct rte_flow_item *item,
249 		   struct sfc_flow_parse_ctx *parse_ctx,
250 		   struct rte_flow_error *error)
251 {
252 	int rc;
253 	efx_filter_spec_t *efx_spec = parse_ctx->filter;
254 	const struct rte_flow_item_eth *spec = NULL;
255 	const struct rte_flow_item_eth *mask = NULL;
256 	const struct rte_flow_item_eth supp_mask = {
257 		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
258 		.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
259 		.type = 0xffff,
260 	};
261 	const struct rte_flow_item_eth ifrm_supp_mask = {
262 		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
263 	};
264 	const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
265 		0x01, 0x00, 0x00, 0x00, 0x00, 0x00
266 	};
267 	const struct rte_flow_item_eth *supp_mask_p;
268 	const struct rte_flow_item_eth *def_mask_p;
269 	uint8_t *loc_mac = NULL;
270 	boolean_t is_ifrm = (efx_spec->efs_encap_type !=
271 		EFX_TUNNEL_PROTOCOL_NONE);
272 
273 	if (is_ifrm) {
274 		supp_mask_p = &ifrm_supp_mask;
275 		def_mask_p = &ifrm_supp_mask;
276 		loc_mac = efx_spec->efs_ifrm_loc_mac;
277 	} else {
278 		supp_mask_p = &supp_mask;
279 		def_mask_p = &rte_flow_item_eth_mask;
280 		loc_mac = efx_spec->efs_loc_mac;
281 	}
282 
283 	rc = sfc_flow_parse_init(item,
284 				 (const void **)&spec,
285 				 (const void **)&mask,
286 				 supp_mask_p, def_mask_p,
287 				 sizeof(struct rte_flow_item_eth),
288 				 error);
289 	if (rc != 0)
290 		return rc;
291 
292 	/* If "spec" is not set, could be any Ethernet */
293 	if (spec == NULL)
294 		return 0;
295 
296 	if (rte_is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
297 		efx_spec->efs_match_flags |= is_ifrm ?
298 			EFX_FILTER_MATCH_IFRM_LOC_MAC :
299 			EFX_FILTER_MATCH_LOC_MAC;
300 		rte_memcpy(loc_mac, spec->dst.addr_bytes,
301 			   EFX_MAC_ADDR_LEN);
302 	} else if (memcmp(mask->dst.addr_bytes, ig_mask,
303 			  EFX_MAC_ADDR_LEN) == 0) {
304 		if (rte_is_unicast_ether_addr(&spec->dst))
305 			efx_spec->efs_match_flags |= is_ifrm ?
306 				EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
307 				EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
308 		else
309 			efx_spec->efs_match_flags |= is_ifrm ?
310 				EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
311 				EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
312 	} else if (!rte_is_zero_ether_addr(&mask->dst)) {
313 		goto fail_bad_mask;
314 	}
315 
316 	/*
317 	 * ifrm_supp_mask ensures that the source address and
318 	 * EtherType masks are zero for the inner frame,
319 	 * so these fields are filled in only for the outer frame
320 	 */
321 	if (rte_is_same_ether_addr(&mask->src, &supp_mask.src)) {
322 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
323 		rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
324 			   EFX_MAC_ADDR_LEN);
325 	} else if (!rte_is_zero_ether_addr(&mask->src)) {
326 		goto fail_bad_mask;
327 	}
328 
329 	/*
330 	 * The EtherType is in big-endian byte order in the item and
331 	 * in little-endian in efx_spec, so a byte swap is used
332 	 */
333 	if (mask->type == supp_mask.type) {
334 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
335 		efx_spec->efs_ether_type = rte_bswap16(spec->type);
336 	} else if (mask->type != 0) {
337 		goto fail_bad_mask;
338 	}
339 
340 	return 0;
341 
342 fail_bad_mask:
343 	rte_flow_error_set(error, EINVAL,
344 			   RTE_FLOW_ERROR_TYPE_ITEM, item,
345 			   "Bad mask in the ETH pattern item");
346 	return -rte_errno;
347 }
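
/*
 * Example ETH item this parser accepts (assumed application-side code;
 * the address is illustrative): a fully-masked destination MAC match.
 *
 *	static const struct rte_flow_item_eth eth_spec = {
 *		.dst.addr_bytes = { 0x00, 0x53, 0x00, 0x00, 0x00, 0x01 },
 *	};
 *	static const struct rte_flow_item_eth eth_mask = {
 *		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
 *	};
 *	const struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_ETH,
 *		.spec = &eth_spec,
 *		.mask = &eth_mask,
 *	};
 *
 * Such an item results in EFX_FILTER_MATCH_LOC_MAC being set and the
 * address copied to efs_loc_mac (or efs_ifrm_loc_mac for inner frames).
 */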
348 
349 /**
350  * Convert VLAN item to EFX filter specification.
351  *
352  * @param[in] item
353  *   Item specification. Only the VID field is supported.
354  *   The mask cannot be NULL. Ranging is not supported.
355  * @param[in, out] parse_ctx
356  *   Parse context, carrying the EFX filter specification to update.
357  * @param[out] error
358  *   Perform verbose error reporting if not NULL.
359  */
360 static int
361 sfc_flow_parse_vlan(const struct rte_flow_item *item,
362 		    struct sfc_flow_parse_ctx *parse_ctx,
363 		    struct rte_flow_error *error)
364 {
365 	int rc;
366 	uint16_t vid;
367 	efx_filter_spec_t *efx_spec = parse_ctx->filter;
368 	const struct rte_flow_item_vlan *spec = NULL;
369 	const struct rte_flow_item_vlan *mask = NULL;
370 	const struct rte_flow_item_vlan supp_mask = {
371 		.tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
372 		.inner_type = RTE_BE16(0xffff),
373 	};
374 
375 	rc = sfc_flow_parse_init(item,
376 				 (const void **)&spec,
377 				 (const void **)&mask,
378 				 &supp_mask,
379 				 NULL,
380 				 sizeof(struct rte_flow_item_vlan),
381 				 error);
382 	if (rc != 0)
383 		return rc;
384 
385 	/*
386 	 * The VID is in big-endian byte order in the item and
387 	 * in little-endian in efx_spec, so a byte swap is used.
388 	 * If two VLAN items are included, the first matches
389 	 * the outer tag and the second matches the inner tag.
390 	 */
391 	if (mask->tci == supp_mask.tci) {
392 		/* Apply mask to keep VID only */
393 		vid = rte_bswap16(spec->tci & mask->tci);
394 
395 		if (!(efx_spec->efs_match_flags &
396 		      EFX_FILTER_MATCH_OUTER_VID)) {
397 			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
398 			efx_spec->efs_outer_vid = vid;
399 		} else if (!(efx_spec->efs_match_flags &
400 			     EFX_FILTER_MATCH_INNER_VID)) {
401 			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
402 			efx_spec->efs_inner_vid = vid;
403 		} else {
404 			rte_flow_error_set(error, EINVAL,
405 					   RTE_FLOW_ERROR_TYPE_ITEM, item,
406 					   "More than two VLAN items");
407 			return -rte_errno;
408 		}
409 	} else {
410 		rte_flow_error_set(error, EINVAL,
411 				   RTE_FLOW_ERROR_TYPE_ITEM, item,
412 				   "VLAN ID in TCI match is required");
413 		return -rte_errno;
414 	}
415 
416 	if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
417 		rte_flow_error_set(error, EINVAL,
418 				   RTE_FLOW_ERROR_TYPE_ITEM, item,
419 				   "VLAN TPID matching is not supported");
420 		return -rte_errno;
421 	}
422 	if (mask->inner_type == supp_mask.inner_type) {
423 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
424 		efx_spec->efs_ether_type = rte_bswap16(spec->inner_type);
425 	} else if (mask->inner_type) {
426 		rte_flow_error_set(error, EINVAL,
427 				   RTE_FLOW_ERROR_TYPE_ITEM, item,
428 				   "Bad mask for VLAN inner_type");
429 		return -rte_errno;
430 	}
431 
432 	return 0;
433 }
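
/*
 * Sketch of a double-tagged match (assumed application-side pattern):
 *
 *	ETH / VLAN (VID 100) / VLAN (VID 200) / END
 *
 * The first VLAN item sets EFX_FILTER_MATCH_OUTER_VID with VID 100 and
 * the second sets EFX_FILTER_MATCH_INNER_VID with VID 200; a third VLAN
 * item is rejected above with "More than two VLAN items".
 */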
434 
435 /**
436  * Convert IPv4 item to EFX filter specification.
437  *
438  * @param[in] item
439  *   Item specification. Only the source and destination addresses and
440  *   the protocol field are supported. If the mask is NULL, the default
441  *   mask is used. Ranging is not supported.
442  * @param[in, out] parse_ctx
443  *   Parse context, carrying the EFX filter specification to update.
444  * @param[out] error
445  *   Perform verbose error reporting if not NULL.
446  */
447 static int
448 sfc_flow_parse_ipv4(const struct rte_flow_item *item,
449 		    struct sfc_flow_parse_ctx *parse_ctx,
450 		    struct rte_flow_error *error)
451 {
452 	int rc;
453 	efx_filter_spec_t *efx_spec = parse_ctx->filter;
454 	const struct rte_flow_item_ipv4 *spec = NULL;
455 	const struct rte_flow_item_ipv4 *mask = NULL;
456 	const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
457 	const struct rte_flow_item_ipv4 supp_mask = {
458 		.hdr = {
459 			.src_addr = 0xffffffff,
460 			.dst_addr = 0xffffffff,
461 			.next_proto_id = 0xff,
462 		}
463 	};
464 
465 	rc = sfc_flow_parse_init(item,
466 				 (const void **)&spec,
467 				 (const void **)&mask,
468 				 &supp_mask,
469 				 &rte_flow_item_ipv4_mask,
470 				 sizeof(struct rte_flow_item_ipv4),
471 				 error);
472 	if (rc != 0)
473 		return rc;
474 
475 	/*
476 	 * Filtering by IPv4 source and destination addresses requires
477 	 * the appropriate ETHER_TYPE in hardware filters
478 	 */
479 	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
480 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
481 		efx_spec->efs_ether_type = ether_type_ipv4;
482 	} else if (efx_spec->efs_ether_type != ether_type_ipv4) {
483 		rte_flow_error_set(error, EINVAL,
484 			RTE_FLOW_ERROR_TYPE_ITEM, item,
485 			"Ethertype in pattern with IPV4 item should be appropriate");
486 		return -rte_errno;
487 	}
488 
489 	if (spec == NULL)
490 		return 0;
491 
492 	/*
493 	 * IPv4 addresses are in big-endian byte order in both the item
494 	 * and efx_spec
495 	 */
496 	if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
497 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
498 		efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
499 	} else if (mask->hdr.src_addr != 0) {
500 		goto fail_bad_mask;
501 	}
502 
503 	if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
504 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
505 		efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
506 	} else if (mask->hdr.dst_addr != 0) {
507 		goto fail_bad_mask;
508 	}
509 
510 	if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
511 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
512 		efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
513 	} else if (mask->hdr.next_proto_id != 0) {
514 		goto fail_bad_mask;
515 	}
516 
517 	return 0;
518 
519 fail_bad_mask:
520 	rte_flow_error_set(error, EINVAL,
521 			   RTE_FLOW_ERROR_TYPE_ITEM, item,
522 			   "Bad mask in the IPV4 pattern item");
523 	return -rte_errno;
524 }
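
/*
 * Example IPv4 item this parser accepts (assumed application-side code;
 * the address is from the documentation range): a fully-masked
 * destination address. The parser also pins the filter EtherType to
 * IPv4, per the check above.
 *
 *	const struct rte_flow_item_ipv4 ipv4_spec = {
 *		.hdr = { .dst_addr = RTE_BE32(RTE_IPV4(192, 0, 2, 1)) },
 *	};
 *	const struct rte_flow_item_ipv4 ipv4_mask = {
 *		.hdr = { .dst_addr = RTE_BE32(0xffffffff) },
 *	};
 */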
525 
526 /**
527  * Convert IPv6 item to EFX filter specification.
528  *
529  * @param[in] item
530  *   Item specification. Only the source and destination addresses and
531  *   the next header field are supported. If the mask is NULL, the
532  *   default mask is used. Ranging is not supported.
533  * @param[in, out] parse_ctx
534  *   Parse context, carrying the EFX filter specification to update.
535  * @param[out] error
536  *   Perform verbose error reporting if not NULL.
537  */
538 static int
539 sfc_flow_parse_ipv6(const struct rte_flow_item *item,
540 		    struct sfc_flow_parse_ctx *parse_ctx,
541 		    struct rte_flow_error *error)
542 {
543 	int rc;
544 	efx_filter_spec_t *efx_spec = parse_ctx->filter;
545 	const struct rte_flow_item_ipv6 *spec = NULL;
546 	const struct rte_flow_item_ipv6 *mask = NULL;
547 	const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
548 	const struct rte_flow_item_ipv6 supp_mask = {
549 		.hdr = {
550 			.src_addr = { 0xff, 0xff, 0xff, 0xff,
551 				      0xff, 0xff, 0xff, 0xff,
552 				      0xff, 0xff, 0xff, 0xff,
553 				      0xff, 0xff, 0xff, 0xff },
554 			.dst_addr = { 0xff, 0xff, 0xff, 0xff,
555 				      0xff, 0xff, 0xff, 0xff,
556 				      0xff, 0xff, 0xff, 0xff,
557 				      0xff, 0xff, 0xff, 0xff },
558 			.proto = 0xff,
559 		}
560 	};
561 
562 	rc = sfc_flow_parse_init(item,
563 				 (const void **)&spec,
564 				 (const void **)&mask,
565 				 &supp_mask,
566 				 &rte_flow_item_ipv6_mask,
567 				 sizeof(struct rte_flow_item_ipv6),
568 				 error);
569 	if (rc != 0)
570 		return rc;
571 
572 	/*
573 	 * Filtering by IPv6 source and destination addresses requires
574 	 * the appropriate ETHER_TYPE in hardware filters
575 	 */
576 	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
577 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
578 		efx_spec->efs_ether_type = ether_type_ipv6;
579 	} else if (efx_spec->efs_ether_type != ether_type_ipv6) {
580 		rte_flow_error_set(error, EINVAL,
581 			RTE_FLOW_ERROR_TYPE_ITEM, item,
582 			"Ethertype in pattern with IPV6 item should be appropriate");
583 		return -rte_errno;
584 	}
585 
586 	if (spec == NULL)
587 		return 0;
588 
589 	/*
590 	 * IPv6 addresses are in big-endian byte order in both the item
591 	 * and efx_spec
592 	 */
593 	if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
594 		   sizeof(mask->hdr.src_addr)) == 0) {
595 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
596 
597 		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
598 				 sizeof(spec->hdr.src_addr));
599 		rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
600 			   sizeof(efx_spec->efs_rem_host));
601 	} else if (!sfc_flow_is_zero(mask->hdr.src_addr,
602 				     sizeof(mask->hdr.src_addr))) {
603 		goto fail_bad_mask;
604 	}
605 
606 	if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
607 		   sizeof(mask->hdr.dst_addr)) == 0) {
608 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
609 
610 		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
611 				 sizeof(spec->hdr.dst_addr));
612 		rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
613 			   sizeof(efx_spec->efs_loc_host));
614 	} else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
615 				     sizeof(mask->hdr.dst_addr))) {
616 		goto fail_bad_mask;
617 	}
618 
619 	if (mask->hdr.proto == supp_mask.hdr.proto) {
620 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
621 		efx_spec->efs_ip_proto = spec->hdr.proto;
622 	} else if (mask->hdr.proto != 0) {
623 		goto fail_bad_mask;
624 	}
625 
626 	return 0;
627 
628 fail_bad_mask:
629 	rte_flow_error_set(error, EINVAL,
630 			   RTE_FLOW_ERROR_TYPE_ITEM, item,
631 			   "Bad mask in the IPV6 pattern item");
632 	return -rte_errno;
633 }
634 
635 /**
636  * Convert TCP item to EFX filter specification.
637  *
638  * @param[in] item
639  *   Item specification. Only the source and destination port fields
640  *   are supported. If the mask is NULL, the default mask is used.
641  *   Ranging is not supported.
642  * @param[in, out] parse_ctx
643  *   Parse context, carrying the EFX filter specification to update.
644  * @param[out] error
645  *   Perform verbose error reporting if not NULL.
646  */
647 static int
648 sfc_flow_parse_tcp(const struct rte_flow_item *item,
649 		   struct sfc_flow_parse_ctx *parse_ctx,
650 		   struct rte_flow_error *error)
651 {
652 	int rc;
653 	efx_filter_spec_t *efx_spec = parse_ctx->filter;
654 	const struct rte_flow_item_tcp *spec = NULL;
655 	const struct rte_flow_item_tcp *mask = NULL;
656 	const struct rte_flow_item_tcp supp_mask = {
657 		.hdr = {
658 			.src_port = 0xffff,
659 			.dst_port = 0xffff,
660 		}
661 	};
662 
663 	rc = sfc_flow_parse_init(item,
664 				 (const void **)&spec,
665 				 (const void **)&mask,
666 				 &supp_mask,
667 				 &rte_flow_item_tcp_mask,
668 				 sizeof(struct rte_flow_item_tcp),
669 				 error);
670 	if (rc != 0)
671 		return rc;
672 
673 	/*
674 	 * Filtering by TCP source and destination ports requires
675 	 * the appropriate IP_PROTO in hardware filters
676 	 */
677 	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
678 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
679 		efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
680 	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
681 		rte_flow_error_set(error, EINVAL,
682 			RTE_FLOW_ERROR_TYPE_ITEM, item,
683 			"IP proto in pattern with TCP item should be appropriate");
684 		return -rte_errno;
685 	}
686 
687 	if (spec == NULL)
688 		return 0;
689 
690 	/*
691 	 * Source and destination ports are in big-endian byte order in the
692 	 * item and in little-endian in efx_spec, so a byte swap is used
693 	 */
694 	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
695 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
696 		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
697 	} else if (mask->hdr.src_port != 0) {
698 		goto fail_bad_mask;
699 	}
700 
701 	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
702 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
703 		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
704 	} else if (mask->hdr.dst_port != 0) {
705 		goto fail_bad_mask;
706 	}
707 
708 	return 0;
709 
710 fail_bad_mask:
711 	rte_flow_error_set(error, EINVAL,
712 			   RTE_FLOW_ERROR_TYPE_ITEM, item,
713 			   "Bad mask in the TCP pattern item");
714 	return -rte_errno;
715 }
716 
717 /**
718  * Convert UDP item to EFX filter specification.
719  *
720  * @param[in] item
721  *   Item specification. Only the source and destination port fields
722  *   are supported. If the mask is NULL, the default mask is used.
723  *   Ranging is not supported.
724  * @param[in, out] parse_ctx
725  *   Parse context, carrying the EFX filter specification to update.
726  * @param[out] error
727  *   Perform verbose error reporting if not NULL.
728  */
729 static int
730 sfc_flow_parse_udp(const struct rte_flow_item *item,
731 		   struct sfc_flow_parse_ctx *parse_ctx,
732 		   struct rte_flow_error *error)
733 {
734 	int rc;
735 	efx_filter_spec_t *efx_spec = parse_ctx->filter;
736 	const struct rte_flow_item_udp *spec = NULL;
737 	const struct rte_flow_item_udp *mask = NULL;
738 	const struct rte_flow_item_udp supp_mask = {
739 		.hdr = {
740 			.src_port = 0xffff,
741 			.dst_port = 0xffff,
742 		}
743 	};
744 
745 	rc = sfc_flow_parse_init(item,
746 				 (const void **)&spec,
747 				 (const void **)&mask,
748 				 &supp_mask,
749 				 &rte_flow_item_udp_mask,
750 				 sizeof(struct rte_flow_item_udp),
751 				 error);
752 	if (rc != 0)
753 		return rc;
754 
755 	/*
756 	 * Filtering by UDP source and destination ports requires
757 	 * the appropriate IP_PROTO in hardware filters
758 	 */
759 	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
760 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
761 		efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
762 	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
763 		rte_flow_error_set(error, EINVAL,
764 			RTE_FLOW_ERROR_TYPE_ITEM, item,
765 			"IP proto in pattern with UDP item should be appropriate");
766 		return -rte_errno;
767 	}
768 
769 	if (spec == NULL)
770 		return 0;
771 
772 	/*
773 	 * Source and destination ports are in big-endian byte order in the
774 	 * item and in little-endian in efx_spec, so a byte swap is used
775 	 */
776 	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
777 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
778 		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
779 	} else if (mask->hdr.src_port != 0) {
780 		goto fail_bad_mask;
781 	}
782 
783 	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
784 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
785 		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
786 	} else if (mask->hdr.dst_port != 0) {
787 		goto fail_bad_mask;
788 	}
789 
790 	return 0;
791 
792 fail_bad_mask:
793 	rte_flow_error_set(error, EINVAL,
794 			   RTE_FLOW_ERROR_TYPE_ITEM, item,
795 			   "Bad mask in the UDP pattern item");
796 	return -rte_errno;
797 }
798 
799 /*
800  * Filters for encapsulated packets match based on the EtherType and IP
801  * protocol in the outer frame.
802  */
803 static int
804 sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
805 					efx_filter_spec_t *efx_spec,
806 					uint8_t ip_proto,
807 					struct rte_flow_error *error)
808 {
809 	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
810 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
811 		efx_spec->efs_ip_proto = ip_proto;
812 	} else if (efx_spec->efs_ip_proto != ip_proto) {
813 		switch (ip_proto) {
814 		case EFX_IPPROTO_UDP:
815 			rte_flow_error_set(error, EINVAL,
816 				RTE_FLOW_ERROR_TYPE_ITEM, item,
817 				"Outer IP header protocol must be UDP "
818 				"in VxLAN/GENEVE pattern");
819 			return -rte_errno;
820 
821 		case EFX_IPPROTO_GRE:
822 			rte_flow_error_set(error, EINVAL,
823 				RTE_FLOW_ERROR_TYPE_ITEM, item,
824 				"Outer IP header protocol must be GRE "
825 				"in NVGRE pattern");
826 			return -rte_errno;
827 
828 		default:
829 			rte_flow_error_set(error, EINVAL,
830 				RTE_FLOW_ERROR_TYPE_ITEM, item,
831 				"Only VxLAN/GENEVE/NVGRE tunneling patterns "
832 				"are supported");
833 			return -rte_errno;
834 		}
835 	}
836 
837 	if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
838 	    efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
839 	    efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
840 		rte_flow_error_set(error, EINVAL,
841 			RTE_FLOW_ERROR_TYPE_ITEM, item,
842 			"Outer frame EtherType in pattern with tunneling "
843 			"must be IPv4 or IPv6");
844 		return -rte_errno;
845 	}
846 
847 	return 0;
848 }
849 
850 static int
851 sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
852 				  const uint8_t *vni_or_vsid_val,
853 				  const uint8_t *vni_or_vsid_mask,
854 				  const struct rte_flow_item *item,
855 				  struct rte_flow_error *error)
856 {
857 	const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
858 		0xff, 0xff, 0xff
859 	};
860 
861 	if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
862 		   EFX_VNI_OR_VSID_LEN) == 0) {
863 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
864 		rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
865 			   EFX_VNI_OR_VSID_LEN);
866 	} else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
867 		rte_flow_error_set(error, EINVAL,
868 				   RTE_FLOW_ERROR_TYPE_ITEM, item,
869 				   "Unsupported VNI/VSID mask");
870 		return -rte_errno;
871 	}
872 
873 	return 0;
874 }
875 
876 /**
877  * Convert VXLAN item to EFX filter specification.
878  *
879  * @param[in] item
880  *   Item specification. Only the VXLAN network identifier field is
881  *   supported. If the mask is NULL, the default mask is used.
882  *   Ranging is not supported.
883  * @param[in, out] parse_ctx
884  *   Parse context, carrying the EFX filter specification to update.
885  * @param[out] error
886  *   Perform verbose error reporting if not NULL.
887  */
888 static int
889 sfc_flow_parse_vxlan(const struct rte_flow_item *item,
890 		     struct sfc_flow_parse_ctx *parse_ctx,
891 		     struct rte_flow_error *error)
892 {
893 	int rc;
894 	efx_filter_spec_t *efx_spec = parse_ctx->filter;
895 	const struct rte_flow_item_vxlan *spec = NULL;
896 	const struct rte_flow_item_vxlan *mask = NULL;
897 	const struct rte_flow_item_vxlan supp_mask = {
898 		.vni = { 0xff, 0xff, 0xff }
899 	};
900 
901 	rc = sfc_flow_parse_init(item,
902 				 (const void **)&spec,
903 				 (const void **)&mask,
904 				 &supp_mask,
905 				 &rte_flow_item_vxlan_mask,
906 				 sizeof(struct rte_flow_item_vxlan),
907 				 error);
908 	if (rc != 0)
909 		return rc;
910 
911 	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
912 						     EFX_IPPROTO_UDP, error);
913 	if (rc != 0)
914 		return rc;
915 
916 	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
917 	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
918 
919 	if (spec == NULL)
920 		return 0;
921 
922 	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
923 					       mask->vni, item, error);
924 
925 	return rc;
926 }
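
/*
 * Sketch of a complete VXLAN pattern handled by this parser (assumed
 * application-side usage): outer L2..L4 items first, then VXLAN, then
 * the inner frame, which may contain only ETH items here (enforced in
 * sfc_flow_parse_pattern()).
 *
 *	ETH / IPV4 / UDP / VXLAN (vni = 42) / ETH (inner dst MAC) / END
 *
 * The VNI is copied into efs_vni_or_vsid and the inner destination MAC
 * goes to efs_ifrm_loc_mac via sfc_flow_parse_eth().
 */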
927 
928 /**
929  * Convert GENEVE item to EFX filter specification.
930  *
931  * @param[in] item
932  *   Item specification. Only the Virtual Network Identifier and the
933  *   protocol type fields are supported; the protocol type may only be
934  *   Ethernet (0x6558). If the mask is NULL, the default mask is used.
935  *   Ranging is not supported.
936  * @param[in, out] parse_ctx
937  *   Parse context, carrying the EFX filter specification to update.
938  * @param[out] error
939  *   Perform verbose error reporting if not NULL.
940  */
941 static int
942 sfc_flow_parse_geneve(const struct rte_flow_item *item,
943 		      struct sfc_flow_parse_ctx *parse_ctx,
944 		      struct rte_flow_error *error)
945 {
946 	int rc;
947 	efx_filter_spec_t *efx_spec = parse_ctx->filter;
948 	const struct rte_flow_item_geneve *spec = NULL;
949 	const struct rte_flow_item_geneve *mask = NULL;
950 	const struct rte_flow_item_geneve supp_mask = {
951 		.protocol = RTE_BE16(0xffff),
952 		.vni = { 0xff, 0xff, 0xff }
953 	};
954 
955 	rc = sfc_flow_parse_init(item,
956 				 (const void **)&spec,
957 				 (const void **)&mask,
958 				 &supp_mask,
959 				 &rte_flow_item_geneve_mask,
960 				 sizeof(struct rte_flow_item_geneve),
961 				 error);
962 	if (rc != 0)
963 		return rc;
964 
965 	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
966 						     EFX_IPPROTO_UDP, error);
967 	if (rc != 0)
968 		return rc;
969 
970 	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
971 	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
972 
973 	if (spec == NULL)
974 		return 0;
975 
976 	if (mask->protocol == supp_mask.protocol) {
977 		if (spec->protocol != rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
978 			rte_flow_error_set(error, EINVAL,
979 				RTE_FLOW_ERROR_TYPE_ITEM, item,
980 				"GENEVE encap. protocol must be Ethernet "
981 				"(0x6558) in the GENEVE pattern item");
982 			return -rte_errno;
983 		}
984 	} else if (mask->protocol != 0) {
985 		rte_flow_error_set(error, EINVAL,
986 			RTE_FLOW_ERROR_TYPE_ITEM, item,
987 			"Unsupported mask for GENEVE encap. protocol");
988 		return -rte_errno;
989 	}
990 
991 	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
992 					       mask->vni, item, error);
993 
994 	return rc;
995 }
996 
997 /**
998  * Convert NVGRE item to EFX filter specification.
999  *
1000  * @param item[in]
1001  *   Item specification. Only virtual subnet ID field is supported.
1002  *   If the mask is NULL, default mask will be used.
1003  *   Ranging is not supported.
1004  * @param efx_spec[in, out]
1005  *   EFX filter specification to update.
1006  * @param[out] error
1007  *   Perform verbose error reporting if not NULL.
1008  */
1009 static int
1010 sfc_flow_parse_nvgre(const struct rte_flow_item *item,
1011 		     struct sfc_flow_parse_ctx *parse_ctx,
1012 		     struct rte_flow_error *error)
1013 {
1014 	int rc;
1015 	efx_filter_spec_t *efx_spec = parse_ctx->filter;
1016 	const struct rte_flow_item_nvgre *spec = NULL;
1017 	const struct rte_flow_item_nvgre *mask = NULL;
1018 	const struct rte_flow_item_nvgre supp_mask = {
1019 		.tni = { 0xff, 0xff, 0xff }
1020 	};
1021 
1022 	rc = sfc_flow_parse_init(item,
1023 				 (const void **)&spec,
1024 				 (const void **)&mask,
1025 				 &supp_mask,
1026 				 &rte_flow_item_nvgre_mask,
1027 				 sizeof(struct rte_flow_item_nvgre),
1028 				 error);
1029 	if (rc != 0)
1030 		return rc;
1031 
1032 	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
1033 						     EFX_IPPROTO_GRE, error);
1034 	if (rc != 0)
1035 		return rc;
1036 
1037 	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
1038 	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
1039 
1040 	if (spec == NULL)
1041 		return 0;
1042 
1043 	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
1044 					       mask->tni, item, error);
1045 
1046 	return rc;
1047 }
1048 
1049 static const struct sfc_flow_item sfc_flow_items[] = {
1050 	{
1051 		.type = RTE_FLOW_ITEM_TYPE_VOID,
1052 		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1053 		.layer = SFC_FLOW_ITEM_ANY_LAYER,
1054 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1055 		.parse = sfc_flow_parse_void,
1056 	},
1057 	{
1058 		.type = RTE_FLOW_ITEM_TYPE_ETH,
1059 		.prev_layer = SFC_FLOW_ITEM_START_LAYER,
1060 		.layer = SFC_FLOW_ITEM_L2,
1061 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1062 		.parse = sfc_flow_parse_eth,
1063 	},
1064 	{
1065 		.type = RTE_FLOW_ITEM_TYPE_VLAN,
1066 		.prev_layer = SFC_FLOW_ITEM_L2,
1067 		.layer = SFC_FLOW_ITEM_L2,
1068 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1069 		.parse = sfc_flow_parse_vlan,
1070 	},
1071 	{
1072 		.type = RTE_FLOW_ITEM_TYPE_IPV4,
1073 		.prev_layer = SFC_FLOW_ITEM_L2,
1074 		.layer = SFC_FLOW_ITEM_L3,
1075 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1076 		.parse = sfc_flow_parse_ipv4,
1077 	},
1078 	{
1079 		.type = RTE_FLOW_ITEM_TYPE_IPV6,
1080 		.prev_layer = SFC_FLOW_ITEM_L2,
1081 		.layer = SFC_FLOW_ITEM_L3,
1082 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1083 		.parse = sfc_flow_parse_ipv6,
1084 	},
1085 	{
1086 		.type = RTE_FLOW_ITEM_TYPE_TCP,
1087 		.prev_layer = SFC_FLOW_ITEM_L3,
1088 		.layer = SFC_FLOW_ITEM_L4,
1089 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1090 		.parse = sfc_flow_parse_tcp,
1091 	},
1092 	{
1093 		.type = RTE_FLOW_ITEM_TYPE_UDP,
1094 		.prev_layer = SFC_FLOW_ITEM_L3,
1095 		.layer = SFC_FLOW_ITEM_L4,
1096 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1097 		.parse = sfc_flow_parse_udp,
1098 	},
1099 	{
1100 		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
1101 		.prev_layer = SFC_FLOW_ITEM_L4,
1102 		.layer = SFC_FLOW_ITEM_START_LAYER,
1103 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1104 		.parse = sfc_flow_parse_vxlan,
1105 	},
1106 	{
1107 		.type = RTE_FLOW_ITEM_TYPE_GENEVE,
1108 		.prev_layer = SFC_FLOW_ITEM_L4,
1109 		.layer = SFC_FLOW_ITEM_START_LAYER,
1110 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1111 		.parse = sfc_flow_parse_geneve,
1112 	},
1113 	{
1114 		.type = RTE_FLOW_ITEM_TYPE_NVGRE,
1115 		.prev_layer = SFC_FLOW_ITEM_L3,
1116 		.layer = SFC_FLOW_ITEM_START_LAYER,
1117 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1118 		.parse = sfc_flow_parse_nvgre,
1119 	},
1120 };
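
/*
 * The prev_layer/layer pairs above encode the permitted item order: a
 * pattern walks START -> L2 -> L3 -> L4, and a tunnel item resets the
 * layer to START for the inner frame. For instance, the table accepts
 *
 *	ETH (START->L2) / IPV4 (L2->L3) / UDP (L3->L4) /
 *	VXLAN (L4->START) / ETH (START->L2) / END
 *
 * VOID items may appear anywhere since both of their layers are ANY.
 */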
1121 
1122 /*
1123  * Protocol-independent flow API support
1124  */
1125 static int
1126 sfc_flow_parse_attr(const struct rte_flow_attr *attr,
1127 		    struct rte_flow *flow,
1128 		    struct rte_flow_error *error)
1129 {
1130 	struct sfc_flow_spec *spec = &flow->spec;
1131 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1132 
1133 	if (attr == NULL) {
1134 		rte_flow_error_set(error, EINVAL,
1135 				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1136 				   "NULL attribute");
1137 		return -rte_errno;
1138 	}
1139 	if (attr->group != 0) {
1140 		rte_flow_error_set(error, ENOTSUP,
1141 				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
1142 				   "Groups are not supported");
1143 		return -rte_errno;
1144 	}
1145 	if (attr->egress != 0) {
1146 		rte_flow_error_set(error, ENOTSUP,
1147 				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
1148 				   "Egress is not supported");
1149 		return -rte_errno;
1150 	}
1151 	if (attr->ingress == 0) {
1152 		rte_flow_error_set(error, ENOTSUP,
1153 				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
1154 				   "Ingress is compulsory");
1155 		return -rte_errno;
1156 	}
1157 	if (attr->transfer == 0) {
1158 		if (attr->priority != 0) {
1159 			rte_flow_error_set(error, ENOTSUP,
1160 					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1161 					   attr, "Priorities are unsupported");
1162 			return -rte_errno;
1163 		}
1164 		spec->type = SFC_FLOW_SPEC_FILTER;
1165 		spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX;
1166 		spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
1167 	} else {
1168 		rte_flow_error_set(error, ENOTSUP,
1169 				   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
1170 				   "Transfer is not supported");
1171 		return -rte_errno;
1172 	}
1173 
1174 	return 0;
1175 }
1176 
1177 /* Get item from array sfc_flow_items */
1178 static const struct sfc_flow_item *
1179 sfc_flow_get_item(const struct sfc_flow_item *items,
1180 		  unsigned int nb_items,
1181 		  enum rte_flow_item_type type)
1182 {
1183 	unsigned int i;
1184 
1185 	for (i = 0; i < nb_items; i++)
1186 		if (items[i].type == type)
1187 			return &items[i];
1188 
1189 	return NULL;
1190 }
1191 
1192 int
1193 sfc_flow_parse_pattern(const struct sfc_flow_item *flow_items,
1194 		       unsigned int nb_flow_items,
1195 		       const struct rte_flow_item pattern[],
1196 		       struct sfc_flow_parse_ctx *parse_ctx,
1197 		       struct rte_flow_error *error)
1198 {
1199 	int rc;
1200 	unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
1201 	boolean_t is_ifrm = B_FALSE;
1202 	const struct sfc_flow_item *item;
1203 
1204 	if (pattern == NULL) {
1205 		rte_flow_error_set(error, EINVAL,
1206 				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
1207 				   "NULL pattern");
1208 		return -rte_errno;
1209 	}
1210 
1211 	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
1212 		item = sfc_flow_get_item(flow_items, nb_flow_items,
1213 					 pattern->type);
1214 		if (item == NULL) {
1215 			rte_flow_error_set(error, ENOTSUP,
1216 					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1217 					   "Unsupported pattern item");
1218 			return -rte_errno;
1219 		}
1220 
1221 		/*
1222 		 * Omitting one or several protocol layers at the beginning
1223 		 * of the pattern is supported
1224 		 */
1225 		if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
1226 		    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
1227 		    item->prev_layer != prev_layer) {
1228 			rte_flow_error_set(error, ENOTSUP,
1229 					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1230 					   "Unexpected sequence of pattern items");
1231 			return -rte_errno;
1232 		}
1233 
1234 		/*
1235 		 * Allow only VOID and ETH pattern items in the inner frame.
1236 		 * Also check that there is only one tunneling protocol.
1237 		 */
1238 		switch (item->type) {
1239 		case RTE_FLOW_ITEM_TYPE_VOID:
1240 		case RTE_FLOW_ITEM_TYPE_ETH:
1241 			break;
1242 
1243 		case RTE_FLOW_ITEM_TYPE_VXLAN:
1244 		case RTE_FLOW_ITEM_TYPE_GENEVE:
1245 		case RTE_FLOW_ITEM_TYPE_NVGRE:
1246 			if (is_ifrm) {
1247 				rte_flow_error_set(error, EINVAL,
1248 					RTE_FLOW_ERROR_TYPE_ITEM,
1249 					pattern,
1250 					"More than one tunneling protocol");
1251 				return -rte_errno;
1252 			}
1253 			is_ifrm = B_TRUE;
1254 			break;
1255 
1256 		default:
1257 			if (is_ifrm) {
1258 				rte_flow_error_set(error, EINVAL,
1259 					RTE_FLOW_ERROR_TYPE_ITEM,
1260 					pattern,
1261 					"There is an unsupported pattern item "
1262 					"in the inner frame");
1263 				return -rte_errno;
1264 			}
1265 			break;
1266 		}
1267 
1268 		if (parse_ctx->type != item->ctx_type) {
1269 			rte_flow_error_set(error, EINVAL,
1270 					RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1271 					"Parse context type mismatch");
1272 			return -rte_errno;
1273 		}
1274 
1275 		rc = item->parse(pattern, parse_ctx, error);
1276 		if (rc != 0)
1277 			return rc;
1278 
1279 		if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
1280 			prev_layer = item->layer;
1281 	}
1282 
1283 	return 0;
1284 }
1285 
1286 static int
1287 sfc_flow_parse_queue(struct sfc_adapter *sa,
1288 		     const struct rte_flow_action_queue *queue,
1289 		     struct rte_flow *flow)
1290 {
1291 	struct sfc_flow_spec *spec = &flow->spec;
1292 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1293 	struct sfc_rxq *rxq;
1294 
1295 	if (queue->index >= sfc_sa2shared(sa)->rxq_count)
1296 		return -EINVAL;
1297 
1298 	rxq = &sa->rxq_ctrl[queue->index];
1299 	spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index;
1300 
1301 	return 0;
1302 }
1303 
1304 static int
1305 sfc_flow_parse_rss(struct sfc_adapter *sa,
1306 		   const struct rte_flow_action_rss *action_rss,
1307 		   struct rte_flow *flow)
1308 {
1309 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
1310 	struct sfc_rss *rss = &sas->rss;
1311 	unsigned int rxq_sw_index;
1312 	struct sfc_rxq *rxq;
1313 	unsigned int rxq_hw_index_min;
1314 	unsigned int rxq_hw_index_max;
1315 	efx_rx_hash_type_t efx_hash_types;
1316 	const uint8_t *rss_key;
1317 	struct sfc_flow_spec *spec = &flow->spec;
1318 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1319 	struct sfc_flow_rss *sfc_rss_conf = &spec_filter->rss_conf;
1320 	unsigned int i;
1321 
1322 	if (action_rss->queue_num == 0)
1323 		return -EINVAL;
1324 
1325 	rxq_sw_index = sfc_sa2shared(sa)->rxq_count - 1;
1326 	rxq = &sa->rxq_ctrl[rxq_sw_index];
1327 	rxq_hw_index_min = rxq->hw_index;
1328 	rxq_hw_index_max = 0;
1329 
1330 	for (i = 0; i < action_rss->queue_num; ++i) {
1331 		rxq_sw_index = action_rss->queue[i];
1332 
1333 		if (rxq_sw_index >= sfc_sa2shared(sa)->rxq_count)
1334 			return -EINVAL;
1335 
1336 		rxq = &sa->rxq_ctrl[rxq_sw_index];
1337 
1338 		if (rxq->hw_index < rxq_hw_index_min)
1339 			rxq_hw_index_min = rxq->hw_index;
1340 
1341 		if (rxq->hw_index > rxq_hw_index_max)
1342 			rxq_hw_index_max = rxq->hw_index;
1343 	}
1344 
1345 	switch (action_rss->func) {
1346 	case RTE_ETH_HASH_FUNCTION_DEFAULT:
1347 	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
1348 		break;
1349 	default:
1350 		return -EINVAL;
1351 	}
1352 
1353 	if (action_rss->level)
1354 		return -EINVAL;
1355 
1356 	/*
1357 	 * A dummy RSS action with only one queue and no specific settings
1358 	 * for hash types and key does not require a dedicated RSS context
1359 	 * and may be simplified to a single queue action.
1360 	 */
1361 	if (action_rss->queue_num == 1 && action_rss->types == 0 &&
1362 	    action_rss->key_len == 0) {
1363 		spec_filter->template.efs_dmaq_id = rxq_hw_index_min;
1364 		return 0;
1365 	}
1366 
1367 	if (action_rss->types) {
1368 		int rc;
1369 
1370 		rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types,
1371 					  &efx_hash_types);
1372 		if (rc != 0)
1373 			return -rc;
1374 	} else {
1375 		unsigned int i;
1376 
1377 		efx_hash_types = 0;
1378 		for (i = 0; i < rss->hf_map_nb_entries; ++i)
1379 			efx_hash_types |= rss->hf_map[i].efx;
1380 	}
1381 
1382 	if (action_rss->key_len) {
1383 		if (action_rss->key_len != sizeof(rss->key))
1384 			return -EINVAL;
1385 
1386 		rss_key = action_rss->key;
1387 	} else {
1388 		rss_key = rss->key;
1389 	}
1390 
1391 	spec_filter->rss = B_TRUE;
1392 
1393 	sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
1394 	sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
1395 	sfc_rss_conf->rss_hash_types = efx_hash_types;
1396 	rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key));
1397 
1398 	for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
1399 		unsigned int nb_queues = action_rss->queue_num;
1400 		unsigned int rxq_sw_index = action_rss->queue[i % nb_queues];
1401 		struct sfc_rxq *rxq = &sa->rxq_ctrl[rxq_sw_index];
1402 
1403 		sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
1404 	}
1405 
1406 	return 0;
1407 }
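
/*
 * Example RSS action accepted by this parser (assumed application-side
 * code): Toeplitz hash over non-fragmented IPv4/TCP spread across two
 * queues, using the device RSS key.
 *
 *	const uint16_t rss_queues[] = { 0, 1 };
 *	const struct rte_flow_action_rss rss_conf = {
 *		.func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
 *		.level = 0,
 *		.types = ETH_RSS_NONFRAG_IPV4_TCP,
 *		.key_len = 0,	// 0 selects the device RSS key
 *		.key = NULL,
 *		.queue_num = RTE_DIM(rss_queues),
 *		.queue = rss_queues,
 *	};
 */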
1408 
1409 static int
1410 sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
1411 		    unsigned int filters_count)
1412 {
1413 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1414 	unsigned int i;
1415 	int ret = 0;
1416 
1417 	for (i = 0; i < filters_count; i++) {
1418 		int rc;
1419 
1420 		rc = efx_filter_remove(sa->nic, &spec_filter->filters[i]);
1421 		if (ret == 0 && rc != 0) {
1422 			sfc_err(sa, "failed to remove filter specification "
1423 				"(rc = %d)", rc);
1424 			ret = rc;
1425 		}
1426 	}
1427 
1428 	return ret;
1429 }
1430 
1431 static int
1432 sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1433 {
1434 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1435 	unsigned int i;
1436 	int rc = 0;
1437 
1438 	for (i = 0; i < spec_filter->count; i++) {
1439 		rc = efx_filter_insert(sa->nic, &spec_filter->filters[i]);
1440 		if (rc != 0) {
1441 			sfc_flow_spec_flush(sa, spec, i);
1442 			break;
1443 		}
1444 	}
1445 
1446 	return rc;
1447 }
1448 
1449 static int
1450 sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1451 {
1452 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1453 
1454 	return sfc_flow_spec_flush(sa, spec, spec_filter->count);
1455 }
1456 
1457 static int
1458 sfc_flow_filter_insert(struct sfc_adapter *sa,
1459 		       struct rte_flow *flow)
1460 {
1461 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
1462 	struct sfc_rss *rss = &sas->rss;
1463 	struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1464 	struct sfc_flow_rss *flow_rss = &spec_filter->rss_conf;
1465 	uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
1466 	unsigned int i;
1467 	int rc = 0;
1468 
1469 	if (spec_filter->rss) {
1470 		unsigned int rss_spread = MIN(flow_rss->rxq_hw_index_max -
1471 					      flow_rss->rxq_hw_index_min + 1,
1472 					      EFX_MAXRSS);
1473 
1474 		rc = efx_rx_scale_context_alloc(sa->nic,
1475 						EFX_RX_SCALE_EXCLUSIVE,
1476 						rss_spread,
1477 						&efs_rss_context);
1478 		if (rc != 0)
1479 			goto fail_scale_context_alloc;
1480 
1481 		rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
1482 					   rss->hash_alg,
1483 					   flow_rss->rss_hash_types, B_TRUE);
1484 		if (rc != 0)
1485 			goto fail_scale_mode_set;
1486 
1487 		rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
1488 					  flow_rss->rss_key,
1489 					  sizeof(rss->key));
1490 		if (rc != 0)
1491 			goto fail_scale_key_set;
1492 
1493 		/*
1494 		 * At this point, fully elaborated filter specifications
1495 		 * have been produced from the template. To make sure that
1496 		 * RSS behaviour is consistent between them, set the same
1497 		 * RSS context value everywhere.
1498 		 */
1499 		for (i = 0; i < spec_filter->count; i++) {
1500 			efx_filter_spec_t *spec = &spec_filter->filters[i];
1501 
1502 			spec->efs_rss_context = efs_rss_context;
1503 			spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
1504 			spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
1505 		}
1506 	}
1507 
1508 	rc = sfc_flow_spec_insert(sa, &flow->spec);
1509 	if (rc != 0)
1510 		goto fail_filter_insert;
1511 
1512 	if (spec_filter->rss) {
1513 		/*
1514 		 * The scale table is set after filter insertion because
1515 		 * the table entries are relative to the base RxQ ID,
1516 		 * and the latter is submitted to the HW by means of
1517 		 * inserting a filter, so by the time of the request
1518 		 * the HW knows all the information needed to verify
1519 		 * the table entries, and the operation will succeed
1520 		 */
1521 		rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
1522 					  flow_rss->rss_tbl,
1523 					  RTE_DIM(flow_rss->rss_tbl));
1524 		if (rc != 0)
1525 			goto fail_scale_tbl_set;
1526 	}
1527 
1528 	return 0;
1529 
1530 fail_scale_tbl_set:
1531 	sfc_flow_spec_remove(sa, &flow->spec);
1532 
1533 fail_filter_insert:
1534 fail_scale_key_set:
1535 fail_scale_mode_set:
1536 	if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT)
1537 		efx_rx_scale_context_free(sa->nic, efs_rss_context);
1538 
1539 fail_scale_context_alloc:
1540 	return rc;
1541 }
1542 
1543 static int
1544 sfc_flow_filter_remove(struct sfc_adapter *sa,
1545 		       struct rte_flow *flow)
1546 {
1547 	struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1548 	int rc = 0;
1549 
1550 	rc = sfc_flow_spec_remove(sa, &flow->spec);
1551 	if (rc != 0)
1552 		return rc;
1553 
1554 	if (spec_filter->rss) {
1555 		/*
1556 		 * All specifications for a given flow rule have the same RSS
1557 		 * context, so the RSS context value is taken from the first
1558 		 * filter specification
1559 		 */
1560 		efx_filter_spec_t *spec = &spec_filter->filters[0];
1561 
1562 		rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
1563 	}
1564 
1565 	return rc;
1566 }
1567 
1568 static int
1569 sfc_flow_parse_mark(struct sfc_adapter *sa,
1570 		    const struct rte_flow_action_mark *mark,
1571 		    struct rte_flow *flow)
1572 {
1573 	struct sfc_flow_spec *spec = &flow->spec;
1574 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1575 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
1576 
1577 	if (mark == NULL || mark->id > encp->enc_filter_action_mark_max)
1578 		return EINVAL;
1579 
1580 	spec_filter->template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
1581 	spec_filter->template.efs_mark = mark->id;
1582 
1583 	return 0;
1584 }
1585 
1586 static int
1587 sfc_flow_parse_actions(struct sfc_adapter *sa,
1588 		       const struct rte_flow_action actions[],
1589 		       struct rte_flow *flow,
1590 		       struct rte_flow_error *error)
1591 {
1592 	int rc;
1593 	struct sfc_flow_spec *spec = &flow->spec;
1594 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1595 	const unsigned int dp_rx_features = sa->priv.dp_rx->features;
1596 	uint32_t actions_set = 0;
1597 	const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
1598 					   (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
1599 					   (1UL << RTE_FLOW_ACTION_TYPE_DROP);
1600 	const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) |
1601 					   (1UL << RTE_FLOW_ACTION_TYPE_FLAG);
1602 
1603 	if (actions == NULL) {
1604 		rte_flow_error_set(error, EINVAL,
1605 				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
1606 				   "NULL actions");
1607 		return -rte_errno;
1608 	}
1609 
1610 #define SFC_BUILD_SET_OVERFLOW(_action, _set) \
1611 	RTE_BUILD_BUG_ON(_action >= sizeof(_set) * CHAR_BIT)
1612 
1613 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1614 		switch (actions->type) {
1615 		case RTE_FLOW_ACTION_TYPE_VOID:
1616 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID,
1617 					       actions_set);
1618 			break;
1619 
1620 		case RTE_FLOW_ACTION_TYPE_QUEUE:
1621 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE,
1622 					       actions_set);
1623 			if ((actions_set & fate_actions_mask) != 0)
1624 				goto fail_fate_actions;
1625 
1626 			rc = sfc_flow_parse_queue(sa, actions->conf, flow);
1627 			if (rc != 0) {
1628 				rte_flow_error_set(error, EINVAL,
1629 					RTE_FLOW_ERROR_TYPE_ACTION, actions,
1630 					"Bad QUEUE action");
1631 				return -rte_errno;
1632 			}
1633 			break;
1634 
1635 		case RTE_FLOW_ACTION_TYPE_RSS:
1636 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS,
1637 					       actions_set);
1638 			if ((actions_set & fate_actions_mask) != 0)
1639 				goto fail_fate_actions;
1640 
1641 			rc = sfc_flow_parse_rss(sa, actions->conf, flow);
1642 			if (rc != 0) {
1643 				rte_flow_error_set(error, -rc,
1644 					RTE_FLOW_ERROR_TYPE_ACTION, actions,
1645 					"Bad RSS action");
1646 				return -rte_errno;
1647 			}
1648 			break;
1649 
1650 		case RTE_FLOW_ACTION_TYPE_DROP:
1651 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
1652 					       actions_set);
1653 			if ((actions_set & fate_actions_mask) != 0)
1654 				goto fail_fate_actions;
1655 
1656 			spec_filter->template.efs_dmaq_id =
1657 				EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1658 			break;
1659 
1660 		case RTE_FLOW_ACTION_TYPE_FLAG:
1661 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
1662 					       actions_set);
1663 			if ((actions_set & mark_actions_mask) != 0)
1664 				goto fail_actions_overlap;
1665 
1666 			if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) {
1667 				rte_flow_error_set(error, ENOTSUP,
1668 					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1669 					"FLAG action is not supported on the current Rx datapath");
1670 				return -rte_errno;
1671 			}
1672 
1673 			spec_filter->template.efs_flags |=
1674 				EFX_FILTER_FLAG_ACTION_FLAG;
1675 			break;
1676 
1677 		case RTE_FLOW_ACTION_TYPE_MARK:
1678 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
1679 					       actions_set);
1680 			if ((actions_set & mark_actions_mask) != 0)
1681 				goto fail_actions_overlap;
1682 
1683 			if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) {
1684 				rte_flow_error_set(error, ENOTSUP,
1685 					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1686 					"MARK action is not supported on the current Rx datapath");
1687 				return -rte_errno;
1688 			}
1689 
1690 			rc = sfc_flow_parse_mark(sa, actions->conf, flow);
1691 			if (rc != 0) {
1692 				rte_flow_error_set(error, rc,
1693 					RTE_FLOW_ERROR_TYPE_ACTION, actions,
1694 					"Bad MARK action");
1695 				return -rte_errno;
1696 			}
1697 			break;
1698 
1699 		default:
1700 			rte_flow_error_set(error, ENOTSUP,
1701 					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
1702 					   "Action is not supported");
1703 			return -rte_errno;
1704 		}
1705 
1706 		actions_set |= (1UL << actions->type);
1707 	}
1708 #undef SFC_BUILD_SET_OVERFLOW
1709 
1710 	/* When fate is unknown, drop traffic. */
1711 	if ((actions_set & fate_actions_mask) == 0) {
1712 		spec_filter->template.efs_dmaq_id =
1713 			EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1714 	}
1715 
1716 	return 0;
1717 
1718 fail_fate_actions:
1719 	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1720 			   "Cannot combine several fate-deciding actions, "
1721 			   "choose between QUEUE, RSS or DROP");
1722 	return -rte_errno;
1723 
1724 fail_actions_overlap:
1725 	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1726 			   "Overlapping actions are not supported");
1727 	return -rte_errno;
1728 }
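
/*
 * Example action list this parser accepts (assumed application-side
 * code): mark matching packets with 0x2a and deliver them to queue 3.
 * QUEUE, RSS and DROP are mutually exclusive fate actions, and MARK
 * and FLAG are mutually exclusive as well.
 *
 *	const struct rte_flow_action_mark mark = { .id = 0x2a };
 *	const struct rte_flow_action_queue queue = { .index = 3 };
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */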
1729 
1730 /**
1731  * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
1732  * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the
1733  * specifications after copying.
1734  *
1735  * @param[in, out] spec
1736  *   SFC flow specification to update.
1737  * @param[in] filters_count_for_one_val
1738  *   How many specifications should share the same match flag, i.e. the
1739  *   number of specifications before copying.
1740  * @param[out] error
1741  *   Perform verbose error reporting if not NULL.
1742  */
1743 static int
1744 sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
1745 			       unsigned int filters_count_for_one_val,
1746 			       struct rte_flow_error *error)
1747 {
1748 	unsigned int i;
1749 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1750 	static const efx_filter_match_flags_t vals[] = {
1751 		EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
1752 		EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
1753 	};
1754 
1755 	if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1756 		rte_flow_error_set(error, EINVAL,
1757 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1758 			"Number of specifications is incorrect while copying "
1759 			"by unknown destination flags");
1760 		return -rte_errno;
1761 	}
1762 
1763 	for (i = 0; i < spec_filter->count; i++) {
1764 		/* The check above ensures that divisor can't be zero here */
1765 		spec_filter->filters[i].efs_match_flags |=
1766 			vals[i / filters_count_for_one_val];
1767 	}
1768 
1769 	return 0;
1770 }
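
/*
 * Worked example of the copy layout (illustrative numbers): with
 * filters_count_for_one_val == 2 and spec_filter->count == 4, copies
 * 0 and 1 get EFX_FILTER_MATCH_UNKNOWN_UCAST_DST while copies 2 and 3
 * get EFX_FILTER_MATCH_UNKNOWN_MCAST_DST, since vals[i / 2] selects
 * vals[0] for i = 0, 1 and vals[1] for i = 2, 3.
 */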
1771 
1772 /**
1773  * Check that the following condition is met:
1774  * the list of supported filters has a filter
1775  * with the EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of
1776  * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also
1777  * be inserted.
1778  *
1779  * @param[in] match
1780  *   The match flags of the filter.
1781  * @param[in] spec
1782  *   Specification to be supplemented.
1783  * @param[in] filter
1784  *   SFC filter with the list of supported filters.
1785  */
1786 static boolean_t
1787 sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
1788 				 __rte_unused efx_filter_spec_t *spec,
1789 				 struct sfc_filter *filter)
1790 {
1791 	unsigned int i;
1792 	efx_filter_match_flags_t match_mcast_dst;
1793 
1794 	match_mcast_dst =
1795 		(match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
1796 		EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
1797 	for (i = 0; i < filter->supported_match_num; i++) {
1798 		if (match_mcast_dst == filter->supported_match[i])
1799 			return B_TRUE;
1800 	}
1801 
1802 	return B_FALSE;
1803 }
1804 
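/*
 * Standalone sketch (hypothetical flag values) of the substitution
 * above: clear the unknown-unicast flag and set the unknown-multicast
 * flag in a single expression before probing the supported list.
 */
static unsigned int
example_flag_substitution(void)
{
	const unsigned int ucast = 0x1;
	const unsigned int mcast = 0x2;
	const unsigned int etype = 0x4;
	unsigned int match = etype | ucast;

	return (match & ~ucast) | mcast;	/* 0x5 -> 0x6 */
}
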
1805 /**
1806  * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and
1807  * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same
1808  * specifications after copying.
1809  *
1810  * @param spec[in, out]
1811  *   SFC flow specification to update.
1812  * @param filters_count_for_one_val[in]
1813  *   How many specifications should have the same EtherType value; this
1814  *   is the number of specifications before copying.
1815  * @param error[out]
1816  *   Perform verbose error reporting if not NULL.
1817  */
1818 static int
1819 sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
1820 			unsigned int filters_count_for_one_val,
1821 			struct rte_flow_error *error)
1822 {
1823 	unsigned int i;
1824 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1825 	static const uint16_t vals[] = {
1826 		EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
1827 	};
1828 
1829 	if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1830 		rte_flow_error_set(error, EINVAL,
1831 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1832 			"Number of specifications is incorrect "
1833 			"while copying by EtherType");
1834 		return -rte_errno;
1835 	}
1836 
1837 	for (i = 0; i < spec_filter->count; i++) {
1838 		spec_filter->filters[i].efs_match_flags |=
1839 			EFX_FILTER_MATCH_ETHER_TYPE;
1840 
1841 		/*
1842 		 * The check above ensures that
1843 		 * filters_count_for_one_val is not 0
1844 		 */
1845 		spec_filter->filters[i].efs_ether_type =
1846 			vals[i / filters_count_for_one_val];
1847 	}
1848 
1849 	return 0;
1850 }
1851 
1852 /**
1853  * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0
1854  * in the same specifications after copying.
1855  *
1856  * @param spec[in, out]
1857  *   SFC flow specification to update.
1858  * @param filters_count_for_one_val[in]
1859  *   How many specifications should have the same match flag; this is
1860  *   the number of specifications before copying.
1861  * @param error[out]
1862  *   Perform verbose error reporting if not NULL.
1863  */
1864 static int
1865 sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec,
1866 			    unsigned int filters_count_for_one_val,
1867 			    struct rte_flow_error *error)
1868 {
1869 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1870 	unsigned int i;
1871 
1872 	if (filters_count_for_one_val != spec_filter->count) {
1873 		rte_flow_error_set(error, EINVAL,
1874 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1875 			"Number of specifications is incorrect "
1876 			"while copying by outer VLAN ID");
1877 		return -rte_errno;
1878 	}
1879 
1880 	for (i = 0; i < spec_filter->count; i++) {
1881 		spec_filter->filters[i].efs_match_flags |=
1882 			EFX_FILTER_MATCH_OUTER_VID;
1883 
1884 		spec_filter->filters[i].efs_outer_vid = 0;
1885 	}
1886 
1887 	return 0;
1888 }
1889 
1890 /**
1891  * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
1892  * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same
1893  * specifications after copying.
1894  *
1895  * @param spec[in, out]
1896  *   SFC flow specification to update.
1897  * @param filters_count_for_one_val[in]
1898  *   How many specifications should have the same match flag; this is
1899  *   the number of specifications before copying.
1900  * @param error[out]
1901  *   Perform verbose error reporting if not NULL.
1902  */
1903 static int
1904 sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
1905 				    unsigned int filters_count_for_one_val,
1906 				    struct rte_flow_error *error)
1907 {
1908 	unsigned int i;
1909 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1910 	static const efx_filter_match_flags_t vals[] = {
1911 		EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
1912 		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
1913 	};
1914 
1915 	if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1916 		rte_flow_error_set(error, EINVAL,
1917 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1918 			"Number of specifications is incorrect while copying "
1919 			"by inner frame unknown destination flags");
1920 		return -rte_errno;
1921 	}
1922 
1923 	for (i = 0; i < spec_filter->count; i++) {
1924 		/* The check above ensures that divisor can't be zero here */
1925 		spec_filter->filters[i].efs_match_flags |=
1926 			vals[i / filters_count_for_one_val];
1927 	}
1928 
1929 	return 0;
1930 }
1931 
1932 /**
1933  * Check that the following conditions are met:
1934  * - the specification corresponds to a filter for encapsulated traffic
1935  * - the list of supported filters has a filter
1936  *   with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
1937  *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
1938  *   be inserted.
1939  *
1940  * @param match[in]
1941  *   The match flags of filter.
1942  * @param spec[in]
1943  *   Specification to be supplemented.
1944  * @param filter[in]
1945  *   SFC filter with list of supported filters.
1946  */
1947 static boolean_t
1948 sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
1949 				      efx_filter_spec_t *spec,
1950 				      struct sfc_filter *filter)
1951 {
1952 	unsigned int i;
1953 	efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
1954 	efx_filter_match_flags_t match_mcast_dst;
1955 
1956 	if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
1957 		return B_FALSE;
1958 
1959 	match_mcast_dst =
1960 		(match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
1961 		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
1962 	for (i = 0; i < filter->supported_match_num; i++) {
1963 		if (match_mcast_dst == filter->supported_match[i])
1964 			return B_TRUE;
1965 	}
1966 
1967 	return B_FALSE;
1968 }
1969 
1970 /**
1971  * Check that the list of supported filters has a filter that differs
1972  * from @p match only in that it has no EFX_FILTER_MATCH_OUTER_VID flag;
1973  * in this case that filter will be used, so the
1974  * EFX_FILTER_MATCH_OUTER_VID flag is not needed.
1975  *
1976  * @param match[in]
1977  *   The match flags of filter.
1978  * @param spec[in]
1979  *   Specification to be supplemented.
1980  * @param filter[in]
1981  *   SFC filter with list of supported filters.
1982  */
1983 static boolean_t
1984 sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match,
1985 			      __rte_unused efx_filter_spec_t *spec,
1986 			      struct sfc_filter *filter)
1987 {
1988 	unsigned int i;
1989 	efx_filter_match_flags_t match_without_vid =
1990 		match & ~EFX_FILTER_MATCH_OUTER_VID;
1991 
1992 	for (i = 0; i < filter->supported_match_num; i++) {
1993 		if (match_without_vid == filter->supported_match[i])
1994 			return B_FALSE;
1995 	}
1996 
1997 	return B_TRUE;
1998 }
1999 
2000 /*
2001  * Match flags that can be automatically added to filters.
2002  * Selecting the last minimum when searching for a supported filter ensures that the
2003  * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has a higher priority than
2004  * EFX_FILTER_MATCH_ETHER_TYPE. This is because the filter
2005  * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of supported
2006  * filters.
2007  */
2008 static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
2009 	{
2010 		.flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
2011 		.vals_count = 2,
2012 		.set_vals = sfc_flow_set_unknown_dst_flags,
2013 		.spec_check = sfc_flow_check_unknown_dst_flags,
2014 	},
2015 	{
2016 		.flag = EFX_FILTER_MATCH_ETHER_TYPE,
2017 		.vals_count = 2,
2018 		.set_vals = sfc_flow_set_ethertypes,
2019 		.spec_check = NULL,
2020 	},
2021 	{
2022 		.flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
2023 		.vals_count = 2,
2024 		.set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
2025 		.spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
2026 	},
2027 	{
2028 		.flag = EFX_FILTER_MATCH_OUTER_VID,
2029 		.vals_count = 1,
2030 		.set_vals = sfc_flow_set_outer_vid_flag,
2031 		.spec_check = sfc_flow_check_outer_vid_flag,
2032 	},
2033 };
2034 
2035 /* Get an item from the sfc_flow_copy_flags array */
2036 static const struct sfc_flow_copy_flag *
2037 sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
2038 {
2039 	unsigned int i;
2040 
2041 	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2042 		if (sfc_flow_copy_flags[i].flag == flag)
2043 			return &sfc_flow_copy_flags[i];
2044 	}
2045 
2046 	return NULL;
2047 }
2048 
2049 /**
2050  * Make copies of the specifications and set the match flag and the
2051  * values of the field that corresponds to it.
2052  *
2053  * @param spec[in, out]
2054  *   SFC flow specification to update.
2055  * @param flag[in]
2056  *   The match flag to add.
2057  * @param error[out]
2058  *   Perform verbose error reporting if not NULL.
2059  */
2060 static int
2061 sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
2062 			     efx_filter_match_flags_t flag,
2063 			     struct rte_flow_error *error)
2064 {
2065 	unsigned int i;
2066 	unsigned int new_filters_count;
2067 	unsigned int filters_count_for_one_val;
2068 	const struct sfc_flow_copy_flag *copy_flag;
2069 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2071 
2072 	copy_flag = sfc_flow_get_copy_flag(flag);
2073 	if (copy_flag == NULL) {
2074 		rte_flow_error_set(error, ENOTSUP,
2075 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2076 				   "Unsupported spec field for copying");
2077 		return -rte_errno;
2078 	}
2079 
2080 	new_filters_count = spec_filter->count * copy_flag->vals_count;
2081 	if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
2082 		rte_flow_error_set(error, EINVAL,
2083 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2084 			"Too many EFX specifications in the flow rule");
2085 		return -rte_errno;
2086 	}
2087 
2088 	/* Copy filters specifications */
2089 	for (i = spec_filter->count; i < new_filters_count; i++) {
2090 		spec_filter->filters[i] =
2091 			spec_filter->filters[i - spec_filter->count];
2092 	}
2093 
2094 	filters_count_for_one_val = spec_filter->count;
2095 	spec_filter->count = new_filters_count;
2096 
2097 	return copy_flag->set_vals(spec, filters_count_for_one_val, error);
2102 }
2103 
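/*
 * Standalone sketch (hypothetical values) of the copy step above: the
 * existing specifications are replicated so that each value of the new
 * field gets its own block of identical copies.
 */
static void
example_copy_expansion(void)
{
	int filters[4] = { 11, 22 };	/* two original specifications */
	unsigned int count = 2;
	unsigned int new_count = count * 2;	/* vals_count == 2 */
	unsigned int i;

	for (i = count; i < new_count; i++)
		filters[i] = filters[i - count];
	/* filters[] is now { 11, 22, 11, 22 } */
	(void)filters;
}
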
2104 /**
2105  * Check that the given set of match flags missing in the original filter spec
2106  * could be covered by adding spec copies which specify the corresponding
2107  * flags and packet field values to match.
2108  *
2109  * @param miss_flags[in]
2110  *   Match flags required by the supported filter but missing from the spec.
2111  * @param spec[in]
2112  *   Specification to be supplemented.
2113  * @param filter[in]
2114  *   SFC filter.
2115  *
2116  * @return
2117  *   Number of specifications after copying, or 0 if the flags cannot be added.
2118  */
2119 static unsigned int
2120 sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
2121 			     efx_filter_spec_t *spec,
2122 			     struct sfc_filter *filter)
2123 {
2124 	unsigned int i;
2125 	efx_filter_match_flags_t copy_flags = 0;
2126 	efx_filter_match_flags_t flag;
2127 	efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
2128 	sfc_flow_spec_check *check;
2129 	unsigned int multiplier = 1;
2130 
2131 	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2132 		flag = sfc_flow_copy_flags[i].flag;
2133 		check = sfc_flow_copy_flags[i].spec_check;
2134 		if ((flag & miss_flags) == flag) {
2135 			if (check != NULL && !check(match, spec, filter))
2136 				continue;
2137 
2138 			copy_flags |= flag;
2139 			multiplier *= sfc_flow_copy_flags[i].vals_count;
2140 		}
2141 	}
2142 
2143 	if (copy_flags == miss_flags)
2144 		return multiplier;
2145 
2146 	return 0;
2147 }
2148 
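/*
 * Standalone sketch (hypothetical table) of the multiplier computed
 * above: if two flags are missing and each can take two values,
 * 2 * 2 = 4 specification copies are required.
 */
static unsigned int
example_copy_multiplier(void)
{
	static const unsigned int vals_count[] = { 2, 2 };
	unsigned int multiplier = 1;
	unsigned int i;

	for (i = 0; i < RTE_DIM(vals_count); i++)
		multiplier *= vals_count[i];

	return multiplier;	/* 4 */
}
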
2149 /**
2150  * Attempt to supplement the specification template with the minimal
2151  * supported set of match flags. To do this, the specifications are
2152  * copied and filled in with the values of the fields that correspond
2153  * to the missing flags.
2154  * The necessary and sufficient filters are built from the fewest
2155  * number of copies that cover the minimally required set
2156  * of flags.
2157  *
2158  * @param sa[in]
2159  *   SFC adapter.
2160  * @param spec[in, out]
2161  *   SFC flow specification to update.
2162  * @param error[out]
2163  *   Perform verbose error reporting if not NULL.
2164  */
2165 static int
2166 sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
2167 			       struct sfc_flow_spec *spec,
2168 			       struct rte_flow_error *error)
2169 {
2170 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2171 	struct sfc_filter *filter = &sa->filter;
2172 	efx_filter_match_flags_t miss_flags;
2173 	efx_filter_match_flags_t min_miss_flags = 0;
2174 	efx_filter_match_flags_t match;
2175 	unsigned int min_multiplier = UINT_MAX;
2176 	unsigned int multiplier;
2177 	unsigned int i;
2178 	int rc;
2179 
2180 	match = spec_filter->template.efs_match_flags;
2181 	for (i = 0; i < filter->supported_match_num; i++) {
2182 		if ((match & filter->supported_match[i]) == match) {
2183 			miss_flags = filter->supported_match[i] & (~match);
2184 			multiplier = sfc_flow_check_missing_flags(miss_flags,
2185 				&spec_filter->template, filter);
2186 			if (multiplier > 0 && multiplier <= min_multiplier) {
2187 				min_multiplier = multiplier;
2188 				min_miss_flags = miss_flags;
2189 			}
2192 		}
2193 	}
2194 
2195 	if (min_multiplier == UINT_MAX) {
2196 		rte_flow_error_set(error, ENOTSUP,
2197 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2198 				   "The flow rule pattern is unsupported");
2199 		return -rte_errno;
2200 	}
2201 
2202 	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2203 		efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;
2204 
2205 		if ((flag & min_miss_flags) == flag) {
2206 			rc = sfc_flow_spec_add_match_flag(spec, flag, error);
2207 			if (rc != 0)
2208 				return rc;
2209 		}
2210 	}
2211 
2212 	return 0;
2213 }
2214 
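/*
 * Standalone sketch (hypothetical costs) of the '<=' tie-break above:
 * when two candidates need the same number of copies, the one scanned
 * later wins, which is how EFX_FILTER_MATCH_UNKNOWN_UCAST_DST (at the
 * end of the supported list) takes priority over EFX_FILTER_MATCH_ETHER_TYPE.
 */
static unsigned int
example_last_minimum(void)
{
	static const unsigned int cost[] = { 2, 2 };
	unsigned int min_cost = UINT_MAX;
	unsigned int winner = 0;
	unsigned int i;

	for (i = 0; i < RTE_DIM(cost); i++) {
		if (cost[i] <= min_cost) {	/* '<=' keeps the last minimum */
			min_cost = cost[i];
			winner = i;
		}
	}

	return winner;	/* 1: on a tie the later candidate wins */
}
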
2215 /**
2216  * Check whether the set of match flags matches a filter pattern, where
2217  * the pattern may optionally be extended with the OUTER_VID and
2218  * INNER_VID flags.
2219  *
2220  * @param match_flags[in]
2221  *   Set of match flags.
2222  * @param flags_pattern[in]
2223  *   Pattern of filter match flags.
2224  */
2225 static boolean_t
2226 sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
2227 			    efx_filter_match_flags_t flags_pattern)
2228 {
2229 	if ((match_flags & flags_pattern) != flags_pattern)
2230 		return B_FALSE;
2231 
2232 	switch (match_flags & ~flags_pattern) {
2233 	case 0:
2234 	case EFX_FILTER_MATCH_OUTER_VID:
2235 	case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
2236 		return B_TRUE;
2237 	default:
2238 		return B_FALSE;
2239 	}
2240 }
2241 
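/*
 * For example, ETHER_TYPE | OUTER_VID matches the pattern ETHER_TYPE
 * (the extra OUTER_VID is tolerated), whereas ETHER_TYPE | LOC_MAC
 * against the same pattern does not, since LOC_MAC is not one of the
 * permitted VLAN additions.
 */
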
2242 /**
2243  * Check whether the spec maps to a hardware filter which is known to be
2244  * ineffective despite being valid.
2245  *
2246  * @param filter[in]
2247  *   SFC filter with list of supported filters.
2248  * @param spec[in]
2249  *   SFC flow specification.
2250  */
2251 static boolean_t
2252 sfc_flow_is_match_flags_exception(struct sfc_filter *filter,
2253 				  struct sfc_flow_spec *spec)
2254 {
2255 	unsigned int i;
2256 	uint16_t ether_type;
2257 	uint8_t ip_proto;
2258 	efx_filter_match_flags_t match_flags;
2259 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2260 
2261 	for (i = 0; i < spec_filter->count; i++) {
2262 		match_flags = spec_filter->filters[i].efs_match_flags;
2263 
2264 		if (sfc_flow_is_match_with_vids(match_flags,
2265 						EFX_FILTER_MATCH_ETHER_TYPE) ||
2266 		    sfc_flow_is_match_with_vids(match_flags,
2267 						EFX_FILTER_MATCH_ETHER_TYPE |
2268 						EFX_FILTER_MATCH_LOC_MAC)) {
2269 			ether_type = spec_filter->filters[i].efs_ether_type;
2270 			if (filter->supports_ip_proto_or_addr_filter &&
2271 			    (ether_type == EFX_ETHER_TYPE_IPV4 ||
2272 			     ether_type == EFX_ETHER_TYPE_IPV6))
2273 				return B_TRUE;
2274 		} else if (sfc_flow_is_match_with_vids(match_flags,
2275 				EFX_FILTER_MATCH_ETHER_TYPE |
2276 				EFX_FILTER_MATCH_IP_PROTO) ||
2277 			   sfc_flow_is_match_with_vids(match_flags,
2278 				EFX_FILTER_MATCH_ETHER_TYPE |
2279 				EFX_FILTER_MATCH_IP_PROTO |
2280 				EFX_FILTER_MATCH_LOC_MAC)) {
2281 			ip_proto = spec_filter->filters[i].efs_ip_proto;
2282 			if (filter->supports_rem_or_local_port_filter &&
2283 			    (ip_proto == EFX_IPPROTO_TCP ||
2284 			     ip_proto == EFX_IPPROTO_UDP))
2285 				return B_TRUE;
2286 		}
2287 	}
2288 
2289 	return B_FALSE;
2290 }
2291 
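/*
 * For example, a rule matching only EtherType IPv4 maps to a valid
 * hardware filter, but on boards that also support IP protocol or
 * address filters that coarse filter is known to be ineffective, so
 * the rule is rejected here instead of silently missing traffic.
 */
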
2292 static int
2293 sfc_flow_validate_match_flags(struct sfc_adapter *sa,
2294 			      struct rte_flow *flow,
2295 			      struct rte_flow_error *error)
2296 {
2297 	struct sfc_flow_spec *spec = &flow->spec;
2298 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2299 	efx_filter_spec_t *spec_tmpl = &spec_filter->template;
2300 	efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
2301 	int rc;
2302 
2303 	/* Initialize the first filter spec with template */
2304 	spec_filter->filters[0] = *spec_tmpl;
2305 	spec_filter->count = 1;
2306 
2307 	if (!sfc_filter_is_match_supported(sa, match_flags)) {
2308 		rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
2309 		if (rc != 0)
2310 			return rc;
2311 	}
2312 
2313 	if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) {
2314 		rte_flow_error_set(error, ENOTSUP,
2315 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2316 			"The flow rule pattern is unsupported");
2317 		return -rte_errno;
2318 	}
2319 
2320 	return 0;
2321 }
2322 
2323 static int
2324 sfc_flow_parse_rte_to_filter(struct rte_eth_dev *dev,
2325 			     const struct rte_flow_item pattern[],
2326 			     const struct rte_flow_action actions[],
2327 			     struct rte_flow *flow,
2328 			     struct rte_flow_error *error)
2329 {
2330 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2331 	struct sfc_flow_spec *spec = &flow->spec;
2332 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2333 	struct sfc_flow_parse_ctx ctx;
2334 	int rc;
2335 
2336 	ctx.type = SFC_FLOW_PARSE_CTX_FILTER;
2337 	ctx.filter = &spec_filter->template;
2338 
2339 	rc = sfc_flow_parse_pattern(sfc_flow_items, RTE_DIM(sfc_flow_items),
2340 				    pattern, &ctx, error);
2341 	if (rc != 0)
2342 		goto fail_bad_value;
2343 
2344 	rc = sfc_flow_parse_actions(sa, actions, flow, error);
2345 	if (rc != 0)
2346 		goto fail_bad_value;
2347 
2348 	rc = sfc_flow_validate_match_flags(sa, flow, error);
2349 	if (rc != 0)
2350 		goto fail_bad_value;
2351 
2352 	return 0;
2353 
2354 fail_bad_value:
2355 	return rc;
2356 }
2357 
2358 static int
2359 sfc_flow_parse(struct rte_eth_dev *dev,
2360 	       const struct rte_flow_attr *attr,
2361 	       const struct rte_flow_item pattern[],
2362 	       const struct rte_flow_action actions[],
2363 	       struct rte_flow *flow,
2364 	       struct rte_flow_error *error)
2365 {
2366 	const struct sfc_flow_ops_by_spec *ops;
2367 	int rc;
2368 
2369 	rc = sfc_flow_parse_attr(attr, flow, error);
2370 	if (rc != 0)
2371 		return rc;
2372 
2373 	ops = sfc_flow_get_ops_by_spec(flow);
2374 	if (ops == NULL || ops->parse == NULL) {
2375 		rte_flow_error_set(error, ENOTSUP,
2376 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2377 				   "No backend to handle this flow");
2378 		return -rte_errno;
2379 	}
2380 
2381 	return ops->parse(dev, pattern, actions, flow, error);
2382 }
2383 
2384 static struct rte_flow *
2385 sfc_flow_zmalloc(struct rte_flow_error *error)
2386 {
2387 	struct rte_flow *flow;
2388 
2389 	flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
2390 	if (flow == NULL) {
2391 		rte_flow_error_set(error, ENOMEM,
2392 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2393 				   "Failed to allocate memory");
2394 	}
2395 
2396 	return flow;
2397 }
2398 
2399 static void
2400 sfc_flow_free(__rte_unused struct sfc_adapter *sa, struct rte_flow *flow)
2401 {
2402 	rte_free(flow);
2403 }
2404 
2405 static int
2406 sfc_flow_insert(struct sfc_adapter *sa, struct rte_flow *flow,
2407 		struct rte_flow_error *error)
2408 {
2409 	const struct sfc_flow_ops_by_spec *ops;
2410 	int rc;
2411 
2412 	ops = sfc_flow_get_ops_by_spec(flow);
2413 	if (ops == NULL || ops->insert == NULL) {
2414 		rte_flow_error_set(error, ENOTSUP,
2415 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2416 				   "No backend to handle this flow");
2417 		return rte_errno;
2418 	}
2419 
2420 	rc = ops->insert(sa, flow);
2421 	if (rc != 0) {
2422 		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2423 				   NULL, "Failed to insert the flow rule");
2424 	}
2425 
2426 	return rc;
2427 }
2428 
2429 static int
2430 sfc_flow_remove(struct sfc_adapter *sa, struct rte_flow *flow,
2431 		struct rte_flow_error *error)
2432 {
2433 	const struct sfc_flow_ops_by_spec *ops;
2434 	int rc;
2435 
2436 	ops = sfc_flow_get_ops_by_spec(flow);
2437 	if (ops == NULL || ops->remove == NULL) {
2438 		rte_flow_error_set(error, ENOTSUP,
2439 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2440 				   "No backend to handle this flow");
2441 		return rte_errno;
2442 	}
2443 
2444 	rc = ops->remove(sa, flow);
2445 	if (rc != 0) {
2446 		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2447 				   NULL, "Failed to remove the flow rule");
2448 	}
2449 
2450 	return rc;
2451 }
2452 
2453 static int
2454 sfc_flow_validate(struct rte_eth_dev *dev,
2455 		  const struct rte_flow_attr *attr,
2456 		  const struct rte_flow_item pattern[],
2457 		  const struct rte_flow_action actions[],
2458 		  struct rte_flow_error *error)
2459 {
2460 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2461 	struct rte_flow *flow;
2462 	int rc;
2463 
2464 	flow = sfc_flow_zmalloc(error);
2465 	if (flow == NULL)
2466 		return -rte_errno;
2467 
2468 	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2469 
2470 	sfc_flow_free(sa, flow);
2471 
2472 	return rc;
2473 }
2474 
2475 static struct rte_flow *
2476 sfc_flow_create(struct rte_eth_dev *dev,
2477 		const struct rte_flow_attr *attr,
2478 		const struct rte_flow_item pattern[],
2479 		const struct rte_flow_action actions[],
2480 		struct rte_flow_error *error)
2481 {
2482 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2483 	struct rte_flow *flow = NULL;
2484 	int rc;
2485 
2486 	flow = sfc_flow_zmalloc(error);
2487 	if (flow == NULL)
2488 		goto fail_no_mem;
2489 
2490 	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2491 	if (rc != 0)
2492 		goto fail_bad_value;
2493 
2494 	sfc_adapter_lock(sa);
2495 
2496 	TAILQ_INSERT_TAIL(&sa->flow_list, flow, entries);
2497 
2498 	if (sa->state == SFC_ADAPTER_STARTED) {
2499 		rc = sfc_flow_insert(sa, flow, error);
2500 		if (rc != 0)
2501 			goto fail_flow_insert;
2502 	}
2503 
2504 	sfc_adapter_unlock(sa);
2505 
2506 	return flow;
2507 
2508 fail_flow_insert:
2509 	TAILQ_REMOVE(&sa->flow_list, flow, entries);
2510 
2511 fail_bad_value:
2512 	sfc_flow_free(sa, flow);
2513 	sfc_adapter_unlock(sa);
2514 
2515 fail_no_mem:
2516 	return NULL;
2517 }
2518 
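/*
 * Illustrative sketch (application side, hypothetical helper): validate
 * a rule before creating it. sfc_flow_validate() above performs the
 * same parse on a throwaway flow object without touching the hardware.
 */
static struct rte_flow *
example_checked_create(uint16_t port_id,
		       const struct rte_flow_attr *attr,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[])
{
	struct rte_flow_error error = { 0 };

	/* Dry-run parse first; no hardware state is touched on success */
	if (rte_flow_validate(port_id, attr, pattern, actions, &error) != 0)
		return NULL;	/* error.message explains the rejection */

	return rte_flow_create(port_id, attr, pattern, actions, &error);
}
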
2519 static int
2520 sfc_flow_destroy(struct rte_eth_dev *dev,
2521 		 struct rte_flow *flow,
2522 		 struct rte_flow_error *error)
2523 {
2524 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2525 	struct rte_flow *flow_ptr;
2526 	int rc = EINVAL;
2527 
2528 	sfc_adapter_lock(sa);
2529 
2530 	TAILQ_FOREACH(flow_ptr, &sa->flow_list, entries) {
2531 		if (flow_ptr == flow)
2532 			rc = 0;
2533 	}
2534 	if (rc != 0) {
2535 		rte_flow_error_set(error, rc,
2536 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2537 				   "Failed to find flow rule to destroy");
2538 		goto fail_bad_value;
2539 	}
2540 
2541 	if (sa->state == SFC_ADAPTER_STARTED)
2542 		rc = sfc_flow_remove(sa, flow, error);
2543 
2544 	TAILQ_REMOVE(&sa->flow_list, flow, entries);
2545 	sfc_flow_free(sa, flow);
2546 
2547 fail_bad_value:
2548 	sfc_adapter_unlock(sa);
2549 
2550 	return -rc;
2551 }
2552 
2553 static int
2554 sfc_flow_flush(struct rte_eth_dev *dev,
2555 	       struct rte_flow_error *error)
2556 {
2557 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2558 	struct rte_flow *flow;
2559 	int ret = 0;
2560 
2561 	sfc_adapter_lock(sa);
2562 
2563 	while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
2564 		if (sa->state == SFC_ADAPTER_STARTED) {
2565 			int rc;
2566 
2567 			rc = sfc_flow_remove(sa, flow, error);
2568 			if (rc != 0)
2569 				ret = rc;
2570 		}
2571 
2572 		TAILQ_REMOVE(&sa->flow_list, flow, entries);
2573 		sfc_flow_free(sa, flow);
2574 	}
2575 
2576 	sfc_adapter_unlock(sa);
2577 
2578 	return -ret;
2579 }
2580 
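/*
 * Illustrative sketch (application side): rte_flow_flush() maps to the
 * handler above and removes every rule on the port in a single call.
 */
static int
example_remove_all_rules(uint16_t port_id)
{
	struct rte_flow_error error;

	return rte_flow_flush(port_id, &error);
}
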
2581 static int
2582 sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
2583 		 struct rte_flow_error *error)
2584 {
2585 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2586 	int ret = 0;
2587 
2588 	sfc_adapter_lock(sa);
2589 	if (sa->state != SFC_ADAPTER_INITIALIZED) {
2590 		rte_flow_error_set(error, EBUSY,
2591 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2592 				   NULL, "Please close the port first");
2593 		ret = -rte_errno;
2594 	} else {
2595 		sfc_sa2shared(sa)->isolated = (enable) ? B_TRUE : B_FALSE;
2596 	}
2597 	sfc_adapter_unlock(sa);
2598 
2599 	return ret;
2600 }
2601 
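/*
 * Illustrative sketch (application side): isolated mode has to be
 * requested while the port is still closed, since the handler above
 * insists on the SFC_ADAPTER_INITIALIZED state and fails with EBUSY
 * otherwise.
 */
static int
example_enable_isolated_mode(uint16_t port_id)
{
	struct rte_flow_error error;

	/* Call before the port is configured and started */
	return rte_flow_isolate(port_id, 1, &error);
}
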
2602 const struct rte_flow_ops sfc_flow_ops = {
2603 	.validate = sfc_flow_validate,
2604 	.create = sfc_flow_create,
2605 	.destroy = sfc_flow_destroy,
2606 	.flush = sfc_flow_flush,
2607 	.query = NULL,
2608 	.isolate = sfc_flow_isolate,
2609 };
2610 
2611 void
2612 sfc_flow_init(struct sfc_adapter *sa)
2613 {
2614 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2615 
2616 	TAILQ_INIT(&sa->flow_list);
2617 }
2618 
2619 void
2620 sfc_flow_fini(struct sfc_adapter *sa)
2621 {
2622 	struct rte_flow *flow;
2623 
2624 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2625 
2626 	while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
2627 		TAILQ_REMOVE(&sa->flow_list, flow, entries);
2628 		sfc_flow_free(sa, flow);
2629 	}
2630 }
2631 
2632 void
2633 sfc_flow_stop(struct sfc_adapter *sa)
2634 {
2635 	struct rte_flow *flow;
2636 
2637 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2638 
2639 	TAILQ_FOREACH(flow, &sa->flow_list, entries)
2640 		sfc_flow_remove(sa, flow, NULL);
2641 }
2642 
2643 int
2644 sfc_flow_start(struct sfc_adapter *sa)
2645 {
2646 	struct rte_flow *flow;
2647 	int rc = 0;
2648 
2649 	sfc_log_init(sa, "entry");
2650 
2651 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2652 
2653 	TAILQ_FOREACH(flow, &sa->flow_list, entries) {
2654 		rc = sfc_flow_insert(sa, flow, NULL);
2655 		if (rc != 0)
2656 			goto fail_bad_flow;
2657 	}
2658 
2659 	sfc_log_init(sa, "done");
2660 
2661 fail_bad_flow:
2662 	return rc;
2663 }
2664