/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2017-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_byteorder.h>
#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev_driver.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"
#include "sfc_dp_rx.h"

/*
 * Currently the flow API is implemented in such a manner that each
 * flow rule is converted to one or more hardware filters.
 * All elements of the flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that describes the hardware filter.
 * If some required field is unset in the flow rule, then several
 * filter copies are created to cover all possible values
 * of such a field.
 */

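/*
 * For instance (an illustrative note): when the hardware match set
 * requires an EtherType that the rule leaves unset,
 * sfc_flow_set_ethertypes() below expands the rule into one filter
 * copy with EFX_ETHER_TYPE_IPV4 and another with EFX_ETHER_TYPE_IPV6.
 */
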
enum sfc_flow_item_layers {
	SFC_FLOW_ITEM_ANY_LAYER,
	SFC_FLOW_ITEM_START_LAYER,
	SFC_FLOW_ITEM_L2,
	SFC_FLOW_ITEM_L3,
	SFC_FLOW_ITEM_L4,
};

typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
				  efx_filter_spec_t *spec,
				  struct rte_flow_error *error);

struct sfc_flow_item {
	enum rte_flow_item_type type;		/* Type of item */
	enum sfc_flow_item_layers layer;	/* Layer of item */
	enum sfc_flow_item_layers prev_layer;	/* Previous layer of item */
	sfc_flow_item_parse *parse;		/* Parsing function */
};

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
static sfc_flow_item_parse sfc_flow_parse_vxlan;
static sfc_flow_item_parse sfc_flow_parse_geneve;
static sfc_flow_item_parse sfc_flow_parse_nvgre;

typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
				     unsigned int filters_count_for_one_val,
				     struct rte_flow_error *error);

typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
					efx_filter_spec_t *spec,
					struct sfc_filter *filter);

struct sfc_flow_copy_flag {
	/* EFX filter specification match flag */
	efx_filter_match_flags_t flag;
	/* Number of values of the corresponding field */
	unsigned int vals_count;
	/* Function to set values in specifications */
	sfc_flow_spec_set_vals *set_vals;
	/*
	 * Function to check that the specification is suitable
	 * for adding this match flag
	 */
	sfc_flow_spec_check *spec_check;
};

static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag;
static sfc_flow_spec_check sfc_flow_check_outer_vid_flag;

static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
	uint8_t sum = 0;
	unsigned int i;

	for (i = 0; i < size; i++)
		sum |= buf[i];

	return (sum == 0) ? B_TRUE : B_FALSE;
}

/*
 * Validate the item and prepare the spec and mask structures for parsing
 */
static int
sfc_flow_parse_init(const struct rte_flow_item *item,
		    const void **spec_ptr,
		    const void **mask_ptr,
		    const void *supp_mask,
		    const void *def_mask,
		    unsigned int size,
		    struct rte_flow_error *error)
{
	const uint8_t *spec;
	const uint8_t *mask;
	const uint8_t *last;
	uint8_t supp;
	unsigned int i;

	if (item == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "NULL item");
		return -rte_errno;
	}

	if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Mask or last is set without spec");
		return -rte_errno;
	}

	/*
	 * If "mask" is not set, the default mask is used,
	 * but if the default mask is NULL, "mask" must be set
	 */
	if (item->mask == NULL) {
		if (def_mask == NULL) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				"Mask should be specified");
			return -rte_errno;
		}

		mask = def_mask;
	} else {
		mask = item->mask;
	}

	spec = item->spec;
	last = item->last;

	if (spec == NULL)
		goto exit;

	/*
	 * If field values in "last" are either 0 or equal to the corresponding
	 * values in "spec" then they are ignored
	 */
	if (last != NULL &&
	    !sfc_flow_is_zero(last, size) &&
	    memcmp(last, spec, size) != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Ranging is not supported");
		return -rte_errno;
	}

	if (supp_mask == NULL) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Supported mask for item should be specified");
		return -rte_errno;
	}

	/* Check that mask does not ask for more match than supp_mask */
	for (i = 0; i < size; i++) {
		supp = ((const uint8_t *)supp_mask)[i];

		if (~supp & mask[i]) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Item's field is not supported");
			return -rte_errno;
		}
	}

exit:
	*spec_ptr = spec;
	*mask_ptr = mask;
	return 0;
}

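/*
 * Usage note (summarising the contract above): on success, *mask_ptr
 * always points to a valid mask, while *spec_ptr may still be NULL when
 * the item carries no spec; callers treat a NULL spec as "match any"
 * and skip the field-by-field conversion.
 */
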
/*
 * Protocol parsers.
 * Arbitrary masking is not supported, so masks in items must be either
 * full or empty (zeroed) and set only for the supported fields which
 * are specified in supp_mask.
 */

static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
		    __rte_unused efx_filter_spec_t *efx_spec,
		    __rte_unused struct rte_flow_error *error)
{
	return 0;
}

/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. The outer frame specification may only comprise
 *   source/destination addresses and the EtherType field.
 *   The inner frame specification may contain the destination address only.
 *   The individual/group mask is supported, as well as empty and full masks.
 *   If the mask is NULL, the default mask will be used. Ranging is not
 *   supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
		   efx_filter_spec_t *efx_spec,
		   struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_eth *spec = NULL;
	const struct rte_flow_item_eth *mask = NULL;
	const struct rte_flow_item_eth supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.type = 0xffff,
	};
	const struct rte_flow_item_eth ifrm_supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
	};
	const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
		0x01, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	const struct rte_flow_item_eth *supp_mask_p;
	const struct rte_flow_item_eth *def_mask_p;
	uint8_t *loc_mac = NULL;
	boolean_t is_ifrm = (efx_spec->efs_encap_type !=
		EFX_TUNNEL_PROTOCOL_NONE);

	if (is_ifrm) {
		supp_mask_p = &ifrm_supp_mask;
		def_mask_p = &ifrm_supp_mask;
		loc_mac = efx_spec->efs_ifrm_loc_mac;
	} else {
		supp_mask_p = &supp_mask;
		def_mask_p = &rte_flow_item_eth_mask;
		loc_mac = efx_spec->efs_loc_mac;
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 supp_mask_p, def_mask_p,
				 sizeof(struct rte_flow_item_eth),
				 error);
	if (rc != 0)
		return rc;

	/* If "spec" is not set, could be any Ethernet */
	if (spec == NULL)
		return 0;

	if (rte_is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
		efx_spec->efs_match_flags |= is_ifrm ?
			EFX_FILTER_MATCH_IFRM_LOC_MAC :
			EFX_FILTER_MATCH_LOC_MAC;
		rte_memcpy(loc_mac, spec->dst.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (memcmp(mask->dst.addr_bytes, ig_mask,
			  EFX_MAC_ADDR_LEN) == 0) {
		if (rte_is_unicast_ether_addr(&spec->dst))
			efx_spec->efs_match_flags |= is_ifrm ?
				EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
				EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
		else
			efx_spec->efs_match_flags |= is_ifrm ?
				EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
				EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
	} else if (!rte_is_zero_ether_addr(&mask->dst)) {
		goto fail_bad_mask;
	}

	/*
	 * ifrm_supp_mask ensures that the source address and
	 * ethertype masks are equal to zero in the inner frame,
	 * so these fields are filled in only for the outer frame
	 */
	if (rte_is_same_ether_addr(&mask->src, &supp_mask.src)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
		rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (!rte_is_zero_ether_addr(&mask->src)) {
		goto fail_bad_mask;
	}

	/*
	 * Ether type is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->type == supp_mask.type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->type);
	} else if (mask->type != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the ETH pattern item");
	return -rte_errno;
}

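/*
 * Illustrative note for the parser above: with the individual/group
 * mask ig_mask (01:00:00:00:00:00) only the multicast bit of the
 * destination MAC is significant, so a unicast spec selects
 * UNKNOWN_UCAST_DST matching and a multicast spec selects
 * UNKNOWN_MCAST_DST matching.
 */
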
/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only the VID field is supported.
 *   The mask cannot be NULL. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	uint16_t vid;
	const struct rte_flow_item_vlan *spec = NULL;
	const struct rte_flow_item_vlan *mask = NULL;
	const struct rte_flow_item_vlan supp_mask = {
		.tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
		.inner_type = RTE_BE16(0xffff),
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 NULL,
				 sizeof(struct rte_flow_item_vlan),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * VID is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used.
	 * If two VLAN items are included, the first matches
	 * the outer tag and the next matches the inner tag.
	 */
	if (mask->tci == supp_mask.tci) {
		/* Apply mask to keep VID only */
		vid = rte_bswap16(spec->tci & mask->tci);

		if (!(efx_spec->efs_match_flags &
		      EFX_FILTER_MATCH_OUTER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
			efx_spec->efs_outer_vid = vid;
		} else if (!(efx_spec->efs_match_flags &
			     EFX_FILTER_MATCH_INNER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
			efx_spec->efs_inner_vid = vid;
		} else {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "More than two VLAN items");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN ID in TCI match is required");
		return -rte_errno;
	}

	if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN TPID matching is not supported");
		return -rte_errno;
	}
	if (mask->inner_type == supp_mask.inner_type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->inner_type);
	} else if (mask->inner_type) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Bad mask for VLAN inner_type");
		return -rte_errno;
	}

	return 0;
}

/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only the source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, the default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_ipv4 *spec = NULL;
	const struct rte_flow_item_ipv4 *mask = NULL;
	const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
	const struct rte_flow_item_ipv4 supp_mask = {
		.hdr = {
			.src_addr = 0xffffffff,
			.dst_addr = 0xffffffff,
			.next_proto_id = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv4_mask,
				 sizeof(struct rte_flow_item_ipv4),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv4 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv4;
	} else if (efx_spec->efs_ether_type != ether_type_ipv4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV4 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv4 addresses are in big-endian byte order in item and in
	 * efx_spec
	 */
	if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
		efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
	} else if (mask->hdr.src_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
		efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
	} else if (mask->hdr.dst_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
	} else if (mask->hdr.next_proto_id != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV4 pattern item");
	return -rte_errno;
}

/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only the source and destination addresses and
 *   next header fields are supported. If the mask is NULL, the default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_ipv6 *spec = NULL;
	const struct rte_flow_item_ipv6 *mask = NULL;
	const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
	const struct rte_flow_item_ipv6 supp_mask = {
		.hdr = {
			.src_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.dst_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.proto = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv6_mask,
				 sizeof(struct rte_flow_item_ipv6),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv6 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv6;
	} else if (efx_spec->efs_ether_type != ether_type_ipv6) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV6 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv6 addresses are in big-endian byte order in item and in
	 * efx_spec
	 */
	if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
		   sizeof(mask->hdr.src_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
				 sizeof(spec->hdr.src_addr));
		rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
			   sizeof(efx_spec->efs_rem_host));
	} else if (!sfc_flow_is_zero(mask->hdr.src_addr,
				     sizeof(mask->hdr.src_addr))) {
		goto fail_bad_mask;
	}

	if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
		   sizeof(mask->hdr.dst_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
				 sizeof(spec->hdr.dst_addr));
		rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
			   sizeof(efx_spec->efs_loc_host));
	} else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
				     sizeof(mask->hdr.dst_addr))) {
		goto fail_bad_mask;
	}

	if (mask->hdr.proto == supp_mask.hdr.proto) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.proto;
	} else if (mask->hdr.proto != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV6 pattern item");
	return -rte_errno;
}

/**
 * Convert TCP item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only the source and destination port fields
 *   are supported. If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_tcp(const struct rte_flow_item *item,
		   efx_filter_spec_t *efx_spec,
		   struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_tcp *spec = NULL;
	const struct rte_flow_item_tcp *mask = NULL;
	const struct rte_flow_item_tcp supp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_tcp_mask,
				 sizeof(struct rte_flow_item_tcp),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by TCP source and destination ports requires
	 * the appropriate IP_PROTO in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"IP proto in pattern with TCP item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * Source and destination ports are in big-endian byte order
	 * in item and in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
	} else if (mask->hdr.src_port != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
	} else if (mask->hdr.dst_port != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the TCP pattern item");
	return -rte_errno;
}

/**
 * Convert UDP item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only the source and destination port fields
 *   are supported. If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_udp(const struct rte_flow_item *item,
		   efx_filter_spec_t *efx_spec,
		   struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_udp *spec = NULL;
	const struct rte_flow_item_udp *mask = NULL;
	const struct rte_flow_item_udp supp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_udp_mask,
				 sizeof(struct rte_flow_item_udp),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by UDP source and destination ports requires
	 * the appropriate IP_PROTO in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"IP proto in pattern with UDP item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * Source and destination ports are in big-endian byte order
	 * in item and in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
	} else if (mask->hdr.src_port != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
	} else if (mask->hdr.dst_port != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the UDP pattern item");
	return -rte_errno;
}

/*
 * Filters for encapsulated packets match based on the EtherType and IP
 * protocol in the outer frame.
 */
static int
sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
					efx_filter_spec_t *efx_spec,
					uint8_t ip_proto,
					struct rte_flow_error *error)
{
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = ip_proto;
	} else if (efx_spec->efs_ip_proto != ip_proto) {
		switch (ip_proto) {
		case EFX_IPPROTO_UDP:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Outer IP header protocol must be UDP "
				"in VxLAN/GENEVE pattern");
			return -rte_errno;

		case EFX_IPPROTO_GRE:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Outer IP header protocol must be GRE "
				"in NVGRE pattern");
			return -rte_errno;

		default:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Only VxLAN/GENEVE/NVGRE tunneling patterns "
				"are supported");
			return -rte_errno;
		}
	}

	if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
	    efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
	    efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Outer frame EtherType in pattern with tunneling "
			"must be IPv4 or IPv6");
		return -rte_errno;
	}

	return 0;
}

static int
sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
				  const uint8_t *vni_or_vsid_val,
				  const uint8_t *vni_or_vsid_mask,
				  const struct rte_flow_item *item,
				  struct rte_flow_error *error)
{
	const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
		0xff, 0xff, 0xff
	};

	if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
		   EFX_VNI_OR_VSID_LEN) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
		rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
			   EFX_VNI_OR_VSID_LEN);
	} else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Unsupported VNI/VSID mask");
		return -rte_errno;
	}

	return 0;
}

/**
 * Convert VXLAN item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only the VXLAN network identifier field is
 *   supported. If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vxlan(const struct rte_flow_item *item,
		     efx_filter_spec_t *efx_spec,
		     struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_vxlan *spec = NULL;
	const struct rte_flow_item_vxlan *mask = NULL;
	const struct rte_flow_item_vxlan supp_mask = {
		.vni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_vxlan_mask,
				 sizeof(struct rte_flow_item_vxlan),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_UDP, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
					       mask->vni, item, error);

	return rc;
}

/**
 * Convert GENEVE item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only the Virtual Network Identifier and protocol
 *   type fields are supported, and the protocol type may only be
 *   Ethernet (0x6558). If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_geneve(const struct rte_flow_item *item,
		      efx_filter_spec_t *efx_spec,
		      struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_geneve *spec = NULL;
	const struct rte_flow_item_geneve *mask = NULL;
	const struct rte_flow_item_geneve supp_mask = {
		.protocol = RTE_BE16(0xffff),
		.vni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_geneve_mask,
				 sizeof(struct rte_flow_item_geneve),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_UDP, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	if (mask->protocol == supp_mask.protocol) {
		if (spec->protocol != rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"GENEVE encap. protocol must be Ethernet "
				"(0x6558) in the GENEVE pattern item");
			return -rte_errno;
		}
	} else if (mask->protocol != 0) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Unsupported mask for GENEVE encap. protocol");
		return -rte_errno;
	}

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
					       mask->vni, item, error);

	return rc;
}

/**
 * Convert NVGRE item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only the virtual subnet ID field is supported.
 *   If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_nvgre(const struct rte_flow_item *item,
		     efx_filter_spec_t *efx_spec,
		     struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_nvgre *spec = NULL;
	const struct rte_flow_item_nvgre *mask = NULL;
	const struct rte_flow_item_nvgre supp_mask = {
		.tni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_nvgre_mask,
				 sizeof(struct rte_flow_item_nvgre),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_GRE, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
					       mask->tni, item, error);

	return rc;
}

static const struct sfc_flow_item sfc_flow_items[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_VOID,
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.parse = sfc_flow_parse_void,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.prev_layer = SFC_FLOW_ITEM_START_LAYER,
		.layer = SFC_FLOW_ITEM_L2,
		.parse = sfc_flow_parse_eth,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.parse = sfc_flow_parse_vlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.parse = sfc_flow_parse_ipv4,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.parse = sfc_flow_parse_ipv6,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.parse = sfc_flow_parse_tcp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.parse = sfc_flow_parse_udp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.parse = sfc_flow_parse_vxlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_GENEVE,
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.parse = sfc_flow_parse_geneve,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_NVGRE,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.parse = sfc_flow_parse_nvgre,
	},
};

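/*
 * Note on the table above: the tunnel items (VXLAN, GENEVE, NVGRE)
 * reset the layer to SFC_FLOW_ITEM_START_LAYER, so the inner frame may
 * start again from an ETH item just like the outer frame does.
 */
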
/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(const struct rte_flow_attr *attr,
		    struct rte_flow *flow,
		    struct rte_flow_error *error)
{
	if (attr == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "NULL attribute");
		return -rte_errno;
	}
	if (attr->group != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
				   "Groups are not supported");
		return -rte_errno;
	}
	if (attr->priority != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
				   "Priorities are not supported");
		return -rte_errno;
	}
	if (attr->egress != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
				   "Egress is not supported");
		return -rte_errno;
	}
	if (attr->transfer != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
				   "Transfer is not supported");
		return -rte_errno;
	}
	if (attr->ingress == 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
				   "Only ingress is supported");
		return -rte_errno;
	}

	flow->spec.template.efs_flags |= EFX_FILTER_FLAG_RX;
	flow->spec.template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;

	return 0;
}

/* Get item from array sfc_flow_items */
static const struct sfc_flow_item *
sfc_flow_get_item(enum rte_flow_item_type type)
{
	unsigned int i;

	for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
		if (sfc_flow_items[i].type == type)
			return &sfc_flow_items[i];

	return NULL;
}

static int
sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
	boolean_t is_ifrm = B_FALSE;
	const struct sfc_flow_item *item;

	if (pattern == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
				   "NULL pattern");
		return -rte_errno;
	}

	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
		item = sfc_flow_get_item(pattern->type);
		if (item == NULL) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unsupported pattern item");
			return -rte_errno;
		}

		/*
		 * Omitting one or several protocol layers at the beginning
		 * of pattern is supported
		 */
		if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    item->prev_layer != prev_layer) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unexpected sequence of pattern items");
			return -rte_errno;
		}

		/*
		 * Allow only VOID and ETH pattern items in the inner frame.
		 * Also check that there is only one tunneling protocol.
		 */
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
		case RTE_FLOW_ITEM_TYPE_ETH:
			break;

		case RTE_FLOW_ITEM_TYPE_VXLAN:
		case RTE_FLOW_ITEM_TYPE_GENEVE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			if (is_ifrm) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					pattern,
					"More than one tunneling protocol");
				return -rte_errno;
			}
			is_ifrm = B_TRUE;
			break;

		default:
			if (is_ifrm) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					pattern,
					"There is an unsupported pattern item "
					"in the inner frame");
				return -rte_errno;
			}
			break;
		}

		rc = item->parse(pattern, &flow->spec.template, error);
		if (rc != 0)
			return rc;

		if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
			prev_layer = item->layer;
	}

	return 0;
}

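/*
 * Illustrative example of a pattern accepted by the parser above:
 * ETH / IPV4 / UDP / VXLAN / ETH / END. The outer layers are checked
 * in order, the VXLAN item starts the inner frame, and only VOID and
 * ETH items are allowed after it.
 */
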
static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
		     const struct rte_flow_action_queue *queue,
		     struct rte_flow *flow)
{
	struct sfc_rxq *rxq;

	if (queue->index >= sfc_sa2shared(sa)->rxq_count)
		return -EINVAL;

	rxq = &sa->rxq_ctrl[queue->index];
	flow->spec.template.efs_dmaq_id = (uint16_t)rxq->hw_index;

	return 0;
}

static int
sfc_flow_parse_rss(struct sfc_adapter *sa,
		   const struct rte_flow_action_rss *action_rss,
		   struct rte_flow *flow)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_rss *rss = &sas->rss;
	unsigned int rxq_sw_index;
	struct sfc_rxq *rxq;
	unsigned int rxq_hw_index_min;
	unsigned int rxq_hw_index_max;
	efx_rx_hash_type_t efx_hash_types;
	const uint8_t *rss_key;
	struct sfc_flow_rss *sfc_rss_conf = &flow->rss_conf;
	unsigned int i;

	if (action_rss->queue_num == 0)
		return -EINVAL;

	rxq_sw_index = sfc_sa2shared(sa)->rxq_count - 1;
	rxq = &sa->rxq_ctrl[rxq_sw_index];
	rxq_hw_index_min = rxq->hw_index;
	rxq_hw_index_max = 0;

	for (i = 0; i < action_rss->queue_num; ++i) {
		rxq_sw_index = action_rss->queue[i];

		if (rxq_sw_index >= sfc_sa2shared(sa)->rxq_count)
			return -EINVAL;

		rxq = &sa->rxq_ctrl[rxq_sw_index];

		if (rxq->hw_index < rxq_hw_index_min)
			rxq_hw_index_min = rxq->hw_index;

		if (rxq->hw_index > rxq_hw_index_max)
			rxq_hw_index_max = rxq->hw_index;
	}

	switch (action_rss->func) {
	case RTE_ETH_HASH_FUNCTION_DEFAULT:
	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
		break;
	default:
		return -EINVAL;
	}

	if (action_rss->level)
		return -EINVAL;

	/*
	 * Dummy RSS action with only one queue and no specific settings
	 * for hash types and key does not require dedicated RSS context
	 * and may be simplified to single queue action.
	 */
	if (action_rss->queue_num == 1 && action_rss->types == 0 &&
	    action_rss->key_len == 0) {
		flow->spec.template.efs_dmaq_id = rxq_hw_index_min;
		return 0;
	}

	if (action_rss->types) {
		int rc;

		rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types,
					  &efx_hash_types);
		if (rc != 0)
			return -rc;
	} else {
		unsigned int i;

		efx_hash_types = 0;
		for (i = 0; i < rss->hf_map_nb_entries; ++i)
			efx_hash_types |= rss->hf_map[i].efx;
	}

	if (action_rss->key_len) {
		if (action_rss->key_len != sizeof(rss->key))
			return -EINVAL;

		rss_key = action_rss->key;
	} else {
		rss_key = rss->key;
	}

	flow->rss = B_TRUE;

	sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
	sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
	sfc_rss_conf->rss_hash_types = efx_hash_types;
	rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key));

	for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
		unsigned int nb_queues = action_rss->queue_num;
		unsigned int rxq_sw_index = action_rss->queue[i % nb_queues];
		struct sfc_rxq *rxq = &sa->rxq_ctrl[rxq_sw_index];

		sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
	}

	return 0;
}

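/*
 * Illustrative example for the RSS table fill-in above: with hardware
 * queue indices {2, 3}, rxq_hw_index_min is 2 and the rss_tbl[] entries
 * alternate between 0 and 1, i.e. they are stored relative to the
 * minimum hardware queue index.
 */
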
static int
sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
		    unsigned int filters_count)
{
	unsigned int i;
	int ret = 0;

	for (i = 0; i < filters_count; i++) {
		int rc;

		rc = efx_filter_remove(sa->nic, &spec->filters[i]);
		if (ret == 0 && rc != 0) {
			sfc_err(sa, "failed to remove filter specification "
				"(rc = %d)", rc);
			ret = rc;
		}
	}

	return ret;
}

static int
sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < spec->count; i++) {
		rc = efx_filter_insert(sa->nic, &spec->filters[i]);
		if (rc != 0) {
			sfc_flow_spec_flush(sa, spec, i);
			break;
		}
	}

	return rc;
}

static int
sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
	return sfc_flow_spec_flush(sa, spec, spec->count);
}

static int
sfc_flow_filter_insert(struct sfc_adapter *sa,
		       struct rte_flow *flow)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_rss *rss = &sas->rss;
	struct sfc_flow_rss *flow_rss = &flow->rss_conf;
	uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
	unsigned int i;
	int rc = 0;

	if (flow->rss) {
		unsigned int rss_spread = MIN(flow_rss->rxq_hw_index_max -
					      flow_rss->rxq_hw_index_min + 1,
					      EFX_MAXRSS);

		rc = efx_rx_scale_context_alloc(sa->nic,
						EFX_RX_SCALE_EXCLUSIVE,
						rss_spread,
						&efs_rss_context);
		if (rc != 0)
			goto fail_scale_context_alloc;

		rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
					   rss->hash_alg,
					   flow_rss->rss_hash_types, B_TRUE);
		if (rc != 0)
			goto fail_scale_mode_set;

		rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
					  flow_rss->rss_key,
					  sizeof(rss->key));
		if (rc != 0)
			goto fail_scale_key_set;

		/*
		 * At this point, fully elaborated filter specifications
		 * have been produced from the template. To make sure that
		 * RSS behaviour is consistent between them, set the same
		 * RSS context value everywhere.
		 */
		for (i = 0; i < flow->spec.count; i++) {
			efx_filter_spec_t *spec = &flow->spec.filters[i];

			spec->efs_rss_context = efs_rss_context;
			spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
			spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
		}
	}

	rc = sfc_flow_spec_insert(sa, &flow->spec);
	if (rc != 0)
		goto fail_filter_insert;

	if (flow->rss) {
		/*
		 * Scale table is set after filter insertion because
		 * the table entries are relative to the base RxQ ID
		 * and the latter is submitted to the HW by means of
		 * inserting a filter, so by the time of the request
		 * the HW knows all the information needed to verify
		 * the table entries, and the operation will succeed
		 */
		rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
					  flow_rss->rss_tbl,
					  RTE_DIM(flow_rss->rss_tbl));
		if (rc != 0)
			goto fail_scale_tbl_set;
	}

	return 0;

fail_scale_tbl_set:
	sfc_flow_spec_remove(sa, &flow->spec);

fail_filter_insert:
fail_scale_key_set:
fail_scale_mode_set:
	if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT)
		efx_rx_scale_context_free(sa->nic, efs_rss_context);

fail_scale_context_alloc:
	return rc;
}

static int
sfc_flow_filter_remove(struct sfc_adapter *sa,
		       struct rte_flow *flow)
{
	int rc = 0;

	rc = sfc_flow_spec_remove(sa, &flow->spec);
	if (rc != 0)
		return rc;

	if (flow->rss) {
		/*
		 * All specifications for a given flow rule have the same RSS
		 * context, so that RSS context value is taken from the first
		 * filter specification
		 */
		efx_filter_spec_t *spec = &flow->spec.filters[0];

		rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
	}

	return rc;
}

static int
sfc_flow_parse_mark(struct sfc_adapter *sa,
		    const struct rte_flow_action_mark *mark,
		    struct rte_flow *flow)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);

	if (mark == NULL || mark->id > encp->enc_filter_action_mark_max)
		return EINVAL;

	flow->spec.template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
	flow->spec.template.efs_mark = mark->id;

	return 0;
}

static int
sfc_flow_parse_actions(struct sfc_adapter *sa,
		       const struct rte_flow_action actions[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	const unsigned int dp_rx_features = sa->priv.dp_rx->features;
	uint32_t actions_set = 0;
	const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
					   (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
					   (1UL << RTE_FLOW_ACTION_TYPE_DROP);
	const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) |
					   (1UL << RTE_FLOW_ACTION_TYPE_FLAG);

	if (actions == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				   "NULL actions");
		return -rte_errno;
	}

#define SFC_BUILD_SET_OVERFLOW(_action, _set) \
	RTE_BUILD_BUG_ON(_action >= sizeof(_set) * CHAR_BIT)

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID,
					       actions_set);
			break;

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE,
					       actions_set);
			if ((actions_set & fate_actions_mask) != 0)
				goto fail_fate_actions;

			rc = sfc_flow_parse_queue(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad QUEUE action");
				return -rte_errno;
			}
			break;

		case RTE_FLOW_ACTION_TYPE_RSS:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS,
					       actions_set);
			if ((actions_set & fate_actions_mask) != 0)
				goto fail_fate_actions;

			rc = sfc_flow_parse_rss(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, -rc,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad RSS action");
				return -rte_errno;
			}
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
					       actions_set);
			if ((actions_set & fate_actions_mask) != 0)
				goto fail_fate_actions;

			flow->spec.template.efs_dmaq_id =
				EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
			break;

		case RTE_FLOW_ACTION_TYPE_FLAG:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
					       actions_set);
			if ((actions_set & mark_actions_mask) != 0)
				goto fail_actions_overlap;

			if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) {
				rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"FLAG action is not supported on the current Rx datapath");
				return -rte_errno;
			}

			flow->spec.template.efs_flags |=
				EFX_FILTER_FLAG_ACTION_FLAG;
			break;

		case RTE_FLOW_ACTION_TYPE_MARK:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
					       actions_set);
			if ((actions_set & mark_actions_mask) != 0)
				goto fail_actions_overlap;

			if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) {
				rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"MARK action is not supported on the current Rx datapath");
				return -rte_errno;
			}

			rc = sfc_flow_parse_mark(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, rc,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad MARK action");
				return -rte_errno;
			}
			break;

		default:
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
					   "Action is not supported");
			return -rte_errno;
		}

		actions_set |= (1UL << actions->type);
	}
#undef SFC_BUILD_SET_OVERFLOW

	/* When fate is unknown, drop traffic. */
	if ((actions_set & fate_actions_mask) == 0) {
		flow->spec.template.efs_dmaq_id =
			EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
	}

	return 0;

fail_fate_actions:
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
			   "Cannot combine several fate-deciding actions, "
			   "choose between QUEUE, RSS or DROP");
	return -rte_errno;

fail_actions_overlap:
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
			   "Overlapping actions are not supported");
	return -rte_errno;
}

/**
 * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
 * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same
 * specifications after copying.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param filters_count_for_one_val[in]
 *   How many specifications should have the same match flag, which is
 *   the number of specifications before copying.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
			       unsigned int filters_count_for_one_val,
			       struct rte_flow_error *error)
{
	unsigned int i;
	static const efx_filter_match_flags_t vals[] = {
		EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
		EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
	};

	if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Number of specifications is incorrect while copying "
			"by unknown destination flags");
		return -rte_errno;
	}

	for (i = 0; i < spec->count; i++) {
		/* The check above ensures that divisor can't be zero here */
		spec->filters[i].efs_match_flags |=
			vals[i / filters_count_for_one_val];
	}

	return 0;
}

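/*
 * Illustrative example for the helper above: with spec->count == 4 and
 * filters_count_for_one_val == 2, filters 0-1 get the
 * UNKNOWN_UCAST_DST flag and filters 2-3 get the UNKNOWN_MCAST_DST
 * flag.
 */
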
/**
 * Check that the list of supported filters has a filter with the
 * EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since such a filter will also
 * be inserted.
 *
 * @param match[in]
 *   The match flags of the filter.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter with the list of supported filters.
 */
static boolean_t
sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
				 __rte_unused efx_filter_spec_t *spec,
				 struct sfc_filter *filter)
{
	unsigned int i;
	efx_filter_match_flags_t match_mcast_dst;

	match_mcast_dst =
		(match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
		EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
	for (i = 0; i < filter->supported_match_num; i++) {
		if (match_mcast_dst == filter->supported_match[i])
			return B_TRUE;
	}

	return B_FALSE;
}

/**
 * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and the EFX_ETHER_TYPE_IPV4
 * and EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same
 * specifications after copying.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param filters_count_for_one_val[in]
 *   How many specifications should have the same EtherType value, which is
 *   the number of specifications before copying.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
			unsigned int filters_count_for_one_val,
			struct rte_flow_error *error)
{
	unsigned int i;
	static const uint16_t vals[] = {
		EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
	};

	if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Number of specifications is incorrect "
			"while copying by Ethertype");
		return -rte_errno;
	}

	for (i = 0; i < spec->count; i++) {
		spec->filters[i].efs_match_flags |=
			EFX_FILTER_MATCH_ETHER_TYPE;

		/*
		 * The check above ensures that
		 * filters_count_for_one_val is not 0
		 */
		spec->filters[i].efs_ether_type =
			vals[i / filters_count_for_one_val];
	}

	return 0;
}

/**
 * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0
 * in the same specifications after copying.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param filters_count_for_one_val[in]
 *   How many specifications should have the same match flag, which is
 *   the number of specifications before copying.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec,
			    unsigned int filters_count_for_one_val,
			    struct rte_flow_error *error)
{
	unsigned int i;

	if (filters_count_for_one_val != spec->count) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Number of specifications is incorrect "
			"while copying by outer VLAN ID");
		return -rte_errno;
	}

	for (i = 0; i < spec->count; i++) {
		spec->filters[i].efs_match_flags |=
			EFX_FILTER_MATCH_OUTER_VID;

		spec->filters[i].efs_outer_vid = 0;
	}

	return 0;
}

1823 /**
1824  * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST match flag in one
1825  * half of the copied specifications and
1826  * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST in the other half.
1827  *
1828  * @param spec[in, out]
1829  *   SFC flow specification to update.
1830  * @param filters_count_for_one_val[in]
1831  *   How many specifications should share the same match flag;
1832  *   this equals the number of specifications before copying.
1833  * @param error[out]
1834  *   Perform verbose error reporting if not NULL.
1835  */
1836 static int
1837 sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
1838 				    unsigned int filters_count_for_one_val,
1839 				    struct rte_flow_error *error)
1840 {
1841 	unsigned int i;
1842 	static const efx_filter_match_flags_t vals[] = {
1843 		EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
1844 		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
1845 	};
1846 
1847 	if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
1848 		rte_flow_error_set(error, EINVAL,
1849 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1850 			"Number of specifications is incorrect while copying "
1851 			"by inner frame unknown destination flags");
1852 		return -rte_errno;
1853 	}
1854 
1855 	for (i = 0; i < spec->count; i++) {
1856 		/* The check above ensures that divisor can't be zero here */
1857 		spec->filters[i].efs_match_flags |=
1858 			vals[i / filters_count_for_one_val];
1859 	}
1860 
1861 	return 0;
1862 }
1863 
1864 /**
1865  * Check that the following conditions are met:
1866  * - the specification corresponds to a filter for encapsulated traffic
1867  * - the list of supported filters has a filter
1868  *   with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
1869  *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
1870  *   be inserted.
1871  *
1872  * @param match[in]
1873  *   The match flags of filter.
1874  * @param spec[in]
1875  *   Specification to be supplemented.
1876  * @param filter[in]
1877  *   SFC filter with list of supported filters.
1878  */
1879 static boolean_t
1880 sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
1881 				      efx_filter_spec_t *spec,
1882 				      struct sfc_filter *filter)
1883 {
1884 	unsigned int i;
1885 	efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
1886 	efx_filter_match_flags_t match_mcast_dst;
1887 
1888 	if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
1889 		return B_FALSE;
1890 
1891 	match_mcast_dst =
1892 		(match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
1893 		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
1894 	for (i = 0; i < filter->supported_match_num; i++) {
1895 		if (match_mcast_dst == filter->supported_match[i])
1896 			return B_TRUE;
1897 	}
1898 
1899 	return B_FALSE;
1900 }
1901 
1902 /**
1903  * Check whether the list of supported filters has a filter that differs
1904  * from @p match only in that it lacks the EFX_FILTER_MATCH_OUTER_VID
1905  * flag. If such a filter exists, it will be used instead, so there is
1906  * no need to add the EFX_FILTER_MATCH_OUTER_VID flag.
1907  *
1908  * @param match[in]
1909  *   The match flags of filter.
1910  * @param spec[in]
1911  *   Specification to be supplemented.
1912  * @param filter[in]
1913  *   SFC filter with list of supported filters.
1914  */
1915 static boolean_t
1916 sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match,
1917 			      __rte_unused efx_filter_spec_t *spec,
1918 			      struct sfc_filter *filter)
1919 {
1920 	unsigned int i;
1921 	efx_filter_match_flags_t match_without_vid =
1922 		match & ~EFX_FILTER_MATCH_OUTER_VID;
1923 
1924 	for (i = 0; i < filter->supported_match_num; i++) {
1925 		if (match_without_vid == filter->supported_match[i])
1926 			return B_FALSE;
1927 	}
1928 
1929 	return B_TRUE;
1930 }
1931 
1932 /*
1933  * Match flags that can be automatically added to filters.
1934  * The search for the minimal multiplier keeps the last of several
1935  * equal minima, which gives EFX_FILTER_MATCH_UNKNOWN_UCAST_DST a
1936  * higher priority than EFX_FILTER_MATCH_ETHER_TYPE: filters with
1937  * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST are at the end of the list of
1938  * supported filters.
1939  */
1940 static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
1941 	{
1942 		.flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
1943 		.vals_count = 2,
1944 		.set_vals = sfc_flow_set_unknown_dst_flags,
1945 		.spec_check = sfc_flow_check_unknown_dst_flags,
1946 	},
1947 	{
1948 		.flag = EFX_FILTER_MATCH_ETHER_TYPE,
1949 		.vals_count = 2,
1950 		.set_vals = sfc_flow_set_ethertypes,
1951 		.spec_check = NULL,
1952 	},
1953 	{
1954 		.flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
1955 		.vals_count = 2,
1956 		.set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
1957 		.spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
1958 	},
1959 	{
1960 		.flag = EFX_FILTER_MATCH_OUTER_VID,
1961 		.vals_count = 1,
1962 		.set_vals = sfc_flow_set_outer_vid_flag,
1963 		.spec_check = sfc_flow_check_outer_vid_flag,
1964 	},
1965 };
1966 
1967 /* Get item from array sfc_flow_copy_flags */
1968 static const struct sfc_flow_copy_flag *
1969 sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
1970 {
1971 	unsigned int i;
1972 
1973 	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
1974 		if (sfc_flow_copy_flags[i].flag == flag)
1975 			return &sfc_flow_copy_flags[i];
1976 	}
1977 
1978 	return NULL;
1979 }
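
/*
 * A minimal usage sketch (illustrative only; this helper is hypothetical
 * and not part of the driver): query how many filter specifications
 * adding a given match flag to a spec would produce.
 */
static unsigned int __rte_unused
sfc_flow_example_count_after_copy(const struct sfc_flow_spec *spec,
				  efx_filter_match_flags_t flag)
{
	const struct sfc_flow_copy_flag *copy_flag;

	copy_flag = sfc_flow_get_copy_flag(flag);
	if (copy_flag == NULL)
		return 0;

	/* Each existing specification is duplicated once per value */
	return spec->count * copy_flag->vals_count;
}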
1980 
1981 /**
1982  * Make copies of the specifications, set match flag and values
1983  * of the field that corresponds to it.
1984  *
1985  * @param spec[in, out]
1986  *   SFC flow specification to update.
1987  * @param flag[in]
1988  *   The match flag to add.
1989  * @param error[out]
1990  *   Perform verbose error reporting if not NULL.
1991  */
1992 static int
1993 sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
1994 			     efx_filter_match_flags_t flag,
1995 			     struct rte_flow_error *error)
1996 {
1997 	unsigned int i;
1998 	unsigned int new_filters_count;
1999 	unsigned int filters_count_for_one_val;
2000 	const struct sfc_flow_copy_flag *copy_flag;
2001 	int rc;
2002 
2003 	copy_flag = sfc_flow_get_copy_flag(flag);
2004 	if (copy_flag == NULL) {
2005 		rte_flow_error_set(error, ENOTSUP,
2006 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2007 				   "Unsupported spec field for copying");
2008 		return -rte_errno;
2009 	}
2010 
2011 	new_filters_count = spec->count * copy_flag->vals_count;
2012 	if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
2013 		rte_flow_error_set(error, EINVAL,
2014 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2015 			"Too many EFX specifications in the flow rule");
2016 		return -rte_errno;
2017 	}
2018 
2019 	/* Copy filters specifications */
2020 	for (i = spec->count; i < new_filters_count; i++)
2021 		spec->filters[i] = spec->filters[i - spec->count];
2022 
2023 	filters_count_for_one_val = spec->count;
2024 	spec->count = new_filters_count;
2025 
2026 	rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
2027 	if (rc != 0)
2028 		return rc;
2029 
2030 	return 0;
2031 }
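
/*
 * For illustration: with spec->count == 2 and a copy flag whose
 * vals_count == 2, the copy loop above turns {A, B} into {A, B, A, B},
 * after which set_vals() assigns the first value to filters[0..1] and
 * the second value to filters[2..3] (i / filters_count_for_one_val).
 */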
2032 
2033 /**
2034  * Check that the given set of match flags missing in the original filter spec
2035  * could be covered by adding spec copies which specify the corresponding
2036  * flags and packet field values to match.
2037  *
2038  * @param miss_flags[in]
2039  *   Flags present in the supported filter but missing from @p spec.
2040  * @param spec[in]
2041  *   Specification to be supplemented.
2042  * @param filter[in]
2043  *   SFC filter.
2044  *
2045  * @return
2046  *   Multiplier for the number of copies, or 0 if the flags cannot be added.
2047  */
2048 static unsigned int
2049 sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
2050 			     efx_filter_spec_t *spec,
2051 			     struct sfc_filter *filter)
2052 {
2053 	unsigned int i;
2054 	efx_filter_match_flags_t copy_flags = 0;
2055 	efx_filter_match_flags_t flag;
2056 	efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
2057 	sfc_flow_spec_check *check;
2058 	unsigned int multiplier = 1;
2059 
2060 	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2061 		flag = sfc_flow_copy_flags[i].flag;
2062 		check = sfc_flow_copy_flags[i].spec_check;
2063 		if ((flag & miss_flags) == flag) {
2064 			if (check != NULL && (!check(match, spec, filter)))
2065 				continue;
2066 
2067 			copy_flags |= flag;
2068 			multiplier *= sfc_flow_copy_flags[i].vals_count;
2069 		}
2070 	}
2071 
2072 	if (copy_flags == miss_flags)
2073 		return multiplier;
2074 
2075 	return 0;
2076 }
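
/*
 * For illustration: if miss_flags is
 * (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_UNKNOWN_UCAST_DST)
 * and both per-flag checks pass, the returned multiplier is 2 * 2 == 4:
 * the template would have to be copied into four specifications to
 * cover every combination of the missing values.
 */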
2077 
2078 /**
2079  * Attempt to supplement the specification template with the match
2080  * flags needed to reach a minimally supported filter. To do this,
2081  * the specifications are copied and filled with values of the fields
2082  * that correspond to the missing flags.
2083  * The necessary and sufficient filter set is built from the fewest
2084  * number of copies that could be made to cover the minimally
2085  * required set of flags.
2086  *
2087  * @param sa[in]
2088  *   SFC adapter.
2089  * @param spec[in, out]
2090  *   SFC flow specification to update.
2091  * @param error[out]
2092  *   Perform verbose error reporting if not NULL.
2093  */
2094 static int
2095 sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
2096 			       struct sfc_flow_spec *spec,
2097 			       struct rte_flow_error *error)
2098 {
2099 	struct sfc_filter *filter = &sa->filter;
2100 	efx_filter_match_flags_t miss_flags;
2101 	efx_filter_match_flags_t min_miss_flags = 0;
2102 	efx_filter_match_flags_t match;
2103 	unsigned int min_multiplier = UINT_MAX;
2104 	unsigned int multiplier;
2105 	unsigned int i;
2106 	int rc;
2107 
2108 	match = spec->template.efs_match_flags;
2109 	for (i = 0; i < filter->supported_match_num; i++) {
2110 		if ((match & filter->supported_match[i]) == match) {
2111 			miss_flags = filter->supported_match[i] & (~match);
2112 			multiplier = sfc_flow_check_missing_flags(miss_flags,
2113 				&spec->template, filter);
2114 			if (multiplier > 0) {
2115 				if (multiplier <= min_multiplier) {
2116 					min_multiplier = multiplier;
2117 					min_miss_flags = miss_flags;
2118 				}
2119 			}
2120 		}
2121 	}
2122 
2123 	if (min_multiplier == UINT_MAX) {
2124 		rte_flow_error_set(error, ENOTSUP,
2125 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2126 				   "The flow rule pattern is unsupported");
2127 		return -rte_errno;
2128 	}
2129 
2130 	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2131 		efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;
2132 
2133 		if ((flag & min_miss_flags) == flag) {
2134 			rc = sfc_flow_spec_add_match_flag(spec, flag, error);
2135 			if (rc != 0)
2136 				return rc;
2137 		}
2138 	}
2139 
2140 	return 0;
2141 }
2142 
2143 /**
2144  * Check that the set of match flags corresponds to the filter pattern,
2145  * optionally extended with the OUTER_VID and INNER_VID match flags
2146  * on top of it.
2147  *
2148  * @param match_flags[in]
2149  *   Set of match flags.
2150  * @param flags_pattern[in]
2151  *   Pattern of filter match flags.
2152  */
2153 static boolean_t
2154 sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
2155 			    efx_filter_match_flags_t flags_pattern)
2156 {
2157 	if ((match_flags & flags_pattern) != flags_pattern)
2158 		return B_FALSE;
2159 
2160 	switch (match_flags & ~flags_pattern) {
2161 	case 0:
2162 	case EFX_FILTER_MATCH_OUTER_VID:
2163 	case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
2164 		return B_TRUE;
2165 	default:
2166 		return B_FALSE;
2167 	}
2168 }
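
/*
 * For illustration: a pattern of EFX_FILTER_MATCH_ETHER_TYPE is matched
 * by EFX_FILTER_MATCH_ETHER_TYPE alone, by
 * (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID) and by
 * (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
 *  EFX_FILTER_MATCH_INNER_VID), but not by a set with any other
 * extra flag.
 */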
2169 
2170 /**
2171  * Check whether the spec maps to a hardware filter which is known to be
2172  * ineffective despite being valid.
2173  *
2174  * @param filter[in]
2175  *   SFC filter with list of supported filters.
2176  * @param spec[in]
2177  *   SFC flow specification.
2178  */
2179 static boolean_t
2180 sfc_flow_is_match_flags_exception(struct sfc_filter *filter,
2181 				  struct sfc_flow_spec *spec)
2182 {
2183 	unsigned int i;
2184 	uint16_t ether_type;
2185 	uint8_t ip_proto;
2186 	efx_filter_match_flags_t match_flags;
2187 
2188 	for (i = 0; i < spec->count; i++) {
2189 		match_flags = spec->filters[i].efs_match_flags;
2190 
2191 		if (sfc_flow_is_match_with_vids(match_flags,
2192 						EFX_FILTER_MATCH_ETHER_TYPE) ||
2193 		    sfc_flow_is_match_with_vids(match_flags,
2194 						EFX_FILTER_MATCH_ETHER_TYPE |
2195 						EFX_FILTER_MATCH_LOC_MAC)) {
2196 			ether_type = spec->filters[i].efs_ether_type;
2197 			if (filter->supports_ip_proto_or_addr_filter &&
2198 			    (ether_type == EFX_ETHER_TYPE_IPV4 ||
2199 			     ether_type == EFX_ETHER_TYPE_IPV6))
2200 				return B_TRUE;
2201 		} else if (sfc_flow_is_match_with_vids(match_flags,
2202 				EFX_FILTER_MATCH_ETHER_TYPE |
2203 				EFX_FILTER_MATCH_IP_PROTO) ||
2204 			   sfc_flow_is_match_with_vids(match_flags,
2205 				EFX_FILTER_MATCH_ETHER_TYPE |
2206 				EFX_FILTER_MATCH_IP_PROTO |
2207 				EFX_FILTER_MATCH_LOC_MAC)) {
2208 			ip_proto = spec->filters[i].efs_ip_proto;
2209 			if (filter->supports_rem_or_local_port_filter &&
2210 			    (ip_proto == EFX_IPPROTO_TCP ||
2211 			     ip_proto == EFX_IPPROTO_UDP))
2212 				return B_TRUE;
2213 		}
2214 	}
2215 
2216 	return B_FALSE;
2217 }
2218 
2219 static int
2220 sfc_flow_validate_match_flags(struct sfc_adapter *sa,
2221 			      struct rte_flow *flow,
2222 			      struct rte_flow_error *error)
2223 {
2224 	efx_filter_spec_t *spec_tmpl = &flow->spec.template;
2225 	efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
2226 	int rc;
2227 
2228 	/* Initialize the first filter spec with template */
2229 	flow->spec.filters[0] = *spec_tmpl;
2230 	flow->spec.count = 1;
2231 
2232 	if (!sfc_filter_is_match_supported(sa, match_flags)) {
2233 		rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
2234 		if (rc != 0)
2235 			return rc;
2236 	}
2237 
2238 	if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) {
2239 		rte_flow_error_set(error, ENOTSUP,
2240 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2241 			"The flow rule pattern is unsupported");
2242 		return -rte_errno;
2243 	}
2244 
2245 	return 0;
2246 }
2247 
2248 static int
2249 sfc_flow_parse(struct rte_eth_dev *dev,
2250 	       const struct rte_flow_attr *attr,
2251 	       const struct rte_flow_item pattern[],
2252 	       const struct rte_flow_action actions[],
2253 	       struct rte_flow *flow,
2254 	       struct rte_flow_error *error)
2255 {
2256 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2257 	int rc;
2258 
2259 	rc = sfc_flow_parse_attr(attr, flow, error);
2260 	if (rc != 0)
2261 		goto fail_bad_value;
2262 
2263 	rc = sfc_flow_parse_pattern(pattern, flow, error);
2264 	if (rc != 0)
2265 		goto fail_bad_value;
2266 
2267 	rc = sfc_flow_parse_actions(sa, actions, flow, error);
2268 	if (rc != 0)
2269 		goto fail_bad_value;
2270 
2271 	rc = sfc_flow_validate_match_flags(sa, flow, error);
2272 	if (rc != 0)
2273 		goto fail_bad_value;
2274 
2275 	return 0;
2276 
2277 fail_bad_value:
2278 	return rc;
2279 }
2280 
2281 static int
2282 sfc_flow_validate(struct rte_eth_dev *dev,
2283 		  const struct rte_flow_attr *attr,
2284 		  const struct rte_flow_item pattern[],
2285 		  const struct rte_flow_action actions[],
2286 		  struct rte_flow_error *error)
2287 {
2288 	struct rte_flow flow;
2289 
2290 	memset(&flow, 0, sizeof(flow));
2291 
2292 	return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
2293 }
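
/*
 * An application-side sketch (illustrative only, assuming port_id 0 is
 * served by this driver): validate a rule through the generic rte_flow
 * API before creating it, which ends up in sfc_flow_validate() above.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 *	if (rte_flow_validate(0, &attr, pattern, actions, &err) == 0)
 *		(void)rte_flow_create(0, &attr, pattern, actions, &err);
 */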
2294 
2295 static struct rte_flow *
2296 sfc_flow_create(struct rte_eth_dev *dev,
2297 		const struct rte_flow_attr *attr,
2298 		const struct rte_flow_item pattern[],
2299 		const struct rte_flow_action actions[],
2300 		struct rte_flow_error *error)
2301 {
2302 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2303 	struct rte_flow *flow = NULL;
2304 	int rc;
2305 
2306 	flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
2307 	if (flow == NULL) {
2308 		rte_flow_error_set(error, ENOMEM,
2309 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2310 				   "Failed to allocate memory");
2311 		goto fail_no_mem;
2312 	}
2313 
2314 	sfc_adapter_lock(sa);
2315 
2316 	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2317 	if (rc != 0)
2318 		goto fail_bad_value;
2319 
2320 	TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);
2321 
2322 	if (sa->state == SFC_ADAPTER_STARTED) {
2323 		rc = sfc_flow_filter_insert(sa, flow);
2324 		if (rc != 0) {
2325 			rte_flow_error_set(error, rc,
2326 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2327 				"Failed to insert filter");
2328 			goto fail_filter_insert;
2329 		}
2330 	}
2331 
2332 	sfc_adapter_unlock(sa);
2333 
2334 	return flow;
2335 
2336 fail_filter_insert:
2337 	TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
2338 
2339 fail_bad_value:
2340 	rte_free(flow);
2341 	sfc_adapter_unlock(sa);
2342 
2343 fail_no_mem:
2344 	return NULL;
2345 }
2346 
2347 static int
2348 sfc_flow_remove(struct sfc_adapter *sa,
2349 		struct rte_flow *flow,
2350 		struct rte_flow_error *error)
2351 {
2352 	int rc = 0;
2353 
2354 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2355 
2356 	if (sa->state == SFC_ADAPTER_STARTED) {
2357 		rc = sfc_flow_filter_remove(sa, flow);
2358 		if (rc != 0)
2359 			rte_flow_error_set(error, rc,
2360 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2361 				"Failed to destroy flow rule");
2362 	}
2363 
2364 	TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
2365 	rte_free(flow);
2366 
2367 	return rc;
2368 }
2369 
2370 static int
2371 sfc_flow_destroy(struct rte_eth_dev *dev,
2372 		 struct rte_flow *flow,
2373 		 struct rte_flow_error *error)
2374 {
2375 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2376 	struct rte_flow *flow_ptr;
2377 	int rc = EINVAL;
2378 
2379 	sfc_adapter_lock(sa);
2380 
2381 	TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
2382 		if (flow_ptr == flow)
2383 			rc = 0;
2384 	}
2385 	if (rc != 0) {
2386 		rte_flow_error_set(error, rc,
2387 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2388 				   "Failed to find flow rule to destroy");
2389 		goto fail_bad_value;
2390 	}
2391 
2392 	rc = sfc_flow_remove(sa, flow, error);
2393 
2394 fail_bad_value:
2395 	sfc_adapter_unlock(sa);
2396 
2397 	return -rc;
2398 }
2399 
2400 static int
2401 sfc_flow_flush(struct rte_eth_dev *dev,
2402 	       struct rte_flow_error *error)
2403 {
2404 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2405 	struct rte_flow *flow;
2406 	int rc = 0;
2407 	int ret = 0;
2408 
2409 	sfc_adapter_lock(sa);
2410 
2411 	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
2412 		rc = sfc_flow_remove(sa, flow, error);
2413 		if (rc != 0)
2414 			ret = rc;
2415 	}
2416 
2417 	sfc_adapter_unlock(sa);
2418 
2419 	return -ret;
2420 }
2421 
2422 static int
2423 sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
2424 		 struct rte_flow_error *error)
2425 {
2426 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2427 	int ret = 0;
2428 
2429 	sfc_adapter_lock(sa);
2430 	if (sa->state != SFC_ADAPTER_INITIALIZED) {
2431 		rte_flow_error_set(error, EBUSY,
2432 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2433 				   NULL, "please close the port first");
2434 		ret = -rte_errno;
2435 	} else {
2436 		sfc_sa2shared(sa)->isolated = (enable) ? B_TRUE : B_FALSE;
2437 	}
2438 	sfc_adapter_unlock(sa);
2439 
2440 	return ret;
2441 }
2442 
2443 const struct rte_flow_ops sfc_flow_ops = {
2444 	.validate = sfc_flow_validate,
2445 	.create = sfc_flow_create,
2446 	.destroy = sfc_flow_destroy,
2447 	.flush = sfc_flow_flush,
2448 	.query = NULL,
2449 	.isolate = sfc_flow_isolate,
2450 };
2451 
2452 void
2453 sfc_flow_init(struct sfc_adapter *sa)
2454 {
2455 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2456 
2457 	TAILQ_INIT(&sa->filter.flow_list);
2458 }
2459 
2460 void
2461 sfc_flow_fini(struct sfc_adapter *sa)
2462 {
2463 	struct rte_flow *flow;
2464 
2465 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2466 
2467 	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
2468 		TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
2469 		rte_free(flow);
2470 	}
2471 }
2472 
2473 void
2474 sfc_flow_stop(struct sfc_adapter *sa)
2475 {
2476 	struct rte_flow *flow;
2477 
2478 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2479 
2480 	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
2481 		sfc_flow_filter_remove(sa, flow);
2482 }
2483 
2484 int
2485 sfc_flow_start(struct sfc_adapter *sa)
2486 {
2487 	struct rte_flow *flow;
2488 	int rc = 0;
2489 
2490 	sfc_log_init(sa, "entry");
2491 
2492 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2493 
2494 	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
2495 		rc = sfc_flow_filter_insert(sa, flow);
2496 		if (rc != 0)
2497 			goto fail_bad_flow;
2498 	}
2499 
2500 	sfc_log_init(sa, "done");
2501 
2502 fail_bad_flow:
2503 	return rc;
2504 }
2505