/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2017-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_byteorder.h>
#include <rte_tailq.h>
#include <rte_common.h>
#include <ethdev_driver.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_flow_tunnel.h"
#include "sfc_log.h"
#include "sfc_dp_rx.h"
#include "sfc_mae_counter.h"
#include "sfc_switch.h"

struct sfc_flow_ops_by_spec {
	sfc_flow_parse_cb_t	*parse;
	sfc_flow_verify_cb_t	*verify;
	sfc_flow_cleanup_cb_t	*cleanup;
	sfc_flow_insert_cb_t	*insert;
	sfc_flow_remove_cb_t	*remove;
	sfc_flow_query_cb_t	*query;
};

static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter;
static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_mae;
static sfc_flow_insert_cb_t sfc_flow_filter_insert;
static sfc_flow_remove_cb_t sfc_flow_filter_remove;

static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = {
	.parse = sfc_flow_parse_rte_to_filter,
	.verify = NULL,
	.cleanup = NULL,
	.insert = sfc_flow_filter_insert,
	.remove = sfc_flow_filter_remove,
	.query = NULL,
};

static const struct sfc_flow_ops_by_spec sfc_flow_ops_mae = {
	.parse = sfc_flow_parse_rte_to_mae,
	.verify = sfc_mae_flow_verify,
	.cleanup = sfc_mae_flow_cleanup,
	.insert = sfc_mae_flow_insert,
	.remove = sfc_mae_flow_remove,
	.query = sfc_mae_flow_query,
};

static const struct sfc_flow_ops_by_spec *
sfc_flow_get_ops_by_spec(struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	const struct sfc_flow_ops_by_spec *ops = NULL;

	switch (spec->type) {
	case SFC_FLOW_SPEC_FILTER:
		ops = &sfc_flow_ops_filter;
		break;
	case SFC_FLOW_SPEC_MAE:
		ops = &sfc_flow_ops_mae;
		break;
	default:
		SFC_ASSERT(false);
		break;
	}

	return ops;
}

/*
 * Currently, the filter-based (VNIC) flow API is implemented in such a
 * manner that each flow rule is converted to one or more hardware filters.
 * All elements of a flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that represents the hardware filter.
 * If some required field is unset in the flow rule, a number of filter
 * copies is created to cover all possible values of that field.
 */
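
/*
 * For illustration only (hypothetical values, not filters the code below
 * emits verbatim): a rule that matches EtherType 0x0800 but leaves the
 * destination MAC unspecified cannot be expressed as a single hardware
 * filter, so the template is roughly copied once per UNKNOWN_*_DST flag:
 *
 *   template: ETHER_TYPE=0x0800                     -> RxQ 3
 *   copy 1:   ETHER_TYPE=0x0800 + UNKNOWN_UCAST_DST -> RxQ 3
 *   copy 2:   ETHER_TYPE=0x0800 + UNKNOWN_MCAST_DST -> RxQ 3
 */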

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
static sfc_flow_item_parse sfc_flow_parse_vxlan;
static sfc_flow_item_parse sfc_flow_parse_geneve;
static sfc_flow_item_parse sfc_flow_parse_nvgre;
static sfc_flow_item_parse sfc_flow_parse_pppoex;

typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
				     unsigned int filters_count_for_one_val,
				     struct rte_flow_error *error);

typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
					efx_filter_spec_t *spec,
					struct sfc_filter *filter);

struct sfc_flow_copy_flag {
	/* EFX filter specification match flag */
	efx_filter_match_flags_t flag;
	/* Number of values of corresponding field */
	unsigned int vals_count;
	/* Function to set values in specifications */
	sfc_flow_spec_set_vals *set_vals;
	/*
	 * Function to check that the specification is suitable
	 * for adding this match flag
	 */
	sfc_flow_spec_check *spec_check;
};

static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag;
static sfc_flow_spec_check sfc_flow_check_outer_vid_flag;

static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
	uint8_t sum = 0;
	unsigned int i;

	for (i = 0; i < size; i++)
		sum |= buf[i];

	return (sum == 0) ? B_TRUE : B_FALSE;
}

/*
 * Validate the item and prepare the spec and mask structures for parsing
 */
int
sfc_flow_parse_init(const struct rte_flow_item *item,
		    const void **spec_ptr,
		    const void **mask_ptr,
		    const void *supp_mask,
		    const void *def_mask,
		    unsigned int size,
		    struct rte_flow_error *error)
{
	const uint8_t *spec;
	const uint8_t *mask;
	const uint8_t *last;
	uint8_t supp;
	unsigned int i;

	if (item == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "NULL item");
		return -rte_errno;
	}

	if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Mask or last is set without spec");
		return -rte_errno;
	}

	/*
	 * If "mask" is not set, the default mask is used,
	 * but if the default mask is NULL, "mask" must be set
	 */
	if (item->mask == NULL) {
		if (def_mask == NULL) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				"Mask should be specified");
			return -rte_errno;
		}

		mask = def_mask;
	} else {
		mask = item->mask;
	}

	spec = item->spec;
	last = item->last;

	if (spec == NULL)
		goto exit;

	/*
	 * Field values in "last" are ignored if they are either zero or
	 * equal to the corresponding values in "spec"; otherwise, a range
	 * is requested, and ranging is not supported
	 */
	if (last != NULL &&
	    !sfc_flow_is_zero(last, size) &&
	    memcmp(last, spec, size) != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Ranging is not supported");
		return -rte_errno;
	}

	if (supp_mask == NULL) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Supported mask for item should be specified");
		return -rte_errno;
	}

	/* Check that mask does not ask for more match than supp_mask */
	for (i = 0; i < size; i++) {
		supp = ((const uint8_t *)supp_mask)[i];

		if (~supp & mask[i]) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Item's field is not supported");
			return -rte_errno;
		}
	}

exit:
	*spec_ptr = spec;
	*mask_ptr = mask;
	return 0;
}

/*
 * Protocol parsers.
 * Arbitrary masking is not supported: the mask of each item field must
 * be either full or empty (zeroed), and only fields covered by the
 * corresponding supp_mask may be set.
 */
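
/*
 * For example (hypothetical masks, for illustration only): an ETH item
 * with mask.type = 0xffff is accepted, and so is mask.type = 0x0000
 * (the field is then ignored), while a partial mask such as 0xff00 is
 * rejected with "Bad mask in the ETH pattern item".
 */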

static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
		    __rte_unused struct sfc_flow_parse_ctx *parse_ctx,
		    __rte_unused struct rte_flow_error *error)
{
	return 0;
}

/**
 * Convert an Ethernet item to an EFX filter specification.
 *
 * @param[in] item
 *   Item specification. The outer frame specification may comprise only
 *   source/destination addresses and the EtherType field.
 *   The inner frame specification may contain the destination address only.
 *   Individual/group masks are supported, as well as empty and full ones.
 *   If the mask is NULL, the default mask is used. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
		   struct sfc_flow_parse_ctx *parse_ctx,
		   struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_eth *spec = NULL;
	const struct rte_flow_item_eth *mask = NULL;
	const struct rte_flow_item_eth supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.type = 0xffff,
	};
	const struct rte_flow_item_eth ifrm_supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
	};
	const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
		0x01, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	const struct rte_flow_item_eth *supp_mask_p;
	const struct rte_flow_item_eth *def_mask_p;
	uint8_t *loc_mac = NULL;
	boolean_t is_ifrm = (efx_spec->efs_encap_type !=
		EFX_TUNNEL_PROTOCOL_NONE);

	if (is_ifrm) {
		supp_mask_p = &ifrm_supp_mask;
		def_mask_p = &ifrm_supp_mask;
		loc_mac = efx_spec->efs_ifrm_loc_mac;
	} else {
		supp_mask_p = &supp_mask;
		def_mask_p = &rte_flow_item_eth_mask;
		loc_mac = efx_spec->efs_loc_mac;
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 supp_mask_p, def_mask_p,
				 sizeof(struct rte_flow_item_eth),
				 error);
	if (rc != 0)
		return rc;

	/* If "spec" is not set, the rule matches any Ethernet frame */
	if (spec == NULL)
		return 0;

	if (rte_is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
		efx_spec->efs_match_flags |= is_ifrm ?
			EFX_FILTER_MATCH_IFRM_LOC_MAC :
			EFX_FILTER_MATCH_LOC_MAC;
		rte_memcpy(loc_mac, spec->dst.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (memcmp(mask->dst.addr_bytes, ig_mask,
			  EFX_MAC_ADDR_LEN) == 0) {
		if (rte_is_unicast_ether_addr(&spec->dst))
			efx_spec->efs_match_flags |= is_ifrm ?
				EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
				EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
		else
			efx_spec->efs_match_flags |= is_ifrm ?
				EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
				EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
	} else if (!rte_is_zero_ether_addr(&mask->dst)) {
		goto fail_bad_mask;
	}

	/*
	 * ifrm_supp_mask ensures that the source address and EtherType
	 * masks are zero in the inner frame, so these fields are filled
	 * in only for the outer frame
	 */
	if (rte_is_same_ether_addr(&mask->src, &supp_mask.src)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
		rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (!rte_is_zero_ether_addr(&mask->src)) {
		goto fail_bad_mask;
	}

	/*
	 * The EtherType is in big-endian byte order in the item and in
	 * little-endian in efx_spec, so a byte swap is needed
	 */
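	/*
	 * E.g. the IPv4 EtherType 0x0800 is stored in the item as the
	 * bytes 08 00; on a little-endian CPU that reads back as 0x0008,
	 * and rte_bswap16() restores 0x0800 for efs_ether_type.
	 */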
	if (mask->type == supp_mask.type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->type);
	} else if (mask->type != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the ETH pattern item");
	return -rte_errno;
}

/**
 * Convert a VLAN item to an EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the VID field is supported.
 *   The mask cannot be NULL. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
		    struct sfc_flow_parse_ctx *parse_ctx,
		    struct rte_flow_error *error)
{
	int rc;
	uint16_t vid;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_vlan *spec = NULL;
	const struct rte_flow_item_vlan *mask = NULL;
	const struct rte_flow_item_vlan supp_mask = {
		.tci = rte_cpu_to_be_16(RTE_ETH_VLAN_ID_MAX),
		.inner_type = RTE_BE16(0xffff),
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 NULL,
				 sizeof(struct rte_flow_item_vlan),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * The VID is in big-endian byte order in the item and in
	 * little-endian in efx_spec, so a byte swap is used.
	 * If two VLAN items are included, the first matches
	 * the outer tag and the next matches the inner tag.
	 */
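	/*
	 * E.g. matching QinQ traffic with outer VID 100 and inner VID 200
	 * (hypothetical values) takes two VLAN items in that order: the
	 * first fills efs_outer_vid and the second efs_inner_vid.
	 */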
	if (mask->tci == supp_mask.tci) {
		/* Apply mask to keep VID only */
		vid = rte_bswap16(spec->tci & mask->tci);

		if (!(efx_spec->efs_match_flags &
		      EFX_FILTER_MATCH_OUTER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
			efx_spec->efs_outer_vid = vid;
		} else if (!(efx_spec->efs_match_flags &
			     EFX_FILTER_MATCH_INNER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
			efx_spec->efs_inner_vid = vid;
		} else {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "More than two VLAN items");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN ID in TCI match is required");
		return -rte_errno;
	}

	if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN TPID matching is not supported");
		return -rte_errno;
	}
	if (mask->inner_type == supp_mask.inner_type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->inner_type);
	} else if (mask->inner_type) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Bad mask for VLAN inner_type");
		return -rte_errno;
	}

	return 0;
}

/**
 * Convert an IPv4 item to an EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination addresses and
 *   the protocol field are supported. If the mask is NULL, the default
 *   mask is used. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
		    struct sfc_flow_parse_ctx *parse_ctx,
		    struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_ipv4 *spec = NULL;
	const struct rte_flow_item_ipv4 *mask = NULL;
	const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
	const struct rte_flow_item_ipv4 supp_mask = {
		.hdr = {
			.src_addr = 0xffffffff,
			.dst_addr = 0xffffffff,
			.next_proto_id = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv4_mask,
				 sizeof(struct rte_flow_item_ipv4),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv4 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv4;
	} else if (efx_spec->efs_ether_type != ether_type_ipv4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV4 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv4 addresses are in big-endian byte order in item and in
	 * efx_spec
	 */
	if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
		efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
	} else if (mask->hdr.src_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
		efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
	} else if (mask->hdr.dst_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
	} else if (mask->hdr.next_proto_id != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV4 pattern item");
	return -rte_errno;
}

/**
 * Convert an IPv6 item to an EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination addresses and
 *   the next header field are supported. If the mask is NULL, the
 *   default mask is used. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
		    struct sfc_flow_parse_ctx *parse_ctx,
		    struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_ipv6 *spec = NULL;
	const struct rte_flow_item_ipv6 *mask = NULL;
	const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
	const struct rte_flow_item_ipv6 supp_mask = {
		.hdr = {
			.src_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.dst_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.proto = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv6_mask,
				 sizeof(struct rte_flow_item_ipv6),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv6 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv6;
	} else if (efx_spec->efs_ether_type != ether_type_ipv6) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV6 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv6 addresses are in big-endian byte order in item and in
	 * efx_spec
	 */
	if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
		   sizeof(mask->hdr.src_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
				 sizeof(spec->hdr.src_addr));
		rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
			   sizeof(efx_spec->efs_rem_host));
	} else if (!sfc_flow_is_zero(mask->hdr.src_addr,
				     sizeof(mask->hdr.src_addr))) {
		goto fail_bad_mask;
	}

	if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
		   sizeof(mask->hdr.dst_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
				 sizeof(spec->hdr.dst_addr));
		rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
			   sizeof(efx_spec->efs_loc_host));
	} else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
				     sizeof(mask->hdr.dst_addr))) {
		goto fail_bad_mask;
	}

	if (mask->hdr.proto == supp_mask.hdr.proto) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.proto;
	} else if (mask->hdr.proto != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV6 pattern item");
	return -rte_errno;
}

/**
 * Convert a TCP item to an EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination port fields
 *   are supported. If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_tcp(const struct rte_flow_item *item,
		   struct sfc_flow_parse_ctx *parse_ctx,
		   struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_tcp *spec = NULL;
	const struct rte_flow_item_tcp *mask = NULL;
	const struct rte_flow_item_tcp supp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_tcp_mask,
				 sizeof(struct rte_flow_item_tcp),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by TCP source and destination ports requires
	 * the appropriate IP_PROTO in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"IP proto in pattern with TCP item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * Source and destination ports are in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
	} else if (mask->hdr.src_port != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
	} else if (mask->hdr.dst_port != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the TCP pattern item");
	return -rte_errno;
}

/**
 * Convert a UDP item to an EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination port fields
 *   are supported. If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_udp(const struct rte_flow_item *item,
		   struct sfc_flow_parse_ctx *parse_ctx,
		   struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_udp *spec = NULL;
	const struct rte_flow_item_udp *mask = NULL;
	const struct rte_flow_item_udp supp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_udp_mask,
				 sizeof(struct rte_flow_item_udp),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by UDP source and destination ports requires
	 * the appropriate IP_PROTO in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"IP proto in pattern with UDP item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * Source and destination ports are in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
	} else if (mask->hdr.src_port != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
	} else if (mask->hdr.dst_port != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the UDP pattern item");
	return -rte_errno;
}

/*
 * Filters for encapsulated packets match based on the EtherType and IP
 * protocol in the outer frame.
 */
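/*
 * E.g. in the pattern [ ETH, IPV4, UDP, VXLAN ] the UDP item sets
 * efs_ip_proto to UDP and the VXLAN parser then calls this helper to
 * verify it; had the pattern carried TCP instead, the VXLAN item would
 * be rejected here.
 */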
static int
sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
					efx_filter_spec_t *efx_spec,
					uint8_t ip_proto,
					struct rte_flow_error *error)
{
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = ip_proto;
	} else if (efx_spec->efs_ip_proto != ip_proto) {
		switch (ip_proto) {
		case EFX_IPPROTO_UDP:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Outer IP header protocol must be UDP "
				"in VxLAN/GENEVE pattern");
			return -rte_errno;

		case EFX_IPPROTO_GRE:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Outer IP header protocol must be GRE "
				"in NVGRE pattern");
			return -rte_errno;

		default:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Only VxLAN/GENEVE/NVGRE tunneling patterns "
				"are supported");
			return -rte_errno;
		}
	}

	if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
	    efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
	    efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Outer frame EtherType in pattern with tunneling "
			"must be IPv4 or IPv6");
		return -rte_errno;
	}

	return 0;
}

static int
sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
				  const uint8_t *vni_or_vsid_val,
				  const uint8_t *vni_or_vsid_mask,
				  const struct rte_flow_item *item,
				  struct rte_flow_error *error)
{
	const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
		0xff, 0xff, 0xff
	};

	if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
		   EFX_VNI_OR_VSID_LEN) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
		rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
			   EFX_VNI_OR_VSID_LEN);
	} else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Unsupported VNI/VSID mask");
		return -rte_errno;
	}

	return 0;
}

/**
 * Convert a VXLAN item to an EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the VXLAN network identifier field is
 *   supported. If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vxlan(const struct rte_flow_item *item,
		     struct sfc_flow_parse_ctx *parse_ctx,
		     struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_vxlan *spec = NULL;
	const struct rte_flow_item_vxlan *mask = NULL;
	const struct rte_flow_item_vxlan supp_mask = {
		.vni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_vxlan_mask,
				 sizeof(struct rte_flow_item_vxlan),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_UDP, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
					       mask->vni, item, error);

	return rc;
}

/**
 * Convert a GENEVE item to an EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the Virtual Network Identifier and protocol
 *   type fields are supported, and the protocol type may only be
 *   Ethernet (0x6558). If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_geneve(const struct rte_flow_item *item,
		      struct sfc_flow_parse_ctx *parse_ctx,
		      struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_geneve *spec = NULL;
	const struct rte_flow_item_geneve *mask = NULL;
	const struct rte_flow_item_geneve supp_mask = {
		.protocol = RTE_BE16(0xffff),
		.vni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_geneve_mask,
				 sizeof(struct rte_flow_item_geneve),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_UDP, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	if (mask->protocol == supp_mask.protocol) {
		if (spec->protocol != rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"GENEVE encap. protocol must be Ethernet "
				"(0x6558) in the GENEVE pattern item");
			return -rte_errno;
		}
	} else if (mask->protocol != 0) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Unsupported mask for GENEVE encap. protocol");
		return -rte_errno;
	}

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
					       mask->vni, item, error);

	return rc;
}

/**
 * Convert an NVGRE item to an EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the virtual subnet ID field is supported.
 *   If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_nvgre(const struct rte_flow_item *item,
		     struct sfc_flow_parse_ctx *parse_ctx,
		     struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_nvgre *spec = NULL;
	const struct rte_flow_item_nvgre *mask = NULL;
	const struct rte_flow_item_nvgre supp_mask = {
		.tni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_nvgre_mask,
				 sizeof(struct rte_flow_item_nvgre),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_GRE, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
					       mask->tni, item, error);

	return rc;
}

/**
 * Convert a PPPoEx item to an EFX filter specification.
 *
 * @param[in] item
 *   Item specification.
 *   Matching on PPPoEx fields is not supported.
 *   This item can only be used to set or validate the EtherType filter.
 *   Only zero masks are allowed.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_pppoex(const struct rte_flow_item *item,
		      struct sfc_flow_parse_ctx *parse_ctx,
		      struct rte_flow_error *error)
{
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_pppoe *spec = NULL;
	const struct rte_flow_item_pppoe *mask = NULL;
	const struct rte_flow_item_pppoe supp_mask = {};
	const struct rte_flow_item_pppoe def_mask = {};
	uint16_t ether_type;
	int rc;

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &def_mask,
				 sizeof(struct rte_flow_item_pppoe),
				 error);
	if (rc != 0)
		return rc;

	if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED)
		ether_type = RTE_ETHER_TYPE_PPPOE_DISCOVERY;
	else
		ether_type = RTE_ETHER_TYPE_PPPOE_SESSION;

	if ((efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) != 0) {
		if (efx_spec->efs_ether_type != ether_type) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Invalid EtherType for a PPPoE flow item");
			return -rte_errno;
		}
	} else {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type;
	}

	return 0;
}

static const struct sfc_flow_item sfc_flow_items[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_VOID,
		.name = "VOID",
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_void,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.name = "ETH",
		.prev_layer = SFC_FLOW_ITEM_START_LAYER,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_eth,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.name = "VLAN",
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_vlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_PPPOED,
		.name = "PPPOED",
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_pppoex,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_PPPOES,
		.name = "PPPOES",
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_pppoex,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.name = "IPV4",
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_ipv4,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.name = "IPV6",
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_ipv6,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.name = "TCP",
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_tcp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.name = "UDP",
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_udp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
		.name = "VXLAN",
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_vxlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_GENEVE,
		.name = "GENEVE",
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_geneve,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_NVGRE,
		.name = "NVGRE",
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_nvgre,
	},
};
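
/*
 * Illustration of the layer ordering encoded above (not exhaustive):
 * ETH (START -> L2) may be followed by VLAN (L2 -> L2), then IPV4/IPV6
 * (L2 -> L3), then TCP/UDP (L3 -> L4). Tunnel items reset the layer to
 * START_LAYER, so VXLAN/GENEVE (after L4) or NVGRE (after L3) may be
 * followed by an inner ETH item again.
 */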

/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(struct sfc_adapter *sa,
		    const struct rte_flow_attr *attr,
		    struct rte_flow *flow,
		    struct rte_flow_error *error)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
	struct sfc_mae *mae = &sa->mae;

	if (attr == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "NULL attribute");
		return -rte_errno;
	}
	if (attr->group != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
				   "Groups are not supported");
		return -rte_errno;
	}
	if (attr->egress != 0 && attr->transfer == 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
				   "Egress is not supported");
		return -rte_errno;
	}
	if (attr->ingress == 0 && attr->transfer == 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
				   "Ingress is compulsory");
		return -rte_errno;
	}
	if (attr->transfer == 0) {
		if (attr->priority != 0) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					   attr, "Priorities are unsupported");
			return -rte_errno;
		}
		spec->type = SFC_FLOW_SPEC_FILTER;
		spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX;
		spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
		spec_filter->template.efs_priority = EFX_FILTER_PRI_MANUAL;
	} else {
		if (mae->status != SFC_MAE_STATUS_ADMIN) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					   attr, "Transfer is not supported");
			return -rte_errno;
		}
		if (attr->priority > mae->nb_action_rule_prios_max) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					   attr, "Unsupported priority level");
			return -rte_errno;
		}
		spec->type = SFC_FLOW_SPEC_MAE;
		spec_mae->priority = attr->priority;
		spec_mae->match_spec = NULL;
		spec_mae->action_set = NULL;
		spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
	}

	return 0;
}

/* Get item from array sfc_flow_items */
static const struct sfc_flow_item *
sfc_flow_get_item(const struct sfc_flow_item *items,
		  unsigned int nb_items,
		  enum rte_flow_item_type type)
{
	unsigned int i;

	for (i = 0; i < nb_items; i++)
		if (items[i].type == type)
			return &items[i];

	return NULL;
}

int
sfc_flow_parse_pattern(struct sfc_adapter *sa,
		       const struct sfc_flow_item *flow_items,
		       unsigned int nb_flow_items,
		       const struct rte_flow_item pattern[],
		       struct sfc_flow_parse_ctx *parse_ctx,
		       struct rte_flow_error *error)
{
	int rc;
	unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
	boolean_t is_ifrm = B_FALSE;
	const struct sfc_flow_item *item;

	if (pattern == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
				   "NULL pattern");
		return -rte_errno;
	}

	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
		item = sfc_flow_get_item(flow_items, nb_flow_items,
					 pattern->type);
		if (item == NULL) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unsupported pattern item");
			return -rte_errno;
		}

		/*
		 * Omitting one or several protocol layers at the beginning
		 * of the pattern is supported
		 */
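		/*
		 * E.g. [ IPV4, TCP, END ] is accepted without a leading ETH
		 * item: prev_layer starts as ANY_LAYER, so the first item's
		 * prev_layer requirement is not enforced.
		 */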
		if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    item->prev_layer != prev_layer) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unexpected sequence of pattern items");
			return -rte_errno;
		}

		/*
		 * Allow only VOID and ETH pattern items in the inner frame.
		 * Also check that there is only one tunneling protocol.
		 */
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
		case RTE_FLOW_ITEM_TYPE_ETH:
			break;

		case RTE_FLOW_ITEM_TYPE_VXLAN:
		case RTE_FLOW_ITEM_TYPE_GENEVE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			if (is_ifrm) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					pattern,
					"More than one tunneling protocol");
				return -rte_errno;
			}
			is_ifrm = B_TRUE;
			break;

		default:
			if (parse_ctx->type == SFC_FLOW_PARSE_CTX_FILTER &&
			    is_ifrm) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					pattern,
					"There is an unsupported pattern item "
					"in the inner frame");
				return -rte_errno;
			}
			break;
		}

		if (parse_ctx->type != item->ctx_type) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					"Parse context type mismatch");
			return -rte_errno;
		}

		rc = item->parse(pattern, parse_ctx, error);
		if (rc != 0) {
			sfc_err(sa, "failed to parse item %s: %s",
				item->name, strerror(-rc));
			return rc;
		}

		if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
			prev_layer = item->layer;
	}

	return 0;
}

static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
		     const struct rte_flow_action_queue *queue,
		     struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	struct sfc_rxq *rxq;
	struct sfc_rxq_info *rxq_info;

	if (queue->index >= sfc_sa2shared(sa)->ethdev_rxq_count)
		return -EINVAL;

	rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, queue->index);
	spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index;

	rxq_info = &sfc_sa2shared(sa)->rxq_info[queue->index];
	spec_filter->rss_hash_required = !!(rxq_info->rxq_flags &
					    SFC_RXQ_FLAG_RSS_HASH);

	return 0;
}

static int
sfc_flow_parse_rss(struct sfc_adapter *sa,
		   const struct rte_flow_action_rss *action_rss,
		   struct rte_flow *flow)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_rss *rss = &sas->rss;
	sfc_ethdev_qid_t ethdev_qid;
	struct sfc_rxq *rxq;
	unsigned int rxq_hw_index_min;
	unsigned int rxq_hw_index_max;
	efx_rx_hash_type_t efx_hash_types;
	const uint8_t *rss_key;
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	struct sfc_flow_rss *sfc_rss_conf = &spec_filter->rss_conf;
	unsigned int i;

	if (action_rss->queue_num == 0)
		return -EINVAL;

	ethdev_qid = sfc_sa2shared(sa)->ethdev_rxq_count - 1;
	rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);
	rxq_hw_index_min = rxq->hw_index;
	rxq_hw_index_max = 0;

	for (i = 0; i < action_rss->queue_num; ++i) {
		ethdev_qid = action_rss->queue[i];

		if ((unsigned int)ethdev_qid >=
		    sfc_sa2shared(sa)->ethdev_rxq_count)
			return -EINVAL;

		rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);

		if (rxq->hw_index < rxq_hw_index_min)
			rxq_hw_index_min = rxq->hw_index;

		if (rxq->hw_index > rxq_hw_index_max)
			rxq_hw_index_max = rxq->hw_index;
	}

	if (rxq_hw_index_max - rxq_hw_index_min + 1 > EFX_MAXRSS)
		return -EINVAL;

	switch (action_rss->func) {
	case RTE_ETH_HASH_FUNCTION_DEFAULT:
	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
		break;
	default:
		return -EINVAL;
	}

	if (action_rss->level)
		return -EINVAL;

	/*
	 * A dummy RSS action with only one queue and no specific settings
	 * for hash types and key does not require a dedicated RSS context
	 * and may be simplified to a single-queue action.
	 */
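	/*
	 * E.g. RSS with queue = { 5 }, types = 0 and key_len = 0
	 * (hypothetical values) behaves exactly like QUEUE with index 5.
	 */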
	if (action_rss->queue_num == 1 && action_rss->types == 0 &&
	    action_rss->key_len == 0) {
		spec_filter->template.efs_dmaq_id = rxq_hw_index_min;
		return 0;
	}

	if (action_rss->types) {
		int rc;

		rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types,
					  &efx_hash_types);
		if (rc != 0)
			return -rc;
	} else {
		unsigned int i;

		efx_hash_types = 0;
		for (i = 0; i < rss->hf_map_nb_entries; ++i)
			efx_hash_types |= rss->hf_map[i].efx;
	}

	if (action_rss->key_len) {
		if (action_rss->key_len != sizeof(rss->key))
			return -EINVAL;

		rss_key = action_rss->key;
	} else {
		rss_key = rss->key;
	}

	spec_filter->rss = B_TRUE;

	sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
	sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
	sfc_rss_conf->rss_hash_types = efx_hash_types;
	rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key));

	for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
		unsigned int nb_queues = action_rss->queue_num;
		struct sfc_rxq *rxq;

		ethdev_qid = action_rss->queue[i % nb_queues];
		rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);
		sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
	}

	return 0;
}

static int
sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
		    unsigned int filters_count)
{
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < filters_count; i++) {
		int rc;

		rc = efx_filter_remove(sa->nic, &spec_filter->filters[i]);
		if (ret == 0 && rc != 0) {
			sfc_err(sa, "failed to remove filter specification "
				"(rc = %d)", rc);
			ret = rc;
		}
	}

	return ret;
}

static int
sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	unsigned int i;
	int rc = 0;

	for (i = 0; i < spec_filter->count; i++) {
		rc = efx_filter_insert(sa->nic, &spec_filter->filters[i]);
		if (rc != 0) {
			sfc_flow_spec_flush(sa, spec, i);
			break;
		}
	}

	return rc;
}

static int
sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;

	return sfc_flow_spec_flush(sa, spec, spec_filter->count);
}

static int
sfc_flow_filter_insert(struct sfc_adapter *sa,
		       struct rte_flow *flow)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_rss *rss = &sas->rss;
	struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
	struct sfc_flow_rss *flow_rss = &spec_filter->rss_conf;
	uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
	boolean_t create_context;
	unsigned int i;
	int rc = 0;

	create_context = spec_filter->rss || (spec_filter->rss_hash_required &&
			rss->dummy_rss_context == EFX_RSS_CONTEXT_DEFAULT);

	if (create_context) {
		unsigned int rss_spread;
		unsigned int rss_hash_types;
		uint8_t *rss_key;

		if (spec_filter->rss) {
			rss_spread = flow_rss->rxq_hw_index_max -
				     flow_rss->rxq_hw_index_min + 1;
			rss_hash_types = flow_rss->rss_hash_types;
			rss_key = flow_rss->rss_key;
		} else {
			/*
			 * Initialize dummy RSS context parameters to have
			 * valid RSS hash. Use default RSS hash function and
			 * key.
			 */
			rss_spread = 1;
			rss_hash_types = rss->hash_types;
			rss_key = rss->key;
		}

		rc = efx_rx_scale_context_alloc(sa->nic,
						EFX_RX_SCALE_EXCLUSIVE,
						rss_spread,
						&efs_rss_context);
		if (rc != 0)
			goto fail_scale_context_alloc;

		rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
					   rss->hash_alg,
					   rss_hash_types, B_TRUE);
		if (rc != 0)
			goto fail_scale_mode_set;

		rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
					  rss_key, sizeof(rss->key));
		if (rc != 0)
			goto fail_scale_key_set;
	} else {
		efs_rss_context = rss->dummy_rss_context;
	}

	if (spec_filter->rss || spec_filter->rss_hash_required) {
		/*
		 * At this point, fully elaborated filter specifications
		 * have been produced from the template. To make sure that
		 * RSS behaviour is consistent between them, set the same
		 * RSS context value everywhere.
		 */
		for (i = 0; i < spec_filter->count; i++) {
			efx_filter_spec_t *spec = &spec_filter->filters[i];

			spec->efs_rss_context = efs_rss_context;
			spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
			if (spec_filter->rss)
				spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
		}
	}

	rc = sfc_flow_spec_insert(sa, &flow->spec);
	if (rc != 0)
		goto fail_filter_insert;

	if (create_context) {
		unsigned int dummy_tbl[RTE_DIM(flow_rss->rss_tbl)] = {0};
		unsigned int *tbl;

		tbl = spec_filter->rss ? flow_rss->rss_tbl : dummy_tbl;

		/*
		 * The scale table is set after filter insertion because
		 * its entries are relative to the base RxQ ID, which is
		 * submitted to the HW by inserting a filter. By the time
		 * of this request the HW therefore has all the information
		 * needed to verify the table entries, and the operation
		 * will succeed.
		 */
		rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
					  tbl, RTE_DIM(flow_rss->rss_tbl));
		if (rc != 0)
			goto fail_scale_tbl_set;

		/* Remember created dummy RSS context */
		if (!spec_filter->rss)
			rss->dummy_rss_context = efs_rss_context;
	}

	return 0;

fail_scale_tbl_set:
	sfc_flow_spec_remove(sa, &flow->spec);

fail_filter_insert:
fail_scale_key_set:
fail_scale_mode_set:
	if (create_context)
		efx_rx_scale_context_free(sa->nic, efs_rss_context);

fail_scale_context_alloc:
	return rc;
}

static int
sfc_flow_filter_remove(struct sfc_adapter *sa,
		       struct rte_flow *flow)
{
	struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
	int rc = 0;

	rc = sfc_flow_spec_remove(sa, &flow->spec);
	if (rc != 0)
		return rc;

	if (spec_filter->rss) {
		/*
		 * All specifications for a given flow rule have the same RSS
		 * context, so the RSS context value is taken from the first
		 * filter specification
		 */
		efx_filter_spec_t *spec = &spec_filter->filters[0];

		rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
	}

	return rc;
}

static int
sfc_flow_parse_mark(struct sfc_adapter *sa,
		    const struct rte_flow_action_mark *mark,
		    struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	uint32_t mark_max;

	mark_max = encp->enc_filter_action_mark_max;
	if (sfc_flow_tunnel_is_active(sa))
		mark_max = RTE_MIN(mark_max, SFC_FT_USER_MARK_MASK);

	if (mark == NULL || mark->id > mark_max)
		return EINVAL;

	spec_filter->template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
	spec_filter->template.efs_mark = mark->id;

	return 0;
}

static int
sfc_flow_parse_actions(struct sfc_adapter *sa,
		       const struct rte_flow_action actions[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	const unsigned int dp_rx_features = sa->priv.dp_rx->features;
	const uint64_t rx_metadata = sa->negotiated_rx_metadata;
	uint32_t actions_set = 0;
	const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
					   (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
					   (1UL << RTE_FLOW_ACTION_TYPE_DROP);
	const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) |
					   (1UL << RTE_FLOW_ACTION_TYPE_FLAG);

	if (actions == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				   "NULL actions");
		return -rte_errno;
	}

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID,
					       actions_set);
			break;

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE,
					       actions_set);
			if ((actions_set & fate_actions_mask) != 0)
				goto fail_fate_actions;

			rc = sfc_flow_parse_queue(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad QUEUE action");
				return -rte_errno;
			}
			break;

		case RTE_FLOW_ACTION_TYPE_RSS:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS,
					       actions_set);
			if ((actions_set & fate_actions_mask) != 0)
				goto fail_fate_actions;

			rc = sfc_flow_parse_rss(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, -rc,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad RSS action");
				return -rte_errno;
			}
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
					       actions_set);
			if ((actions_set & fate_actions_mask) != 0)
				goto fail_fate_actions;

			spec_filter->template.efs_dmaq_id =
				EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
			break;

		case RTE_FLOW_ACTION_TYPE_FLAG:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
					       actions_set);
			if ((actions_set & mark_actions_mask) != 0)
				goto fail_actions_overlap;

			if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) {
				rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"FLAG action is not supported on the current Rx datapath");
				return -rte_errno;
			} else if ((rx_metadata &
				    RTE_ETH_RX_METADATA_USER_FLAG) == 0) {
				rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"flag delivery has not been negotiated");
				return -rte_errno;
			}

			spec_filter->template.efs_flags |=
				EFX_FILTER_FLAG_ACTION_FLAG;
			break;

		case RTE_FLOW_ACTION_TYPE_MARK:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
					       actions_set);
			if ((actions_set & mark_actions_mask) != 0)
				goto fail_actions_overlap;

			if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) {
				rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"MARK action is not supported on the current Rx datapath");
				return -rte_errno;
			} else if ((rx_metadata &
				    RTE_ETH_RX_METADATA_USER_MARK) == 0) {
				rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"mark delivery has not been negotiated");
				return -rte_errno;
			}

			rc = sfc_flow_parse_mark(sa, actions->conf, flow);
			if (rc != 0) {
1878 				rte_flow_error_set(error, rc,
1879 					RTE_FLOW_ERROR_TYPE_ACTION, actions,
1880 					"Bad MARK action");
1881 				return -rte_errno;
1882 			}
1883 			break;
1884 
1885 		default:
1886 			rte_flow_error_set(error, ENOTSUP,
1887 					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
1888 					   "Action is not supported");
1889 			return -rte_errno;
1890 		}
1891 
1892 		actions_set |= (1UL << actions->type);
1893 	}
1894 
1895 	/* When fate is unknown, drop traffic. */
1896 	if ((actions_set & fate_actions_mask) == 0) {
1897 		spec_filter->template.efs_dmaq_id =
1898 			EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1899 	}
1900 
1901 	return 0;
1902 
1903 fail_fate_actions:
1904 	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1905 			   "Cannot combine several fate-deciding actions, "
1906 			   "choose between QUEUE, RSS or DROP");
1907 	return -rte_errno;
1908 
1909 fail_actions_overlap:
1910 	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1911 			   "Overlapping actions are not supported");
1912 	return -rte_errno;
1913 }
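
/*
 * Illustrative example (not part of the driver): the parser above accepts
 * at most one fate-deciding action (QUEUE, RSS or DROP) combined with at
 * most one of MARK/FLAG. A valid actions array could look like:
 *
 *	struct rte_flow_action_mark mark = { .id = 42 };
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 * Listing both QUEUE and RSS, or both MARK and FLAG, would be rejected
 * with the errors set above.
 */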
1914 
1915 /**
1916  * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
1917  * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same
1918  * specifications after copying.
1919  *
1920  * @param spec[in, out]
1921  *   SFC flow specification to update.
1922  * @param filters_count_for_one_val[in]
1923  *   How many specifications should have the same match flag, i.e. the
1924  *   number of specifications before copying.
1925  * @param error[out]
1926  *   Perform verbose error reporting if not NULL.
1927  */
1928 static int
1929 sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
1930 			       unsigned int filters_count_for_one_val,
1931 			       struct rte_flow_error *error)
1932 {
1933 	unsigned int i;
1934 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1935 	static const efx_filter_match_flags_t vals[] = {
1936 		EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
1937 		EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
1938 	};
1939 
1940 	if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1941 		rte_flow_error_set(error, EINVAL,
1942 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1943 			"Number of specifications is incorrect while copying "
1944 			"by unknown destination flags");
1945 		return -rte_errno;
1946 	}
1947 
1948 	for (i = 0; i < spec_filter->count; i++) {
1949 		/* The check above ensures that divisor can't be zero here */
1950 		spec_filter->filters[i].efs_match_flags |=
1951 			vals[i / filters_count_for_one_val];
1952 	}
1953 
1954 	return 0;
1955 }
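
/*
 * A minimal standalone sketch (illustrative only, hypothetical flag
 * values) of the duplicate-and-tag indexing used above:
 *
 *	unsigned int flags[4] = { 0x1, 0x2, 0x1, 0x2 };
 *	unsigned int vals[2] = { 0x10, 0x20 };
 *	unsigned int filters_count_for_one_val = 2;
 *	unsigned int i;
 *
 *	for (i = 0; i < 4; i++)
 *		flags[i] |= vals[i / filters_count_for_one_val];
 *
 * Afterwards flags is { 0x11, 0x12, 0x21, 0x22 }: the first half of the
 * copies is tagged with the first value (unknown unicast), the second
 * half with the second one (unknown multicast).
 */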
1956 
1957 /**
1958  * Check that the following condition is met:
1959  * - the list of supported filters has a filter
1960  *   with EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of
1961  *   EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also
1962  *   be inserted.
1963  *
1964  * @param match[in]
1965  *   The match flags of filter.
1966  * @param spec[in]
1967  *   Specification to be supplemented.
1968  * @param filter[in]
1969  *   SFC filter with list of supported filters.
1970  */
1971 static boolean_t
1972 sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
1973 				 __rte_unused efx_filter_spec_t *spec,
1974 				 struct sfc_filter *filter)
1975 {
1976 	unsigned int i;
1977 	efx_filter_match_flags_t match_mcast_dst;
1978 
1979 	match_mcast_dst =
1980 		(match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
1981 		EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
1982 	for (i = 0; i < filter->supported_match_num; i++) {
1983 		if (match_mcast_dst == filter->supported_match[i])
1984 			return B_TRUE;
1985 	}
1986 
1987 	return B_FALSE;
1988 }
1989 
1990 /**
1991  * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and
1992  * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same
1993  * specifications after copying.
1994  *
1995  * @param spec[in, out]
1996  *   SFC flow specification to update.
1997  * @param filters_count_for_one_val[in]
1998  *   How many specifications should have the same EtherType value, i.e. the
1999  *   number of specifications before copying.
2000  * @param error[out]
2001  *   Perform verbose error reporting if not NULL.
2002  */
2003 static int
2004 sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
2005 			unsigned int filters_count_for_one_val,
2006 			struct rte_flow_error *error)
2007 {
2008 	unsigned int i;
2009 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2010 	static const uint16_t vals[] = {
2011 		EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
2012 	};
2013 
2014 	if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
2015 		rte_flow_error_set(error, EINVAL,
2016 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2017 			"Number of specifications is incorrect "
2018 			"while copying by Ethertype");
2019 		return -rte_errno;
2020 	}
2021 
2022 	for (i = 0; i < spec_filter->count; i++) {
2023 		spec_filter->filters[i].efs_match_flags |=
2024 			EFX_FILTER_MATCH_ETHER_TYPE;
2025 
2026 		/*
2027 		 * The check above ensures that
2028 		 * filters_count_for_one_val is not 0
2029 		 */
2030 		spec_filter->filters[i].efs_ether_type =
2031 			vals[i / filters_count_for_one_val];
2032 	}
2033 
2034 	return 0;
2035 }
2036 
2037 /**
2038  * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0
2039  * in the same specifications after copying.
2040  *
2041  * @param spec[in, out]
2042  *   SFC flow specification to update.
2043  * @param filters_count_for_one_val[in]
2044  *   How many specifications should have the same match flag, i.e. the
2045  *   number of specifications before copying.
2046  * @param error[out]
2047  *   Perform verbose error reporting if not NULL.
2048  */
2049 static int
2050 sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec,
2051 			    unsigned int filters_count_for_one_val,
2052 			    struct rte_flow_error *error)
2053 {
2054 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2055 	unsigned int i;
2056 
2057 	if (filters_count_for_one_val != spec_filter->count) {
2058 		rte_flow_error_set(error, EINVAL,
2059 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2060 			"Number of specifications is incorrect "
2061 			"while copying by outer VLAN ID");
2062 		return -rte_errno;
2063 	}
2064 
2065 	for (i = 0; i < spec_filter->count; i++) {
2066 		spec_filter->filters[i].efs_match_flags |=
2067 			EFX_FILTER_MATCH_OUTER_VID;
2068 
2069 		spec_filter->filters[i].efs_outer_vid = 0;
2070 	}
2071 
2072 	return 0;
2073 }
2074 
2075 /**
2076  * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
2077  * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same
2078  * specifications after copying.
2079  *
2080  * @param spec[in, out]
2081  *   SFC flow specification to update.
2082  * @param filters_count_for_one_val[in]
2083  *   How many specifications should have the same match flag, i.e. the
2084  *   number of specifications before copying.
2085  * @param error[out]
2086  *   Perform verbose error reporting if not NULL.
2087  */
2088 static int
2089 sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
2090 				    unsigned int filters_count_for_one_val,
2091 				    struct rte_flow_error *error)
2092 {
2093 	unsigned int i;
2094 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2095 	static const efx_filter_match_flags_t vals[] = {
2096 		EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
2097 		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
2098 	};
2099 
2100 	if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
2101 		rte_flow_error_set(error, EINVAL,
2102 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2103 			"Number of specifications is incorrect while copying "
2104 			"by inner frame unknown destination flags");
2105 		return -rte_errno;
2106 	}
2107 
2108 	for (i = 0; i < spec_filter->count; i++) {
2109 		/* The check above ensures that divisor can't be zero here */
2110 		spec_filter->filters[i].efs_match_flags |=
2111 			vals[i / filters_count_for_one_val];
2112 	}
2113 
2114 	return 0;
2115 }
2116 
2117 /**
2118  * Check that the following conditions are met:
2119  * - the specification corresponds to a filter for encapsulated traffic
2120  * - the list of supported filters has a filter
2121  *   with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
2122  *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
2123  *   be inserted.
2124  *
2125  * @param match[in]
2126  *   The match flags of filter.
2127  * @param spec[in]
2128  *   Specification to be supplemented.
2129  * @param filter[in]
2130  *   SFC filter with list of supported filters.
2131  */
2132 static boolean_t
2133 sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
2134 				      efx_filter_spec_t *spec,
2135 				      struct sfc_filter *filter)
2136 {
2137 	unsigned int i;
2138 	efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
2139 	efx_filter_match_flags_t match_mcast_dst;
2140 
2141 	if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
2142 		return B_FALSE;
2143 
2144 	match_mcast_dst =
2145 		(match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
2146 		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
2147 	for (i = 0; i < filter->supported_match_num; i++) {
2148 		if (match_mcast_dst == filter->supported_match[i])
2149 			return B_TRUE;
2150 	}
2151 
2152 	return B_FALSE;
2153 }
2154 
2155 /**
2156  * Check that the list of supported filters has a filter that differs
2157  * from @p match only in that it has no EFX_FILTER_MATCH_OUTER_VID flag;
2158  * in this case, that filter will be used, and the flag
2159  * EFX_FILTER_MATCH_OUTER_VID is not needed.
2160  *
2161  * @param match[in]
2162  *   The match flags of filter.
2163  * @param spec[in]
2164  *   Specification to be supplemented.
2165  * @param filter[in]
2166  *   SFC filter with list of supported filters.
2167  */
2168 static boolean_t
2169 sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match,
2170 			      __rte_unused efx_filter_spec_t *spec,
2171 			      struct sfc_filter *filter)
2172 {
2173 	unsigned int i;
2174 	efx_filter_match_flags_t match_without_vid =
2175 		match & ~EFX_FILTER_MATCH_OUTER_VID;
2176 
2177 	for (i = 0; i < filter->supported_match_num; i++) {
2178 		if (match_without_vid == filter->supported_match[i])
2179 			return B_FALSE;
2180 	}
2181 
2182 	return B_TRUE;
2183 }
2184 
2185 /*
2186  * Match flags that can be automatically added to filters.
2187  * Selecting the last minimum when searching for the copy flag ensures that
2188  * the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has a higher priority than
2189  * EFX_FILTER_MATCH_ETHER_TYPE, because the filter with
2190  * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of supported
2191  * filters.
2192  */
2193 static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
2194 	{
2195 		.flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
2196 		.vals_count = 2,
2197 		.set_vals = sfc_flow_set_unknown_dst_flags,
2198 		.spec_check = sfc_flow_check_unknown_dst_flags,
2199 	},
2200 	{
2201 		.flag = EFX_FILTER_MATCH_ETHER_TYPE,
2202 		.vals_count = 2,
2203 		.set_vals = sfc_flow_set_ethertypes,
2204 		.spec_check = NULL,
2205 	},
2206 	{
2207 		.flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
2208 		.vals_count = 2,
2209 		.set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
2210 		.spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
2211 	},
2212 	{
2213 		.flag = EFX_FILTER_MATCH_OUTER_VID,
2214 		.vals_count = 1,
2215 		.set_vals = sfc_flow_set_outer_vid_flag,
2216 		.spec_check = sfc_flow_check_outer_vid_flag,
2217 	},
2218 };
2219 
2220 /* Get item from array sfc_flow_copy_flags */
2221 static const struct sfc_flow_copy_flag *
2222 sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
2223 {
2224 	unsigned int i;
2225 
2226 	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2227 		if (sfc_flow_copy_flags[i].flag == flag)
2228 			return &sfc_flow_copy_flags[i];
2229 	}
2230 
2231 	return NULL;
2232 }
2233 
2234 /**
2235  * Make copies of the specifications, set match flag and values
2236  * of the field that corresponds to it.
2237  *
2238  * @param spec[in, out]
2239  *   SFC flow specification to update.
2240  * @param flag[in]
2241  *   The match flag to add.
2242  * @param error[out]
2243  *   Perform verbose error reporting if not NULL.
2244  */
2245 static int
2246 sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
2247 			     efx_filter_match_flags_t flag,
2248 			     struct rte_flow_error *error)
2249 {
2250 	unsigned int i;
2251 	unsigned int new_filters_count;
2252 	unsigned int filters_count_for_one_val;
2253 	const struct sfc_flow_copy_flag *copy_flag;
2254 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2255 	int rc;
2256 
2257 	copy_flag = sfc_flow_get_copy_flag(flag);
2258 	if (copy_flag == NULL) {
2259 		rte_flow_error_set(error, ENOTSUP,
2260 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2261 				   "Unsupported spec field for copying");
2262 		return -rte_errno;
2263 	}
2264 
2265 	new_filters_count = spec_filter->count * copy_flag->vals_count;
2266 	if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
2267 		rte_flow_error_set(error, EINVAL,
2268 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2269 			"Too much EFX specifications in the flow rule");
2270 		return -rte_errno;
2271 	}
2272 
2273 	/* Copy filters specifications */
2274 	for (i = spec_filter->count; i < new_filters_count; i++) {
2275 		spec_filter->filters[i] =
2276 			spec_filter->filters[i - spec_filter->count];
2277 	}
2278 
2279 	filters_count_for_one_val = spec_filter->count;
2280 	spec_filter->count = new_filters_count;
2281 
2282 	rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
2283 	if (rc != 0)
2284 		return rc;
2285 
2286 	return 0;
2287 }
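
/*
 * Worked example (illustrative): suppose the spec holds two filters,
 * [F0, F1], and flag == EFX_FILTER_MATCH_ETHER_TYPE (vals_count == 2).
 * The copy loop above yields [F0, F1, F0, F1], and set_vals()
 * (sfc_flow_set_ethertypes() in this case) assigns EFX_ETHER_TYPE_IPV4
 * to the first two filters and EFX_ETHER_TYPE_IPV6 to the last two, so
 * one flow rule covers both EtherType values.
 */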
2288 
2289 /**
2290  * Check that the given set of match flags missing in the original filter spec
2291  * could be covered by adding spec copies which specify the corresponding
2292  * flags and packet field values to match.
2293  *
2294  * @param miss_flags[in]
2295  *   Flags that are missing until the supported filter.
2296  * @param spec[in]
2297  *   Specification to be supplemented.
2298  * @param filter[in]
2299  *   SFC filter.
2300  *
2301  * @return
2302  *   Number of specifications after copy or 0, if the flags can not be added.
2303  */
2304 static unsigned int
2305 sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
2306 			     efx_filter_spec_t *spec,
2307 			     struct sfc_filter *filter)
2308 {
2309 	unsigned int i;
2310 	efx_filter_match_flags_t copy_flags = 0;
2311 	efx_filter_match_flags_t flag;
2312 	efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
2313 	sfc_flow_spec_check *check;
2314 	unsigned int multiplier = 1;
2315 
2316 	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2317 		flag = sfc_flow_copy_flags[i].flag;
2318 		check = sfc_flow_copy_flags[i].spec_check;
2319 		if ((flag & miss_flags) == flag) {
2320 			if (check != NULL && (!check(match, spec, filter)))
2321 				continue;
2322 
2323 			copy_flags |= flag;
2324 			multiplier *= sfc_flow_copy_flags[i].vals_count;
2325 		}
2326 	}
2327 
2328 	if (copy_flags == miss_flags)
2329 		return multiplier;
2330 
2331 	return 0;
2332 }
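
/*
 * For example (hypothetical flags): if miss_flags is
 * EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID and both
 * copy flags pass their spec_check callbacks, the returned multiplier
 * is 2 * 1 == 2: adding EtherType matching doubles the number of
 * specifications, while the outer VID flag reuses the existing ones.
 */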
2333 
2334 /**
2335  * Attempt to supplement the specification template to the minimally
2336  * supported set of match flags. To do this, it is necessary to copy
2337  * the specifications, filling them with the values of fields that
2338  * correspond to the missing flags.
2339  * The necessary and sufficient filter is built from the fewest number
2340  * of copies which could be made to cover the minimally required set
2341  * of flags.
2342  *
2343  * @param sa[in]
2344  *   SFC adapter.
2345  * @param spec[in, out]
2346  *   SFC flow specification to update.
2347  * @param error[out]
2348  *   Perform verbose error reporting if not NULL.
2349  */
2350 static int
2351 sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
2352 			       struct sfc_flow_spec *spec,
2353 			       struct rte_flow_error *error)
2354 {
2355 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2356 	struct sfc_filter *filter = &sa->filter;
2357 	efx_filter_match_flags_t miss_flags;
2358 	efx_filter_match_flags_t min_miss_flags = 0;
2359 	efx_filter_match_flags_t match;
2360 	unsigned int min_multiplier = UINT_MAX;
2361 	unsigned int multiplier;
2362 	unsigned int i;
2363 	int rc;
2364 
2365 	match = spec_filter->template.efs_match_flags;
2366 	for (i = 0; i < filter->supported_match_num; i++) {
2367 		if ((match & filter->supported_match[i]) == match) {
2368 			miss_flags = filter->supported_match[i] & (~match);
2369 			multiplier = sfc_flow_check_missing_flags(miss_flags,
2370 				&spec_filter->template, filter);
2371 			if (multiplier > 0) {
2372 				if (multiplier <= min_multiplier) {
2373 					min_multiplier = multiplier;
2374 					min_miss_flags = miss_flags;
2375 				}
2376 			}
2377 		}
2378 	}
2379 
2380 	if (min_multiplier == UINT_MAX) {
2381 		rte_flow_error_set(error, ENOTSUP,
2382 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2383 				   "The flow rule pattern is unsupported");
2384 		return -rte_errno;
2385 	}
2386 
2387 	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2388 		efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;
2389 
2390 		if ((flag & min_miss_flags) == flag) {
2391 			rc = sfc_flow_spec_add_match_flag(spec, flag, error);
2392 			if (rc != 0)
2393 				return rc;
2394 		}
2395 	}
2396 
2397 	return 0;
2398 }
2399 
2400 /**
2401  * Check that set of match flags is referred to by a filter. Filter is
2402  * described by match flags with the ability to add OUTER_VID and INNER_VID
2403  * flags.
2404  *
2405  * @param match_flags[in]
2406  *   Set of match flags.
2407  * @param flags_pattern[in]
2408  *   Pattern of filter match flags.
2409  */
2410 static boolean_t
2411 sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
2412 			    efx_filter_match_flags_t flags_pattern)
2413 {
2414 	if ((match_flags & flags_pattern) != flags_pattern)
2415 		return B_FALSE;
2416 
2417 	switch (match_flags & ~flags_pattern) {
2418 	case 0:
2419 	case EFX_FILTER_MATCH_OUTER_VID:
2420 	case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
2421 		return B_TRUE;
2422 	default:
2423 		return B_FALSE;
2424 	}
2425 }
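
/*
 * For instance, with flags_pattern == EFX_FILTER_MATCH_ETHER_TYPE,
 * match flags of ETHER_TYPE or ETHER_TYPE | OUTER_VID are accepted,
 * whereas ETHER_TYPE | LOC_MAC is not: the residue LOC_MAC is neither
 * empty nor one of the permitted VID combinations.
 */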
2426 
2427 /**
2428  * Check whether the spec maps to a hardware filter which is known to be
2429  * ineffective despite being valid.
2430  *
2431  * @param filter[in]
2432  *   SFC filter with list of supported filters.
2433  * @param spec[in]
2434  *   SFC flow specification.
2435  */
2436 static boolean_t
2437 sfc_flow_is_match_flags_exception(struct sfc_filter *filter,
2438 				  struct sfc_flow_spec *spec)
2439 {
2440 	unsigned int i;
2441 	uint16_t ether_type;
2442 	uint8_t ip_proto;
2443 	efx_filter_match_flags_t match_flags;
2444 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2445 
2446 	for (i = 0; i < spec_filter->count; i++) {
2447 		match_flags = spec_filter->filters[i].efs_match_flags;
2448 
2449 		if (sfc_flow_is_match_with_vids(match_flags,
2450 						EFX_FILTER_MATCH_ETHER_TYPE) ||
2451 		    sfc_flow_is_match_with_vids(match_flags,
2452 						EFX_FILTER_MATCH_ETHER_TYPE |
2453 						EFX_FILTER_MATCH_LOC_MAC)) {
2454 			ether_type = spec_filter->filters[i].efs_ether_type;
2455 			if (filter->supports_ip_proto_or_addr_filter &&
2456 			    (ether_type == EFX_ETHER_TYPE_IPV4 ||
2457 			     ether_type == EFX_ETHER_TYPE_IPV6))
2458 				return B_TRUE;
2459 		} else if (sfc_flow_is_match_with_vids(match_flags,
2460 				EFX_FILTER_MATCH_ETHER_TYPE |
2461 				EFX_FILTER_MATCH_IP_PROTO) ||
2462 			   sfc_flow_is_match_with_vids(match_flags,
2463 				EFX_FILTER_MATCH_ETHER_TYPE |
2464 				EFX_FILTER_MATCH_IP_PROTO |
2465 				EFX_FILTER_MATCH_LOC_MAC)) {
2466 			ip_proto = spec_filter->filters[i].efs_ip_proto;
2467 			if (filter->supports_rem_or_local_port_filter &&
2468 			    (ip_proto == EFX_IPPROTO_TCP ||
2469 			     ip_proto == EFX_IPPROTO_UDP))
2470 				return B_TRUE;
2471 		}
2472 	}
2473 
2474 	return B_FALSE;
2475 }
2476 
2477 static int
2478 sfc_flow_validate_match_flags(struct sfc_adapter *sa,
2479 			      struct rte_flow *flow,
2480 			      struct rte_flow_error *error)
2481 {
2482 	struct sfc_flow_spec *spec = &flow->spec;
2483 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2484 	efx_filter_spec_t *spec_tmpl = &spec_filter->template;
2485 	efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
2486 	int rc;
2487 
2488 	/* Initialize the first filter spec with template */
2489 	spec_filter->filters[0] = *spec_tmpl;
2490 	spec_filter->count = 1;
2491 
2492 	if (!sfc_filter_is_match_supported(sa, match_flags)) {
2493 		rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
2494 		if (rc != 0)
2495 			return rc;
2496 	}
2497 
2498 	if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) {
2499 		rte_flow_error_set(error, ENOTSUP,
2500 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2501 			"The flow rule pattern is unsupported");
2502 		return -rte_errno;
2503 	}
2504 
2505 	return 0;
2506 }
2507 
2508 static int
2509 sfc_flow_parse_rte_to_filter(struct rte_eth_dev *dev,
2510 			     const struct rte_flow_item pattern[],
2511 			     const struct rte_flow_action actions[],
2512 			     struct rte_flow *flow,
2513 			     struct rte_flow_error *error)
2514 {
2515 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2516 	struct sfc_flow_spec *spec = &flow->spec;
2517 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2518 	struct sfc_flow_parse_ctx ctx;
2519 	int rc;
2520 
2521 	ctx.type = SFC_FLOW_PARSE_CTX_FILTER;
2522 	ctx.filter = &spec_filter->template;
2523 
2524 	rc = sfc_flow_parse_pattern(sa, sfc_flow_items, RTE_DIM(sfc_flow_items),
2525 				    pattern, &ctx, error);
2526 	if (rc != 0)
2527 		goto fail_bad_value;
2528 
2529 	rc = sfc_flow_parse_actions(sa, actions, flow, error);
2530 	if (rc != 0)
2531 		goto fail_bad_value;
2532 
2533 	rc = sfc_flow_validate_match_flags(sa, flow, error);
2534 	if (rc != 0)
2535 		goto fail_bad_value;
2536 
2537 	return 0;
2538 
2539 fail_bad_value:
2540 	return rc;
2541 }
2542 
2543 static int
2544 sfc_flow_parse_rte_to_mae(struct rte_eth_dev *dev,
2545 			  const struct rte_flow_item pattern[],
2546 			  const struct rte_flow_action actions[],
2547 			  struct rte_flow *flow,
2548 			  struct rte_flow_error *error)
2549 {
2550 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2551 	struct sfc_flow_spec *spec = &flow->spec;
2552 	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
2553 	int rc;
2554 
2555 	/*
2556 	 * If the flow is meant to be a JUMP rule in tunnel offload,
2557 	 * preparse its actions and save its properties in spec_mae.
2558 	 */
2559 	rc = sfc_flow_tunnel_detect_jump_rule(sa, actions, spec_mae, error);
2560 	if (rc != 0)
2561 		goto fail;
2562 
2563 	rc = sfc_mae_rule_parse_pattern(sa, pattern, spec_mae, error);
2564 	if (rc != 0)
2565 		goto fail;
2566 
2567 	if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
2568 		/*
2569 		 * By design, this flow should be represented solely by the
2570 		 * outer rule. But the HW/FW does not yet support setting the
2571 		 * Rx mark from RECIRC_ID on outer rule lookup. Neither
2572 		 * does it support outer rule counters. As a workaround, an
2573 		 * action rule of lower priority is used to do the job.
2574 		 *
2575 		 * So don't skip sfc_mae_rule_parse_actions() below.
2576 		 */
2577 	}
2578 
2579 	rc = sfc_mae_rule_parse_actions(sa, actions, spec_mae, error);
2580 	if (rc != 0)
2581 		goto fail;
2582 
2583 	if (spec_mae->ft != NULL) {
2584 		if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP)
2585 			spec_mae->ft->jump_rule_is_set = B_TRUE;
2586 
2587 		++(spec_mae->ft->refcnt);
2588 	}
2589 
2590 	return 0;
2591 
2592 fail:
2593 	/* Reset these values to avoid confusing sfc_mae_flow_cleanup(). */
2594 	spec_mae->ft_rule_type = SFC_FT_RULE_NONE;
2595 	spec_mae->ft = NULL;
2596 
2597 	return rc;
2598 }
2599 
2600 static int
2601 sfc_flow_parse(struct rte_eth_dev *dev,
2602 	       const struct rte_flow_attr *attr,
2603 	       const struct rte_flow_item pattern[],
2604 	       const struct rte_flow_action actions[],
2605 	       struct rte_flow *flow,
2606 	       struct rte_flow_error *error)
2607 {
2608 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2609 	const struct sfc_flow_ops_by_spec *ops;
2610 	int rc;
2611 
2612 	rc = sfc_flow_parse_attr(sa, attr, flow, error);
2613 	if (rc != 0)
2614 		return rc;
2615 
2616 	ops = sfc_flow_get_ops_by_spec(flow);
2617 	if (ops == NULL || ops->parse == NULL) {
2618 		rte_flow_error_set(error, ENOTSUP,
2619 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2620 				   "No backend to handle this flow");
2621 		return -rte_errno;
2622 	}
2623 
2624 	return ops->parse(dev, pattern, actions, flow, error);
2625 }
2626 
2627 static struct rte_flow *
2628 sfc_flow_zmalloc(struct rte_flow_error *error)
2629 {
2630 	struct rte_flow *flow;
2631 
2632 	flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
2633 	if (flow == NULL) {
2634 		rte_flow_error_set(error, ENOMEM,
2635 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2636 				   "Failed to allocate memory");
2637 	}
2638 
2639 	return flow;
2640 }
2641 
2642 static void
2643 sfc_flow_free(struct sfc_adapter *sa, struct rte_flow *flow)
2644 {
2645 	const struct sfc_flow_ops_by_spec *ops;
2646 
2647 	ops = sfc_flow_get_ops_by_spec(flow);
2648 	if (ops != NULL && ops->cleanup != NULL)
2649 		ops->cleanup(sa, flow);
2650 
2651 	rte_free(flow);
2652 }
2653 
2654 static int
2655 sfc_flow_insert(struct sfc_adapter *sa, struct rte_flow *flow,
2656 		struct rte_flow_error *error)
2657 {
2658 	const struct sfc_flow_ops_by_spec *ops;
2659 	int rc;
2660 
2661 	ops = sfc_flow_get_ops_by_spec(flow);
2662 	if (ops == NULL || ops->insert == NULL) {
2663 		rte_flow_error_set(error, ENOTSUP,
2664 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2665 				   "No backend to handle this flow");
2666 		return rte_errno;
2667 	}
2668 
2669 	rc = ops->insert(sa, flow);
2670 	if (rc != 0) {
2671 		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2672 				   NULL, "Failed to insert the flow rule");
2673 	}
2674 
2675 	return rc;
2676 }
2677 
2678 static int
2679 sfc_flow_remove(struct sfc_adapter *sa, struct rte_flow *flow,
2680 		struct rte_flow_error *error)
2681 {
2682 	const struct sfc_flow_ops_by_spec *ops;
2683 	int rc;
2684 
2685 	ops = sfc_flow_get_ops_by_spec(flow);
2686 	if (ops == NULL || ops->remove == NULL) {
2687 		rte_flow_error_set(error, ENOTSUP,
2688 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2689 				   "No backend to handle this flow");
2690 		return rte_errno;
2691 	}
2692 
2693 	rc = ops->remove(sa, flow);
2694 	if (rc != 0) {
2695 		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2696 				   NULL, "Failed to remove the flow rule");
2697 	}
2698 
2699 	return rc;
2700 }
2701 
2702 static int
2703 sfc_flow_verify(struct sfc_adapter *sa, struct rte_flow *flow,
2704 		struct rte_flow_error *error)
2705 {
2706 	const struct sfc_flow_ops_by_spec *ops;
2707 	int rc = 0;
2708 
2709 	ops = sfc_flow_get_ops_by_spec(flow);
2710 	if (ops == NULL) {
2711 		rte_flow_error_set(error, ENOTSUP,
2712 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2713 				   "No backend to handle this flow");
2714 		return -rte_errno;
2715 	}
2716 
2717 	if (ops->verify != NULL) {
2718 		SFC_ASSERT(sfc_adapter_is_locked(sa));
2719 		rc = ops->verify(sa, flow);
2720 	}
2721 
2722 	if (rc != 0) {
2723 		rte_flow_error_set(error, rc,
2724 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2725 			"Failed to verify flow validity with FW");
2726 		return -rte_errno;
2727 	}
2728 
2729 	return 0;
2730 }
2731 
2732 static int
2733 sfc_flow_validate(struct rte_eth_dev *dev,
2734 		  const struct rte_flow_attr *attr,
2735 		  const struct rte_flow_item pattern[],
2736 		  const struct rte_flow_action actions[],
2737 		  struct rte_flow_error *error)
2738 {
2739 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2740 	struct rte_flow *flow;
2741 	int rc;
2742 
2743 	flow = sfc_flow_zmalloc(error);
2744 	if (flow == NULL)
2745 		return -rte_errno;
2746 
2747 	sfc_adapter_lock(sa);
2748 
2749 	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2750 	if (rc == 0)
2751 		rc = sfc_flow_verify(sa, flow, error);
2752 
2753 	sfc_flow_free(sa, flow);
2754 
2755 	sfc_adapter_unlock(sa);
2756 
2757 	return rc;
2758 }
2759 
2760 static struct rte_flow *
2761 sfc_flow_create(struct rte_eth_dev *dev,
2762 		const struct rte_flow_attr *attr,
2763 		const struct rte_flow_item pattern[],
2764 		const struct rte_flow_action actions[],
2765 		struct rte_flow_error *error)
2766 {
2767 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2768 	struct rte_flow *flow = NULL;
2769 	int rc;
2770 
2771 	flow = sfc_flow_zmalloc(error);
2772 	if (flow == NULL)
2773 		goto fail_no_mem;
2774 
2775 	sfc_adapter_lock(sa);
2776 
2777 	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2778 	if (rc != 0)
2779 		goto fail_bad_value;
2780 
2781 	TAILQ_INSERT_TAIL(&sa->flow_list, flow, entries);
2782 
2783 	if (sa->state == SFC_ETHDEV_STARTED) {
2784 		rc = sfc_flow_insert(sa, flow, error);
2785 		if (rc != 0)
2786 			goto fail_flow_insert;
2787 	}
2788 
2789 	sfc_adapter_unlock(sa);
2790 
2791 	return flow;
2792 
2793 fail_flow_insert:
2794 	TAILQ_REMOVE(&sa->flow_list, flow, entries);
2795 
2796 fail_bad_value:
2797 	sfc_flow_free(sa, flow);
2798 	sfc_adapter_unlock(sa);
2799 
2800 fail_no_mem:
2801 	return NULL;
2802 }
2803 
2804 static int
2805 sfc_flow_destroy(struct rte_eth_dev *dev,
2806 		 struct rte_flow *flow,
2807 		 struct rte_flow_error *error)
2808 {
2809 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2810 	struct rte_flow *flow_ptr;
2811 	int rc = EINVAL;
2812 
2813 	sfc_adapter_lock(sa);
2814 
2815 	TAILQ_FOREACH(flow_ptr, &sa->flow_list, entries) {
2816 		if (flow_ptr == flow)
2817 			rc = 0;
2818 	}
2819 	if (rc != 0) {
2820 		rte_flow_error_set(error, rc,
2821 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2822 				   "Failed to find flow rule to destroy");
2823 		goto fail_bad_value;
2824 	}
2825 
2826 	if (sa->state == SFC_ETHDEV_STARTED)
2827 		rc = sfc_flow_remove(sa, flow, error);
2828 
2829 	TAILQ_REMOVE(&sa->flow_list, flow, entries);
2830 	sfc_flow_free(sa, flow);
2831 
2832 fail_bad_value:
2833 	sfc_adapter_unlock(sa);
2834 
2835 	return -rc;
2836 }
2837 
2838 static int
2839 sfc_flow_flush(struct rte_eth_dev *dev,
2840 	       struct rte_flow_error *error)
2841 {
2842 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2843 	struct rte_flow *flow;
2844 	int ret = 0;
2845 
2846 	sfc_adapter_lock(sa);
2847 
2848 	while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
2849 		if (sa->state == SFC_ETHDEV_STARTED) {
2850 			int rc;
2851 
2852 			rc = sfc_flow_remove(sa, flow, error);
2853 			if (rc != 0)
2854 				ret = rc;
2855 		}
2856 
2857 		TAILQ_REMOVE(&sa->flow_list, flow, entries);
2858 		sfc_flow_free(sa, flow);
2859 	}
2860 
2861 	sfc_adapter_unlock(sa);
2862 
2863 	return -ret;
2864 }
2865 
2866 static int
2867 sfc_flow_query(struct rte_eth_dev *dev,
2868 	       struct rte_flow *flow,
2869 	       const struct rte_flow_action *action,
2870 	       void *data,
2871 	       struct rte_flow_error *error)
2872 {
2873 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2874 	const struct sfc_flow_ops_by_spec *ops;
2875 	int ret;
2876 
2877 	sfc_adapter_lock(sa);
2878 
2879 	ops = sfc_flow_get_ops_by_spec(flow);
2880 	if (ops == NULL || ops->query == NULL) {
2881 		ret = rte_flow_error_set(error, ENOTSUP,
2882 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2883 			"No backend to handle this flow");
2884 		goto fail_no_backend;
2885 	}
2886 
2887 	if (sa->state != SFC_ETHDEV_STARTED) {
2888 		ret = rte_flow_error_set(error, EINVAL,
2889 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2890 			"Can't query the flow: the adapter is not started");
2891 		goto fail_not_started;
2892 	}
2893 
2894 	ret = ops->query(dev, flow, action, data, error);
2895 	if (ret != 0)
2896 		goto fail_query;
2897 
2898 	sfc_adapter_unlock(sa);
2899 
2900 	return 0;
2901 
2902 fail_query:
2903 fail_not_started:
2904 fail_no_backend:
2905 	sfc_adapter_unlock(sa);
2906 	return ret;
2907 }
2908 
2909 static int
2910 sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
2911 		 struct rte_flow_error *error)
2912 {
2913 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2914 	int ret = 0;
2915 
2916 	sfc_adapter_lock(sa);
2917 	if (sa->state != SFC_ETHDEV_INITIALIZED) {
2918 		rte_flow_error_set(error, EBUSY,
2919 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2920 				   NULL, "please close the port first");
2921 		ret = -rte_errno;
2922 	} else {
2923 		sfc_sa2shared(sa)->isolated = (enable) ? B_TRUE : B_FALSE;
2924 	}
2925 	sfc_adapter_unlock(sa);
2926 
2927 	return ret;
2928 }
2929 
2930 static int
2931 sfc_flow_pick_transfer_proxy(struct rte_eth_dev *dev,
2932 			     uint16_t *transfer_proxy_port,
2933 			     struct rte_flow_error *error)
2934 {
2935 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2936 	int ret;
2937 
2938 	ret = sfc_mae_get_switch_domain_admin(sa->mae.switch_domain_id,
2939 					      transfer_proxy_port);
2940 	if (ret != 0) {
2941 		return rte_flow_error_set(error, ret,
2942 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2943 					  NULL, NULL);
2944 	}
2945 
2946 	return 0;
2947 }
2948 
2949 const struct rte_flow_ops sfc_flow_ops = {
2950 	.validate = sfc_flow_validate,
2951 	.create = sfc_flow_create,
2952 	.destroy = sfc_flow_destroy,
2953 	.flush = sfc_flow_flush,
2954 	.query = sfc_flow_query,
2955 	.isolate = sfc_flow_isolate,
2956 	.tunnel_decap_set = sfc_flow_tunnel_decap_set,
2957 	.tunnel_match = sfc_flow_tunnel_match,
2958 	.tunnel_action_decap_release = sfc_flow_tunnel_action_decap_release,
2959 	.tunnel_item_release = sfc_flow_tunnel_item_release,
2960 	.get_restore_info = sfc_flow_tunnel_get_restore_info,
2961 	.pick_transfer_proxy = sfc_flow_pick_transfer_proxy,
2962 };
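
/*
 * Sketch of application-side usage reaching the ops above through the
 * generic rte_flow API (illustrative; error handling trimmed, port_id
 * assumed to refer to an sfc port):
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f;
 *
 *	f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	...
 *	rte_flow_destroy(port_id, f, &err);
 *
 * rte_flow_create() dispatches to sfc_flow_create() above, which parses
 * the rule into a filter-based or MAE-based specification and inserts
 * it if the adapter is started.
 */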
2963 
2964 void
2965 sfc_flow_init(struct sfc_adapter *sa)
2966 {
2967 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2968 
2969 	TAILQ_INIT(&sa->flow_list);
2970 }
2971 
2972 void
2973 sfc_flow_fini(struct sfc_adapter *sa)
2974 {
2975 	struct rte_flow *flow;
2976 
2977 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2978 
2979 	while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
2980 		TAILQ_REMOVE(&sa->flow_list, flow, entries);
2981 		sfc_flow_free(sa, flow);
2982 	}
2983 }
2984 
2985 void
2986 sfc_flow_stop(struct sfc_adapter *sa)
2987 {
2988 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
2989 	struct sfc_rss *rss = &sas->rss;
2990 	struct rte_flow *flow;
2991 
2992 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2993 
2994 	TAILQ_FOREACH(flow, &sa->flow_list, entries)
2995 		sfc_flow_remove(sa, flow, NULL);
2996 
2997 	if (rss->dummy_rss_context != EFX_RSS_CONTEXT_DEFAULT) {
2998 		efx_rx_scale_context_free(sa->nic, rss->dummy_rss_context);
2999 		rss->dummy_rss_context = EFX_RSS_CONTEXT_DEFAULT;
3000 	}
3001 
3002 	/*
3003 	 * MAE counter service is not stopped on flow rule remove to avoid
3004 	 * extra work. Make sure that it is stopped here.
3005 	 */
3006 	sfc_mae_counter_stop(sa);
3007 }
3008 
3009 int
3010 sfc_flow_start(struct sfc_adapter *sa)
3011 {
3012 	struct rte_flow *flow;
3013 	int rc = 0;
3014 
3015 	sfc_log_init(sa, "entry");
3016 
3017 	SFC_ASSERT(sfc_adapter_is_locked(sa));
3018 
3019 	sfc_flow_tunnel_reset_hit_counters(sa);
3020 
3021 	TAILQ_FOREACH(flow, &sa->flow_list, entries) {
3022 		rc = sfc_flow_insert(sa, flow, NULL);
3023 		if (rc != 0)
3024 			goto fail_bad_flow;
3025 	}
3026 
3027 	sfc_log_init(sa, "done");
3028 
3029 fail_bad_flow:
3030 	return rc;
3031 }
3032