xref: /dpdk/drivers/net/sfc/sfc_flow.c (revision f8dbaebbf1c9efcbb2e2354b341ed62175466a57)
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2017-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_byteorder.h>
#include <rte_tailq.h>
#include <rte_common.h>
#include <ethdev_driver.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_flow_tunnel.h"
#include "sfc_log.h"
#include "sfc_dp_rx.h"
#include "sfc_mae_counter.h"
#include "sfc_switch.h"

struct sfc_flow_ops_by_spec {
	sfc_flow_parse_cb_t	*parse;
	sfc_flow_verify_cb_t	*verify;
	sfc_flow_cleanup_cb_t	*cleanup;
	sfc_flow_insert_cb_t	*insert;
	sfc_flow_remove_cb_t	*remove;
	sfc_flow_query_cb_t	*query;
};

static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter;
static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_mae;
static sfc_flow_insert_cb_t sfc_flow_filter_insert;
static sfc_flow_remove_cb_t sfc_flow_filter_remove;

static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = {
	.parse = sfc_flow_parse_rte_to_filter,
	.verify = NULL,
	.cleanup = NULL,
	.insert = sfc_flow_filter_insert,
	.remove = sfc_flow_filter_remove,
	.query = NULL,
};

static const struct sfc_flow_ops_by_spec sfc_flow_ops_mae = {
	.parse = sfc_flow_parse_rte_to_mae,
	.verify = sfc_mae_flow_verify,
	.cleanup = sfc_mae_flow_cleanup,
	.insert = sfc_mae_flow_insert,
	.remove = sfc_mae_flow_remove,
	.query = sfc_mae_flow_query,
};

static const struct sfc_flow_ops_by_spec *
sfc_flow_get_ops_by_spec(struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	const struct sfc_flow_ops_by_spec *ops = NULL;

	switch (spec->type) {
	case SFC_FLOW_SPEC_FILTER:
		ops = &sfc_flow_ops_filter;
		break;
	case SFC_FLOW_SPEC_MAE:
		ops = &sfc_flow_ops_mae;
		break;
	default:
		SFC_ASSERT(false);
		break;
	}

	return ops;
}
83 
/*
 * Currently, the filter-based (VNIC) flow API is implemented in such a
 * manner that each flow rule is converted to one or more hardware filters.
 * All elements of the flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that describes the hardware filter.
 * If some required field is left unset in the flow rule, several filter
 * copies are created to cover all possible values of that field.
 */
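
/*
 * For example (illustrative, not part of the original code): a rule that
 * matches on the IP protocol but leaves the EtherType unspecified is
 * expanded into filter copies for the possible EtherTypes, IPv4 and IPv6
 * (see sfc_flow_set_ethertypes(), declared below).
 */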

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
static sfc_flow_item_parse sfc_flow_parse_vxlan;
static sfc_flow_item_parse sfc_flow_parse_geneve;
static sfc_flow_item_parse sfc_flow_parse_nvgre;
static sfc_flow_item_parse sfc_flow_parse_pppoex;

typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
				     unsigned int filters_count_for_one_val,
				     struct rte_flow_error *error);

typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
					efx_filter_spec_t *spec,
					struct sfc_filter *filter);

struct sfc_flow_copy_flag {
	/* EFX filter specification match flag */
	efx_filter_match_flags_t flag;
	/* Number of values of corresponding field */
	unsigned int vals_count;
	/* Function to set values in specifications */
	sfc_flow_spec_set_vals *set_vals;
	/*
	 * Function to check that the specification is suitable
	 * for adding this match flag
	 */
	sfc_flow_spec_check *spec_check;
};

static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag;
static sfc_flow_spec_check sfc_flow_check_outer_vid_flag;

static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
	uint8_t sum = 0;
	unsigned int i;

	for (i = 0; i < size; i++)
		sum |= buf[i];

	return (sum == 0) ? B_TRUE : B_FALSE;
}

/*
 * Validate the item and prepare the "spec" and "mask" structures
 * for parsing.
 */
int
sfc_flow_parse_init(const struct rte_flow_item *item,
		    const void **spec_ptr,
		    const void **mask_ptr,
		    const void *supp_mask,
		    const void *def_mask,
		    unsigned int size,
		    struct rte_flow_error *error)
{
	const uint8_t *spec;
	const uint8_t *mask;
	const uint8_t *last;
	uint8_t supp;
	unsigned int i;

	if (item == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "NULL item");
		return -rte_errno;
	}

	if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Mask or last is set without spec");
		return -rte_errno;
	}

	/*
	 * If "mask" is not set, the default mask is used; but if the
	 * default mask is NULL, "mask" must be set.
	 */
	if (item->mask == NULL) {
		if (def_mask == NULL) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				"Mask should be specified");
			return -rte_errno;
		}

		mask = def_mask;
	} else {
		mask = item->mask;
	}

	spec = item->spec;
	last = item->last;

	if (spec == NULL)
		goto exit;

	/*
	 * Field values in "last" are ignored if they are either all-zero
	 * or equal to the corresponding values in "spec"; otherwise,
	 * ranging is requested, which is not supported.
	 */
	if (last != NULL &&
	    !sfc_flow_is_zero(last, size) &&
	    memcmp(last, spec, size) != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Ranging is not supported");
		return -rte_errno;
	}

	if (supp_mask == NULL) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Supported mask for item should be specified");
		return -rte_errno;
	}

	/* Check that mask does not ask for more match than supp_mask */
	for (i = 0; i < size; i++) {
		supp = ((const uint8_t *)supp_mask)[i];

		if (~supp & mask[i]) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Item's field is not supported");
			return -rte_errno;
		}
	}

exit:
	*spec_ptr = spec;
	*mask_ptr = mask;
	return 0;
}
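
/*
 * Illustrative sketch (not part of the original code) of how a protocol
 * parser below typically invokes sfc_flow_parse_init(); see
 * sfc_flow_parse_eth() for a real call site:
 *
 *	const struct rte_flow_item_eth *spec = NULL;
 *	const struct rte_flow_item_eth *mask = NULL;
 *
 *	rc = sfc_flow_parse_init(item,
 *				 (const void **)&spec,
 *				 (const void **)&mask,
 *				 supp_mask_p, def_mask_p,
 *				 sizeof(struct rte_flow_item_eth),
 *				 error);
 *	if (rc != 0)
 *		return rc;
 */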

/*
 * Protocol parsers.
 * Masking is not supported, so masks in items must be either
 * full or empty (zeroed) and may only be set for the supported
 * fields specified in supp_mask.
 */
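
/*
 * For instance (illustrative): an ETH item with EtherType mask 0xffff
 * (full) or 0x0000 (empty) is accepted, whereas a partial mask such as
 * 0x0fff is rejected by the parser with a "bad mask" error.
 */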

static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
		    __rte_unused struct sfc_flow_parse_ctx *parse_ctx,
		    __rte_unused struct rte_flow_error *error)
{
	return 0;
}

/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Outer frame specification may only comprise
 *   source/destination addresses and EtherType field.
 *   Inner frame specification may contain destination address only.
 *   Individual/group (I/G) destination address masks are supported,
 *   as well as empty and full masks.
 *   If the mask is NULL, the default mask will be used. Ranging is not
 *   supported.
 * @param[in, out] parse_ctx
 *   Parse context with the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
		   struct sfc_flow_parse_ctx *parse_ctx,
		   struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_eth *spec = NULL;
	const struct rte_flow_item_eth *mask = NULL;
	const struct rte_flow_item_eth supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.type = 0xffff,
	};
	const struct rte_flow_item_eth ifrm_supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
	};
	const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
		0x01, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	const struct rte_flow_item_eth *supp_mask_p;
	const struct rte_flow_item_eth *def_mask_p;
	uint8_t *loc_mac = NULL;
	boolean_t is_ifrm = (efx_spec->efs_encap_type !=
		EFX_TUNNEL_PROTOCOL_NONE);

	if (is_ifrm) {
		supp_mask_p = &ifrm_supp_mask;
		def_mask_p = &ifrm_supp_mask;
		loc_mac = efx_spec->efs_ifrm_loc_mac;
	} else {
		supp_mask_p = &supp_mask;
		def_mask_p = &rte_flow_item_eth_mask;
		loc_mac = efx_spec->efs_loc_mac;
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 supp_mask_p, def_mask_p,
				 sizeof(struct rte_flow_item_eth),
				 error);
	if (rc != 0)
		return rc;

	/* If "spec" is not set, could be any Ethernet */
	if (spec == NULL)
		return 0;

	if (rte_is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
		efx_spec->efs_match_flags |= is_ifrm ?
			EFX_FILTER_MATCH_IFRM_LOC_MAC :
			EFX_FILTER_MATCH_LOC_MAC;
		rte_memcpy(loc_mac, spec->dst.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (memcmp(mask->dst.addr_bytes, ig_mask,
			  EFX_MAC_ADDR_LEN) == 0) {
		if (rte_is_unicast_ether_addr(&spec->dst))
			efx_spec->efs_match_flags |= is_ifrm ?
				EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
				EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
		else
			efx_spec->efs_match_flags |= is_ifrm ?
				EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
				EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
	} else if (!rte_is_zero_ether_addr(&mask->dst)) {
		goto fail_bad_mask;
	}

	/*
	 * ifrm_supp_mask ensures that the source address and EtherType
	 * masks are zero for the inner frame, so these fields are only
	 * filled in for the outer frame.
	 */
	if (rte_is_same_ether_addr(&mask->src, &supp_mask.src)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
		rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (!rte_is_zero_ether_addr(&mask->src)) {
		goto fail_bad_mask;
	}

	/*
	 * Ether type is in big-endian byte order in the item and
	 * in little-endian in efx_spec, so byte swap is used.
	 */
	if (mask->type == supp_mask.type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->type);
	} else if (mask->type != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the ETH pattern item");
	return -rte_errno;
}

/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the VID field is supported.
 *   The mask cannot be NULL. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context with the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
		    struct sfc_flow_parse_ctx *parse_ctx,
		    struct rte_flow_error *error)
{
	int rc;
	uint16_t vid;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_vlan *spec = NULL;
	const struct rte_flow_item_vlan *mask = NULL;
	const struct rte_flow_item_vlan supp_mask = {
		.tci = rte_cpu_to_be_16(RTE_ETH_VLAN_ID_MAX),
		.inner_type = RTE_BE16(0xffff),
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 NULL,
				 sizeof(struct rte_flow_item_vlan),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * VID is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used.
	 * If two VLAN items are included, the first matches
	 * the outer tag and the next matches the inner tag.
	 */
	if (mask->tci == supp_mask.tci) {
		/* Apply mask to keep VID only */
		vid = rte_bswap16(spec->tci & mask->tci);

		if (!(efx_spec->efs_match_flags &
		      EFX_FILTER_MATCH_OUTER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
			efx_spec->efs_outer_vid = vid;
		} else if (!(efx_spec->efs_match_flags &
			     EFX_FILTER_MATCH_INNER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
			efx_spec->efs_inner_vid = vid;
		} else {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "More than two VLAN items");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN ID in TCI match is required");
		return -rte_errno;
	}

	if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN TPID matching is not supported");
		return -rte_errno;
	}
	if (mask->inner_type == supp_mask.inner_type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->inner_type);
	} else if (mask->inner_type) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Bad mask for VLAN inner_type");
		return -rte_errno;
	}

	return 0;
}

/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, the default
 *   mask will be used. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context with the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
		    struct sfc_flow_parse_ctx *parse_ctx,
		    struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_ipv4 *spec = NULL;
	const struct rte_flow_item_ipv4 *mask = NULL;
	const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
	const struct rte_flow_item_ipv4 supp_mask = {
		.hdr = {
			.src_addr = 0xffffffff,
			.dst_addr = 0xffffffff,
			.next_proto_id = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv4_mask,
				 sizeof(struct rte_flow_item_ipv4),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv4 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv4;
	} else if (efx_spec->efs_ether_type != ether_type_ipv4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV4 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv4 addresses are in big-endian byte order in item and in
	 * efx_spec
	 */
	if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
		efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
	} else if (mask->hdr.src_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
		efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
	} else if (mask->hdr.dst_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
	} else if (mask->hdr.next_proto_id != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV4 pattern item");
	return -rte_errno;
}

/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   next header fields are supported. If the mask is NULL, the default
 *   mask will be used. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context with the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
		    struct sfc_flow_parse_ctx *parse_ctx,
		    struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_ipv6 *spec = NULL;
	const struct rte_flow_item_ipv6 *mask = NULL;
	const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
	const struct rte_flow_item_ipv6 supp_mask = {
		.hdr = {
			.src_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.dst_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.proto = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv6_mask,
				 sizeof(struct rte_flow_item_ipv6),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv6 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv6;
	} else if (efx_spec->efs_ether_type != ether_type_ipv6) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV6 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv6 addresses are in big-endian byte order in item and in
	 * efx_spec
	 */
	if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
		   sizeof(mask->hdr.src_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
				 sizeof(spec->hdr.src_addr));
		rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
			   sizeof(efx_spec->efs_rem_host));
	} else if (!sfc_flow_is_zero(mask->hdr.src_addr,
				     sizeof(mask->hdr.src_addr))) {
		goto fail_bad_mask;
	}

	if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
		   sizeof(mask->hdr.dst_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
				 sizeof(spec->hdr.dst_addr));
		rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
			   sizeof(efx_spec->efs_loc_host));
	} else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
				     sizeof(mask->hdr.dst_addr))) {
		goto fail_bad_mask;
	}

	if (mask->hdr.proto == supp_mask.hdr.proto) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.proto;
	} else if (mask->hdr.proto != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV6 pattern item");
	return -rte_errno;
}

/**
 * Convert TCP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination port fields
 *   are supported. If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context with the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_tcp(const struct rte_flow_item *item,
		   struct sfc_flow_parse_ctx *parse_ctx,
		   struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_tcp *spec = NULL;
	const struct rte_flow_item_tcp *mask = NULL;
	const struct rte_flow_item_tcp supp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_tcp_mask,
				 sizeof(struct rte_flow_item_tcp),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by TCP source and destination ports requires
	 * the appropriate IP_PROTO in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"IP proto in pattern with TCP item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * Source and destination ports are in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
	} else if (mask->hdr.src_port != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
	} else if (mask->hdr.dst_port != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the TCP pattern item");
	return -rte_errno;
}

/**
 * Convert UDP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination port fields
 *   are supported. If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context with the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_udp(const struct rte_flow_item *item,
		   struct sfc_flow_parse_ctx *parse_ctx,
		   struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_udp *spec = NULL;
	const struct rte_flow_item_udp *mask = NULL;
	const struct rte_flow_item_udp supp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_udp_mask,
				 sizeof(struct rte_flow_item_udp),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by UDP source and destination ports requires
	 * the appropriate IP_PROTO in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"IP proto in pattern with UDP item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * Source and destination ports are in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
	} else if (mask->hdr.src_port != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
	} else if (mask->hdr.dst_port != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the UDP pattern item");
	return -rte_errno;
}

/*
 * Filters for encapsulated packets match based on the EtherType and IP
 * protocol in the outer frame.
 */
static int
sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
					efx_filter_spec_t *efx_spec,
					uint8_t ip_proto,
					struct rte_flow_error *error)
{
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = ip_proto;
	} else if (efx_spec->efs_ip_proto != ip_proto) {
		switch (ip_proto) {
		case EFX_IPPROTO_UDP:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Outer IP header protocol must be UDP "
				"in VxLAN/GENEVE pattern");
			return -rte_errno;

		case EFX_IPPROTO_GRE:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Outer IP header protocol must be GRE "
				"in NVGRE pattern");
			return -rte_errno;

		default:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Only VxLAN/GENEVE/NVGRE tunneling patterns "
				"are supported");
			return -rte_errno;
		}
	}

	if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
	    efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
	    efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Outer frame EtherType in pattern with tunneling "
			"must be IPv4 or IPv6");
		return -rte_errno;
	}

	return 0;
}
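
/*
 * For example (illustrative): in the pattern ETH / IPV4 / UDP / VXLAN,
 * the UDP item has already set IP_PROTO, so the VXLAN item only checks
 * it; in the pattern VXLAN / ETH (outer layers omitted), the VXLAN item
 * sets IP_PROTO to UDP itself.
 */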

static int
sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
				  const uint8_t *vni_or_vsid_val,
				  const uint8_t *vni_or_vsid_mask,
				  const struct rte_flow_item *item,
				  struct rte_flow_error *error)
{
	const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
		0xff, 0xff, 0xff
	};

	if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
		   EFX_VNI_OR_VSID_LEN) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
		rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
			   EFX_VNI_OR_VSID_LEN);
	} else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Unsupported VNI/VSID mask");
		return -rte_errno;
	}

	return 0;
}

/**
 * Convert VXLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the VXLAN network identifier field is
 *   supported. If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context with the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vxlan(const struct rte_flow_item *item,
		     struct sfc_flow_parse_ctx *parse_ctx,
		     struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_vxlan *spec = NULL;
	const struct rte_flow_item_vxlan *mask = NULL;
	const struct rte_flow_item_vxlan supp_mask = {
		.vni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_vxlan_mask,
				 sizeof(struct rte_flow_item_vxlan),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_UDP, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
					       mask->vni, item, error);

	return rc;
}

/**
 * Convert GENEVE item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the Virtual Network Identifier and protocol
 *   type fields are supported. The protocol type may only be Ethernet
 *   (0x6558). If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context with the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_geneve(const struct rte_flow_item *item,
		      struct sfc_flow_parse_ctx *parse_ctx,
		      struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_geneve *spec = NULL;
	const struct rte_flow_item_geneve *mask = NULL;
	const struct rte_flow_item_geneve supp_mask = {
		.protocol = RTE_BE16(0xffff),
		.vni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_geneve_mask,
				 sizeof(struct rte_flow_item_geneve),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_UDP, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	if (mask->protocol == supp_mask.protocol) {
		if (spec->protocol != rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"GENEVE encap. protocol must be Ethernet "
				"(0x6558) in the GENEVE pattern item");
			return -rte_errno;
		}
	} else if (mask->protocol != 0) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Unsupported mask for GENEVE encap. protocol");
		return -rte_errno;
	}

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
					       mask->vni, item, error);

	return rc;
}

/**
 * Convert NVGRE item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the virtual subnet ID field is supported.
 *   If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context with the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_nvgre(const struct rte_flow_item *item,
		     struct sfc_flow_parse_ctx *parse_ctx,
		     struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_nvgre *spec = NULL;
	const struct rte_flow_item_nvgre *mask = NULL;
	const struct rte_flow_item_nvgre supp_mask = {
		.tni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_nvgre_mask,
				 sizeof(struct rte_flow_item_nvgre),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_GRE, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
					       mask->tni, item, error);

	return rc;
}

/**
 * Convert PPPoEx item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification.
 *   Matching on PPPoEx fields is not supported.
 *   This item can only be used to set or validate the EtherType filter.
 *   Only zero masks are allowed.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context with the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_pppoex(const struct rte_flow_item *item,
		      struct sfc_flow_parse_ctx *parse_ctx,
		      struct rte_flow_error *error)
{
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_pppoe *spec = NULL;
	const struct rte_flow_item_pppoe *mask = NULL;
	const struct rte_flow_item_pppoe supp_mask = {};
	const struct rte_flow_item_pppoe def_mask = {};
	uint16_t ether_type;
	int rc;

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &def_mask,
				 sizeof(struct rte_flow_item_pppoe),
				 error);
	if (rc != 0)
		return rc;

	if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED)
		ether_type = RTE_ETHER_TYPE_PPPOE_DISCOVERY;
	else
		ether_type = RTE_ETHER_TYPE_PPPOE_SESSION;

	if ((efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) != 0) {
		if (efx_spec->efs_ether_type != ether_type) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Invalid EtherType for a PPPoE flow item");
			return -rte_errno;
		}
	} else {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type;
	}

	return 0;
}

static const struct sfc_flow_item sfc_flow_items[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_VOID,
		.name = "VOID",
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_void,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.name = "ETH",
		.prev_layer = SFC_FLOW_ITEM_START_LAYER,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_eth,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.name = "VLAN",
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_vlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_PPPOED,
		.name = "PPPOED",
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_pppoex,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_PPPOES,
		.name = "PPPOES",
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_pppoex,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.name = "IPV4",
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_ipv4,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.name = "IPV6",
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_ipv6,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.name = "TCP",
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_tcp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.name = "UDP",
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_udp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
		.name = "VXLAN",
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_vxlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_GENEVE,
		.name = "GENEVE",
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_geneve,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_NVGRE,
		.name = "NVGRE",
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_nvgre,
	},
};
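
/*
 * Example of a pattern accepted by the layer checks below (illustrative):
 * ETH / IPV4 / UDP / VXLAN / ETH / END. A tunnel item resets the layer to
 * the start layer, so only VOID and inner-frame ETH items may follow it.
 */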

/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(struct sfc_adapter *sa,
		    const struct rte_flow_attr *attr,
		    struct rte_flow *flow,
		    struct rte_flow_error *error)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
	struct sfc_mae *mae = &sa->mae;

	if (attr == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "NULL attribute");
		return -rte_errno;
	}
	if (attr->group != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
				   "Groups are not supported");
		return -rte_errno;
	}
	if (attr->egress != 0 && attr->transfer == 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
				   "Egress is not supported");
		return -rte_errno;
	}
	if (attr->ingress == 0 && attr->transfer == 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
				   "Ingress is compulsory");
		return -rte_errno;
	}
	if (attr->transfer == 0) {
		if (attr->priority != 0) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					   attr, "Priorities are unsupported");
			return -rte_errno;
		}
		spec->type = SFC_FLOW_SPEC_FILTER;
		spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX;
		spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
		spec_filter->template.efs_priority = EFX_FILTER_PRI_MANUAL;
	} else {
		if (mae->status != SFC_MAE_STATUS_ADMIN) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					   attr, "Transfer is not supported");
			return -rte_errno;
		}
		if (attr->priority > mae->nb_action_rule_prios_max) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					   attr, "Unsupported priority level");
			return -rte_errno;
		}
		spec->type = SFC_FLOW_SPEC_MAE;
		spec_mae->priority = attr->priority;
		spec_mae->match_spec = NULL;
		spec_mae->action_set = NULL;
		spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
	}

	return 0;
}
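
/*
 * E.g. (illustrative): attr { .ingress = 1 } selects the VNIC filter path
 * (SFC_FLOW_SPEC_FILTER) above, whereas attr { .transfer = 1 } selects the
 * MAE path (SFC_FLOW_SPEC_MAE), provided that the adapter has MAE admin
 * privilege.
 */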

/* Get item from array sfc_flow_items */
static const struct sfc_flow_item *
sfc_flow_get_item(const struct sfc_flow_item *items,
		  unsigned int nb_items,
		  enum rte_flow_item_type type)
{
	unsigned int i;

	for (i = 0; i < nb_items; i++)
		if (items[i].type == type)
			return &items[i];

	return NULL;
}

int
sfc_flow_parse_pattern(struct sfc_adapter *sa,
		       const struct sfc_flow_item *flow_items,
		       unsigned int nb_flow_items,
		       const struct rte_flow_item pattern[],
		       struct sfc_flow_parse_ctx *parse_ctx,
		       struct rte_flow_error *error)
{
	int rc;
	unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
	boolean_t is_ifrm = B_FALSE;
	const struct sfc_flow_item *item;

	if (pattern == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
				   "NULL pattern");
		return -rte_errno;
	}

	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
		item = sfc_flow_get_item(flow_items, nb_flow_items,
					 pattern->type);
		if (item == NULL) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unsupported pattern item");
			return -rte_errno;
		}

		/*
		 * Omitting one or several protocol layers at the beginning
		 * of the pattern is supported.
		 */
		if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    item->prev_layer != prev_layer) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unexpected sequence of pattern items");
			return -rte_errno;
		}

		/*
		 * Allow only VOID and ETH pattern items in the inner frame.
		 * Also check that there is only one tunneling protocol.
		 */
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
		case RTE_FLOW_ITEM_TYPE_ETH:
			break;

		case RTE_FLOW_ITEM_TYPE_VXLAN:
		case RTE_FLOW_ITEM_TYPE_GENEVE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			if (is_ifrm) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					pattern,
					"More than one tunneling protocol");
				return -rte_errno;
			}
			is_ifrm = B_TRUE;
			break;

		default:
			if (parse_ctx->type == SFC_FLOW_PARSE_CTX_FILTER &&
			    is_ifrm) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					pattern,
					"There is an unsupported pattern item "
					"in the inner frame");
				return -rte_errno;
			}
			break;
		}

		if (parse_ctx->type != item->ctx_type) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					"Parse context type mismatch");
			return -rte_errno;
		}

		rc = item->parse(pattern, parse_ctx, error);
		if (rc != 0) {
			sfc_err(sa, "failed to parse item %s: %s",
				item->name, strerror(-rc));
			return rc;
		}

		if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
			prev_layer = item->layer;
	}

	return 0;
}

static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
		     const struct rte_flow_action_queue *queue,
		     struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	struct sfc_rxq *rxq;
	struct sfc_rxq_info *rxq_info;

	if (queue->index >= sfc_sa2shared(sa)->ethdev_rxq_count)
		return -EINVAL;

	rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, queue->index);
	spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index;

	rxq_info = &sfc_sa2shared(sa)->rxq_info[queue->index];
	spec_filter->rss_hash_required = !!(rxq_info->rxq_flags &
					    SFC_RXQ_FLAG_RSS_HASH);

	return 0;
}

static int
sfc_flow_parse_rss(struct sfc_adapter *sa,
		   const struct rte_flow_action_rss *action_rss,
		   struct rte_flow *flow)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_rss *rss = &sas->rss;
	sfc_ethdev_qid_t ethdev_qid;
	struct sfc_rxq *rxq;
	unsigned int rxq_hw_index_min;
	unsigned int rxq_hw_index_max;
	efx_rx_hash_type_t efx_hash_types;
	const uint8_t *rss_key;
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	struct sfc_flow_rss *sfc_rss_conf = &spec_filter->rss_conf;
	unsigned int i;

	if (action_rss->queue_num == 0)
		return -EINVAL;

	ethdev_qid = sfc_sa2shared(sa)->ethdev_rxq_count - 1;
	rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);
	rxq_hw_index_min = rxq->hw_index;
	rxq_hw_index_max = 0;

	for (i = 0; i < action_rss->queue_num; ++i) {
		ethdev_qid = action_rss->queue[i];

		if ((unsigned int)ethdev_qid >=
		    sfc_sa2shared(sa)->ethdev_rxq_count)
			return -EINVAL;

		rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);

		if (rxq->hw_index < rxq_hw_index_min)
			rxq_hw_index_min = rxq->hw_index;

		if (rxq->hw_index > rxq_hw_index_max)
			rxq_hw_index_max = rxq->hw_index;
	}

	switch (action_rss->func) {
	case RTE_ETH_HASH_FUNCTION_DEFAULT:
	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
		break;
	default:
		return -EINVAL;
	}

	if (action_rss->level)
		return -EINVAL;

	/*
	 * An RSS action with only one queue and no specific settings
	 * for hash types and key does not require a dedicated RSS context
	 * and may be simplified to a single queue action.
	 */
	if (action_rss->queue_num == 1 && action_rss->types == 0 &&
	    action_rss->key_len == 0) {
		spec_filter->template.efs_dmaq_id = rxq_hw_index_min;
		return 0;
	}

	if (action_rss->types) {
		int rc;

		rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types,
					  &efx_hash_types);
		if (rc != 0)
			return -rc;
	} else {
		unsigned int i;

		efx_hash_types = 0;
		for (i = 0; i < rss->hf_map_nb_entries; ++i)
			efx_hash_types |= rss->hf_map[i].efx;
	}

	if (action_rss->key_len) {
		if (action_rss->key_len != sizeof(rss->key))
			return -EINVAL;

		rss_key = action_rss->key;
	} else {
		rss_key = rss->key;
	}

	spec_filter->rss = B_TRUE;

	sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
	sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
	sfc_rss_conf->rss_hash_types = efx_hash_types;
	rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key));

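	/*
	 * Fill the indirection table by cycling through the queues in a
	 * round-robin manner; e.g. (illustrative), two queues mapping to
	 * hw indices 3 and 5 with rxq_hw_index_min == 3 produce the
	 * entries 0, 2, 0, 2, ...
	 */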
	for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
		unsigned int nb_queues = action_rss->queue_num;
		struct sfc_rxq *rxq;

		ethdev_qid = action_rss->queue[i % nb_queues];
		rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);
		sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
	}

	return 0;
}

static int
sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
		    unsigned int filters_count)
{
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < filters_count; i++) {
		int rc;

		rc = efx_filter_remove(sa->nic, &spec_filter->filters[i]);
		if (ret == 0 && rc != 0) {
			sfc_err(sa, "failed to remove filter specification "
				"(rc = %d)", rc);
			ret = rc;
		}
	}

	return ret;
}

static int
sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	unsigned int i;
	int rc = 0;

	for (i = 0; i < spec_filter->count; i++) {
		rc = efx_filter_insert(sa->nic, &spec_filter->filters[i]);
		if (rc != 0) {
			sfc_flow_spec_flush(sa, spec, i);
			break;
		}
	}

	return rc;
}

static int
sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;

	return sfc_flow_spec_flush(sa, spec, spec_filter->count);
}

static int
sfc_flow_filter_insert(struct sfc_adapter *sa,
		       struct rte_flow *flow)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_rss *rss = &sas->rss;
	struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
	struct sfc_flow_rss *flow_rss = &spec_filter->rss_conf;
	uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
	boolean_t create_context;
	unsigned int i;
	int rc = 0;

	create_context = spec_filter->rss || (spec_filter->rss_hash_required &&
			rss->dummy_rss_context == EFX_RSS_CONTEXT_DEFAULT);

	if (create_context) {
		unsigned int rss_spread;
		unsigned int rss_hash_types;
		uint8_t *rss_key;

		if (spec_filter->rss) {
			rss_spread = MIN(flow_rss->rxq_hw_index_max -
					flow_rss->rxq_hw_index_min + 1,
					EFX_MAXRSS);
			rss_hash_types = flow_rss->rss_hash_types;
			rss_key = flow_rss->rss_key;
		} else {
			/*
			 * Initialize dummy RSS context parameters to have
			 * valid RSS hash. Use default RSS hash function and
			 * key.
			 */
			rss_spread = 1;
			rss_hash_types = rss->hash_types;
			rss_key = rss->key;
		}

		rc = efx_rx_scale_context_alloc(sa->nic,
						EFX_RX_SCALE_EXCLUSIVE,
						rss_spread,
						&efs_rss_context);
		if (rc != 0)
			goto fail_scale_context_alloc;

		rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
					   rss->hash_alg,
					   rss_hash_types, B_TRUE);
		if (rc != 0)
			goto fail_scale_mode_set;

		rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
					  rss_key, sizeof(rss->key));
		if (rc != 0)
			goto fail_scale_key_set;
	} else {
		efs_rss_context = rss->dummy_rss_context;
	}

	if (spec_filter->rss || spec_filter->rss_hash_required) {
		/*
		 * At this point, fully elaborated filter specifications
		 * have been produced from the template. To make sure that
		 * RSS behaviour is consistent between them, set the same
		 * RSS context value everywhere.
		 */
		for (i = 0; i < spec_filter->count; i++) {
			efx_filter_spec_t *spec = &spec_filter->filters[i];

			spec->efs_rss_context = efs_rss_context;
			spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
			if (spec_filter->rss)
				spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
		}
	}

	rc = sfc_flow_spec_insert(sa, &flow->spec);
	if (rc != 0)
		goto fail_filter_insert;

	if (create_context) {
		unsigned int dummy_tbl[RTE_DIM(flow_rss->rss_tbl)] = {0};
		unsigned int *tbl;

		tbl = spec_filter->rss ? flow_rss->rss_tbl : dummy_tbl;

		/*
		 * The scale table is set after filter insertion because
		 * the table entries are relative to the base RxQ ID,
		 * which is submitted to the HW by inserting a filter.
		 * By the time of this request, the HW therefore has all
		 * the information needed to verify the table entries,
		 * and the operation will succeed.
		 */
		rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
					  tbl, RTE_DIM(flow_rss->rss_tbl));
		if (rc != 0)
			goto fail_scale_tbl_set;

		/* Remember created dummy RSS context */
		if (!spec_filter->rss)
			rss->dummy_rss_context = efs_rss_context;
	}

	return 0;

fail_scale_tbl_set:
	sfc_flow_spec_remove(sa, &flow->spec);

fail_filter_insert:
fail_scale_key_set:
fail_scale_mode_set:
	if (create_context)
		efx_rx_scale_context_free(sa->nic, efs_rss_context);

fail_scale_context_alloc:
	return rc;
}

static int
sfc_flow_filter_remove(struct sfc_adapter *sa,
		       struct rte_flow *flow)
{
	struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
	int rc = 0;

	rc = sfc_flow_spec_remove(sa, &flow->spec);
	if (rc != 0)
		return rc;

	if (spec_filter->rss) {
		/*
		 * All specifications for a given flow rule have the same
		 * RSS context, so the RSS context value is taken from the
		 * first filter specification.
		 */
		efx_filter_spec_t *spec = &spec_filter->filters[0];

		rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
	}

	return rc;
}

static int
sfc_flow_parse_mark(struct sfc_adapter *sa,
		    const struct rte_flow_action_mark *mark,
		    struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	uint32_t mark_max;

	mark_max = encp->enc_filter_action_mark_max;
	if (sfc_flow_tunnel_is_active(sa))
		mark_max = RTE_MIN(mark_max, SFC_FT_USER_MARK_MASK);

	if (mark == NULL || mark->id > mark_max)
		return EINVAL;

	spec_filter->template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
	spec_filter->template.efs_mark = mark->id;

	return 0;
}

static int
sfc_flow_parse_actions(struct sfc_adapter *sa,
		       const struct rte_flow_action actions[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	const unsigned int dp_rx_features = sa->priv.dp_rx->features;
	const uint64_t rx_metadata = sa->negotiated_rx_metadata;
	uint32_t actions_set = 0;
	const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
					   (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
					   (1UL << RTE_FLOW_ACTION_TYPE_DROP);
	const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) |
					   (1UL << RTE_FLOW_ACTION_TYPE_FLAG);

	if (actions == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				   "NULL actions");
		return -rte_errno;
	}

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID,
					       actions_set);
			break;

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE,
					       actions_set);
			if ((actions_set & fate_actions_mask) != 0)
				goto fail_fate_actions;

			rc = sfc_flow_parse_queue(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad QUEUE action");
				return -rte_errno;
			}
			break;

		case RTE_FLOW_ACTION_TYPE_RSS:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS,
					       actions_set);
			if ((actions_set & fate_actions_mask) != 0)
				goto fail_fate_actions;

			rc = sfc_flow_parse_rss(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, -rc,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad RSS action");
				return -rte_errno;
			}
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
					       actions_set);
			if ((actions_set & fate_actions_mask) != 0)
				goto fail_fate_actions;

			spec_filter->template.efs_dmaq_id =
				EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
			break;

		case RTE_FLOW_ACTION_TYPE_FLAG:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
					       actions_set);
			if ((actions_set & mark_actions_mask) != 0)
				goto fail_actions_overlap;

			if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) {
				rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"FLAG action is not supported on the current Rx datapath");
				return -rte_errno;
			} else if ((rx_metadata &
				    RTE_ETH_RX_METADATA_USER_FLAG) == 0) {
				rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"flag delivery has not been negotiated");
				return -rte_errno;
			}

			spec_filter->template.efs_flags |=
				EFX_FILTER_FLAG_ACTION_FLAG;
			break;

		case RTE_FLOW_ACTION_TYPE_MARK:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
					       actions_set);
			if ((actions_set & mark_actions_mask) != 0)
				goto fail_actions_overlap;

			if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) {
				rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"MARK action is not supported on the current Rx datapath");
				return -rte_errno;
			} else if ((rx_metadata &
				    RTE_ETH_RX_METADATA_USER_MARK) == 0) {
				rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"mark delivery has not been negotiated");
				return -rte_errno;
			}

			rc = sfc_flow_parse_mark(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, rc,
1877 					RTE_FLOW_ERROR_TYPE_ACTION, actions,
1878 					"Bad MARK action");
1879 				return -rte_errno;
1880 			}
1881 			break;
1882 
1883 		default:
1884 			rte_flow_error_set(error, ENOTSUP,
1885 					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
1886 					   "Action is not supported");
1887 			return -rte_errno;
1888 		}
1889 
1890 		actions_set |= (1UL << actions->type);
1891 	}
1892 
1893 	/* When fate is unknown, drop traffic. */
1894 	if ((actions_set & fate_actions_mask) == 0) {
1895 		spec_filter->template.efs_dmaq_id =
1896 			EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1897 	}
1898 
1899 	return 0;
1900 
1901 fail_fate_actions:
1902 	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1903 			   "Cannot combine several fate-deciding actions, "
1904 			   "choose between QUEUE, RSS or DROP");
1905 	return -rte_errno;
1906 
1907 fail_actions_overlap:
1908 	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1909 			   "Overlapping actions are not supported");
1910 	return -rte_errno;
1911 }
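
/*
 * Illustrative sketch (assumption: Rx queue 0 exists): an action list that
 * the parser above accepts. Exactly one fate-deciding action (QUEUE here)
 * may be present; adding RSS or DROP as well would hit fail_fate_actions.
 */
#if 0	/* example only */
static const struct rte_flow_action_queue example_queue = {
	.index = 0,
};
static const struct rte_flow_action example_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_FLAG, .conf = NULL },
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &example_queue },
	{ .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL },
};
#endif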
1912 
1913 /**
1914  * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
1915  * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags, one value per
1916  * group of specifications, after copying.
1917  *
1918  * @param spec[in, out]
1919  *   SFC flow specification to update.
1920  * @param filters_count_for_one_val[in]
1921  *   How many specifications should share the same match flag, i.e. the
1922  *   number of specifications before copying.
1923  * @param error[out]
1924  *   Perform verbose error reporting if not NULL.
1925  */
1926 static int
1927 sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
1928 			       unsigned int filters_count_for_one_val,
1929 			       struct rte_flow_error *error)
1930 {
1931 	unsigned int i;
1932 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1933 	static const efx_filter_match_flags_t vals[] = {
1934 		EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
1935 		EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
1936 	};
1937 
1938 	if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1939 		rte_flow_error_set(error, EINVAL,
1940 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1941 			"Number of specifications is incorrect while copying "
1942 			"by unknown destination flags");
1943 		return -rte_errno;
1944 	}
1945 
1946 	for (i = 0; i < spec_filter->count; i++) {
1947 		/* The check above ensures that divisor can't be zero here */
1948 		spec_filter->filters[i].efs_match_flags |=
1949 			vals[i / filters_count_for_one_val];
1950 	}
1951 
1952 	return 0;
1953 }
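
/*
 * Worked example for the indexing above: with spec_filter->count == 4 and
 * filters_count_for_one_val == 2, vals[i / 2] assigns
 *   filters[0], filters[1] -> EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
 *   filters[2], filters[3] -> EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
 */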
1954 
1955 /**
1956  * Check that the following condition is met:
1957  * - the list of supported filters has a filter
1958  *   with EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of
1959  *   EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also
1960  *   be inserted.
1961  *
1962  * @param match[in]
1963  *   The match flags of filter.
1964  * @param spec[in]
1965  *   Specification to be supplemented.
1966  * @param filter[in]
1967  *   SFC filter with list of supported filters.
1968  */
1969 static boolean_t
1970 sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
1971 				 __rte_unused efx_filter_spec_t *spec,
1972 				 struct sfc_filter *filter)
1973 {
1974 	unsigned int i;
1975 	efx_filter_match_flags_t match_mcast_dst;
1976 
1977 	match_mcast_dst =
1978 		(match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
1979 		EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
1980 	for (i = 0; i < filter->supported_match_num; i++) {
1981 		if (match_mcast_dst == filter->supported_match[i])
1982 			return B_TRUE;
1983 	}
1984 
1985 	return B_FALSE;
1986 }
1987 
1988 /**
1989  * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and write the
1990  * EFX_ETHER_TYPE_IPV4 and EFX_ETHER_TYPE_IPV6 values to the corresponding
1991  * field, one value per group of specifications, after copying.
1992  *
1993  * @param spec[in, out]
1994  *   SFC flow specification to update.
1995  * @param filters_count_for_one_val[in]
1996  *   How many specifications should share the same EtherType value, i.e. the
1997  *   number of specifications before copying.
1998  * @param error[out]
1999  *   Perform verbose error reporting if not NULL.
2000  */
2001 static int
2002 sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
2003 			unsigned int filters_count_for_one_val,
2004 			struct rte_flow_error *error)
2005 {
2006 	unsigned int i;
2007 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2008 	static const uint16_t vals[] = {
2009 		EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
2010 	};
2011 
2012 	if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
2013 		rte_flow_error_set(error, EINVAL,
2014 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2015 			"Number of specifications is incorrect "
2016 			"while copying by Ethertype");
2017 		return -rte_errno;
2018 	}
2019 
2020 	for (i = 0; i < spec_filter->count; i++) {
2021 		spec_filter->filters[i].efs_match_flags |=
2022 			EFX_FILTER_MATCH_ETHER_TYPE;
2023 
2024 		/*
2025 		 * The check above ensures that
2026 		 * filters_count_for_one_val is not 0
2027 		 */
2028 		spec_filter->filters[i].efs_ether_type =
2029 			vals[i / filters_count_for_one_val];
2030 	}
2031 
2032 	return 0;
2033 }
2034 
2035 /**
2036  * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0
2037  * in all specifications after copying.
2038  *
2039  * @param spec[in, out]
2040  *   SFC flow specification to update.
2041  * @param filters_count_for_one_val[in]
2042  *   How many specifications should share the same match flag, i.e. the
2043  *   number of specifications before copying.
2044  * @param error[out]
2045  *   Perform verbose error reporting if not NULL.
2046  */
2047 static int
2048 sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec,
2049 			    unsigned int filters_count_for_one_val,
2050 			    struct rte_flow_error *error)
2051 {
2052 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2053 	unsigned int i;
2054 
2055 	if (filters_count_for_one_val != spec_filter->count) {
2056 		rte_flow_error_set(error, EINVAL,
2057 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2058 			"Number of specifications is incorrect "
2059 			"while copying by outer VLAN ID");
2060 		return -rte_errno;
2061 	}
2062 
2063 	for (i = 0; i < spec_filter->count; i++) {
2064 		spec_filter->filters[i].efs_match_flags |=
2065 			EFX_FILTER_MATCH_OUTER_VID;
2066 
2067 		spec_filter->filters[i].efs_outer_vid = 0;
2068 	}
2069 
2070 	return 0;
2071 }
2072 
2073 /**
2074  * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
2075  * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags, one value per
2076  * group of specifications, after copying.
2077  *
2078  * @param spec[in, out]
2079  *   SFC flow specification to update.
2080  * @param filters_count_for_one_val[in]
2081  *   How many specifications should share the same match flag, i.e. the
2082  *   number of specifications before copying.
2083  * @param error[out]
2084  *   Perform verbose error reporting if not NULL.
2085  */
2086 static int
2087 sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
2088 				    unsigned int filters_count_for_one_val,
2089 				    struct rte_flow_error *error)
2090 {
2091 	unsigned int i;
2092 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2093 	static const efx_filter_match_flags_t vals[] = {
2094 		EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
2095 		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
2096 	};
2097 
2098 	if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
2099 		rte_flow_error_set(error, EINVAL,
2100 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2101 			"Number of specifications is incorrect while copying "
2102 			"by inner frame unknown destination flags");
2103 		return -rte_errno;
2104 	}
2105 
2106 	for (i = 0; i < spec_filter->count; i++) {
2107 		/* The check above ensures that divisor can't be zero here */
2108 		spec_filter->filters[i].efs_match_flags |=
2109 			vals[i / filters_count_for_one_val];
2110 	}
2111 
2112 	return 0;
2113 }
2114 
2115 /**
2116  * Check that the following conditions are met:
2117  * - the specification corresponds to a filter for encapsulated traffic
2118  * - the list of supported filters has a filter
2119  *   with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
2120  *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
2121  *   be inserted.
2122  *
2123  * @param match[in]
2124  *   The match flags of filter.
2125  * @param spec[in]
2126  *   Specification to be supplemented.
2127  * @param filter[in]
2128  *   SFC filter with list of supported filters.
2129  */
2130 static boolean_t
2131 sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
2132 				      efx_filter_spec_t *spec,
2133 				      struct sfc_filter *filter)
2134 {
2135 	unsigned int i;
2136 	efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
2137 	efx_filter_match_flags_t match_mcast_dst;
2138 
2139 	if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
2140 		return B_FALSE;
2141 
2142 	match_mcast_dst =
2143 		(match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
2144 		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
2145 	for (i = 0; i < filter->supported_match_num; i++) {
2146 		if (match_mcast_dst == filter->supported_match[i])
2147 			return B_TRUE;
2148 	}
2149 
2150 	return B_FALSE;
2151 }
2152 
2153 /**
2154  * Check whether the list of supported filters has a filter that differs
2155  * from @p match only in that it has no EFX_FILTER_MATCH_OUTER_VID flag;
2156  * in that case, such a filter will be used, and the
2157  * EFX_FILTER_MATCH_OUTER_VID flag is not needed.
2158  *
2159  * @param match[in]
2160  *   The match flags of filter.
2161  * @param spec[in]
2162  *   Specification to be supplemented.
2163  * @param filter[in]
2164  *   SFC filter with list of supported filters.
2165  */
2166 static boolean_t
2167 sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match,
2168 			      __rte_unused efx_filter_spec_t *spec,
2169 			      struct sfc_filter *filter)
2170 {
2171 	unsigned int i;
2172 	efx_filter_match_flags_t match_without_vid =
2173 		match & ~EFX_FILTER_MATCH_OUTER_VID;
2174 
2175 	for (i = 0; i < filter->supported_match_num; i++) {
2176 		if (match_without_vid == filter->supported_match[i])
2177 			return B_FALSE;
2178 	}
2179 
2180 	return B_TRUE;
2181 }
2182 
2183 /*
2184  * Match flags that can be automatically added to filters.
2185  * Selecting the last minimum when searching for the copy flag ensures that the
2186  * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has a higher priority than
2187  * EFX_FILTER_MATCH_ETHER_TYPE. This is because the filter
2188  * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of supported
2189  * filters.
2190  */
2191 static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
2192 	{
2193 		.flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
2194 		.vals_count = 2,
2195 		.set_vals = sfc_flow_set_unknown_dst_flags,
2196 		.spec_check = sfc_flow_check_unknown_dst_flags,
2197 	},
2198 	{
2199 		.flag = EFX_FILTER_MATCH_ETHER_TYPE,
2200 		.vals_count = 2,
2201 		.set_vals = sfc_flow_set_ethertypes,
2202 		.spec_check = NULL,
2203 	},
2204 	{
2205 		.flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
2206 		.vals_count = 2,
2207 		.set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
2208 		.spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
2209 	},
2210 	{
2211 		.flag = EFX_FILTER_MATCH_OUTER_VID,
2212 		.vals_count = 1,
2213 		.set_vals = sfc_flow_set_outer_vid_flag,
2214 		.spec_check = sfc_flow_check_outer_vid_flag,
2215 	},
2216 };
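
/*
 * Worked example for the table above: a template that lacks both
 * EFX_FILTER_MATCH_ETHER_TYPE and EFX_FILTER_MATCH_OUTER_VID relative to a
 * supported filter is completed with 2 * 1 == 2 specifications: one per
 * EtherType value (IPv4, IPv6), each with outer VLAN ID 0.
 */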
2217 
2218 /* Get item from array sfc_flow_copy_flags */
2219 static const struct sfc_flow_copy_flag *
2220 sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
2221 {
2222 	unsigned int i;
2223 
2224 	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2225 		if (sfc_flow_copy_flags[i].flag == flag)
2226 			return &sfc_flow_copy_flags[i];
2227 	}
2228 
2229 	return NULL;
2230 }
2231 
2232 /**
2233  * Make copies of the specifications, set match flag and values
2234  * of the field that corresponds to it.
2235  *
2236  * @param spec[in, out]
2237  *   SFC flow specification to update.
2238  * @param flag[in]
2239  *   The match flag to add.
2240  * @param error[out]
2241  *   Perform verbose error reporting if not NULL.
2242  */
2243 static int
2244 sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
2245 			     efx_filter_match_flags_t flag,
2246 			     struct rte_flow_error *error)
2247 {
2248 	unsigned int i;
2249 	unsigned int new_filters_count;
2250 	unsigned int filters_count_for_one_val;
2251 	const struct sfc_flow_copy_flag *copy_flag;
2252 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2253 	int rc;
2254 
2255 	copy_flag = sfc_flow_get_copy_flag(flag);
2256 	if (copy_flag == NULL) {
2257 		rte_flow_error_set(error, ENOTSUP,
2258 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2259 				   "Unsupported spec field for copying");
2260 		return -rte_errno;
2261 	}
2262 
2263 	new_filters_count = spec_filter->count * copy_flag->vals_count;
2264 	if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
2265 		rte_flow_error_set(error, EINVAL,
2266 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2267 			"Too many EFX specifications in the flow rule");
2268 		return -rte_errno;
2269 	}
2270 
2271 	/* Copy filters specifications */
2272 	for (i = spec_filter->count; i < new_filters_count; i++) {
2273 		spec_filter->filters[i] =
2274 			spec_filter->filters[i - spec_filter->count];
2275 	}
2276 
2277 	filters_count_for_one_val = spec_filter->count;
2278 	spec_filter->count = new_filters_count;
2279 
2280 	rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
2281 	if (rc != 0)
2282 		return rc;
2283 
2284 	return 0;
2285 }
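
/*
 * Worked example for the copy step above: starting from one specification
 * with flag == EFX_FILTER_MATCH_ETHER_TYPE (vals_count == 2),
 * new_filters_count == 2; filters[1] becomes a copy of filters[0], and
 * sfc_flow_set_ethertypes() then assigns IPv4 to filters[0] and IPv6 to
 * filters[1].
 */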
2286 
2287 /**
2288  * Check that the given set of match flags missing in the original filter spec
2289  * could be covered by adding spec copies which specify the corresponding
2290  * flags and packet field values to match.
2291  *
2292  * @param miss_flags[in]
2293  *   Match flags required by the supported filter but missing from the spec.
2294  * @param spec[in]
2295  *   Specification to be supplemented.
2296  * @param filter[in]
2297  *   SFC filter.
2298  *
2299  * @return
2300  *   Number of specifications after copying, or 0 if the flags cannot be added.
2301  */
2302 static unsigned int
2303 sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
2304 			     efx_filter_spec_t *spec,
2305 			     struct sfc_filter *filter)
2306 {
2307 	unsigned int i;
2308 	efx_filter_match_flags_t copy_flags = 0;
2309 	efx_filter_match_flags_t flag;
2310 	efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
2311 	sfc_flow_spec_check *check;
2312 	unsigned int multiplier = 1;
2313 
2314 	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2315 		flag = sfc_flow_copy_flags[i].flag;
2316 		check = sfc_flow_copy_flags[i].spec_check;
2317 		if ((flag & miss_flags) == flag) {
2318 			if (check != NULL && (!check(match, spec, filter)))
2319 				continue;
2320 
2321 			copy_flags |= flag;
2322 			multiplier *= sfc_flow_copy_flags[i].vals_count;
2323 		}
2324 	}
2325 
2326 	if (copy_flags == miss_flags)
2327 		return multiplier;
2328 
2329 	return 0;
2330 }
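
/*
 * Worked example for the multiplier above: if miss_flags is
 * EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_UNKNOWN_UCAST_DST and both
 * flags pass their checks, copy_flags equals miss_flags and the function
 * returns 2 * 2 == 4, the factor by which the specification count grows.
 */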
2331 
2332 /**
2333  * Attempt to supplement the specification template up to a minimal
2334  * supported set of match flags. To do this, the specifications are
2335  * copied and filled in with the values of the fields that correspond
2336  * to the missing flags.
2337  * The necessary and sufficient filter is built from the fewest number
2338  * of copies that can be made to cover the minimally required set
2339  * of flags.
2340  *
2341  * @param sa[in]
2342  *   SFC adapter.
2343  * @param spec[in, out]
2344  *   SFC flow specification to update.
2345  * @param error[out]
2346  *   Perform verbose error reporting if not NULL.
2347  */
2348 static int
2349 sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
2350 			       struct sfc_flow_spec *spec,
2351 			       struct rte_flow_error *error)
2352 {
2353 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2354 	struct sfc_filter *filter = &sa->filter;
2355 	efx_filter_match_flags_t miss_flags;
2356 	efx_filter_match_flags_t min_miss_flags = 0;
2357 	efx_filter_match_flags_t match;
2358 	unsigned int min_multiplier = UINT_MAX;
2359 	unsigned int multiplier;
2360 	unsigned int i;
2361 	int rc;
2362 
2363 	match = spec_filter->template.efs_match_flags;
2364 	for (i = 0; i < filter->supported_match_num; i++) {
2365 		if ((match & filter->supported_match[i]) == match) {
2366 			miss_flags = filter->supported_match[i] & (~match);
2367 			multiplier = sfc_flow_check_missing_flags(miss_flags,
2368 				&spec_filter->template, filter);
2369 			if (multiplier > 0) {
2370 				if (multiplier <= min_multiplier) {
2371 					min_multiplier = multiplier;
2372 					min_miss_flags = miss_flags;
2373 				}
2374 			}
2375 		}
2376 	}
2377 
2378 	if (min_multiplier == UINT_MAX) {
2379 		rte_flow_error_set(error, ENOTSUP,
2380 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2381 				   "The flow rule pattern is unsupported");
2382 		return -rte_errno;
2383 	}
2384 
2385 	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2386 		efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;
2387 
2388 		if ((flag & min_miss_flags) == flag) {
2389 			rc = sfc_flow_spec_add_match_flag(spec, flag, error);
2390 			if (rc != 0)
2391 				return rc;
2392 		}
2393 	}
2394 
2395 	return 0;
2396 }
2397 
2398 /**
2399  * Check whether a set of match flags corresponds to a filter. The filter is
2400  * described by its match flags, optionally extended with the OUTER_VID and
2401  * INNER_VID flags.
2402  *
2403  * @param match_flags[in]
2404  *   Set of match flags.
2405  * @param flags_pattern[in]
2406  *   Pattern of filter match flags.
2407  */
2408 static boolean_t
2409 sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
2410 			    efx_filter_match_flags_t flags_pattern)
2411 {
2412 	if ((match_flags & flags_pattern) != flags_pattern)
2413 		return B_FALSE;
2414 
2415 	switch (match_flags & ~flags_pattern) {
2416 	case 0:
2417 	case EFX_FILTER_MATCH_OUTER_VID:
2418 	case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
2419 		return B_TRUE;
2420 	default:
2421 		return B_FALSE;
2422 	}
2423 }
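
/*
 * Illustration of the deltas accepted above: relative to flags_pattern,
 * match_flags may add nothing, OUTER_VID alone, or OUTER_VID together with
 * INNER_VID; any other extra flag (e.g. INNER_VID alone) fails the check.
 */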
2424 
2425 /**
2426  * Check whether the spec maps to a hardware filter which is known to be
2427  * ineffective despite being valid.
2428  *
2429  * @param filter[in]
2430  *   SFC filter with list of supported filters.
2431  * @param spec[in]
2432  *   SFC flow specification.
2433  */
2434 static boolean_t
2435 sfc_flow_is_match_flags_exception(struct sfc_filter *filter,
2436 				  struct sfc_flow_spec *spec)
2437 {
2438 	unsigned int i;
2439 	uint16_t ether_type;
2440 	uint8_t ip_proto;
2441 	efx_filter_match_flags_t match_flags;
2442 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2443 
2444 	for (i = 0; i < spec_filter->count; i++) {
2445 		match_flags = spec_filter->filters[i].efs_match_flags;
2446 
2447 		if (sfc_flow_is_match_with_vids(match_flags,
2448 						EFX_FILTER_MATCH_ETHER_TYPE) ||
2449 		    sfc_flow_is_match_with_vids(match_flags,
2450 						EFX_FILTER_MATCH_ETHER_TYPE |
2451 						EFX_FILTER_MATCH_LOC_MAC)) {
2452 			ether_type = spec_filter->filters[i].efs_ether_type;
2453 			if (filter->supports_ip_proto_or_addr_filter &&
2454 			    (ether_type == EFX_ETHER_TYPE_IPV4 ||
2455 			     ether_type == EFX_ETHER_TYPE_IPV6))
2456 				return B_TRUE;
2457 		} else if (sfc_flow_is_match_with_vids(match_flags,
2458 				EFX_FILTER_MATCH_ETHER_TYPE |
2459 				EFX_FILTER_MATCH_IP_PROTO) ||
2460 			   sfc_flow_is_match_with_vids(match_flags,
2461 				EFX_FILTER_MATCH_ETHER_TYPE |
2462 				EFX_FILTER_MATCH_IP_PROTO |
2463 				EFX_FILTER_MATCH_LOC_MAC)) {
2464 			ip_proto = spec_filter->filters[i].efs_ip_proto;
2465 			if (filter->supports_rem_or_local_port_filter &&
2466 			    (ip_proto == EFX_IPPROTO_TCP ||
2467 			     ip_proto == EFX_IPPROTO_UDP))
2468 				return B_TRUE;
2469 		}
2470 	}
2471 
2472 	return B_FALSE;
2473 }
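
/*
 * Example of the exception above: a spec matching only
 * ETHER_TYPE == EFX_ETHER_TYPE_IPV4 on an adapter with
 * supports_ip_proto_or_addr_filter set is valid but known to be
 * ineffective, so sfc_flow_validate_match_flags() below rejects it.
 */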
2474 
2475 static int
2476 sfc_flow_validate_match_flags(struct sfc_adapter *sa,
2477 			      struct rte_flow *flow,
2478 			      struct rte_flow_error *error)
2479 {
2480 	struct sfc_flow_spec *spec = &flow->spec;
2481 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2482 	efx_filter_spec_t *spec_tmpl = &spec_filter->template;
2483 	efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
2484 	int rc;
2485 
2486 	/* Initialize the first filter spec with template */
2487 	spec_filter->filters[0] = *spec_tmpl;
2488 	spec_filter->count = 1;
2489 
2490 	if (!sfc_filter_is_match_supported(sa, match_flags)) {
2491 		rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
2492 		if (rc != 0)
2493 			return rc;
2494 	}
2495 
2496 	if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) {
2497 		rte_flow_error_set(error, ENOTSUP,
2498 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2499 			"The flow rule pattern is unsupported");
2500 		return -rte_errno;
2501 	}
2502 
2503 	return 0;
2504 }
2505 
2506 static int
2507 sfc_flow_parse_rte_to_filter(struct rte_eth_dev *dev,
2508 			     const struct rte_flow_item pattern[],
2509 			     const struct rte_flow_action actions[],
2510 			     struct rte_flow *flow,
2511 			     struct rte_flow_error *error)
2512 {
2513 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2514 	struct sfc_flow_spec *spec = &flow->spec;
2515 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2516 	struct sfc_flow_parse_ctx ctx;
2517 	int rc;
2518 
2519 	ctx.type = SFC_FLOW_PARSE_CTX_FILTER;
2520 	ctx.filter = &spec_filter->template;
2521 
2522 	rc = sfc_flow_parse_pattern(sa, sfc_flow_items, RTE_DIM(sfc_flow_items),
2523 				    pattern, &ctx, error);
2524 	if (rc != 0)
2525 		goto fail_bad_value;
2526 
2527 	rc = sfc_flow_parse_actions(sa, actions, flow, error);
2528 	if (rc != 0)
2529 		goto fail_bad_value;
2530 
2531 	rc = sfc_flow_validate_match_flags(sa, flow, error);
2532 	if (rc != 0)
2533 		goto fail_bad_value;
2534 
2535 	return 0;
2536 
2537 fail_bad_value:
2538 	return rc;
2539 }
2540 
2541 static int
2542 sfc_flow_parse_rte_to_mae(struct rte_eth_dev *dev,
2543 			  const struct rte_flow_item pattern[],
2544 			  const struct rte_flow_action actions[],
2545 			  struct rte_flow *flow,
2546 			  struct rte_flow_error *error)
2547 {
2548 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2549 	struct sfc_flow_spec *spec = &flow->spec;
2550 	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
2551 	int rc;
2552 
2553 	/*
2554 	 * If the flow is meant to be a JUMP rule in tunnel offload,
2555 	 * preparse its actions and save its properties in spec_mae.
2556 	 */
2557 	rc = sfc_flow_tunnel_detect_jump_rule(sa, actions, spec_mae, error);
2558 	if (rc != 0)
2559 		goto fail;
2560 
2561 	rc = sfc_mae_rule_parse_pattern(sa, pattern, spec_mae, error);
2562 	if (rc != 0)
2563 		goto fail;
2564 
2565 	if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
2566 		/*
2567 		 * By design, this flow should be represented solely by the
2568 		 * outer rule. But the HW/FW hasn't got support for setting
2569 		 * Rx mark from RECIRC_ID on outer rule lookup yet. Neither
2570 		 * does it support outer rule counters. As a workaround, an
2571 		 * action rule of lower priority is used to do the job.
2572 		 *
2573 		 * So don't skip sfc_mae_rule_parse_actions() below.
2574 		 */
2575 	}
2576 
2577 	rc = sfc_mae_rule_parse_actions(sa, actions, spec_mae, error);
2578 	if (rc != 0)
2579 		goto fail;
2580 
2581 	if (spec_mae->ft != NULL) {
2582 		if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP)
2583 			spec_mae->ft->jump_rule_is_set = B_TRUE;
2584 
2585 		++(spec_mae->ft->refcnt);
2586 	}
2587 
2588 	return 0;
2589 
2590 fail:
2591 	/* Reset these values to avoid confusing sfc_mae_flow_cleanup(). */
2592 	spec_mae->ft_rule_type = SFC_FT_RULE_NONE;
2593 	spec_mae->ft = NULL;
2594 
2595 	return rc;
2596 }
2597 
2598 static int
2599 sfc_flow_parse(struct rte_eth_dev *dev,
2600 	       const struct rte_flow_attr *attr,
2601 	       const struct rte_flow_item pattern[],
2602 	       const struct rte_flow_action actions[],
2603 	       struct rte_flow *flow,
2604 	       struct rte_flow_error *error)
2605 {
2606 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2607 	const struct sfc_flow_ops_by_spec *ops;
2608 	int rc;
2609 
2610 	rc = sfc_flow_parse_attr(sa, attr, flow, error);
2611 	if (rc != 0)
2612 		return rc;
2613 
2614 	ops = sfc_flow_get_ops_by_spec(flow);
2615 	if (ops == NULL || ops->parse == NULL) {
2616 		rte_flow_error_set(error, ENOTSUP,
2617 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2618 				   "No backend to handle this flow");
2619 		return -rte_errno;
2620 	}
2621 
2622 	return ops->parse(dev, pattern, actions, flow, error);
2623 }
2624 
2625 static struct rte_flow *
2626 sfc_flow_zmalloc(struct rte_flow_error *error)
2627 {
2628 	struct rte_flow *flow;
2629 
2630 	flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
2631 	if (flow == NULL) {
2632 		rte_flow_error_set(error, ENOMEM,
2633 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2634 				   "Failed to allocate memory");
2635 	}
2636 
2637 	return flow;
2638 }
2639 
2640 static void
2641 sfc_flow_free(struct sfc_adapter *sa, struct rte_flow *flow)
2642 {
2643 	const struct sfc_flow_ops_by_spec *ops;
2644 
2645 	ops = sfc_flow_get_ops_by_spec(flow);
2646 	if (ops != NULL && ops->cleanup != NULL)
2647 		ops->cleanup(sa, flow);
2648 
2649 	rte_free(flow);
2650 }
2651 
2652 static int
2653 sfc_flow_insert(struct sfc_adapter *sa, struct rte_flow *flow,
2654 		struct rte_flow_error *error)
2655 {
2656 	const struct sfc_flow_ops_by_spec *ops;
2657 	int rc;
2658 
2659 	ops = sfc_flow_get_ops_by_spec(flow);
2660 	if (ops == NULL || ops->insert == NULL) {
2661 		rte_flow_error_set(error, ENOTSUP,
2662 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2663 				   "No backend to handle this flow");
2664 		return rte_errno;
2665 	}
2666 
2667 	rc = ops->insert(sa, flow);
2668 	if (rc != 0) {
2669 		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2670 				   NULL, "Failed to insert the flow rule");
2671 	}
2672 
2673 	return rc;
2674 }
2675 
2676 static int
2677 sfc_flow_remove(struct sfc_adapter *sa, struct rte_flow *flow,
2678 		struct rte_flow_error *error)
2679 {
2680 	const struct sfc_flow_ops_by_spec *ops;
2681 	int rc;
2682 
2683 	ops = sfc_flow_get_ops_by_spec(flow);
2684 	if (ops == NULL || ops->remove == NULL) {
2685 		rte_flow_error_set(error, ENOTSUP,
2686 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2687 				   "No backend to handle this flow");
2688 		return rte_errno;
2689 	}
2690 
2691 	rc = ops->remove(sa, flow);
2692 	if (rc != 0) {
2693 		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2694 				   NULL, "Failed to remove the flow rule");
2695 	}
2696 
2697 	return rc;
2698 }
2699 
2700 static int
2701 sfc_flow_verify(struct sfc_adapter *sa, struct rte_flow *flow,
2702 		struct rte_flow_error *error)
2703 {
2704 	const struct sfc_flow_ops_by_spec *ops;
2705 	int rc = 0;
2706 
2707 	ops = sfc_flow_get_ops_by_spec(flow);
2708 	if (ops == NULL) {
2709 		rte_flow_error_set(error, ENOTSUP,
2710 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2711 				   "No backend to handle this flow");
2712 		return -rte_errno;
2713 	}
2714 
2715 	if (ops->verify != NULL) {
2716 		SFC_ASSERT(sfc_adapter_is_locked(sa));
2717 		rc = ops->verify(sa, flow);
2718 	}
2719 
2720 	if (rc != 0) {
2721 		rte_flow_error_set(error, rc,
2722 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2723 			"Failed to verify flow validity with FW");
2724 		return -rte_errno;
2725 	}
2726 
2727 	return 0;
2728 }
2729 
2730 static int
2731 sfc_flow_validate(struct rte_eth_dev *dev,
2732 		  const struct rte_flow_attr *attr,
2733 		  const struct rte_flow_item pattern[],
2734 		  const struct rte_flow_action actions[],
2735 		  struct rte_flow_error *error)
2736 {
2737 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2738 	struct rte_flow *flow;
2739 	int rc;
2740 
2741 	flow = sfc_flow_zmalloc(error);
2742 	if (flow == NULL)
2743 		return -rte_errno;
2744 
2745 	sfc_adapter_lock(sa);
2746 
2747 	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2748 	if (rc == 0)
2749 		rc = sfc_flow_verify(sa, flow, error);
2750 
2751 	sfc_flow_free(sa, flow);
2752 
2753 	sfc_adapter_unlock(sa);
2754 
2755 	return rc;
2756 }
2757 
2758 static struct rte_flow *
2759 sfc_flow_create(struct rte_eth_dev *dev,
2760 		const struct rte_flow_attr *attr,
2761 		const struct rte_flow_item pattern[],
2762 		const struct rte_flow_action actions[],
2763 		struct rte_flow_error *error)
2764 {
2765 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2766 	struct rte_flow *flow = NULL;
2767 	int rc;
2768 
2769 	flow = sfc_flow_zmalloc(error);
2770 	if (flow == NULL)
2771 		goto fail_no_mem;
2772 
2773 	sfc_adapter_lock(sa);
2774 
2775 	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2776 	if (rc != 0)
2777 		goto fail_bad_value;
2778 
2779 	TAILQ_INSERT_TAIL(&sa->flow_list, flow, entries);
2780 
2781 	if (sa->state == SFC_ETHDEV_STARTED) {
2782 		rc = sfc_flow_insert(sa, flow, error);
2783 		if (rc != 0)
2784 			goto fail_flow_insert;
2785 	}
2786 
2787 	sfc_adapter_unlock(sa);
2788 
2789 	return flow;
2790 
2791 fail_flow_insert:
2792 	TAILQ_REMOVE(&sa->flow_list, flow, entries);
2793 
2794 fail_bad_value:
2795 	sfc_flow_free(sa, flow);
2796 	sfc_adapter_unlock(sa);
2797 
2798 fail_no_mem:
2799 	return NULL;
2800 }
2801 
2802 static int
2803 sfc_flow_destroy(struct rte_eth_dev *dev,
2804 		 struct rte_flow *flow,
2805 		 struct rte_flow_error *error)
2806 {
2807 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2808 	struct rte_flow *flow_ptr;
2809 	int rc = EINVAL;
2810 
2811 	sfc_adapter_lock(sa);
2812 
2813 	TAILQ_FOREACH(flow_ptr, &sa->flow_list, entries) {
2814 		if (flow_ptr == flow)
2815 			rc = 0;
2816 	}
2817 	if (rc != 0) {
2818 		rte_flow_error_set(error, rc,
2819 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2820 				   "Failed to find flow rule to destroy");
2821 		goto fail_bad_value;
2822 	}
2823 
2824 	if (sa->state == SFC_ETHDEV_STARTED)
2825 		rc = sfc_flow_remove(sa, flow, error);
2826 
2827 	TAILQ_REMOVE(&sa->flow_list, flow, entries);
2828 	sfc_flow_free(sa, flow);
2829 
2830 fail_bad_value:
2831 	sfc_adapter_unlock(sa);
2832 
2833 	return -rc;
2834 }
2835 
2836 static int
2837 sfc_flow_flush(struct rte_eth_dev *dev,
2838 	       struct rte_flow_error *error)
2839 {
2840 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2841 	struct rte_flow *flow;
2842 	int ret = 0;
2843 
2844 	sfc_adapter_lock(sa);
2845 
2846 	while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
2847 		if (sa->state == SFC_ETHDEV_STARTED) {
2848 			int rc;
2849 
2850 			rc = sfc_flow_remove(sa, flow, error);
2851 			if (rc != 0)
2852 				ret = rc;
2853 		}
2854 
2855 		TAILQ_REMOVE(&sa->flow_list, flow, entries);
2856 		sfc_flow_free(sa, flow);
2857 	}
2858 
2859 	sfc_adapter_unlock(sa);
2860 
2861 	return -ret;
2862 }
2863 
2864 static int
2865 sfc_flow_query(struct rte_eth_dev *dev,
2866 	       struct rte_flow *flow,
2867 	       const struct rte_flow_action *action,
2868 	       void *data,
2869 	       struct rte_flow_error *error)
2870 {
2871 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2872 	const struct sfc_flow_ops_by_spec *ops;
2873 	int ret;
2874 
2875 	sfc_adapter_lock(sa);
2876 
2877 	ops = sfc_flow_get_ops_by_spec(flow);
2878 	if (ops == NULL || ops->query == NULL) {
2879 		ret = rte_flow_error_set(error, ENOTSUP,
2880 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2881 			"No backend to handle this flow");
2882 		goto fail_no_backend;
2883 	}
2884 
2885 	if (sa->state != SFC_ETHDEV_STARTED) {
2886 		ret = rte_flow_error_set(error, EINVAL,
2887 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2888 			"Can't query the flow: the adapter is not started");
2889 		goto fail_not_started;
2890 	}
2891 
2892 	ret = ops->query(dev, flow, action, data, error);
2893 	if (ret != 0)
2894 		goto fail_query;
2895 
2896 	sfc_adapter_unlock(sa);
2897 
2898 	return 0;
2899 
2900 fail_query:
2901 fail_not_started:
2902 fail_no_backend:
2903 	sfc_adapter_unlock(sa);
2904 	return ret;
2905 }
2906 
2907 static int
2908 sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
2909 		 struct rte_flow_error *error)
2910 {
2911 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2912 	int ret = 0;
2913 
2914 	sfc_adapter_lock(sa);
2915 	if (sa->state != SFC_ETHDEV_INITIALIZED) {
2916 		rte_flow_error_set(error, EBUSY,
2917 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2918 				   NULL, "please close the port first");
2919 		ret = -rte_errno;
2920 	} else {
2921 		sfc_sa2shared(sa)->isolated = (enable) ? B_TRUE : B_FALSE;
2922 	}
2923 	sfc_adapter_unlock(sa);
2924 
2925 	return ret;
2926 }
2927 
2928 static int
2929 sfc_flow_pick_transfer_proxy(struct rte_eth_dev *dev,
2930 			     uint16_t *transfer_proxy_port,
2931 			     struct rte_flow_error *error)
2932 {
2933 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2934 	int ret;
2935 
2936 	ret = sfc_mae_get_switch_domain_admin(sa->mae.switch_domain_id,
2937 					      transfer_proxy_port);
2938 	if (ret != 0) {
2939 		return rte_flow_error_set(error, ret,
2940 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2941 					  NULL, NULL);
2942 	}
2943 
2944 	return 0;
2945 }
2946 
2947 const struct rte_flow_ops sfc_flow_ops = {
2948 	.validate = sfc_flow_validate,
2949 	.create = sfc_flow_create,
2950 	.destroy = sfc_flow_destroy,
2951 	.flush = sfc_flow_flush,
2952 	.query = sfc_flow_query,
2953 	.isolate = sfc_flow_isolate,
2954 	.tunnel_decap_set = sfc_flow_tunnel_decap_set,
2955 	.tunnel_match = sfc_flow_tunnel_match,
2956 	.tunnel_action_decap_release = sfc_flow_tunnel_action_decap_release,
2957 	.tunnel_item_release = sfc_flow_tunnel_item_release,
2958 	.get_restore_info = sfc_flow_tunnel_get_restore_info,
2959 	.pick_transfer_proxy = sfc_flow_pick_transfer_proxy,
2960 };
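
/*
 * Illustrative application-side sketch (assumptions: port_id identifies a
 * port driven by this PMD; attr, pattern and actions are prepared by the
 * caller). The generic rte_flow calls below are dispatched by the ethdev
 * layer to the callbacks in the table above.
 */
#if 0	/* example only */
static int
example_create_flow(uint16_t port_id,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item pattern[],
		    const struct rte_flow_action actions[])
{
	struct rte_flow_error flow_err;
	struct rte_flow *handle;

	if (rte_flow_validate(port_id, attr, pattern, actions, &flow_err) != 0)
		return -rte_errno;

	handle = rte_flow_create(port_id, attr, pattern, actions, &flow_err);

	return (handle != NULL) ? 0 : -rte_errno;
}
#endif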
2961 
2962 void
2963 sfc_flow_init(struct sfc_adapter *sa)
2964 {
2965 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2966 
2967 	TAILQ_INIT(&sa->flow_list);
2968 }
2969 
2970 void
2971 sfc_flow_fini(struct sfc_adapter *sa)
2972 {
2973 	struct rte_flow *flow;
2974 
2975 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2976 
2977 	while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
2978 		TAILQ_REMOVE(&sa->flow_list, flow, entries);
2979 		sfc_flow_free(sa, flow);
2980 	}
2981 }
2982 
2983 void
2984 sfc_flow_stop(struct sfc_adapter *sa)
2985 {
2986 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
2987 	struct sfc_rss *rss = &sas->rss;
2988 	struct rte_flow *flow;
2989 
2990 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2991 
2992 	TAILQ_FOREACH(flow, &sa->flow_list, entries)
2993 		sfc_flow_remove(sa, flow, NULL);
2994 
2995 	if (rss->dummy_rss_context != EFX_RSS_CONTEXT_DEFAULT) {
2996 		efx_rx_scale_context_free(sa->nic, rss->dummy_rss_context);
2997 		rss->dummy_rss_context = EFX_RSS_CONTEXT_DEFAULT;
2998 	}
2999 
3000 	/*
3001 	 * MAE counter service is not stopped on flow rule remove to avoid
3002 	 * extra work. Make sure that it is stopped here.
3003 	 */
3004 	sfc_mae_counter_stop(sa);
3005 }
3006 
3007 int
3008 sfc_flow_start(struct sfc_adapter *sa)
3009 {
3010 	struct rte_flow *flow;
3011 	int rc = 0;
3012 
3013 	sfc_log_init(sa, "entry");
3014 
3015 	SFC_ASSERT(sfc_adapter_is_locked(sa));
3016 
3017 	sfc_flow_tunnel_reset_hit_counters(sa);
3018 
3019 	TAILQ_FOREACH(flow, &sa->flow_list, entries) {
3020 		rc = sfc_flow_insert(sa, flow, NULL);
3021 		if (rc != 0)
3022 			goto fail_bad_flow;
3023 	}
3024 
3025 	sfc_log_init(sa, "done");
3026 
3027 fail_bad_flow:
3028 	return rc;
3029 }
3030