/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2017-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_byteorder.h>
#include <rte_tailq.h>
#include <rte_common.h>
#include <ethdev_driver.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_flow_tunnel.h"
#include "sfc_log.h"
#include "sfc_dp_rx.h"
#include "sfc_mae_counter.h"

struct sfc_flow_ops_by_spec {
	sfc_flow_parse_cb_t	*parse;
	sfc_flow_verify_cb_t	*verify;
	sfc_flow_cleanup_cb_t	*cleanup;
	sfc_flow_insert_cb_t	*insert;
	sfc_flow_remove_cb_t	*remove;
	sfc_flow_query_cb_t	*query;
};

static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter;
static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_mae;
static sfc_flow_insert_cb_t sfc_flow_filter_insert;
static sfc_flow_remove_cb_t sfc_flow_filter_remove;

static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = {
	.parse = sfc_flow_parse_rte_to_filter,
	.verify = NULL,
	.cleanup = NULL,
	.insert = sfc_flow_filter_insert,
	.remove = sfc_flow_filter_remove,
	.query = NULL,
};

static const struct sfc_flow_ops_by_spec sfc_flow_ops_mae = {
	.parse = sfc_flow_parse_rte_to_mae,
	.verify = sfc_mae_flow_verify,
	.cleanup = sfc_mae_flow_cleanup,
	.insert = sfc_mae_flow_insert,
	.remove = sfc_mae_flow_remove,
	.query = sfc_mae_flow_query,
};

static const struct sfc_flow_ops_by_spec *
sfc_flow_get_ops_by_spec(struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	const struct sfc_flow_ops_by_spec *ops = NULL;

	switch (spec->type) {
	case SFC_FLOW_SPEC_FILTER:
		ops = &sfc_flow_ops_filter;
		break;
	case SFC_FLOW_SPEC_MAE:
		ops = &sfc_flow_ops_mae;
		break;
	default:
		SFC_ASSERT(false);
		break;
	}

	return ops;
}

/*
 * Currently, the filter-based (VNIC) flow API is implemented in such a
 * manner that each flow rule is converted to one or more hardware filters.
 * All elements of a flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that describes the hardware filter.
 * If some required field is unset in the flow rule, multiple filter
 * copies are created to cover all possible values of that field.
 */
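
/*
 * Illustrative sketch (not part of the driver): a rule that pins only the
 * EtherType leaves the destination MAC unset in the filter template.
 * When the hardware match set still requires a destination kind, the
 * template is expanded into one copy per possible value, e.g.
 *
 *	copy 1: efs_match_flags |= EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
 *	copy 2: efs_match_flags |= EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
 *
 * See sfc_flow_set_unknown_dst_flags() and struct sfc_flow_copy_flag below.
 */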

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
static sfc_flow_item_parse sfc_flow_parse_vxlan;
static sfc_flow_item_parse sfc_flow_parse_geneve;
static sfc_flow_item_parse sfc_flow_parse_nvgre;
static sfc_flow_item_parse sfc_flow_parse_pppoex;

typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
				     unsigned int filters_count_for_one_val,
				     struct rte_flow_error *error);

typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
					efx_filter_spec_t *spec,
					struct sfc_filter *filter);

struct sfc_flow_copy_flag {
	/* EFX filter specification match flag */
	efx_filter_match_flags_t flag;
	/* Number of values of corresponding field */
	unsigned int vals_count;
	/* Function to set values in specifications */
	sfc_flow_spec_set_vals *set_vals;
	/*
	 * Function to check that the specification is suitable
	 * for adding this match flag
	 */
	sfc_flow_spec_check *spec_check;
};

static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag;
static sfc_flow_spec_check sfc_flow_check_outer_vid_flag;

static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
	uint8_t sum = 0;
	unsigned int i;

	for (i = 0; i < size; i++)
		sum |= buf[i];

	return (sum == 0) ? B_TRUE : B_FALSE;
}

/*
 * Validate the item and prepare the "spec" and "mask" structures for parsing
 */
int
sfc_flow_parse_init(const struct rte_flow_item *item,
		    const void **spec_ptr,
		    const void **mask_ptr,
		    const void *supp_mask,
		    const void *def_mask,
		    unsigned int size,
		    struct rte_flow_error *error)
{
	const uint8_t *spec;
	const uint8_t *mask;
	const uint8_t *last;
	uint8_t supp;
	unsigned int i;

	if (item == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "NULL item");
		return -rte_errno;
	}

	if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Mask or last is set without spec");
		return -rte_errno;
	}

	/*
	 * If "mask" is not set, the default mask is used,
	 * but if the default mask is NULL, "mask" must be set
	 */
	if (item->mask == NULL) {
		if (def_mask == NULL) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				"Mask should be specified");
			return -rte_errno;
		}

		mask = def_mask;
	} else {
		mask = item->mask;
	}

	spec = item->spec;
	last = item->last;

	if (spec == NULL)
		goto exit;

	/*
	 * If the field values in "last" are either 0 or equal to the
	 * corresponding values in "spec", they are ignored
	 */
	if (last != NULL &&
	    !sfc_flow_is_zero(last, size) &&
	    memcmp(last, spec, size) != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Ranging is not supported");
		return -rte_errno;
	}

	if (supp_mask == NULL) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Supported mask for item should be specified");
		return -rte_errno;
	}

	/* Check that mask does not ask for more match than supp_mask */
	for (i = 0; i < size; i++) {
		supp = ((const uint8_t *)supp_mask)[i];

		if (~supp & mask[i]) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Item's field is not supported");
			return -rte_errno;
		}
	}

exit:
	*spec_ptr = spec;
	*mask_ptr = mask;
	return 0;
}
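
/*
 * Usage sketch (illustrative; mirrors the protocol parsers below, with
 * "item" and "error" supplied by a hypothetical caller):
 *
 *	const struct rte_flow_item_udp *spec = NULL;
 *	const struct rte_flow_item_udp *mask = NULL;
 *	const struct rte_flow_item_udp supp_mask = {
 *		.hdr = { .src_port = 0xffff, .dst_port = 0xffff },
 *	};
 *	int rc;
 *
 *	rc = sfc_flow_parse_init(item,
 *				 (const void **)&spec,
 *				 (const void **)&mask,
 *				 &supp_mask, &rte_flow_item_udp_mask,
 *				 sizeof(struct rte_flow_item_udp), error);
 *	if (rc != 0)
 *		return rc;
 *	if (spec == NULL)
 *		return 0;	- the item matches any UDP header
 */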

/*
 * Protocol parsers.
 * Arbitrary masking is not supported, so masks in items must be either
 * full or empty (zeroed), and set only for the supported fields that
 * are listed in supp_mask.
 */

static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
		    __rte_unused struct sfc_flow_parse_ctx *parse_ctx,
		    __rte_unused struct rte_flow_error *error)
{
	return 0;
}

/**
 * Convert an Ethernet item to an EFX filter specification.
 *
 * @param[in] item
 *   Item specification. The outer frame specification may comprise only
 *   the source/destination addresses and the EtherType field.
 *   The inner frame specification may contain the destination address only.
 *   Individual/group masks are supported, as well as empty and full masks.
 *   If the mask is NULL, the default mask is used. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context; the EFX filter specification inside it is updated.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
		   struct sfc_flow_parse_ctx *parse_ctx,
		   struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_eth *spec = NULL;
	const struct rte_flow_item_eth *mask = NULL;
	const struct rte_flow_item_eth supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.type = 0xffff,
	};
	const struct rte_flow_item_eth ifrm_supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
	};
	const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
		0x01, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	const struct rte_flow_item_eth *supp_mask_p;
	const struct rte_flow_item_eth *def_mask_p;
	uint8_t *loc_mac = NULL;
	boolean_t is_ifrm = (efx_spec->efs_encap_type !=
		EFX_TUNNEL_PROTOCOL_NONE);

	if (is_ifrm) {
		supp_mask_p = &ifrm_supp_mask;
		def_mask_p = &ifrm_supp_mask;
		loc_mac = efx_spec->efs_ifrm_loc_mac;
	} else {
		supp_mask_p = &supp_mask;
		def_mask_p = &rte_flow_item_eth_mask;
		loc_mac = efx_spec->efs_loc_mac;
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 supp_mask_p, def_mask_p,
				 sizeof(struct rte_flow_item_eth),
				 error);
	if (rc != 0)
		return rc;

	/* If "spec" is not set, could be any Ethernet */
	if (spec == NULL)
		return 0;

	if (rte_is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
		efx_spec->efs_match_flags |= is_ifrm ?
			EFX_FILTER_MATCH_IFRM_LOC_MAC :
			EFX_FILTER_MATCH_LOC_MAC;
		rte_memcpy(loc_mac, spec->dst.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (memcmp(mask->dst.addr_bytes, ig_mask,
			  EFX_MAC_ADDR_LEN) == 0) {
		if (rte_is_unicast_ether_addr(&spec->dst))
			efx_spec->efs_match_flags |= is_ifrm ?
				EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
				EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
		else
			efx_spec->efs_match_flags |= is_ifrm ?
				EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
				EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
	} else if (!rte_is_zero_ether_addr(&mask->dst)) {
		goto fail_bad_mask;
	}

	/*
	 * ifrm_supp_mask ensures that the source address and
	 * ethertype masks are equal to zero in inner frame,
	 * so these fields are filled in only for the outer frame
	 */
	if (rte_is_same_ether_addr(&mask->src, &supp_mask.src)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
		rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (!rte_is_zero_ether_addr(&mask->src)) {
		goto fail_bad_mask;
	}

	/*
	 * Ether type is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->type == supp_mask.type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->type);
	} else if (mask->type != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the ETH pattern item");
	return -rte_errno;
}
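
/*
 * Example (illustrative): an ETH item with a fully masked destination
 * address makes the parser above set EFX_FILTER_MATCH_LOC_MAC:
 *
 *	const struct rte_flow_item_eth eth_spec = {
 *		.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
 *	};
 *	const struct rte_flow_item_eth eth_mask = {
 *		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
 *	};
 *
 * With the individual/group mask 01:00:00:00:00:00 instead, only the
 * multicast bit is matched, and one of the UNKNOWN_*CAST_DST flags is
 * chosen based on whether the spec address is unicast or multicast.
 */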

/**
 * Convert a VLAN item to an EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the VID field is supported.
 *   The mask cannot be NULL. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context; the EFX filter specification inside it is updated.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
		    struct sfc_flow_parse_ctx *parse_ctx,
		    struct rte_flow_error *error)
{
	int rc;
	uint16_t vid;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_vlan *spec = NULL;
	const struct rte_flow_item_vlan *mask = NULL;
	const struct rte_flow_item_vlan supp_mask = {
		.tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
		.inner_type = RTE_BE16(0xffff),
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 NULL,
				 sizeof(struct rte_flow_item_vlan),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * VID is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used.
	 * If two VLAN items are included, the first matches
	 * the outer tag and the next matches the inner tag.
	 */
	if (mask->tci == supp_mask.tci) {
		/* Apply mask to keep VID only */
		vid = rte_bswap16(spec->tci & mask->tci);

		if (!(efx_spec->efs_match_flags &
		      EFX_FILTER_MATCH_OUTER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
			efx_spec->efs_outer_vid = vid;
		} else if (!(efx_spec->efs_match_flags &
			     EFX_FILTER_MATCH_INNER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
			efx_spec->efs_inner_vid = vid;
		} else {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "More than two VLAN items");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN ID in TCI match is required");
		return -rte_errno;
	}

	if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN TPID matching is not supported");
		return -rte_errno;
	}
	if (mask->inner_type == supp_mask.inner_type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->inner_type);
	} else if (mask->inner_type) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Bad mask for VLAN inner_type");
		return -rte_errno;
	}

	return 0;
}
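
/*
 * Example (illustrative): in a pattern with two VLAN items, each carrying
 * a full VID mask, the first item programs the outer tag and the second
 * one the inner tag, i.e.
 *
 *	ETH / VLAN (vid 100) / VLAN (vid 200)
 *
 * yields efs_outer_vid == 100 and efs_inner_vid == 200; a third VLAN
 * item is rejected with "More than two VLAN items".
 */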

/**
 * Convert an IPv4 item to an EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination addresses and
 *   the protocol field are supported. If the mask is NULL, the default
 *   mask is used. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context; the EFX filter specification inside it is updated.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
		    struct sfc_flow_parse_ctx *parse_ctx,
		    struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_ipv4 *spec = NULL;
	const struct rte_flow_item_ipv4 *mask = NULL;
	const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
	const struct rte_flow_item_ipv4 supp_mask = {
		.hdr = {
			.src_addr = 0xffffffff,
			.dst_addr = 0xffffffff,
			.next_proto_id = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv4_mask,
				 sizeof(struct rte_flow_item_ipv4),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv4 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv4;
	} else if (efx_spec->efs_ether_type != ether_type_ipv4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV4 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv4 addresses are in big-endian byte order in item and in
	 * efx_spec
	 */
	if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
		efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
	} else if (mask->hdr.src_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
		efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
	} else if (mask->hdr.dst_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
	} else if (mask->hdr.next_proto_id != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV4 pattern item");
	return -rte_errno;
}

/**
 * Convert an IPv6 item to an EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination addresses and
 *   the next header field are supported. If the mask is NULL, the default
 *   mask is used. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context; the EFX filter specification inside it is updated.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
		    struct sfc_flow_parse_ctx *parse_ctx,
		    struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_ipv6 *spec = NULL;
	const struct rte_flow_item_ipv6 *mask = NULL;
	const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
	const struct rte_flow_item_ipv6 supp_mask = {
		.hdr = {
			.src_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.dst_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.proto = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv6_mask,
				 sizeof(struct rte_flow_item_ipv6),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv6 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv6;
	} else if (efx_spec->efs_ether_type != ether_type_ipv6) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV6 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv6 addresses are in big-endian byte order in item and in
	 * efx_spec
	 */
	if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
		   sizeof(mask->hdr.src_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
				 sizeof(spec->hdr.src_addr));
		rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
			   sizeof(efx_spec->efs_rem_host));
	} else if (!sfc_flow_is_zero(mask->hdr.src_addr,
				     sizeof(mask->hdr.src_addr))) {
		goto fail_bad_mask;
	}

	if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
		   sizeof(mask->hdr.dst_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
				 sizeof(spec->hdr.dst_addr));
		rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
			   sizeof(efx_spec->efs_loc_host));
	} else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
				     sizeof(mask->hdr.dst_addr))) {
		goto fail_bad_mask;
	}

	if (mask->hdr.proto == supp_mask.hdr.proto) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.proto;
	} else if (mask->hdr.proto != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV6 pattern item");
	return -rte_errno;
}

/**
 * Convert a TCP item to an EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination port fields
 *   are supported. If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context; the EFX filter specification inside it is updated.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_tcp(const struct rte_flow_item *item,
		   struct sfc_flow_parse_ctx *parse_ctx,
		   struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_tcp *spec = NULL;
	const struct rte_flow_item_tcp *mask = NULL;
	const struct rte_flow_item_tcp supp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_tcp_mask,
				 sizeof(struct rte_flow_item_tcp),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by TCP source and destination ports requires
	 * the appropriate IP_PROTO in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"IP proto in pattern with TCP item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * Source and destination ports are in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
	} else if (mask->hdr.src_port != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
	} else if (mask->hdr.dst_port != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the TCP pattern item");
	return -rte_errno;
}
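
/*
 * Example (illustrative): a TCP item that matches destination port 80
 * carries the port in big-endian order, and the parser byte-swaps it
 * into the little-endian efs_loc_port field:
 *
 *	const struct rte_flow_item_tcp tcp_spec = {
 *		.hdr = { .dst_port = RTE_BE16(80) },
 *	};
 *	const struct rte_flow_item_tcp tcp_mask = {
 *		.hdr = { .dst_port = RTE_BE16(0xffff) },
 *	};
 *
 * The source port mask stays zero, so only the LOC_PORT match is requested.
 */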

/**
 * Convert a UDP item to an EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination port fields
 *   are supported. If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context; the EFX filter specification inside it is updated.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_udp(const struct rte_flow_item *item,
		   struct sfc_flow_parse_ctx *parse_ctx,
		   struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_udp *spec = NULL;
	const struct rte_flow_item_udp *mask = NULL;
	const struct rte_flow_item_udp supp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_udp_mask,
				 sizeof(struct rte_flow_item_udp),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by UDP source and destination ports requires
	 * the appropriate IP_PROTO in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"IP proto in pattern with UDP item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * Source and destination ports are in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
	} else if (mask->hdr.src_port != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
	} else if (mask->hdr.dst_port != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the UDP pattern item");
	return -rte_errno;
}

/*
 * Filters for encapsulated packets match based on the EtherType and IP
 * protocol in the outer frame.
 */
static int
sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
					efx_filter_spec_t *efx_spec,
					uint8_t ip_proto,
					struct rte_flow_error *error)
{
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = ip_proto;
	} else if (efx_spec->efs_ip_proto != ip_proto) {
		switch (ip_proto) {
		case EFX_IPPROTO_UDP:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Outer IP header protocol must be UDP "
				"in VxLAN/GENEVE pattern");
			return -rte_errno;

		case EFX_IPPROTO_GRE:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Outer IP header protocol must be GRE "
				"in NVGRE pattern");
			return -rte_errno;

		default:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Only VxLAN/GENEVE/NVGRE tunneling patterns "
				"are supported");
			return -rte_errno;
		}
	}

	if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
	    efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
	    efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Outer frame EtherType in pattern with tunneling "
			"must be IPv4 or IPv6");
		return -rte_errno;
	}

	return 0;
}

static int
sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
				  const uint8_t *vni_or_vsid_val,
				  const uint8_t *vni_or_vsid_mask,
				  const struct rte_flow_item *item,
				  struct rte_flow_error *error)
{
	const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
		0xff, 0xff, 0xff
	};

	if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
		   EFX_VNI_OR_VSID_LEN) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
		rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
			   EFX_VNI_OR_VSID_LEN);
	} else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Unsupported VNI/VSID mask");
		return -rte_errno;
	}

	return 0;
}

/**
 * Convert a VXLAN item to an EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the VXLAN network identifier field is
 *   supported. If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context; the EFX filter specification inside it is updated.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vxlan(const struct rte_flow_item *item,
		     struct sfc_flow_parse_ctx *parse_ctx,
		     struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_vxlan *spec = NULL;
	const struct rte_flow_item_vxlan *mask = NULL;
	const struct rte_flow_item_vxlan supp_mask = {
		.vni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_vxlan_mask,
				 sizeof(struct rte_flow_item_vxlan),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_UDP, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
					       mask->vni, item, error);

	return rc;
}

/**
 * Convert a GENEVE item to an EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the Virtual Network Identifier and protocol
 *   type fields are supported; the protocol type may only be Ethernet
 *   (0x6558). If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context; the EFX filter specification inside it is updated.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_geneve(const struct rte_flow_item *item,
		      struct sfc_flow_parse_ctx *parse_ctx,
		      struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_geneve *spec = NULL;
	const struct rte_flow_item_geneve *mask = NULL;
	const struct rte_flow_item_geneve supp_mask = {
		.protocol = RTE_BE16(0xffff),
		.vni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_geneve_mask,
				 sizeof(struct rte_flow_item_geneve),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_UDP, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	if (mask->protocol == supp_mask.protocol) {
		if (spec->protocol != rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"GENEVE encap. protocol must be Ethernet "
				"(0x6558) in the GENEVE pattern item");
			return -rte_errno;
		}
	} else if (mask->protocol != 0) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Unsupported mask for GENEVE encap. protocol");
		return -rte_errno;
	}

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
					       mask->vni, item, error);

	return rc;
}

/**
 * Convert an NVGRE item to an EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the virtual subnet ID field is supported.
 *   If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context; the EFX filter specification inside it is updated.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_nvgre(const struct rte_flow_item *item,
		     struct sfc_flow_parse_ctx *parse_ctx,
		     struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_nvgre *spec = NULL;
	const struct rte_flow_item_nvgre *mask = NULL;
	const struct rte_flow_item_nvgre supp_mask = {
		.tni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_nvgre_mask,
				 sizeof(struct rte_flow_item_nvgre),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_GRE, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
					       mask->tni, item, error);

	return rc;
}

/**
 * Convert a PPPoEx item to an EFX filter specification.
 *
 * @param[in] item
 *   Item specification.
 *   Matching on PPPoEx fields is not supported.
 *   This item can only be used to set or validate the EtherType filter.
 *   Only zero masks are allowed.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context; the EFX filter specification inside it is updated.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_pppoex(const struct rte_flow_item *item,
		      struct sfc_flow_parse_ctx *parse_ctx,
		      struct rte_flow_error *error)
{
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_pppoe *spec = NULL;
	const struct rte_flow_item_pppoe *mask = NULL;
	const struct rte_flow_item_pppoe supp_mask = {};
	const struct rte_flow_item_pppoe def_mask = {};
	uint16_t ether_type;
	int rc;

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &def_mask,
				 sizeof(struct rte_flow_item_pppoe),
				 error);
	if (rc != 0)
		return rc;

	if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED)
		ether_type = RTE_ETHER_TYPE_PPPOE_DISCOVERY;
	else
		ether_type = RTE_ETHER_TYPE_PPPOE_SESSION;

	if ((efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) != 0) {
		if (efx_spec->efs_ether_type != ether_type) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Invalid EtherType for a PPPoE flow item");
			return -rte_errno;
		}
	} else {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type;
	}

	return 0;
}

static const struct sfc_flow_item sfc_flow_items[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_VOID,
		.name = "VOID",
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_void,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.name = "ETH",
		.prev_layer = SFC_FLOW_ITEM_START_LAYER,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_eth,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.name = "VLAN",
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_vlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_PPPOED,
		.name = "PPPOED",
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_pppoex,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_PPPOES,
		.name = "PPPOES",
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_pppoex,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.name = "IPV4",
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_ipv4,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.name = "IPV6",
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_ipv6,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.name = "TCP",
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_tcp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.name = "UDP",
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_udp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
		.name = "VXLAN",
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_vxlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_GENEVE,
		.name = "GENEVE",
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_geneve,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_NVGRE,
		.name = "NVGRE",
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_nvgre,
	},
};

/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(struct sfc_adapter *sa,
		    const struct rte_flow_attr *attr,
		    struct rte_flow *flow,
		    struct rte_flow_error *error)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
	struct sfc_mae *mae = &sa->mae;

	if (attr == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "NULL attribute");
		return -rte_errno;
	}
	if (attr->group != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
				   "Groups are not supported");
		return -rte_errno;
	}
	if (attr->egress != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
				   "Egress is not supported");
		return -rte_errno;
	}
	if (attr->ingress == 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
				   "Ingress is compulsory");
		return -rte_errno;
	}
	if (attr->transfer == 0) {
		if (attr->priority != 0) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					   attr, "Priorities are unsupported");
			return -rte_errno;
		}
		spec->type = SFC_FLOW_SPEC_FILTER;
		spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX;
		spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
		spec_filter->template.efs_priority = EFX_FILTER_PRI_MANUAL;
	} else {
		if (mae->status != SFC_MAE_STATUS_SUPPORTED) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					   attr, "Transfer is not supported");
			return -rte_errno;
		}
		if (attr->priority > mae->nb_action_rule_prios_max) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					   attr, "Unsupported priority level");
			return -rte_errno;
		}
		spec->type = SFC_FLOW_SPEC_MAE;
		spec_mae->priority = attr->priority;
		spec_mae->match_spec = NULL;
		spec_mae->action_set = NULL;
		spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
	}

	return 0;
}
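
/*
 * Example (illustrative): the attributes select the backend. A plain
 * receive rule uses the VNIC filter path and must not set a priority:
 *
 *	const struct rte_flow_attr attr = {
 *		.ingress = 1,
 *	};
 *
 * With .transfer = 1 (and the MAE supported by the adapter), the rule is
 * handled by the MAE path, where .priority may be any value up to
 * mae->nb_action_rule_prios_max.
 */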

/* Get item from array sfc_flow_items */
static const struct sfc_flow_item *
sfc_flow_get_item(const struct sfc_flow_item *items,
		  unsigned int nb_items,
		  enum rte_flow_item_type type)
{
	unsigned int i;

	for (i = 0; i < nb_items; i++)
		if (items[i].type == type)
			return &items[i];

	return NULL;
}

int
sfc_flow_parse_pattern(struct sfc_adapter *sa,
		       const struct sfc_flow_item *flow_items,
		       unsigned int nb_flow_items,
		       const struct rte_flow_item pattern[],
		       struct sfc_flow_parse_ctx *parse_ctx,
		       struct rte_flow_error *error)
{
	int rc;
	unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
	boolean_t is_ifrm = B_FALSE;
	const struct sfc_flow_item *item;

	if (pattern == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
				   "NULL pattern");
		return -rte_errno;
	}

	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
		item = sfc_flow_get_item(flow_items, nb_flow_items,
					 pattern->type);
		if (item == NULL) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unsupported pattern item");
			return -rte_errno;
		}

		/*
		 * Omitting one or several protocol layers at the beginning
		 * of the pattern is supported
		 */
		if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    item->prev_layer != prev_layer) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unexpected sequence of pattern items");
			return -rte_errno;
		}

		/*
		 * Allow only VOID and ETH pattern items in the inner frame.
		 * Also check that there is only one tunneling protocol.
		 */
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
		case RTE_FLOW_ITEM_TYPE_ETH:
			break;

		case RTE_FLOW_ITEM_TYPE_VXLAN:
		case RTE_FLOW_ITEM_TYPE_GENEVE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			if (is_ifrm) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					pattern,
					"More than one tunneling protocol");
				return -rte_errno;
			}
			is_ifrm = B_TRUE;
			break;

		default:
			if (parse_ctx->type == SFC_FLOW_PARSE_CTX_FILTER &&
			    is_ifrm) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					pattern,
					"There is an unsupported pattern item "
					"in the inner frame");
				return -rte_errno;
			}
			break;
		}

		if (parse_ctx->type != item->ctx_type) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					"Parse context type mismatch");
			return -rte_errno;
		}

		rc = item->parse(pattern, parse_ctx, error);
		if (rc != 0) {
			sfc_err(sa, "failed to parse item %s: %s",
				item->name, strerror(-rc));
			return rc;
		}

		if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
			prev_layer = item->layer;
	}

	return 0;
}
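
/*
 * Example (illustrative): item ordering is enforced via prev_layer/layer.
 * The pattern
 *
 *	ETH / IPV4 / UDP / VXLAN / ETH / END
 *
 * is accepted: the outer items advance L2 -> L3 -> L4, VXLAN restarts
 * layering at the inner frame, and only VOID/ETH items may follow it for
 * the filter parse context. Leading layers may be omitted, so a pattern
 * such as "UDP / END" is accepted as well.
 */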

static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
		     const struct rte_flow_action_queue *queue,
		     struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	struct sfc_rxq *rxq;
	struct sfc_rxq_info *rxq_info;

	if (queue->index >= sfc_sa2shared(sa)->ethdev_rxq_count)
		return -EINVAL;

	rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, queue->index);
	spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index;

	rxq_info = &sfc_sa2shared(sa)->rxq_info[queue->index];
	spec_filter->rss_hash_required = !!(rxq_info->rxq_flags &
					    SFC_RXQ_FLAG_RSS_HASH);

	return 0;
}
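
/*
 * Example (illustrative): a QUEUE action is translated into the hardware
 * DMA queue identifier behind the given ethdev Rx queue:
 *
 *	const struct rte_flow_action_queue queue = { .index = 3 };
 *
 * Parsing fails with -EINVAL if the index is beyond ethdev_rxq_count.
 */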

static int
sfc_flow_parse_rss(struct sfc_adapter *sa,
		   const struct rte_flow_action_rss *action_rss,
		   struct rte_flow *flow)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_rss *rss = &sas->rss;
	sfc_ethdev_qid_t ethdev_qid;
	struct sfc_rxq *rxq;
	unsigned int rxq_hw_index_min;
	unsigned int rxq_hw_index_max;
	efx_rx_hash_type_t efx_hash_types;
	const uint8_t *rss_key;
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	struct sfc_flow_rss *sfc_rss_conf = &spec_filter->rss_conf;
	unsigned int i;

	if (action_rss->queue_num == 0)
		return -EINVAL;

	ethdev_qid = sfc_sa2shared(sa)->ethdev_rxq_count - 1;
	rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);
	rxq_hw_index_min = rxq->hw_index;
	rxq_hw_index_max = 0;

	for (i = 0; i < action_rss->queue_num; ++i) {
		ethdev_qid = action_rss->queue[i];

		if ((unsigned int)ethdev_qid >=
		    sfc_sa2shared(sa)->ethdev_rxq_count)
			return -EINVAL;

		rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);

		if (rxq->hw_index < rxq_hw_index_min)
			rxq_hw_index_min = rxq->hw_index;

		if (rxq->hw_index > rxq_hw_index_max)
			rxq_hw_index_max = rxq->hw_index;
	}

	switch (action_rss->func) {
	case RTE_ETH_HASH_FUNCTION_DEFAULT:
	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
		break;
	default:
		return -EINVAL;
	}

	if (action_rss->level)
		return -EINVAL;

	/*
	 * A dummy RSS action with only one queue and no specific settings
	 * for hash types and key does not require a dedicated RSS context
	 * and may be simplified to a single-queue action.
	 */
	if (action_rss->queue_num == 1 && action_rss->types == 0 &&
	    action_rss->key_len == 0) {
		spec_filter->template.efs_dmaq_id = rxq_hw_index_min;
		return 0;
	}

	if (action_rss->types) {
		int rc;

		rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types,
					  &efx_hash_types);
		if (rc != 0)
			return -rc;
	} else {
		unsigned int i;

		efx_hash_types = 0;
		for (i = 0; i < rss->hf_map_nb_entries; ++i)
			efx_hash_types |= rss->hf_map[i].efx;
	}

	if (action_rss->key_len) {
		if (action_rss->key_len != sizeof(rss->key))
			return -EINVAL;

		rss_key = action_rss->key;
	} else {
		rss_key = rss->key;
	}

	spec_filter->rss = B_TRUE;

	sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
	sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
	sfc_rss_conf->rss_hash_types = efx_hash_types;
	rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key));

	for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
		unsigned int nb_queues = action_rss->queue_num;
		struct sfc_rxq *rxq;

		ethdev_qid = action_rss->queue[i % nb_queues];
		rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);
		sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
	}

	return 0;
}
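
/*
 * Example (illustrative): an RSS action with a single queue and neither
 * hash types nor a key degenerates into a plain queue redirection and
 * needs no dedicated RSS context:
 *
 *	const uint16_t queues[] = { 0 };
 *	const struct rte_flow_action_rss action_rss = {
 *		.queue_num = RTE_DIM(queues),
 *		.queue = queues,
 *	};
 *
 * Any richer configuration must use default or Toeplitz hashing, level 0,
 * and, if a key is given, exactly the adapter's RSS key size.
 */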

static int
sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
		    unsigned int filters_count)
{
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < filters_count; i++) {
		int rc;

		rc = efx_filter_remove(sa->nic, &spec_filter->filters[i]);
		if (ret == 0 && rc != 0) {
			sfc_err(sa, "failed to remove filter specification "
				"(rc = %d)", rc);
			ret = rc;
		}
	}

	return ret;
}

static int
sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	unsigned int i;
	int rc = 0;

	for (i = 0; i < spec_filter->count; i++) {
		rc = efx_filter_insert(sa->nic, &spec_filter->filters[i]);
		if (rc != 0) {
			sfc_flow_spec_flush(sa, spec, i);
			break;
		}
	}

	return rc;
}

static int
sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;

	return sfc_flow_spec_flush(sa, spec, spec_filter->count);
}

static int
sfc_flow_filter_insert(struct sfc_adapter *sa,
		       struct rte_flow *flow)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_rss *rss = &sas->rss;
	struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
	struct sfc_flow_rss *flow_rss = &spec_filter->rss_conf;
	uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
	boolean_t create_context;
	unsigned int i;
	int rc = 0;

	create_context = spec_filter->rss || (spec_filter->rss_hash_required &&
			rss->dummy_rss_context == EFX_RSS_CONTEXT_DEFAULT);

	if (create_context) {
		unsigned int rss_spread;
		unsigned int rss_hash_types;
		uint8_t *rss_key;

		if (spec_filter->rss) {
			rss_spread = MIN(flow_rss->rxq_hw_index_max -
					flow_rss->rxq_hw_index_min + 1,
					EFX_MAXRSS);
			rss_hash_types = flow_rss->rss_hash_types;
			rss_key = flow_rss->rss_key;
		} else {
			/*
			 * Initialize dummy RSS context parameters to have
			 * a valid RSS hash. Use the default RSS hash
			 * function and key.
			 */
			rss_spread = 1;
			rss_hash_types = rss->hash_types;
			rss_key = rss->key;
		}

		rc = efx_rx_scale_context_alloc(sa->nic,
						EFX_RX_SCALE_EXCLUSIVE,
						rss_spread,
						&efs_rss_context);
		if (rc != 0)
			goto fail_scale_context_alloc;

		rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
					   rss->hash_alg,
					   rss_hash_types, B_TRUE);
		if (rc != 0)
			goto fail_scale_mode_set;

		rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
					  rss_key, sizeof(rss->key));
		if (rc != 0)
			goto fail_scale_key_set;
	} else {
		efs_rss_context = rss->dummy_rss_context;
	}

	if (spec_filter->rss || spec_filter->rss_hash_required) {
		/*
		 * At this point, fully elaborated filter specifications
		 * have been produced from the template. To make sure that
		 * RSS behaviour is consistent between them, set the same
		 * RSS context value everywhere.
		 */
		for (i = 0; i < spec_filter->count; i++) {
			efx_filter_spec_t *spec = &spec_filter->filters[i];

			spec->efs_rss_context = efs_rss_context;
			spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
			if (spec_filter->rss)
				spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
		}
	}

	rc = sfc_flow_spec_insert(sa, &flow->spec);
	if (rc != 0)
		goto fail_filter_insert;

	if (create_context) {
		unsigned int dummy_tbl[RTE_DIM(flow_rss->rss_tbl)] = {0};
		unsigned int *tbl;

		tbl = spec_filter->rss ? flow_rss->rss_tbl : dummy_tbl;

		/*
		 * The scale table is set after filter insertion because
		 * the table entries are relative to the base RxQ ID
		 * and the latter is submitted to the HW by means of
		 * inserting a filter, so by the time of the request
		 * the HW knows all the information needed to verify
		 * the table entries, and the operation will succeed.
		 */
		rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
					  tbl, RTE_DIM(flow_rss->rss_tbl));
		if (rc != 0)
			goto fail_scale_tbl_set;

		/* Remember the created dummy RSS context */
		if (!spec_filter->rss)
			rss->dummy_rss_context = efs_rss_context;
	}

	return 0;

fail_scale_tbl_set:
	sfc_flow_spec_remove(sa, &flow->spec);

fail_filter_insert:
fail_scale_key_set:
fail_scale_mode_set:
	if (create_context)
		efx_rx_scale_context_free(sa->nic, efs_rss_context);

fail_scale_context_alloc:
	return rc;
}

static int
sfc_flow_filter_remove(struct sfc_adapter *sa,
		       struct rte_flow *flow)
{
	struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
	int rc = 0;

	rc = sfc_flow_spec_remove(sa, &flow->spec);
	if (rc != 0)
		return rc;

	if (spec_filter->rss) {
		/*
		 * All specifications for a given flow rule have the same RSS
		 * context, so the RSS context value is taken from the first
		 * filter specification
		 */
		efx_filter_spec_t *spec = &spec_filter->filters[0];

		rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
	}

	return rc;
}

static int
sfc_flow_parse_mark(struct sfc_adapter *sa,
		    const struct rte_flow_action_mark *mark,
		    struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	uint32_t mark_max;

	mark_max = encp->enc_filter_action_mark_max;
	if (sfc_flow_tunnel_is_active(sa))
		mark_max = RTE_MIN(mark_max, SFC_FT_USER_MARK_MASK);

	if (mark == NULL || mark->id > mark_max)
		return EINVAL;

	spec_filter->template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
	spec_filter->template.efs_mark = mark->id;

	return 0;
}
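
/*
 * Example (illustrative): a MARK action with an identifier within the
 * adapter limit; the value is delivered to the application together with
 * matching received packets:
 *
 *	const struct rte_flow_action_mark mark = { .id = 42 };
 *
 * Identifiers above enc_filter_action_mark_max (further capped while
 * tunnel offload is active) are rejected with EINVAL.
 */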

static int
sfc_flow_parse_actions(struct sfc_adapter *sa,
		       const struct rte_flow_action actions[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	const unsigned int dp_rx_features = sa->priv.dp_rx->features;
	const uint64_t rx_metadata = sa->negotiated_rx_metadata;
	uint32_t actions_set = 0;
	const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
					   (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
					   (1UL << RTE_FLOW_ACTION_TYPE_DROP);
	const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) |
					   (1UL << RTE_FLOW_ACTION_TYPE_FLAG);

	if (actions == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				   "NULL actions");
		return -rte_errno;
	}

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID,
					       actions_set);
			break;

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE,
					       actions_set);
			if ((actions_set & fate_actions_mask) != 0)
				goto fail_fate_actions;

			rc = sfc_flow_parse_queue(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad QUEUE action");
				return -rte_errno;
			}
			break;

		case RTE_FLOW_ACTION_TYPE_RSS:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS,
					       actions_set);
			if ((actions_set & fate_actions_mask) != 0)
				goto fail_fate_actions;

			rc = sfc_flow_parse_rss(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, -rc,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad RSS action");
				return -rte_errno;
			}
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
					       actions_set);
			if ((actions_set & fate_actions_mask) != 0)
				goto fail_fate_actions;

			spec_filter->template.efs_dmaq_id =
				EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
			break;

		case RTE_FLOW_ACTION_TYPE_FLAG:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
					       actions_set);
			if ((actions_set & mark_actions_mask) != 0)
				goto fail_actions_overlap;

			if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) {
				rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"FLAG action is not supported on the current Rx datapath");
				return -rte_errno;
			} else if ((rx_metadata &
				    RTE_ETH_RX_METADATA_USER_FLAG) == 0) {
				rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"flag delivery has not been negotiated");
				return -rte_errno;
			}

			spec_filter->template.efs_flags |=
				EFX_FILTER_FLAG_ACTION_FLAG;
			break;

		case RTE_FLOW_ACTION_TYPE_MARK:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
					       actions_set);
			if ((actions_set & mark_actions_mask) != 0)
				goto fail_actions_overlap;

			if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) {
				rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"MARK action is not supported on the current Rx datapath");
				return -rte_errno;
			} else if ((rx_metadata &
				    RTE_ETH_RX_METADATA_USER_MARK) == 0) {
				rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"mark delivery has not been negotiated");
				return -rte_errno;
			}

			rc = sfc_flow_parse_mark(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, rc,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad MARK action");
1878 				return -rte_errno;
1879 			}
1880 			break;
1881 
1882 		default:
1883 			rte_flow_error_set(error, ENOTSUP,
1884 					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
1885 					   "Action is not supported");
1886 			return -rte_errno;
1887 		}
1888 
1889 		actions_set |= (1UL << actions->type);
1890 	}
1891 
1892 	/* When fate is unknown, drop traffic. */
1893 	if ((actions_set & fate_actions_mask) == 0) {
1894 		spec_filter->template.efs_dmaq_id =
1895 			EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1896 	}
1897 
1898 	return 0;
1899 
1900 fail_fate_actions:
1901 	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1902 			   "Cannot combine several fate-deciding actions, "
1903 			   "choose between QUEUE, RSS or DROP");
1904 	return -rte_errno;
1905 
1906 fail_actions_overlap:
1907 	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1908 			   "Overlapping actions are not supported");
1909 	return -rte_errno;
1910 }
1911 
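/*
 * Example (an illustrative sketch only; assumes Rx queue 0 exists and
 * the mark ID is within enc_filter_action_mark_max): a rule that marks
 * matching packets with ID 42 and steers them to an Rx queue could
 * carry the following action list, which sfc_flow_parse_actions()
 * accepts because MARK and QUEUE belong to different action classes:
 *
 *	struct rte_flow_action_mark mark_conf = { .id = 42 };
 *	struct rte_flow_action_queue queue_conf = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 * Combining two fate-deciding actions (e.g. QUEUE and DROP) or both
 * mark-class actions (MARK and FLAG) in one list is rejected above.
 */
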
1912 /**
1913  * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST match flag in the first
1914  * half of the copied specifications and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
1915  * in the second half.
1916  *
1917  * @param spec[in, out]
1918  *   SFC flow specification to update.
1919  * @param filters_count_for_one_val[in]
1920  *   How many specifications should share the same match flag, i.e.
1921  *   the number of specifications before copying.
1922  * @param error[out]
1923  *   Perform verbose error reporting if not NULL.
1924  */
1925 static int
1926 sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
1927 			       unsigned int filters_count_for_one_val,
1928 			       struct rte_flow_error *error)
1929 {
1930 	unsigned int i;
1931 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1932 	static const efx_filter_match_flags_t vals[] = {
1933 		EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
1934 		EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
1935 	};
1936 
1937 	if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1938 		rte_flow_error_set(error, EINVAL,
1939 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1940 			"Number of specifications is incorrect while copying "
1941 			"by unknown destination flags");
1942 		return -rte_errno;
1943 	}
1944 
1945 	for (i = 0; i < spec_filter->count; i++) {
1946 		/* The check above ensures that divisor can't be zero here */
1947 		spec_filter->filters[i].efs_match_flags |=
1948 			vals[i / filters_count_for_one_val];
1949 	}
1950 
1951 	return 0;
1952 }
1953 
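/*
 * Worked example (illustrative only): with two specifications before
 * copying (filters_count_for_one_val == 2), the four resulting filters
 * are flagged as follows:
 *
 *	filters[0], filters[1]: EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
 *	filters[2], filters[3]: EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
 *
 * because vals[i / filters_count_for_one_val] selects vals[0] for the
 * first half and vals[1] for the second half.
 */
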
1954 /**
1955  * Check that the list of supported filters has a counterpart filter
1956  * with the EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag in place of
1957  * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since that filter will also
1958  * be inserted.
1960  *
1961  * @param match[in]
1962  *   The match flags of filter.
1963  * @param spec[in]
1964  *   Specification to be supplemented.
1965  * @param filter[in]
1966  *   SFC filter with list of supported filters.
1967  */
1968 static boolean_t
1969 sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
1970 				 __rte_unused efx_filter_spec_t *spec,
1971 				 struct sfc_filter *filter)
1972 {
1973 	unsigned int i;
1974 	efx_filter_match_flags_t match_mcast_dst;
1975 
1976 	match_mcast_dst =
1977 		(match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
1978 		EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
1979 	for (i = 0; i < filter->supported_match_num; i++) {
1980 		if (match_mcast_dst == filter->supported_match[i])
1981 			return B_TRUE;
1982 	}
1983 
1984 	return B_FALSE;
1985 }
1986 
1987 /**
1988  * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag in all specifications,
1989  * with the EFX_ETHER_TYPE_IPV4 value in the first half of the copied
1990  * specifications and EFX_ETHER_TYPE_IPV6 in the second half.
1991  *
1992  * @param spec[in, out]
1993  *   SFC flow specification to update.
1994  * @param filters_count_for_one_val[in]
1995  *   How many specifications should share the same EtherType value, i.e.
1996  *   the number of specifications before copying.
1997  * @param error[out]
1998  *   Perform verbose error reporting if not NULL.
1999  */
2000 static int
2001 sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
2002 			unsigned int filters_count_for_one_val,
2003 			struct rte_flow_error *error)
2004 {
2005 	unsigned int i;
2006 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2007 	static const uint16_t vals[] = {
2008 		EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
2009 	};
2010 
2011 	if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
2012 		rte_flow_error_set(error, EINVAL,
2013 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2014 			"Number of specifications is incorrect "
2015 			"while copying by Ethertype");
2016 		return -rte_errno;
2017 	}
2018 
2019 	for (i = 0; i < spec_filter->count; i++) {
2020 		spec_filter->filters[i].efs_match_flags |=
2021 			EFX_FILTER_MATCH_ETHER_TYPE;
2022 
2023 		/*
2024 		 * The check above ensures that
2025 		 * filters_count_for_one_val is not 0
2026 		 */
2027 		spec_filter->filters[i].efs_ether_type =
2028 			vals[i / filters_count_for_one_val];
2029 	}
2030 
2031 	return 0;
2032 }
2033 
2034 /**
2035  * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0
2036  * in the same specifications after copying.
2037  *
2038  * @param spec[in, out]
2039  *   SFC flow specification to update.
2040  * @param filters_count_for_one_val[in]
2041  *   How many specifications should share the same match flag, i.e.
2042  *   the number of specifications before copying.
2043  * @param error[out]
2044  *   Perform verbose error reporting if not NULL.
2045  */
2046 static int
2047 sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec,
2048 			    unsigned int filters_count_for_one_val,
2049 			    struct rte_flow_error *error)
2050 {
2051 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2052 	unsigned int i;
2053 
2054 	if (filters_count_for_one_val != spec_filter->count) {
2055 		rte_flow_error_set(error, EINVAL,
2056 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2057 			"Number of specifications is incorrect "
2058 			"while copying by outer VLAN ID");
2059 		return -rte_errno;
2060 	}
2061 
2062 	for (i = 0; i < spec_filter->count; i++) {
2063 		spec_filter->filters[i].efs_match_flags |=
2064 			EFX_FILTER_MATCH_OUTER_VID;
2065 
2066 		spec_filter->filters[i].efs_outer_vid = 0;
2067 	}
2068 
2069 	return 0;
2070 }
2071 
2072 /**
2073  * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST match flag in the
2074  * first half of the copied specifications and
2075  * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST in the second half.
2076  *
2077  * @param spec[in, out]
2078  *   SFC flow specification to update.
2079  * @param filters_count_for_one_val[in]
2080  *   How many specifications should share the same match flag, i.e.
2081  *   the number of specifications before copying.
2082  * @param error[out]
2083  *   Perform verbose error reporting if not NULL.
2084  */
2085 static int
2086 sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
2087 				    unsigned int filters_count_for_one_val,
2088 				    struct rte_flow_error *error)
2089 {
2090 	unsigned int i;
2091 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2092 	static const efx_filter_match_flags_t vals[] = {
2093 		EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
2094 		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
2095 	};
2096 
2097 	if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
2098 		rte_flow_error_set(error, EINVAL,
2099 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2100 			"Number of specifications is incorrect while copying "
2101 			"by inner frame unknown destination flags");
2102 		return -rte_errno;
2103 	}
2104 
2105 	for (i = 0; i < spec_filter->count; i++) {
2106 		/* The check above ensures that divisor can't be zero here */
2107 		spec_filter->filters[i].efs_match_flags |=
2108 			vals[i / filters_count_for_one_val];
2109 	}
2110 
2111 	return 0;
2112 }
2113 
2114 /**
2115  * Check that the following conditions are met:
2116  * - the specification corresponds to a filter for encapsulated traffic
2117  * - the list of supported filters has a filter
2118  *   with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
2119  *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
2120  *   be inserted.
2121  *
2122  * @param match[in]
2123  *   The match flags of filter.
2124  * @param spec[in]
2125  *   Specification to be supplemented.
2126  * @param filter[in]
2127  *   SFC filter with list of supported filters.
2128  */
2129 static boolean_t
2130 sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
2131 				      efx_filter_spec_t *spec,
2132 				      struct sfc_filter *filter)
2133 {
2134 	unsigned int i;
2135 	efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
2136 	efx_filter_match_flags_t match_mcast_dst;
2137 
2138 	if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
2139 		return B_FALSE;
2140 
2141 	match_mcast_dst =
2142 		(match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
2143 		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
2144 	for (i = 0; i < filter->supported_match_num; i++) {
2145 		if (match_mcast_dst == filter->supported_match[i])
2146 			return B_TRUE;
2147 	}
2148 
2149 	return B_FALSE;
2150 }
2151 
2152 /**
2153  * Check whether the list of supported filters lacks a filter that
2154  * differs from @p match only in having no EFX_FILTER_MATCH_OUTER_VID
2155  * flag; if such a filter exists, it will be used instead, so the
2156  * EFX_FILTER_MATCH_OUTER_VID flag is not needed.
2157  *
2158  * @param match[in]
2159  *   The match flags of filter.
2160  * @param spec[in]
2161  *   Specification to be supplemented.
2162  * @param filter[in]
2163  *   SFC filter with list of supported filters.
2164  */
2165 static boolean_t
2166 sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match,
2167 			      __rte_unused efx_filter_spec_t *spec,
2168 			      struct sfc_filter *filter)
2169 {
2170 	unsigned int i;
2171 	efx_filter_match_flags_t match_without_vid =
2172 		match & ~EFX_FILTER_MATCH_OUTER_VID;
2173 
2174 	for (i = 0; i < filter->supported_match_num; i++) {
2175 		if (match_without_vid == filter->supported_match[i])
2176 			return B_FALSE;
2177 	}
2178 
2179 	return B_TRUE;
2180 }
2181 
2182 /*
2183  * Match flags that can be automatically added to filters.
2184  * Selecting the last minimum when searching for the copy flag ensures
2185  * that EFX_FILTER_MATCH_UNKNOWN_UCAST_DST takes priority over
2186  * EFX_FILTER_MATCH_ETHER_TYPE, since filters with
2187  * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST are at the end of the list of
2188  * supported filters.
2189  */
2190 static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
2191 	{
2192 		.flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
2193 		.vals_count = 2,
2194 		.set_vals = sfc_flow_set_unknown_dst_flags,
2195 		.spec_check = sfc_flow_check_unknown_dst_flags,
2196 	},
2197 	{
2198 		.flag = EFX_FILTER_MATCH_ETHER_TYPE,
2199 		.vals_count = 2,
2200 		.set_vals = sfc_flow_set_ethertypes,
2201 		.spec_check = NULL,
2202 	},
2203 	{
2204 		.flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
2205 		.vals_count = 2,
2206 		.set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
2207 		.spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
2208 	},
2209 	{
2210 		.flag = EFX_FILTER_MATCH_OUTER_VID,
2211 		.vals_count = 1,
2212 		.set_vals = sfc_flow_set_outer_vid_flag,
2213 		.spec_check = sfc_flow_check_outer_vid_flag,
2214 	},
2215 };
2216 
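/*
 * Worked example (illustrative only): if a supported filter requires
 * both EFX_FILTER_MATCH_UNKNOWN_UCAST_DST and EFX_FILTER_MATCH_ETHER_TYPE
 * on top of the template's match flags, the expansion multiplier is
 * 2 * 2 == 4: a single specification is copied into four, covering
 * {unknown unicast, unknown multicast} x {IPv4, IPv6}.
 */
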
2217 /* Get an item from the sfc_flow_copy_flags array */
2218 static const struct sfc_flow_copy_flag *
2219 sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
2220 {
2221 	unsigned int i;
2222 
2223 	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2224 		if (sfc_flow_copy_flags[i].flag == flag)
2225 			return &sfc_flow_copy_flags[i];
2226 	}
2227 
2228 	return NULL;
2229 }
2230 
2231 /**
2232  * Make copies of the specifications, set match flag and values
2233  * of the field that corresponds to it.
2234  *
2235  * @param spec[in, out]
2236  *   SFC flow specification to update.
2237  * @param flag[in]
2238  *   The match flag to add.
2239  * @param error[out]
2240  *   Perform verbose error reporting if not NULL.
2241  */
2242 static int
2243 sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
2244 			     efx_filter_match_flags_t flag,
2245 			     struct rte_flow_error *error)
2246 {
2247 	unsigned int i;
2248 	unsigned int new_filters_count;
2249 	unsigned int filters_count_for_one_val;
2250 	const struct sfc_flow_copy_flag *copy_flag;
2251 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2252 	int rc;
2253 
2254 	copy_flag = sfc_flow_get_copy_flag(flag);
2255 	if (copy_flag == NULL) {
2256 		rte_flow_error_set(error, ENOTSUP,
2257 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2258 				   "Unsupported spec field for copying");
2259 		return -rte_errno;
2260 	}
2261 
2262 	new_filters_count = spec_filter->count * copy_flag->vals_count;
2263 	if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
2264 		rte_flow_error_set(error, EINVAL,
2265 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2266 			"Too many EFX specifications in the flow rule");
2267 		return -rte_errno;
2268 	}
2269 
2270 	/* Copy filters specifications */
2271 	for (i = spec_filter->count; i < new_filters_count; i++) {
2272 		spec_filter->filters[i] =
2273 			spec_filter->filters[i - spec_filter->count];
2274 	}
2275 
2276 	filters_count_for_one_val = spec_filter->count;
2277 	spec_filter->count = new_filters_count;
2278 
2279 	rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
2280 	if (rc != 0)
2281 		return rc;
2282 
2283 	return 0;
2284 }
2285 
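/*
 * Worked example (illustrative only): adding a flag with vals_count == 2
 * to a spec holding filters [A, B] first duplicates them into
 * [A, B, A, B]; the set_vals() callback then assigns the first value to
 * filters 0..1 and the second value to filters 2..3.
 */
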
2286 /**
2287  * Check that the given set of match flags missing in the original filter spec
2288  * could be covered by adding spec copies which specify the corresponding
2289  * flags and packet field values to match.
2290  *
2291  * @param miss_flags[in]
2292  *   Flags that the filter spec lacks compared to the supported filter.
2293  * @param spec[in]
2294  *   Specification to be supplemented.
2295  * @param filter[in]
2296  *   SFC filter.
2297  *
2298  * @return
2299  *   Number of specifications after copying, or 0 if the flags cannot be added.
2300  */
2301 static unsigned int
2302 sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
2303 			     efx_filter_spec_t *spec,
2304 			     struct sfc_filter *filter)
2305 {
2306 	unsigned int i;
2307 	efx_filter_match_flags_t copy_flags = 0;
2308 	efx_filter_match_flags_t flag;
2309 	efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
2310 	sfc_flow_spec_check *check;
2311 	unsigned int multiplier = 1;
2312 
2313 	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2314 		flag = sfc_flow_copy_flags[i].flag;
2315 		check = sfc_flow_copy_flags[i].spec_check;
2316 		if ((flag & miss_flags) == flag) {
2317 			if (check != NULL && (!check(match, spec, filter)))
2318 				continue;
2319 
2320 			copy_flags |= flag;
2321 			multiplier *= sfc_flow_copy_flags[i].vals_count;
2322 		}
2323 	}
2324 
2325 	if (copy_flags == miss_flags)
2326 		return multiplier;
2327 
2328 	return 0;
2329 }
2330 
2331 /**
2332  * Attempt to supplement the specification template with the minimal
2333  * supported set of match flags. To do this, the specifications are
2334  * copied and filled with the values of the fields that correspond
2335  * to the missing flags.
2336  * The necessary and sufficient filter set is built from the fewest
2337  * number of copies that cover the minimally required set of flags.
2339  *
2340  * @param sa[in]
2341  *   SFC adapter.
2342  * @param spec[in, out]
2343  *   SFC flow specification to update.
2344  * @param error[out]
2345  *   Perform verbose error reporting if not NULL.
2346  */
2347 static int
2348 sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
2349 			       struct sfc_flow_spec *spec,
2350 			       struct rte_flow_error *error)
2351 {
2352 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2353 	struct sfc_filter *filter = &sa->filter;
2354 	efx_filter_match_flags_t miss_flags;
2355 	efx_filter_match_flags_t min_miss_flags = 0;
2356 	efx_filter_match_flags_t match;
2357 	unsigned int min_multiplier = UINT_MAX;
2358 	unsigned int multiplier;
2359 	unsigned int i;
2360 	int rc;
2361 
2362 	match = spec_filter->template.efs_match_flags;
2363 	for (i = 0; i < filter->supported_match_num; i++) {
2364 		if ((match & filter->supported_match[i]) == match) {
2365 			miss_flags = filter->supported_match[i] & (~match);
2366 			multiplier = sfc_flow_check_missing_flags(miss_flags,
2367 				&spec_filter->template, filter);
2368 			if (multiplier > 0 && multiplier <= min_multiplier) {
2369 				min_multiplier = multiplier;
2370 				min_miss_flags = miss_flags;
2371 			}
2374 		}
2375 	}
2376 
2377 	if (min_multiplier == UINT_MAX) {
2378 		rte_flow_error_set(error, ENOTSUP,
2379 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2380 				   "The flow rule pattern is unsupported");
2381 		return -rte_errno;
2382 	}
2383 
2384 	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2385 		efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;
2386 
2387 		if ((flag & min_miss_flags) == flag) {
2388 			rc = sfc_flow_spec_add_match_flag(spec, flag, error);
2389 			if (rc != 0)
2390 				return rc;
2391 		}
2392 	}
2393 
2394 	return 0;
2395 }
2396 
2397 /**
2398  * Check that the set of match flags corresponds to the filter pattern,
2399  * optionally extended with OUTER_VID or with both OUTER_VID and
2400  * INNER_VID flags.
2401  *
2402  * @param match_flags[in]
2403  *   Set of match flags.
2404  * @param flags_pattern[in]
2405  *   Pattern of filter match flags.
2406  */
2407 static boolean_t
2408 sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
2409 			    efx_filter_match_flags_t flags_pattern)
2410 {
2411 	if ((match_flags & flags_pattern) != flags_pattern)
2412 		return B_FALSE;
2413 
2414 	switch (match_flags & ~flags_pattern) {
2415 	case 0:
2416 	case EFX_FILTER_MATCH_OUTER_VID:
2417 	case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
2418 		return B_TRUE;
2419 	default:
2420 		return B_FALSE;
2421 	}
2422 }
2423 
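/*
 * Example (illustrative only): with flags_pattern equal to
 * EFX_FILTER_MATCH_ETHER_TYPE, a match on EFX_FILTER_MATCH_ETHER_TYPE |
 * EFX_FILTER_MATCH_OUTER_VID is accepted, whereas one that also carries
 * EFX_FILTER_MATCH_LOC_MAC is not, because the remainder is neither
 * empty nor a VID-only combination.
 */
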
2424 /**
2425  * Check whether the spec maps to a hardware filter which is known to be
2426  * ineffective despite being valid.
2427  *
2428  * @param filter[in]
2429  *   SFC filter with list of supported filters.
2430  * @param spec[in]
2431  *   SFC flow specification.
2432  */
2433 static boolean_t
2434 sfc_flow_is_match_flags_exception(struct sfc_filter *filter,
2435 				  struct sfc_flow_spec *spec)
2436 {
2437 	unsigned int i;
2438 	uint16_t ether_type;
2439 	uint8_t ip_proto;
2440 	efx_filter_match_flags_t match_flags;
2441 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2442 
2443 	for (i = 0; i < spec_filter->count; i++) {
2444 		match_flags = spec_filter->filters[i].efs_match_flags;
2445 
2446 		if (sfc_flow_is_match_with_vids(match_flags,
2447 						EFX_FILTER_MATCH_ETHER_TYPE) ||
2448 		    sfc_flow_is_match_with_vids(match_flags,
2449 						EFX_FILTER_MATCH_ETHER_TYPE |
2450 						EFX_FILTER_MATCH_LOC_MAC)) {
2451 			ether_type = spec_filter->filters[i].efs_ether_type;
2452 			if (filter->supports_ip_proto_or_addr_filter &&
2453 			    (ether_type == EFX_ETHER_TYPE_IPV4 ||
2454 			     ether_type == EFX_ETHER_TYPE_IPV6))
2455 				return B_TRUE;
2456 		} else if (sfc_flow_is_match_with_vids(match_flags,
2457 				EFX_FILTER_MATCH_ETHER_TYPE |
2458 				EFX_FILTER_MATCH_IP_PROTO) ||
2459 			   sfc_flow_is_match_with_vids(match_flags,
2460 				EFX_FILTER_MATCH_ETHER_TYPE |
2461 				EFX_FILTER_MATCH_IP_PROTO |
2462 				EFX_FILTER_MATCH_LOC_MAC)) {
2463 			ip_proto = spec_filter->filters[i].efs_ip_proto;
2464 			if (filter->supports_rem_or_local_port_filter &&
2465 			    (ip_proto == EFX_IPPROTO_TCP ||
2466 			     ip_proto == EFX_IPPROTO_UDP))
2467 				return B_TRUE;
2468 		}
2469 	}
2470 
2471 	return B_FALSE;
2472 }
2473 
2474 static int
2475 sfc_flow_validate_match_flags(struct sfc_adapter *sa,
2476 			      struct rte_flow *flow,
2477 			      struct rte_flow_error *error)
2478 {
2479 	struct sfc_flow_spec *spec = &flow->spec;
2480 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2481 	efx_filter_spec_t *spec_tmpl = &spec_filter->template;
2482 	efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
2483 	int rc;
2484 
2485 	/* Initialize the first filter spec with template */
2486 	spec_filter->filters[0] = *spec_tmpl;
2487 	spec_filter->count = 1;
2488 
2489 	if (!sfc_filter_is_match_supported(sa, match_flags)) {
2490 		rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
2491 		if (rc != 0)
2492 			return rc;
2493 	}
2494 
2495 	if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) {
2496 		rte_flow_error_set(error, ENOTSUP,
2497 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2498 			"The flow rule pattern is unsupported");
2499 		return -rte_errno;
2500 	}
2501 
2502 	return 0;
2503 }
2504 
2505 static int
2506 sfc_flow_parse_rte_to_filter(struct rte_eth_dev *dev,
2507 			     const struct rte_flow_item pattern[],
2508 			     const struct rte_flow_action actions[],
2509 			     struct rte_flow *flow,
2510 			     struct rte_flow_error *error)
2511 {
2512 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2513 	struct sfc_flow_spec *spec = &flow->spec;
2514 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2515 	struct sfc_flow_parse_ctx ctx;
2516 	int rc;
2517 
2518 	ctx.type = SFC_FLOW_PARSE_CTX_FILTER;
2519 	ctx.filter = &spec_filter->template;
2520 
2521 	rc = sfc_flow_parse_pattern(sa, sfc_flow_items, RTE_DIM(sfc_flow_items),
2522 				    pattern, &ctx, error);
2523 	if (rc != 0)
2524 		goto fail_bad_value;
2525 
2526 	rc = sfc_flow_parse_actions(sa, actions, flow, error);
2527 	if (rc != 0)
2528 		goto fail_bad_value;
2529 
2530 	rc = sfc_flow_validate_match_flags(sa, flow, error);
2531 	if (rc != 0)
2532 		goto fail_bad_value;
2533 
2534 	return 0;
2535 
2536 fail_bad_value:
2537 	return rc;
2538 }
2539 
2540 static int
2541 sfc_flow_parse_rte_to_mae(struct rte_eth_dev *dev,
2542 			  const struct rte_flow_item pattern[],
2543 			  const struct rte_flow_action actions[],
2544 			  struct rte_flow *flow,
2545 			  struct rte_flow_error *error)
2546 {
2547 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2548 	struct sfc_flow_spec *spec = &flow->spec;
2549 	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
2550 	int rc;
2551 
2552 	/*
2553 	 * If the flow is meant to be a JUMP rule in tunnel offload,
2554 	 * preparse its actions and save its properties in spec_mae.
2555 	 */
2556 	rc = sfc_flow_tunnel_detect_jump_rule(sa, actions, spec_mae, error);
2557 	if (rc != 0)
2558 		goto fail;
2559 
2560 	rc = sfc_mae_rule_parse_pattern(sa, pattern, spec_mae, error);
2561 	if (rc != 0)
2562 		goto fail;
2563 
2564 	if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
2565 		/*
2566 		 * By design, this flow should be represented solely by the
2567 		 * outer rule. But the HW/FW does not yet support setting the
2568 		 * Rx mark from RECIRC_ID on outer rule lookup. Neither does
2569 		 * it support outer rule counters. As a workaround, an
2570 		 * action rule of lower priority is used to do the job.
2571 		 *
2572 		 * So don't skip sfc_mae_rule_parse_actions() below.
2573 		 */
2574 	}
2575 
2576 	rc = sfc_mae_rule_parse_actions(sa, actions, spec_mae, error);
2577 	if (rc != 0)
2578 		goto fail;
2579 
2580 	if (spec_mae->ft != NULL) {
2581 		if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP)
2582 			spec_mae->ft->jump_rule_is_set = B_TRUE;
2583 
2584 		++(spec_mae->ft->refcnt);
2585 	}
2586 
2587 	return 0;
2588 
2589 fail:
2590 	/* Reset these values to avoid confusing sfc_mae_flow_cleanup(). */
2591 	spec_mae->ft_rule_type = SFC_FT_RULE_NONE;
2592 	spec_mae->ft = NULL;
2593 
2594 	return rc;
2595 }
2596 
2597 static int
2598 sfc_flow_parse(struct rte_eth_dev *dev,
2599 	       const struct rte_flow_attr *attr,
2600 	       const struct rte_flow_item pattern[],
2601 	       const struct rte_flow_action actions[],
2602 	       struct rte_flow *flow,
2603 	       struct rte_flow_error *error)
2604 {
2605 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2606 	const struct sfc_flow_ops_by_spec *ops;
2607 	int rc;
2608 
2609 	rc = sfc_flow_parse_attr(sa, attr, flow, error);
2610 	if (rc != 0)
2611 		return rc;
2612 
2613 	ops = sfc_flow_get_ops_by_spec(flow);
2614 	if (ops == NULL || ops->parse == NULL) {
2615 		rte_flow_error_set(error, ENOTSUP,
2616 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2617 				   "No backend to handle this flow");
2618 		return -rte_errno;
2619 	}
2620 
2621 	return ops->parse(dev, pattern, actions, flow, error);
2622 }
2623 
2624 static struct rte_flow *
2625 sfc_flow_zmalloc(struct rte_flow_error *error)
2626 {
2627 	struct rte_flow *flow;
2628 
2629 	flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
2630 	if (flow == NULL) {
2631 		rte_flow_error_set(error, ENOMEM,
2632 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2633 				   "Failed to allocate memory");
2634 	}
2635 
2636 	return flow;
2637 }
2638 
2639 static void
2640 sfc_flow_free(struct sfc_adapter *sa, struct rte_flow *flow)
2641 {
2642 	const struct sfc_flow_ops_by_spec *ops;
2643 
2644 	ops = sfc_flow_get_ops_by_spec(flow);
2645 	if (ops != NULL && ops->cleanup != NULL)
2646 		ops->cleanup(sa, flow);
2647 
2648 	rte_free(flow);
2649 }
2650 
2651 static int
2652 sfc_flow_insert(struct sfc_adapter *sa, struct rte_flow *flow,
2653 		struct rte_flow_error *error)
2654 {
2655 	const struct sfc_flow_ops_by_spec *ops;
2656 	int rc;
2657 
2658 	ops = sfc_flow_get_ops_by_spec(flow);
2659 	if (ops == NULL || ops->insert == NULL) {
2660 		rte_flow_error_set(error, ENOTSUP,
2661 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2662 				   "No backend to handle this flow");
2663 		return rte_errno;
2664 	}
2665 
2666 	rc = ops->insert(sa, flow);
2667 	if (rc != 0) {
2668 		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2669 				   NULL, "Failed to insert the flow rule");
2670 	}
2671 
2672 	return rc;
2673 }
2674 
2675 static int
2676 sfc_flow_remove(struct sfc_adapter *sa, struct rte_flow *flow,
2677 		struct rte_flow_error *error)
2678 {
2679 	const struct sfc_flow_ops_by_spec *ops;
2680 	int rc;
2681 
2682 	ops = sfc_flow_get_ops_by_spec(flow);
2683 	if (ops == NULL || ops->remove == NULL) {
2684 		rte_flow_error_set(error, ENOTSUP,
2685 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2686 				   "No backend to handle this flow");
2687 		return rte_errno;
2688 	}
2689 
2690 	rc = ops->remove(sa, flow);
2691 	if (rc != 0) {
2692 		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2693 				   NULL, "Failed to remove the flow rule");
2694 	}
2695 
2696 	return rc;
2697 }
2698 
2699 static int
2700 sfc_flow_verify(struct sfc_adapter *sa, struct rte_flow *flow,
2701 		struct rte_flow_error *error)
2702 {
2703 	const struct sfc_flow_ops_by_spec *ops;
2704 	int rc = 0;
2705 
2706 	ops = sfc_flow_get_ops_by_spec(flow);
2707 	if (ops == NULL) {
2708 		rte_flow_error_set(error, ENOTSUP,
2709 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2710 				   "No backend to handle this flow");
2711 		return -rte_errno;
2712 	}
2713 
2714 	if (ops->verify != NULL) {
2715 		SFC_ASSERT(sfc_adapter_is_locked(sa));
2716 		rc = ops->verify(sa, flow);
2717 	}
2718 
2719 	if (rc != 0) {
2720 		rte_flow_error_set(error, rc,
2721 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2722 			"Failed to verify flow validity with FW");
2723 		return -rte_errno;
2724 	}
2725 
2726 	return 0;
2727 }
2728 
2729 static int
2730 sfc_flow_validate(struct rte_eth_dev *dev,
2731 		  const struct rte_flow_attr *attr,
2732 		  const struct rte_flow_item pattern[],
2733 		  const struct rte_flow_action actions[],
2734 		  struct rte_flow_error *error)
2735 {
2736 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2737 	struct rte_flow *flow;
2738 	int rc;
2739 
2740 	flow = sfc_flow_zmalloc(error);
2741 	if (flow == NULL)
2742 		return -rte_errno;
2743 
2744 	sfc_adapter_lock(sa);
2745 
2746 	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2747 	if (rc == 0)
2748 		rc = sfc_flow_verify(sa, flow, error);
2749 
2750 	sfc_flow_free(sa, flow);
2751 
2752 	sfc_adapter_unlock(sa);
2753 
2754 	return rc;
2755 }
2756 
2757 static struct rte_flow *
2758 sfc_flow_create(struct rte_eth_dev *dev,
2759 		const struct rte_flow_attr *attr,
2760 		const struct rte_flow_item pattern[],
2761 		const struct rte_flow_action actions[],
2762 		struct rte_flow_error *error)
2763 {
2764 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2765 	struct rte_flow *flow = NULL;
2766 	int rc;
2767 
2768 	flow = sfc_flow_zmalloc(error);
2769 	if (flow == NULL)
2770 		goto fail_no_mem;
2771 
2772 	sfc_adapter_lock(sa);
2773 
2774 	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2775 	if (rc != 0)
2776 		goto fail_bad_value;
2777 
2778 	TAILQ_INSERT_TAIL(&sa->flow_list, flow, entries);
2779 
2780 	if (sa->state == SFC_ETHDEV_STARTED) {
2781 		rc = sfc_flow_insert(sa, flow, error);
2782 		if (rc != 0)
2783 			goto fail_flow_insert;
2784 	}
2785 
2786 	sfc_adapter_unlock(sa);
2787 
2788 	return flow;
2789 
2790 fail_flow_insert:
2791 	TAILQ_REMOVE(&sa->flow_list, flow, entries);
2792 
2793 fail_bad_value:
2794 	sfc_flow_free(sa, flow);
2795 	sfc_adapter_unlock(sa);
2796 
2797 fail_no_mem:
2798 	return NULL;
2799 }
2800 
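/*
 * Example (an illustrative sketch; error handling omitted, with attr,
 * pattern and actions prepared by the application): these callbacks are
 * reached through the generic rte_flow API, e.g.:
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *handle;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		handle = rte_flow_create(port_id, &attr, pattern, actions,
 *					 &err);
 *
 * If the adapter is not started yet, sfc_flow_create() only queues the
 * rule in sa->flow_list; sfc_flow_start() inserts it into the HW later.
 */
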
2801 static int
2802 sfc_flow_destroy(struct rte_eth_dev *dev,
2803 		 struct rte_flow *flow,
2804 		 struct rte_flow_error *error)
2805 {
2806 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2807 	struct rte_flow *flow_ptr;
2808 	int rc = EINVAL;
2809 
2810 	sfc_adapter_lock(sa);
2811 
2812 	TAILQ_FOREACH(flow_ptr, &sa->flow_list, entries) {
2813 		if (flow_ptr == flow)
2814 			rc = 0;
2815 	}
2816 	if (rc != 0) {
2817 		rte_flow_error_set(error, rc,
2818 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2819 				   "Failed to find flow rule to destroy");
2820 		goto fail_bad_value;
2821 	}
2822 
2823 	if (sa->state == SFC_ETHDEV_STARTED)
2824 		rc = sfc_flow_remove(sa, flow, error);
2825 
2826 	TAILQ_REMOVE(&sa->flow_list, flow, entries);
2827 	sfc_flow_free(sa, flow);
2828 
2829 fail_bad_value:
2830 	sfc_adapter_unlock(sa);
2831 
2832 	return -rc;
2833 }
2834 
2835 static int
2836 sfc_flow_flush(struct rte_eth_dev *dev,
2837 	       struct rte_flow_error *error)
2838 {
2839 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2840 	struct rte_flow *flow;
2841 	int ret = 0;
2842 
2843 	sfc_adapter_lock(sa);
2844 
2845 	while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
2846 		if (sa->state == SFC_ETHDEV_STARTED) {
2847 			int rc;
2848 
2849 			rc = sfc_flow_remove(sa, flow, error);
2850 			if (rc != 0)
2851 				ret = rc;
2852 		}
2853 
2854 		TAILQ_REMOVE(&sa->flow_list, flow, entries);
2855 		sfc_flow_free(sa, flow);
2856 	}
2857 
2858 	sfc_adapter_unlock(sa);
2859 
2860 	return -ret;
2861 }
2862 
2863 static int
2864 sfc_flow_query(struct rte_eth_dev *dev,
2865 	       struct rte_flow *flow,
2866 	       const struct rte_flow_action *action,
2867 	       void *data,
2868 	       struct rte_flow_error *error)
2869 {
2870 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2871 	const struct sfc_flow_ops_by_spec *ops;
2872 	int ret;
2873 
2874 	sfc_adapter_lock(sa);
2875 
2876 	ops = sfc_flow_get_ops_by_spec(flow);
2877 	if (ops == NULL || ops->query == NULL) {
2878 		ret = rte_flow_error_set(error, ENOTSUP,
2879 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2880 			"No backend to handle this flow");
2881 		goto fail_no_backend;
2882 	}
2883 
2884 	if (sa->state != SFC_ETHDEV_STARTED) {
2885 		ret = rte_flow_error_set(error, EINVAL,
2886 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2887 			"Can't query the flow: the adapter is not started");
2888 		goto fail_not_started;
2889 	}
2890 
2891 	ret = ops->query(dev, flow, action, data, error);
2892 	if (ret != 0)
2893 		goto fail_query;
2894 
2895 	sfc_adapter_unlock(sa);
2896 
2897 	return 0;
2898 
2899 fail_query:
2900 fail_not_started:
2901 fail_no_backend:
2902 	sfc_adapter_unlock(sa);
2903 	return ret;
2904 }
2905 
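/*
 * Example (an illustrative sketch; "flow_handle" stands for a rule
 * created earlier): querying a COUNT action of an MAE rule:
 *
 *	struct rte_flow_action count_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_query_count counters = { .reset = 0 };
 *	struct rte_flow_error err;
 *
 *	rte_flow_query(port_id, flow_handle, &count_action, &counters, &err);
 *
 * The request is dispatched to sfc_mae_flow_query() via ops->query and
 * is served only while the adapter is started.
 */
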
2906 static int
2907 sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
2908 		 struct rte_flow_error *error)
2909 {
2910 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2911 	int ret = 0;
2912 
2913 	sfc_adapter_lock(sa);
2914 	if (sa->state != SFC_ETHDEV_INITIALIZED) {
2915 		rte_flow_error_set(error, EBUSY,
2916 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2917 				   NULL, "please close the port first");
2918 		ret = -rte_errno;
2919 	} else {
2920 		sfc_sa2shared(sa)->isolated = (enable) ? B_TRUE : B_FALSE;
2921 	}
2922 	sfc_adapter_unlock(sa);
2923 
2924 	return ret;
2925 }
2926 
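/*
 * Example (illustrative only): isolated mode may only be toggled while
 * the port is closed, e.g. before the first rte_eth_dev_configure():
 *
 *	struct rte_flow_error err;
 *	int rc = rte_flow_isolate(port_id, 1, &err);
 *
 * The call fails with EBUSY if the port has already been configured
 * or started.
 */
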
2927 const struct rte_flow_ops sfc_flow_ops = {
2928 	.validate = sfc_flow_validate,
2929 	.create = sfc_flow_create,
2930 	.destroy = sfc_flow_destroy,
2931 	.flush = sfc_flow_flush,
2932 	.query = sfc_flow_query,
2933 	.isolate = sfc_flow_isolate,
2934 	.tunnel_decap_set = sfc_flow_tunnel_decap_set,
2935 	.tunnel_match = sfc_flow_tunnel_match,
2936 	.tunnel_action_decap_release = sfc_flow_tunnel_action_decap_release,
2937 	.tunnel_item_release = sfc_flow_tunnel_item_release,
2938 	.get_restore_info = sfc_flow_tunnel_get_restore_info,
2939 };
2940 
2941 void
2942 sfc_flow_init(struct sfc_adapter *sa)
2943 {
2944 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2945 
2946 	TAILQ_INIT(&sa->flow_list);
2947 }
2948 
2949 void
2950 sfc_flow_fini(struct sfc_adapter *sa)
2951 {
2952 	struct rte_flow *flow;
2953 
2954 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2955 
2956 	while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
2957 		TAILQ_REMOVE(&sa->flow_list, flow, entries);
2958 		sfc_flow_free(sa, flow);
2959 	}
2960 }
2961 
2962 void
2963 sfc_flow_stop(struct sfc_adapter *sa)
2964 {
2965 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
2966 	struct sfc_rss *rss = &sas->rss;
2967 	struct rte_flow *flow;
2968 
2969 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2970 
2971 	TAILQ_FOREACH(flow, &sa->flow_list, entries)
2972 		sfc_flow_remove(sa, flow, NULL);
2973 
2974 	if (rss->dummy_rss_context != EFX_RSS_CONTEXT_DEFAULT) {
2975 		efx_rx_scale_context_free(sa->nic, rss->dummy_rss_context);
2976 		rss->dummy_rss_context = EFX_RSS_CONTEXT_DEFAULT;
2977 	}
2978 
2979 	/*
2980 	 * MAE counter service is not stopped on flow rule remove to avoid
2981 	 * extra work. Make sure that it is stopped here.
2982 	 */
2983 	sfc_mae_counter_stop(sa);
2984 }
2985 
2986 int
2987 sfc_flow_start(struct sfc_adapter *sa)
2988 {
2989 	struct rte_flow *flow;
2990 	int rc = 0;
2991 
2992 	sfc_log_init(sa, "entry");
2993 
2994 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2995 
2996 	sfc_flow_tunnel_reset_hit_counters(sa);
2997 
2998 	TAILQ_FOREACH(flow, &sa->flow_list, entries) {
2999 		rc = sfc_flow_insert(sa, flow, NULL);
3000 		if (rc != 0)
3001 			goto fail_bad_flow;
3002 	}
3003 
3004 	sfc_log_init(sa, "done");
3005 
3006 fail_bad_flow:
3007 	return rc;
3008 }
3009