/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2017-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_byteorder.h>
#include <rte_tailq.h>
#include <rte_common.h>
#include <ethdev_driver.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"
#include "sfc_dp_rx.h"
#include "sfc_mae_counter.h"

struct sfc_flow_ops_by_spec {
	sfc_flow_parse_cb_t	*parse;
	sfc_flow_verify_cb_t	*verify;
	sfc_flow_cleanup_cb_t	*cleanup;
	sfc_flow_insert_cb_t	*insert;
	sfc_flow_remove_cb_t	*remove;
};

static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter;
static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_mae;
static sfc_flow_insert_cb_t sfc_flow_filter_insert;
static sfc_flow_remove_cb_t sfc_flow_filter_remove;

static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = {
	.parse = sfc_flow_parse_rte_to_filter,
	.verify = NULL,
	.cleanup = NULL,
	.insert = sfc_flow_filter_insert,
	.remove = sfc_flow_filter_remove,
};

static const struct sfc_flow_ops_by_spec sfc_flow_ops_mae = {
	.parse = sfc_flow_parse_rte_to_mae,
	.verify = sfc_mae_flow_verify,
	.cleanup = sfc_mae_flow_cleanup,
	.insert = sfc_mae_flow_insert,
	.remove = sfc_mae_flow_remove,
};

static const struct sfc_flow_ops_by_spec *
sfc_flow_get_ops_by_spec(struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	const struct sfc_flow_ops_by_spec *ops = NULL;

	switch (spec->type) {
	case SFC_FLOW_SPEC_FILTER:
		ops = &sfc_flow_ops_filter;
		break;
	case SFC_FLOW_SPEC_MAE:
		ops = &sfc_flow_ops_mae;
		break;
	default:
		SFC_ASSERT(false);
		break;
	}

	return ops;
}

/*
 * Currently, the filter-based (VNIC) flow API is implemented in such a
 * manner that each flow rule is converted to one or more hardware filters.
 * All elements of the flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that describes the hardware filter.
 * If a required field is left unset in the flow rule, several filter
 * copies are created to cover all possible values of that field.
 */
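
/*
 * Illustrative example (not part of the driver logic): a flow rule that
 * matches "ETH / IPv4 / UDP" without constraining the destination MAC
 * cannot be expressed by a single hardware filter if the filter variant
 * in use requires a destination match. The parse stage then expands one
 * efx_filter_spec_s template into copies, e.g. one carrying
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST and another carrying
 * EFX_FILTER_MATCH_UNKNOWN_MCAST_DST, so that all possible values of
 * the unset field are covered (see the set_vals/spec_check helpers
 * declared below).
 */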

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
static sfc_flow_item_parse sfc_flow_parse_vxlan;
static sfc_flow_item_parse sfc_flow_parse_geneve;
static sfc_flow_item_parse sfc_flow_parse_nvgre;
static sfc_flow_item_parse sfc_flow_parse_pppoex;

typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
				     unsigned int filters_count_for_one_val,
				     struct rte_flow_error *error);

typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
					efx_filter_spec_t *spec,
					struct sfc_filter *filter);

struct sfc_flow_copy_flag {
	/* EFX filter specification match flag */
	efx_filter_match_flags_t flag;
	/* Number of values of the corresponding field */
	unsigned int vals_count;
	/* Function to set values in specifications */
	sfc_flow_spec_set_vals *set_vals;
	/*
	 * Function to check that the specification is suitable
	 * for adding this match flag
	 */
	sfc_flow_spec_check *spec_check;
};

static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag;
static sfc_flow_spec_check sfc_flow_check_outer_vid_flag;

static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
	uint8_t sum = 0;
	unsigned int i;

	for (i = 0; i < size; i++)
		sum |= buf[i];

	return (sum == 0) ? B_TRUE : B_FALSE;
}

/*
 * Validate the item and prepare the spec and mask structures for parsing.
 */
int
sfc_flow_parse_init(const struct rte_flow_item *item,
		    const void **spec_ptr,
		    const void **mask_ptr,
		    const void *supp_mask,
		    const void *def_mask,
		    unsigned int size,
		    struct rte_flow_error *error)
{
	const uint8_t *spec;
	const uint8_t *mask;
	const uint8_t *last;
	uint8_t supp;
	unsigned int i;

	if (item == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "NULL item");
		return -rte_errno;
	}

	if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Mask or last is set without spec");
		return -rte_errno;
	}

	/*
	 * If "mask" is not set, the default mask is used;
	 * but if the default mask is NULL, "mask" must be set.
	 */
	if (item->mask == NULL) {
		if (def_mask == NULL) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				"Mask should be specified");
			return -rte_errno;
		}

		mask = def_mask;
	} else {
		mask = item->mask;
	}

	spec = item->spec;
	last = item->last;

	if (spec == NULL)
		goto exit;

	/*
	 * If the field values in "last" are either 0 or equal to the
	 * corresponding values in "spec", then they are ignored.
	 */
	if (last != NULL &&
	    !sfc_flow_is_zero(last, size) &&
	    memcmp(last, spec, size) != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Ranging is not supported");
		return -rte_errno;
	}

	if (supp_mask == NULL) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Supported mask for item should be specified");
		return -rte_errno;
	}

	/* Check that the mask does not ask for more match than supp_mask */
	for (i = 0; i < size; i++) {
		supp = ((const uint8_t *)supp_mask)[i];

		if (~supp & mask[i]) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Item's field is not supported");
			return -rte_errno;
		}
	}

exit:
	*spec_ptr = spec;
	*mask_ptr = mask;
	return 0;
}

/*
 * Protocol parsers.
 * Masking is not supported, so masks in items should be either
 * full or empty (zeroed), and set only for the supported fields
 * specified in supp_mask.
 */
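
/*
 * Illustrative example (not part of the driver logic): for a TCP item,
 * a src_port mask of 0xffff (full) or 0x0000 (empty) is accepted, while
 * a partial mask such as 0x00ff is rejected by the parsers below via
 * their "Bad mask" error paths, because per-bit masking of a field is
 * not supported by the hardware filters.
 */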

static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
		    __rte_unused struct sfc_flow_parse_ctx *parse_ctx,
		    __rte_unused struct rte_flow_error *error)
{
	return 0;
}

/**
 * Convert an Ethernet item to an EFX filter specification.
 *
 * @param[in] item
 *   Item specification. The outer frame specification may only comprise
 *   the source/destination addresses and EtherType field.
 *   The inner frame specification may contain the destination address only.
 *   The individual/group mask is supported, as well as empty and full masks.
 *   If the mask is NULL, the default mask is used. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
		   struct sfc_flow_parse_ctx *parse_ctx,
		   struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_eth *spec = NULL;
	const struct rte_flow_item_eth *mask = NULL;
	const struct rte_flow_item_eth supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.type = 0xffff,
	};
	const struct rte_flow_item_eth ifrm_supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
	};
	const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
		0x01, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	const struct rte_flow_item_eth *supp_mask_p;
	const struct rte_flow_item_eth *def_mask_p;
	uint8_t *loc_mac = NULL;
	boolean_t is_ifrm = (efx_spec->efs_encap_type !=
		EFX_TUNNEL_PROTOCOL_NONE);

	if (is_ifrm) {
		supp_mask_p = &ifrm_supp_mask;
		def_mask_p = &ifrm_supp_mask;
		loc_mac = efx_spec->efs_ifrm_loc_mac;
	} else {
		supp_mask_p = &supp_mask;
		def_mask_p = &rte_flow_item_eth_mask;
		loc_mac = efx_spec->efs_loc_mac;
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 supp_mask_p, def_mask_p,
				 sizeof(struct rte_flow_item_eth),
				 error);
	if (rc != 0)
		return rc;

	/* If "spec" is not set, it could be any Ethernet frame */
	if (spec == NULL)
		return 0;

	if (rte_is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
		efx_spec->efs_match_flags |= is_ifrm ?
			EFX_FILTER_MATCH_IFRM_LOC_MAC :
			EFX_FILTER_MATCH_LOC_MAC;
		rte_memcpy(loc_mac, spec->dst.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (memcmp(mask->dst.addr_bytes, ig_mask,
			  EFX_MAC_ADDR_LEN) == 0) {
		if (rte_is_unicast_ether_addr(&spec->dst))
			efx_spec->efs_match_flags |= is_ifrm ?
				EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
				EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
		else
			efx_spec->efs_match_flags |= is_ifrm ?
				EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
				EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
	} else if (!rte_is_zero_ether_addr(&mask->dst)) {
		goto fail_bad_mask;
	}

	/*
	 * ifrm_supp_mask ensures that the source address and
	 * EtherType masks are zero for the inner frame,
	 * so these fields are filled in only for the outer frame.
	 */
	if (rte_is_same_ether_addr(&mask->src, &supp_mask.src)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
		rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (!rte_is_zero_ether_addr(&mask->src)) {
		goto fail_bad_mask;
	}

	/*
	 * The EtherType is in big-endian byte order in the item and
	 * in little-endian in efx_spec, so a byte swap is needed.
	 */
	if (mask->type == supp_mask.type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->type);
	} else if (mask->type != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the ETH pattern item");
	return -rte_errno;
}
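
/*
 * Illustrative example (not part of the driver logic): an ETH item with
 * destination mask ff:ff:ff:ff:ff:ff selects an exact LOC_MAC match,
 * while the individual/group mask 01:00:00:00:00:00 selects a match on
 * unknown unicast or multicast destination, depending on the I/G bit of
 * the spec address.
 */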

/**
 * Convert a VLAN item to an EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the VID field is supported.
 *   The mask cannot be NULL. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
		    struct sfc_flow_parse_ctx *parse_ctx,
		    struct rte_flow_error *error)
{
	int rc;
	uint16_t vid;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_vlan *spec = NULL;
	const struct rte_flow_item_vlan *mask = NULL;
	const struct rte_flow_item_vlan supp_mask = {
		.tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
		.inner_type = RTE_BE16(0xffff),
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 NULL,
				 sizeof(struct rte_flow_item_vlan),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * The VID is in big-endian byte order in the item and
	 * in little-endian in efx_spec, so a byte swap is needed.
	 * If two VLAN items are included, the first matches
	 * the outer tag and the next matches the inner tag.
	 */
	if (mask->tci == supp_mask.tci) {
		/* Apply the mask to keep the VID only */
		vid = rte_bswap16(spec->tci & mask->tci);

		if (!(efx_spec->efs_match_flags &
		      EFX_FILTER_MATCH_OUTER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
			efx_spec->efs_outer_vid = vid;
		} else if (!(efx_spec->efs_match_flags &
			     EFX_FILTER_MATCH_INNER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
			efx_spec->efs_inner_vid = vid;
		} else {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "More than two VLAN items");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN ID in TCI match is required");
		return -rte_errno;
	}

	if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN TPID matching is not supported");
		return -rte_errno;
	}
	if (mask->inner_type == supp_mask.inner_type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->inner_type);
	} else if (mask->inner_type) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Bad mask for VLAN inner_type");
		return -rte_errno;
	}

	return 0;
}
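
/*
 * Illustrative example (not part of the driver logic): the pattern
 * "eth / vlan vid is 100 / vlan vid is 200" sets
 * EFX_FILTER_MATCH_OUTER_VID to 100 and EFX_FILTER_MATCH_INNER_VID to
 * 200; a third VLAN item would be rejected by the code above.
 */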

/**
 * Convert an IPv4 item to an EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, the default
 *   mask is used. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
		    struct sfc_flow_parse_ctx *parse_ctx,
		    struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_ipv4 *spec = NULL;
	const struct rte_flow_item_ipv4 *mask = NULL;
	const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
	const struct rte_flow_item_ipv4 supp_mask = {
		.hdr = {
			.src_addr = 0xffffffff,
			.dst_addr = 0xffffffff,
			.next_proto_id = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv4_mask,
				 sizeof(struct rte_flow_item_ipv4),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv4 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters.
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv4;
	} else if (efx_spec->efs_ether_type != ether_type_ipv4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV4 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv4 addresses are in big-endian byte order both in the item
	 * and in efx_spec.
	 */
	if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
		efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
	} else if (mask->hdr.src_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
		efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
	} else if (mask->hdr.dst_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
	} else if (mask->hdr.next_proto_id != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV4 pattern item");
	return -rte_errno;
}

/**
 * Convert an IPv6 item to an EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination addresses and
 *   next header fields are supported. If the mask is NULL, the default
 *   mask is used. Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
		    struct sfc_flow_parse_ctx *parse_ctx,
		    struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_ipv6 *spec = NULL;
	const struct rte_flow_item_ipv6 *mask = NULL;
	const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
	const struct rte_flow_item_ipv6 supp_mask = {
		.hdr = {
			.src_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.dst_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.proto = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv6_mask,
				 sizeof(struct rte_flow_item_ipv6),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv6 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters.
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv6;
	} else if (efx_spec->efs_ether_type != ether_type_ipv6) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV6 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv6 addresses are in big-endian byte order both in the item
	 * and in efx_spec.
	 */
	if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
		   sizeof(mask->hdr.src_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
				 sizeof(spec->hdr.src_addr));
		rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
			   sizeof(efx_spec->efs_rem_host));
	} else if (!sfc_flow_is_zero(mask->hdr.src_addr,
				     sizeof(mask->hdr.src_addr))) {
		goto fail_bad_mask;
	}

	if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
		   sizeof(mask->hdr.dst_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
				 sizeof(spec->hdr.dst_addr));
		rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
			   sizeof(efx_spec->efs_loc_host));
	} else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
				     sizeof(mask->hdr.dst_addr))) {
		goto fail_bad_mask;
	}

	if (mask->hdr.proto == supp_mask.hdr.proto) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.proto;
	} else if (mask->hdr.proto != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV6 pattern item");
	return -rte_errno;
}

/**
 * Convert a TCP item to an EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination port fields
 *   are supported. If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_tcp(const struct rte_flow_item *item,
		   struct sfc_flow_parse_ctx *parse_ctx,
		   struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_tcp *spec = NULL;
	const struct rte_flow_item_tcp *mask = NULL;
	const struct rte_flow_item_tcp supp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_tcp_mask,
				 sizeof(struct rte_flow_item_tcp),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by TCP source and destination ports requires
	 * the appropriate IP_PROTO in hardware filters.
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"IP proto in pattern with TCP item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * Source and destination ports are in big-endian byte order in the
	 * item and in little-endian in efx_spec, so a byte swap is needed.
	 */
	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
	} else if (mask->hdr.src_port != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
	} else if (mask->hdr.dst_port != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the TCP pattern item");
	return -rte_errno;
}

/**
 * Convert a UDP item to an EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the source and destination port fields
 *   are supported. If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_udp(const struct rte_flow_item *item,
		   struct sfc_flow_parse_ctx *parse_ctx,
		   struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_udp *spec = NULL;
	const struct rte_flow_item_udp *mask = NULL;
	const struct rte_flow_item_udp supp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_udp_mask,
				 sizeof(struct rte_flow_item_udp),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by UDP source and destination ports requires
	 * the appropriate IP_PROTO in hardware filters.
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"IP proto in pattern with UDP item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * Source and destination ports are in big-endian byte order in the
	 * item and in little-endian in efx_spec, so a byte swap is needed.
	 */
	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
	} else if (mask->hdr.src_port != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
	} else if (mask->hdr.dst_port != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the UDP pattern item");
	return -rte_errno;
}

/*
 * Filters for encapsulated packets match based on the EtherType and IP
 * protocol in the outer frame.
 */
static int
sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
					efx_filter_spec_t *efx_spec,
					uint8_t ip_proto,
					struct rte_flow_error *error)
{
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = ip_proto;
	} else if (efx_spec->efs_ip_proto != ip_proto) {
		switch (ip_proto) {
		case EFX_IPPROTO_UDP:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Outer IP header protocol must be UDP "
				"in VxLAN/GENEVE pattern");
			return -rte_errno;

		case EFX_IPPROTO_GRE:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Outer IP header protocol must be GRE "
				"in NVGRE pattern");
			return -rte_errno;

		default:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Only VxLAN/GENEVE/NVGRE tunneling patterns "
				"are supported");
			return -rte_errno;
		}
	}

	if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
	    efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
	    efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Outer frame EtherType in pattern with tunneling "
			"must be IPv4 or IPv6");
		return -rte_errno;
	}

	return 0;
}
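
/*
 * Illustrative example (not part of the driver logic): in the pattern
 * "eth / ipv4 / udp / vxlan" the UDP item has already set the IP
 * protocol to EFX_IPPROTO_UDP, so the check above passes; a pattern
 * such as "eth / ipv4 proto is 6 / vxlan" would be rejected because the
 * outer IP protocol must be UDP for VxLAN/GENEVE (and GRE for NVGRE).
 */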

static int
sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
				  const uint8_t *vni_or_vsid_val,
				  const uint8_t *vni_or_vsid_mask,
				  const struct rte_flow_item *item,
				  struct rte_flow_error *error)
{
	const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
		0xff, 0xff, 0xff
	};

	if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
		   EFX_VNI_OR_VSID_LEN) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
		rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
			   EFX_VNI_OR_VSID_LEN);
	} else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Unsupported VNI/VSID mask");
		return -rte_errno;
	}

	return 0;
}

/**
 * Convert a VXLAN item to an EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the VXLAN network identifier field is
 *   supported. If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vxlan(const struct rte_flow_item *item,
		     struct sfc_flow_parse_ctx *parse_ctx,
		     struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_vxlan *spec = NULL;
	const struct rte_flow_item_vxlan *mask = NULL;
	const struct rte_flow_item_vxlan supp_mask = {
		.vni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_vxlan_mask,
				 sizeof(struct rte_flow_item_vxlan),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_UDP, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
					       mask->vni, item, error);

	return rc;
}

/**
 * Convert a GENEVE item to an EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the Virtual Network Identifier and protocol
 *   type fields are supported, and the protocol type may only be
 *   Ethernet (0x6558). If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_geneve(const struct rte_flow_item *item,
		      struct sfc_flow_parse_ctx *parse_ctx,
		      struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_geneve *spec = NULL;
	const struct rte_flow_item_geneve *mask = NULL;
	const struct rte_flow_item_geneve supp_mask = {
		.protocol = RTE_BE16(0xffff),
		.vni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_geneve_mask,
				 sizeof(struct rte_flow_item_geneve),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_UDP, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	if (mask->protocol == supp_mask.protocol) {
		if (spec->protocol != rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"GENEVE encap. protocol must be Ethernet "
				"(0x6558) in the GENEVE pattern item");
			return -rte_errno;
		}
	} else if (mask->protocol != 0) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Unsupported mask for GENEVE encap. protocol");
		return -rte_errno;
	}

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
					       mask->vni, item, error);

	return rc;
}

/**
 * Convert an NVGRE item to an EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the virtual subnet ID field is supported.
 *   If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_nvgre(const struct rte_flow_item *item,
		     struct sfc_flow_parse_ctx *parse_ctx,
		     struct rte_flow_error *error)
{
	int rc;
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_nvgre *spec = NULL;
	const struct rte_flow_item_nvgre *mask = NULL;
	const struct rte_flow_item_nvgre supp_mask = {
		.tni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_nvgre_mask,
				 sizeof(struct rte_flow_item_nvgre),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_GRE, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
					       mask->tni, item, error);

	return rc;
}

/**
 * Convert a PPPoEx item to an EFX filter specification.
 *
 * @param[in] item
 *   Item specification.
 *   Matching on PPPoEx fields is not supported.
 *   This item can only be used to set or validate the EtherType filter.
 *   Only zero masks are allowed.
 *   Ranging is not supported.
 * @param[in, out] parse_ctx
 *   Parse context, carrying the EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_pppoex(const struct rte_flow_item *item,
		      struct sfc_flow_parse_ctx *parse_ctx,
		      struct rte_flow_error *error)
{
	efx_filter_spec_t *efx_spec = parse_ctx->filter;
	const struct rte_flow_item_pppoe *spec = NULL;
	const struct rte_flow_item_pppoe *mask = NULL;
	const struct rte_flow_item_pppoe supp_mask = {};
	const struct rte_flow_item_pppoe def_mask = {};
	uint16_t ether_type;
	int rc;

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &def_mask,
				 sizeof(struct rte_flow_item_pppoe),
				 error);
	if (rc != 0)
		return rc;

	if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED)
		ether_type = RTE_ETHER_TYPE_PPPOE_DISCOVERY;
	else
		ether_type = RTE_ETHER_TYPE_PPPOE_SESSION;

	if ((efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) != 0) {
		if (efx_spec->efs_ether_type != ether_type) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Invalid EtherType for a PPPoE flow item");
			return -rte_errno;
		}
	} else {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type;
	}

	return 0;
}

static const struct sfc_flow_item sfc_flow_items[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_VOID,
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_void,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.prev_layer = SFC_FLOW_ITEM_START_LAYER,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_eth,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_vlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_PPPOED,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_pppoex,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_PPPOES,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_pppoex,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_ipv4,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_ipv6,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_tcp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_udp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_vxlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_GENEVE,
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_geneve,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_NVGRE,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
		.parse = sfc_flow_parse_nvgre,
	},
};
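
/*
 * Illustrative example (not part of the driver logic) of a pattern
 * accepted by the layer ordering above: ETH (L2) / IPV4 (L3) / UDP (L4)
 * / VXLAN / ETH. The VXLAN item resets the layer to the start so the
 * inner ETH item may follow. Leading layers may also be omitted, e.g. a
 * pattern may start directly with IPV4 (see sfc_flow_parse_pattern()).
 */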

/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(struct sfc_adapter *sa,
		    const struct rte_flow_attr *attr,
		    struct rte_flow *flow,
		    struct rte_flow_error *error)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
	struct sfc_mae *mae = &sa->mae;

	if (attr == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "NULL attribute");
		return -rte_errno;
	}
	if (attr->group != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
				   "Groups are not supported");
		return -rte_errno;
	}
	if (attr->egress != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
				   "Egress is not supported");
		return -rte_errno;
	}
	if (attr->ingress == 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
				   "Ingress is compulsory");
		return -rte_errno;
	}
	if (attr->transfer == 0) {
		if (attr->priority != 0) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					   attr, "Priorities are unsupported");
			return -rte_errno;
		}
		spec->type = SFC_FLOW_SPEC_FILTER;
		spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX;
		spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
		spec_filter->template.efs_priority = EFX_FILTER_PRI_MANUAL;
	} else {
		if (mae->status != SFC_MAE_STATUS_SUPPORTED) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					   attr, "Transfer is not supported");
			return -rte_errno;
		}
		if (attr->priority > mae->nb_action_rule_prios_max) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					   attr, "Unsupported priority level");
			return -rte_errno;
		}
		spec->type = SFC_FLOW_SPEC_MAE;
		spec_mae->priority = attr->priority;
		spec_mae->match_spec = NULL;
		spec_mae->action_set = NULL;
		spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
	}

	return 0;
}
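
/*
 * Illustrative example (not part of the driver logic): an "ingress"
 * attribute with priority 0 and no "transfer" selects the VNIC filter
 * backend (SFC_FLOW_SPEC_FILTER), while "transfer" selects the MAE
 * backend (SFC_FLOW_SPEC_MAE), which also permits non-zero priorities
 * up to nb_action_rule_prios_max.
 */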

/* Get an item from the sfc_flow_items array */
static const struct sfc_flow_item *
sfc_flow_get_item(const struct sfc_flow_item *items,
		  unsigned int nb_items,
		  enum rte_flow_item_type type)
{
	unsigned int i;

	for (i = 0; i < nb_items; i++)
		if (items[i].type == type)
			return &items[i];

	return NULL;
}

int
sfc_flow_parse_pattern(const struct sfc_flow_item *flow_items,
		       unsigned int nb_flow_items,
		       const struct rte_flow_item pattern[],
		       struct sfc_flow_parse_ctx *parse_ctx,
		       struct rte_flow_error *error)
{
	int rc;
	unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
	boolean_t is_ifrm = B_FALSE;
	const struct sfc_flow_item *item;

	if (pattern == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
				   "NULL pattern");
		return -rte_errno;
	}

	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
		item = sfc_flow_get_item(flow_items, nb_flow_items,
					 pattern->type);
		if (item == NULL) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unsupported pattern item");
			return -rte_errno;
		}

		/*
		 * Omitting one or several protocol layers at the beginning
		 * of the pattern is supported.
		 */
		if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    item->prev_layer != prev_layer) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unexpected sequence of pattern items");
			return -rte_errno;
		}

		/*
		 * Allow only VOID and ETH pattern items in the inner frame.
		 * Also check that there is only one tunneling protocol.
		 */
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
		case RTE_FLOW_ITEM_TYPE_ETH:
			break;

		case RTE_FLOW_ITEM_TYPE_VXLAN:
		case RTE_FLOW_ITEM_TYPE_GENEVE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			if (is_ifrm) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					pattern,
					"More than one tunneling protocol");
				return -rte_errno;
			}
			is_ifrm = B_TRUE;
			break;

		default:
			if (parse_ctx->type == SFC_FLOW_PARSE_CTX_FILTER &&
			    is_ifrm) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					pattern,
					"There is an unsupported pattern item "
					"in the inner frame");
				return -rte_errno;
			}
			break;
		}

		if (parse_ctx->type != item->ctx_type) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					"Parse context type mismatch");
			return -rte_errno;
		}

		rc = item->parse(pattern, parse_ctx, error);
		if (rc != 0)
			return rc;

		if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
			prev_layer = item->layer;
	}

	return 0;
}

static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
		     const struct rte_flow_action_queue *queue,
		     struct rte_flow *flow)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	struct sfc_rxq *rxq;
	struct sfc_rxq_info *rxq_info;

	if (queue->index >= sfc_sa2shared(sa)->ethdev_rxq_count)
		return -EINVAL;

	rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, queue->index);
	spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index;

	rxq_info = &sfc_sa2shared(sa)->rxq_info[queue->index];
	spec_filter->rss_hash_required = !!(rxq_info->rxq_flags &
					    SFC_RXQ_FLAG_RSS_HASH);

	return 0;
}

static int
sfc_flow_parse_rss(struct sfc_adapter *sa,
		   const struct rte_flow_action_rss *action_rss,
		   struct rte_flow *flow)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_rss *rss = &sas->rss;
	sfc_ethdev_qid_t ethdev_qid;
	struct sfc_rxq *rxq;
	unsigned int rxq_hw_index_min;
	unsigned int rxq_hw_index_max;
	efx_rx_hash_type_t efx_hash_types;
	const uint8_t *rss_key;
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	struct sfc_flow_rss *sfc_rss_conf = &spec_filter->rss_conf;
	unsigned int i;

	if (action_rss->queue_num == 0)
		return -EINVAL;

	ethdev_qid = sfc_sa2shared(sa)->ethdev_rxq_count - 1;
	rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);
	rxq_hw_index_min = rxq->hw_index;
	rxq_hw_index_max = 0;

	for (i = 0; i < action_rss->queue_num; ++i) {
		ethdev_qid = action_rss->queue[i];

		if ((unsigned int)ethdev_qid >=
		    sfc_sa2shared(sa)->ethdev_rxq_count)
			return -EINVAL;

		rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);

		if (rxq->hw_index < rxq_hw_index_min)
			rxq_hw_index_min = rxq->hw_index;

		if (rxq->hw_index > rxq_hw_index_max)
			rxq_hw_index_max = rxq->hw_index;
	}

	switch (action_rss->func) {
	case RTE_ETH_HASH_FUNCTION_DEFAULT:
	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
		break;
	default:
		return -EINVAL;
	}

	if (action_rss->level)
		return -EINVAL;

	/*
	 * A dummy RSS action with only one queue and no specific settings
	 * for hash types and key does not require a dedicated RSS context
	 * and may be simplified to a single-queue action.
	 */
	if (action_rss->queue_num == 1 && action_rss->types == 0 &&
	    action_rss->key_len == 0) {
		spec_filter->template.efs_dmaq_id = rxq_hw_index_min;
		return 0;
	}

	if (action_rss->types) {
		int rc;

		rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types,
					  &efx_hash_types);
		if (rc != 0)
			return -rc;
	} else {
		unsigned int i;

		efx_hash_types = 0;
		for (i = 0; i < rss->hf_map_nb_entries; ++i)
			efx_hash_types |= rss->hf_map[i].efx;
	}

	if (action_rss->key_len) {
		if (action_rss->key_len != sizeof(rss->key))
			return -EINVAL;

		rss_key = action_rss->key;
	} else {
		rss_key = rss->key;
	}

	spec_filter->rss = B_TRUE;

	sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
	sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
	sfc_rss_conf->rss_hash_types = efx_hash_types;
	rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key));

	for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
		unsigned int nb_queues = action_rss->queue_num;
		struct sfc_rxq *rxq;

		ethdev_qid = action_rss->queue[i % nb_queues];
		rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);
		sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
	}

	return 0;
}
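
/*
 * Illustrative example (not part of the driver logic): assuming the
 * ethdev queues in action_rss->queue = {3, 5, 7} map to hardware RxQ
 * indices 3, 5 and 7, the minimum (3) becomes the base, and each
 * rss_tbl entry holds the queue's offset from that base, filled
 * round-robin: 0, 2, 4, 0, 2, 4, ...
 */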
1523 
1524 static int
1525 sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
1526 		    unsigned int filters_count)
1527 {
1528 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1529 	unsigned int i;
1530 	int ret = 0;
1531 
1532 	for (i = 0; i < filters_count; i++) {
1533 		int rc;
1534 
1535 		rc = efx_filter_remove(sa->nic, &spec_filter->filters[i]);
1536 		if (ret == 0 && rc != 0) {
1537 			sfc_err(sa, "failed to remove filter specification "
1538 				"(rc = %d)", rc);
1539 			ret = rc;
1540 		}
1541 	}
1542 
1543 	return ret;
1544 }
1545 
1546 static int
1547 sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1548 {
1549 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1550 	unsigned int i;
1551 	int rc = 0;
1552 
1553 	for (i = 0; i < spec_filter->count; i++) {
1554 		rc = efx_filter_insert(sa->nic, &spec_filter->filters[i]);
1555 		if (rc != 0) {
1556 			sfc_flow_spec_flush(sa, spec, i);
1557 			break;
1558 		}
1559 	}
1560 
1561 	return rc;
1562 }
1563 
1564 static int
1565 sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1566 {
1567 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1568 
1569 	return sfc_flow_spec_flush(sa, spec, spec_filter->count);
1570 }
1571 
1572 static int
1573 sfc_flow_filter_insert(struct sfc_adapter *sa,
1574 		       struct rte_flow *flow)
1575 {
1576 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
1577 	struct sfc_rss *rss = &sas->rss;
1578 	struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1579 	struct sfc_flow_rss *flow_rss = &spec_filter->rss_conf;
1580 	uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
1581 	boolean_t create_context;
1582 	unsigned int i;
1583 	int rc = 0;
1584 
1585 	create_context = spec_filter->rss || (spec_filter->rss_hash_required &&
1586 			rss->dummy_rss_context == EFX_RSS_CONTEXT_DEFAULT);
1587 
1588 	if (create_context) {
1589 		unsigned int rss_spread;
1590 		unsigned int rss_hash_types;
1591 		uint8_t *rss_key;
1592 
1593 		if (spec_filter->rss) {
1594 			rss_spread = MIN(flow_rss->rxq_hw_index_max -
1595 					flow_rss->rxq_hw_index_min + 1,
1596 					EFX_MAXRSS);
1597 			rss_hash_types = flow_rss->rss_hash_types;
1598 			rss_key = flow_rss->rss_key;
1599 		} else {
1600 			/*
1601 			 * Initialize dummy RSS context parameters to have
1602 			 * valid RSS hash. Use default RSS hash function and
1603 			 * key.
1604 			 */
1605 			rss_spread = 1;
1606 			rss_hash_types = rss->hash_types;
1607 			rss_key = rss->key;
1608 		}
1609 
1610 		rc = efx_rx_scale_context_alloc(sa->nic,
1611 						EFX_RX_SCALE_EXCLUSIVE,
1612 						rss_spread,
1613 						&efs_rss_context);
1614 		if (rc != 0)
1615 			goto fail_scale_context_alloc;
1616 
1617 		rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
1618 					   rss->hash_alg,
1619 					   rss_hash_types, B_TRUE);
1620 		if (rc != 0)
1621 			goto fail_scale_mode_set;
1622 
1623 		rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
1624 					  rss_key, sizeof(rss->key));
1625 		if (rc != 0)
1626 			goto fail_scale_key_set;
1627 	} else {
1628 		efs_rss_context = rss->dummy_rss_context;
1629 	}
1630 
1631 	if (spec_filter->rss || spec_filter->rss_hash_required) {
1632 		/*
1633 		 * At this point, fully elaborated filter specifications
1634 		 * have been produced from the template. To make sure that
1635 		 * RSS behaviour is consistent between them, set the same
1636 		 * RSS context value everywhere.
1637 		 */
1638 		for (i = 0; i < spec_filter->count; i++) {
1639 			efx_filter_spec_t *spec = &spec_filter->filters[i];
1640 
1641 			spec->efs_rss_context = efs_rss_context;
1642 			spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
1643 			if (spec_filter->rss)
1644 				spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
1645 		}
1646 	}
1647 
1648 	rc = sfc_flow_spec_insert(sa, &flow->spec);
1649 	if (rc != 0)
1650 		goto fail_filter_insert;
1651 
1652 	if (create_context) {
1653 		unsigned int dummy_tbl[RTE_DIM(flow_rss->rss_tbl)] = {0};
1654 		unsigned int *tbl;
1655 
1656 		tbl = spec_filter->rss ? flow_rss->rss_tbl : dummy_tbl;
1657 
1658 		/*
1659 		 * Scale table is set after filter insertion because
1660 		 * the table entries are relative to the base RxQ ID
1661 		 * and the latter is submitted to the HW by means of
1662 		 * inserting a filter, so by the time of the request
1663 		 * the HW knows all the information needed to verify
1664 		 * the table entries, and the operation will succeed
1665 		 */
1666 		rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
1667 					  tbl, RTE_DIM(flow_rss->rss_tbl));
1668 		if (rc != 0)
1669 			goto fail_scale_tbl_set;
1670 
1671 		/* Remember created dummy RSS context */
1672 		if (!spec_filter->rss)
1673 			rss->dummy_rss_context = efs_rss_context;
1674 	}
1675 
1676 	return 0;
1677 
1678 fail_scale_tbl_set:
1679 	sfc_flow_spec_remove(sa, &flow->spec);
1680 
1681 fail_filter_insert:
1682 fail_scale_key_set:
1683 fail_scale_mode_set:
1684 	if (create_context)
1685 		efx_rx_scale_context_free(sa->nic, efs_rss_context);
1686 
1687 fail_scale_context_alloc:
1688 	return rc;
1689 }
1690 
1691 static int
1692 sfc_flow_filter_remove(struct sfc_adapter *sa,
1693 		       struct rte_flow *flow)
1694 {
1695 	struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1696 	int rc = 0;
1697 
1698 	rc = sfc_flow_spec_remove(sa, &flow->spec);
1699 	if (rc != 0)
1700 		return rc;
1701 
1702 	if (spec_filter->rss) {
1703 		/*
1704 		 * All specifications for a given flow rule have the same RSS
1705 		 * context, so that RSS context value is taken from the first
1706 		 * filter specification
1707 		 */
1708 		efx_filter_spec_t *spec = &spec_filter->filters[0];
1709 
1710 		rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
1711 	}
1712 
1713 	return rc;
1714 }
1715 
1716 static int
1717 sfc_flow_parse_mark(struct sfc_adapter *sa,
1718 		    const struct rte_flow_action_mark *mark,
1719 		    struct rte_flow *flow)
1720 {
1721 	struct sfc_flow_spec *spec = &flow->spec;
1722 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1723 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
1724 
1725 	if (mark == NULL || mark->id > encp->enc_filter_action_mark_max)
1726 		return EINVAL;
1727 
1728 	spec_filter->template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
1729 	spec_filter->template.efs_mark = mark->id;
1730 
1731 	return 0;
1732 }
1733 
1734 static int
1735 sfc_flow_parse_actions(struct sfc_adapter *sa,
1736 		       const struct rte_flow_action actions[],
1737 		       struct rte_flow *flow,
1738 		       struct rte_flow_error *error)
1739 {
1740 	int rc;
1741 	struct sfc_flow_spec *spec = &flow->spec;
1742 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1743 	const unsigned int dp_rx_features = sa->priv.dp_rx->features;
1744 	uint32_t actions_set = 0;
1745 	const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
1746 					   (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
1747 					   (1UL << RTE_FLOW_ACTION_TYPE_DROP);
1748 	const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) |
1749 					   (1UL << RTE_FLOW_ACTION_TYPE_FLAG);
1750 
1751 	if (actions == NULL) {
1752 		rte_flow_error_set(error, EINVAL,
1753 				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
1754 				   "NULL actions");
1755 		return -rte_errno;
1756 	}
1757 
1758 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1759 		switch (actions->type) {
1760 		case RTE_FLOW_ACTION_TYPE_VOID:
1761 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID,
1762 					       actions_set);
1763 			break;
1764 
1765 		case RTE_FLOW_ACTION_TYPE_QUEUE:
1766 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE,
1767 					       actions_set);
1768 			if ((actions_set & fate_actions_mask) != 0)
1769 				goto fail_fate_actions;
1770 
1771 			rc = sfc_flow_parse_queue(sa, actions->conf, flow);
1772 			if (rc != 0) {
1773 				rte_flow_error_set(error, EINVAL,
1774 					RTE_FLOW_ERROR_TYPE_ACTION, actions,
1775 					"Bad QUEUE action");
1776 				return -rte_errno;
1777 			}
1778 			break;
1779 
1780 		case RTE_FLOW_ACTION_TYPE_RSS:
1781 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS,
1782 					       actions_set);
1783 			if ((actions_set & fate_actions_mask) != 0)
1784 				goto fail_fate_actions;
1785 
1786 			rc = sfc_flow_parse_rss(sa, actions->conf, flow);
1787 			if (rc != 0) {
1788 				rte_flow_error_set(error, -rc,
1789 					RTE_FLOW_ERROR_TYPE_ACTION, actions,
1790 					"Bad RSS action");
1791 				return -rte_errno;
1792 			}
1793 			break;
1794 
1795 		case RTE_FLOW_ACTION_TYPE_DROP:
1796 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
1797 					       actions_set);
1798 			if ((actions_set & fate_actions_mask) != 0)
1799 				goto fail_fate_actions;
1800 
1801 			spec_filter->template.efs_dmaq_id =
1802 				EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1803 			break;
1804 
1805 		case RTE_FLOW_ACTION_TYPE_FLAG:
1806 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
1807 					       actions_set);
1808 			if ((actions_set & mark_actions_mask) != 0)
1809 				goto fail_actions_overlap;
1810 
1811 			if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) {
1812 				rte_flow_error_set(error, ENOTSUP,
1813 					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1814 					"FLAG action is not supported on the current Rx datapath");
1815 				return -rte_errno;
1816 			}
1817 
1818 			spec_filter->template.efs_flags |=
1819 				EFX_FILTER_FLAG_ACTION_FLAG;
1820 			break;
1821 
1822 		case RTE_FLOW_ACTION_TYPE_MARK:
1823 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
1824 					       actions_set);
1825 			if ((actions_set & mark_actions_mask) != 0)
1826 				goto fail_actions_overlap;
1827 
1828 			if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) {
1829 				rte_flow_error_set(error, ENOTSUP,
1830 					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1831 					"MARK action is not supported on the current Rx datapath");
1832 				return -rte_errno;
1833 			}
1834 
1835 			rc = sfc_flow_parse_mark(sa, actions->conf, flow);
1836 			if (rc != 0) {
1837 				rte_flow_error_set(error, rc,
1838 					RTE_FLOW_ERROR_TYPE_ACTION, actions,
1839 					"Bad MARK action");
1840 				return -rte_errno;
1841 			}
1842 			break;
1843 
1844 		default:
1845 			rte_flow_error_set(error, ENOTSUP,
1846 					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
1847 					   "Action is not supported");
1848 			return -rte_errno;
1849 		}
1850 
1851 		actions_set |= (1UL << actions->type);
1852 	}
1853 
1854 	/* When fate is unknown, drop traffic. */
1855 	if ((actions_set & fate_actions_mask) == 0) {
1856 		spec_filter->template.efs_dmaq_id =
1857 			EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1858 	}
1859 
1860 	return 0;
1861 
1862 fail_fate_actions:
1863 	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1864 			   "Cannot combine several fate-deciding actions, "
1865 			   "choose between QUEUE, RSS or DROP");
1866 	return -rte_errno;
1867 
1868 fail_actions_overlap:
1869 	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1870 			   "Overlapping actions are not supported");
1871 	return -rte_errno;
1872 }
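
/*
 * For illustration only (hypothetical values, not part of the driver):
 * an action list accepted by the parser above combines at most one
 * mark-type action with at most one fate-deciding action, e.g.
 *
 *	const struct rte_flow_action_mark mark = { .id = 42 };
 *	const struct rte_flow_action_queue queue = { .index = 1 };
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 * Appending a second fate action (say, DROP) to the list above would
 * make the parser take the fail_fate_actions path instead.
 */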
1873 
1874 /**
1875  * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
1876  * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags
1877  * across the specification copies.
1878  *
1879  * @param spec[in, out]
1880  *   SFC flow specification to update.
1881  * @param filters_count_for_one_val[in]
1882  *   How many specifications should share one match flag value; this is
1883  *   the number of specifications before copying.
1884  * @param error[out]
1885  *   Perform verbose error reporting if not NULL.
1886  */
1887 static int
1888 sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
1889 			       unsigned int filters_count_for_one_val,
1890 			       struct rte_flow_error *error)
1891 {
1892 	unsigned int i;
1893 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1894 	static const efx_filter_match_flags_t vals[] = {
1895 		EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
1896 		EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
1897 	};
1898 
1899 	if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1900 		rte_flow_error_set(error, EINVAL,
1901 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1902 			"Number of specifications is incorrect while copying "
1903 			"by unknown destination flags");
1904 		return -rte_errno;
1905 	}
1906 
1907 	for (i = 0; i < spec_filter->count; i++) {
1908 		/* The check above ensures that divisor can't be zero here */
1909 		spec_filter->filters[i].efs_match_flags |=
1910 			vals[i / filters_count_for_one_val];
1911 	}
1912 
1913 	return 0;
1914 }
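
/*
 * A worked example of the distribution above (values assumed): with
 * spec_filter->count == 4 and filters_count_for_one_val == 2, indices
 * 0 and 1 map to vals[0] and receive
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, while indices 2 and 3 map to
 * vals[1] and receive EFX_FILTER_MATCH_UNKNOWN_MCAST_DST.
 */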
1915 
1916 /**
1917  * Check that the following condition is met:
1918  * - the list of supported filters has a filter
1919  *   with the EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag in place of
1920  *   EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since that filter will also
1921  *   be inserted.
1922  *
1923  * @param match[in]
1924  *   The match flags of filter.
1925  * @param spec[in]
1926  *   Specification to be supplemented.
1927  * @param filter[in]
1928  *   SFC filter with list of supported filters.
1929  */
1930 static boolean_t
1931 sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
1932 				 __rte_unused efx_filter_spec_t *spec,
1933 				 struct sfc_filter *filter)
1934 {
1935 	unsigned int i;
1936 	efx_filter_match_flags_t match_mcast_dst;
1937 
1938 	match_mcast_dst =
1939 		(match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
1940 		EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
1941 	for (i = 0; i < filter->supported_match_num; i++) {
1942 		if (match_mcast_dst == filter->supported_match[i])
1943 			return B_TRUE;
1944 	}
1945 
1946 	return B_FALSE;
1947 }
1948 
1949 /**
1950  * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag, and the EFX_ETHER_TYPE_IPV4
1951  * and EFX_ETHER_TYPE_IPV6 values of the corresponding field,
1952  * across the specification copies.
1953  *
1954  * @param spec[in, out]
1955  *   SFC flow specification to update.
1956  * @param filters_count_for_one_val[in]
1957  *   How many specifications should share one EtherType value; this is
1958  *   the number of specifications before copying.
1959  * @param error[out]
1960  *   Perform verbose error reporting if not NULL.
1961  */
1962 static int
1963 sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
1964 			unsigned int filters_count_for_one_val,
1965 			struct rte_flow_error *error)
1966 {
1967 	unsigned int i;
1968 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1969 	static const uint16_t vals[] = {
1970 		EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
1971 	};
1972 
1973 	if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1974 		rte_flow_error_set(error, EINVAL,
1975 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1976 			"Number of specifications is incorrect "
1977 			"while copying by Ethertype");
1978 		return -rte_errno;
1979 	}
1980 
1981 	for (i = 0; i < spec_filter->count; i++) {
1982 		spec_filter->filters[i].efs_match_flags |=
1983 			EFX_FILTER_MATCH_ETHER_TYPE;
1984 
1985 		/*
1986 		 * The check above ensures that
1987 		 * filters_count_for_one_val is not 0
1988 		 */
1989 		spec_filter->filters[i].efs_ether_type =
1990 			vals[i / filters_count_for_one_val];
1991 	}
1992 
1993 	return 0;
1994 }
1995 
1996 /**
1997  * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0
1998  * in all specification copies.
1999  *
2000  * @param spec[in, out]
2001  *   SFC flow specification to update.
2002  * @param filters_count_for_one_val[in]
2003  *   How many specifications should share one match flag value; this is
2004  *   the number of specifications before copying.
2005  * @param error[out]
2006  *   Perform verbose error reporting if not NULL.
2007  */
2008 static int
2009 sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec,
2010 			    unsigned int filters_count_for_one_val,
2011 			    struct rte_flow_error *error)
2012 {
2013 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2014 	unsigned int i;
2015 
2016 	if (filters_count_for_one_val != spec_filter->count) {
2017 		rte_flow_error_set(error, EINVAL,
2018 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2019 			"Number of specifications is incorrect "
2020 			"while copying by outer VLAN ID");
2021 		return -rte_errno;
2022 	}
2023 
2024 	for (i = 0; i < spec_filter->count; i++) {
2025 		spec_filter->filters[i].efs_match_flags |=
2026 			EFX_FILTER_MATCH_OUTER_VID;
2027 
2028 		spec_filter->filters[i].efs_outer_vid = 0;
2029 	}
2030 
2031 	return 0;
2032 }
2033 
2034 /**
2035  * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
2036  * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags
2037  * across the specification copies.
2038  *
2039  * @param spec[in, out]
2040  *   SFC flow specification to update.
2041  * @param filters_count_for_one_val[in]
2042  *   How many specifications should share one match flag value; this is
2043  *   the number of specifications before copying.
2044  * @param error[out]
2045  *   Perform verbose error reporting if not NULL.
2046  */
2047 static int
2048 sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
2049 				    unsigned int filters_count_for_one_val,
2050 				    struct rte_flow_error *error)
2051 {
2052 	unsigned int i;
2053 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2054 	static const efx_filter_match_flags_t vals[] = {
2055 		EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
2056 		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
2057 	};
2058 
2059 	if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
2060 		rte_flow_error_set(error, EINVAL,
2061 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2062 			"Number of specifications is incorrect while copying "
2063 			"by inner frame unknown destination flags");
2064 		return -rte_errno;
2065 	}
2066 
2067 	for (i = 0; i < spec_filter->count; i++) {
2068 		/* The check above ensures that divisor can't be zero here */
2069 		spec_filter->filters[i].efs_match_flags |=
2070 			vals[i / filters_count_for_one_val];
2071 	}
2072 
2073 	return 0;
2074 }
2075 
2076 /**
2077  * Check that the following conditions are met:
2078  * - the specification corresponds to a filter for encapsulated traffic
2079  * - the list of supported filters has a filter
2080  *   with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
2081  *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
2082  *   be inserted.
2083  *
2084  * @param match[in]
2085  *   The match flags of filter.
2086  * @param spec[in]
2087  *   Specification to be supplemented.
2088  * @param filter[in]
2089  *   SFC filter with list of supported filters.
2090  */
2091 static boolean_t
2092 sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
2093 				      efx_filter_spec_t *spec,
2094 				      struct sfc_filter *filter)
2095 {
2096 	unsigned int i;
2097 	efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
2098 	efx_filter_match_flags_t match_mcast_dst;
2099 
2100 	if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
2101 		return B_FALSE;
2102 
2103 	match_mcast_dst =
2104 		(match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
2105 		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
2106 	for (i = 0; i < filter->supported_match_num; i++) {
2107 		if (match_mcast_dst == filter->supported_match[i])
2108 			return B_TRUE;
2109 	}
2110 
2111 	return B_FALSE;
2112 }
2113 
2114 /**
2115  * Check whether the list of supported filters has a filter that differs
2116  * from @p match only in that it lacks the EFX_FILTER_MATCH_OUTER_VID
2117  * flag; in that case, that filter will be used and the
2118  * EFX_FILTER_MATCH_OUTER_VID flag is not needed.
2119  *
2120  * @param match[in]
2121  *   The match flags of filter.
2122  * @param spec[in]
2123  *   Specification to be supplemented.
2124  * @param filter[in]
2125  *   SFC filter with list of supported filters.
2126  */
2127 static boolean_t
2128 sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match,
2129 			      __rte_unused efx_filter_spec_t *spec,
2130 			      struct sfc_filter *filter)
2131 {
2132 	unsigned int i;
2133 	efx_filter_match_flags_t match_without_vid =
2134 		match & ~EFX_FILTER_MATCH_OUTER_VID;
2135 
2136 	for (i = 0; i < filter->supported_match_num; i++) {
2137 		if (match_without_vid == filter->supported_match[i])
2138 			return B_FALSE;
2139 	}
2140 
2141 	return B_TRUE;
2142 }
2143 
2144 /*
2145  * Match flags that can be automatically added to filters.
2146  * When searching for the copy flag, selecting the last of the equal
2147  * minima ensures that the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has
2148  * a higher priority than EFX_FILTER_MATCH_ETHER_TYPE, because the
2149  * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST filter is at the end of the list
2150  * of supported filters.
2151  */
2152 static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
2153 	{
2154 		.flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
2155 		.vals_count = 2,
2156 		.set_vals = sfc_flow_set_unknown_dst_flags,
2157 		.spec_check = sfc_flow_check_unknown_dst_flags,
2158 	},
2159 	{
2160 		.flag = EFX_FILTER_MATCH_ETHER_TYPE,
2161 		.vals_count = 2,
2162 		.set_vals = sfc_flow_set_ethertypes,
2163 		.spec_check = NULL,
2164 	},
2165 	{
2166 		.flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
2167 		.vals_count = 2,
2168 		.set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
2169 		.spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
2170 	},
2171 	{
2172 		.flag = EFX_FILTER_MATCH_OUTER_VID,
2173 		.vals_count = 1,
2174 		.set_vals = sfc_flow_set_outer_vid_flag,
2175 		.spec_check = sfc_flow_check_outer_vid_flag,
2176 	},
2177 };
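
/*
 * For example, if the flags missed by a template could be covered either
 * by EFX_FILTER_MATCH_ETHER_TYPE or by EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
 * alone, both candidates yield a multiplier of 2; the last-minimum
 * selection in sfc_flow_spec_filters_complete() below then prefers the
 * supported filter that appears later in the list, which is the
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST one.
 */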
2178 
2179 /* Get an item from the sfc_flow_copy_flags array */
2180 static const struct sfc_flow_copy_flag *
2181 sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
2182 {
2183 	unsigned int i;
2184 
2185 	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2186 		if (sfc_flow_copy_flags[i].flag == flag)
2187 			return &sfc_flow_copy_flags[i];
2188 	}
2189 
2190 	return NULL;
2191 }
2192 
2193 /**
2194  * Make copies of the specifications and set the match flag and the
2195  * values of the field that corresponds to it.
2196  *
2197  * @param spec[in, out]
2198  *   SFC flow specification to update.
2199  * @param flag[in]
2200  *   The match flag to add.
2201  * @param error[out]
2202  *   Perform verbose error reporting if not NULL.
2203  */
2204 static int
2205 sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
2206 			     efx_filter_match_flags_t flag,
2207 			     struct rte_flow_error *error)
2208 {
2209 	unsigned int i;
2210 	unsigned int new_filters_count;
2211 	unsigned int filters_count_for_one_val;
2212 	const struct sfc_flow_copy_flag *copy_flag;
2213 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2214 	int rc;
2215 
2216 	copy_flag = sfc_flow_get_copy_flag(flag);
2217 	if (copy_flag == NULL) {
2218 		rte_flow_error_set(error, ENOTSUP,
2219 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2220 				   "Unsupported spec field for copying");
2221 		return -rte_errno;
2222 	}
2223 
2224 	new_filters_count = spec_filter->count * copy_flag->vals_count;
2225 	if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
2226 		rte_flow_error_set(error, EINVAL,
2227 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2228 			"Too much EFX specifications in the flow rule");
2229 		return -rte_errno;
2230 	}
2231 
2232 	/* Copy filters specifications */
2233 	for (i = spec_filter->count; i < new_filters_count; i++) {
2234 		spec_filter->filters[i] =
2235 			spec_filter->filters[i - spec_filter->count];
2236 	}
2237 
2238 	filters_count_for_one_val = spec_filter->count;
2239 	spec_filter->count = new_filters_count;
2240 
2241 	rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
2242 	if (rc != 0)
2243 		return rc;
2244 
2245 	return 0;
2246 }
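
/*
 * A sketch of one pass of the function above, assuming a single initial
 * specification and flag == EFX_FILTER_MATCH_ETHER_TYPE (vals_count 2):
 *
 *	before copying:	count == 1, filters[0] == template
 *	after copying:	count == 2, filters[1] == filters[0]
 *	set_vals:	both filters get EFX_FILTER_MATCH_ETHER_TYPE, then
 *			filters[0].efs_ether_type = EFX_ETHER_TYPE_IPV4;
 *			filters[1].efs_ether_type = EFX_ETHER_TYPE_IPV6;
 */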
2247 
2248 /**
2249  * Check whether the given set of match flags missing from the original filter spec
2250  * could be covered by adding spec copies which specify the corresponding
2251  * flags and packet field values to match.
2252  *
2253  * @param miss_flags[in]
2254  *   Flags required by a supported filter but missing from the spec.
2255  * @param spec[in]
2256  *   Specification to be supplemented.
2257  * @param filter[in]
2258  *   SFC filter.
2259  *
2260  * @return
2261  *   Number of specifications after copying, or 0 if the flags cannot be added.
2262  */
2263 static unsigned int
2264 sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
2265 			     efx_filter_spec_t *spec,
2266 			     struct sfc_filter *filter)
2267 {
2268 	unsigned int i;
2269 	efx_filter_match_flags_t copy_flags = 0;
2270 	efx_filter_match_flags_t flag;
2271 	efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
2272 	sfc_flow_spec_check *check;
2273 	unsigned int multiplier = 1;
2274 
2275 	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2276 		flag = sfc_flow_copy_flags[i].flag;
2277 		check = sfc_flow_copy_flags[i].spec_check;
2278 		if ((flag & miss_flags) == flag) {
2279 			if (check != NULL && (!check(match, spec, filter)))
2280 				continue;
2281 
2282 			copy_flags |= flag;
2283 			multiplier *= sfc_flow_copy_flags[i].vals_count;
2284 		}
2285 	}
2286 
2287 	if (copy_flags == miss_flags)
2288 		return multiplier;
2289 
2290 	return 0;
2291 }
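
/*
 * For instance (assumed inputs), with miss_flags ==
 * (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID) and the
 * applicable spec_check callbacks passing, the multiplier is
 * 2 (EtherType values) * 1 (outer VID value) == 2 specifications
 * per original one.
 */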
2292 
2293 /**
2294  * Attempt to supplement the specification template up to a minimal
2295  * supported set of match flags. To do this, the specifications are
2296  * copied and filled in with the values of the fields that
2297  * correspond to the missing flags.
2298  * The necessary and sufficient set of filters is built from the
2299  * fewest number of copies that cover the minimally required set
2300  * of flags.
2301  *
2302  * @param sa[in]
2303  *   SFC adapter.
2304  * @param spec[in, out]
2305  *   SFC flow specification to update.
2306  * @param error[out]
2307  *   Perform verbose error reporting if not NULL.
2308  */
2309 static int
2310 sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
2311 			       struct sfc_flow_spec *spec,
2312 			       struct rte_flow_error *error)
2313 {
2314 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2315 	struct sfc_filter *filter = &sa->filter;
2316 	efx_filter_match_flags_t miss_flags;
2317 	efx_filter_match_flags_t min_miss_flags = 0;
2318 	efx_filter_match_flags_t match;
2319 	unsigned int min_multiplier = UINT_MAX;
2320 	unsigned int multiplier;
2321 	unsigned int i;
2322 	int rc;
2323 
2324 	match = spec_filter->template.efs_match_flags;
2325 	for (i = 0; i < filter->supported_match_num; i++) {
2326 		if ((match & filter->supported_match[i]) == match) {
2327 			miss_flags = filter->supported_match[i] & (~match);
2328 			multiplier = sfc_flow_check_missing_flags(miss_flags,
2329 				&spec_filter->template, filter);
2330 			if (multiplier > 0) {
2331 				if (multiplier <= min_multiplier) {
2332 					min_multiplier = multiplier;
2333 					min_miss_flags = miss_flags;
2334 				}
2335 			}
2336 		}
2337 	}
2338 
2339 	if (min_multiplier == UINT_MAX) {
2340 		rte_flow_error_set(error, ENOTSUP,
2341 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2342 				   "The flow rule pattern is unsupported");
2343 		return -rte_errno;
2344 	}
2345 
2346 	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2347 		efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;
2348 
2349 		if ((flag & min_miss_flags) == flag) {
2350 			rc = sfc_flow_spec_add_match_flag(spec, flag, error);
2351 			if (rc != 0)
2352 				return rc;
2353 		}
2354 	}
2355 
2356 	return 0;
2357 }
2358 
2359 /**
2360  * Check whether a set of match flags matches a filter pattern, where
2361  * the pattern may additionally include the OUTER_VID flag, or both
2362  * the OUTER_VID and INNER_VID flags.
2363  *
2364  * @param match_flags[in]
2365  *   Set of match flags.
2366  * @param flags_pattern[in]
2367  *   Pattern of filter match flags.
2368  */
2369 static boolean_t
2370 sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
2371 			    efx_filter_match_flags_t flags_pattern)
2372 {
2373 	if ((match_flags & flags_pattern) != flags_pattern)
2374 		return B_FALSE;
2375 
2376 	switch (match_flags & ~flags_pattern) {
2377 	case 0:
2378 	case EFX_FILTER_MATCH_OUTER_VID:
2379 	case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
2380 		return B_TRUE;
2381 	default:
2382 		return B_FALSE;
2383 	}
2384 }
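
/*
 * E.g. match flags (EFX_FILTER_MATCH_ETHER_TYPE |
 * EFX_FILTER_MATCH_OUTER_VID) fit the pattern EFX_FILTER_MATCH_ETHER_TYPE,
 * whereas (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_INNER_VID)
 * without EFX_FILTER_MATCH_OUTER_VID does not.
 */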
2385 
2386 /**
2387  * Check whether the spec maps to a hardware filter which is known to be
2388  * ineffective despite being valid.
2389  *
2390  * @param filter[in]
2391  *   SFC filter with list of supported filters.
2392  * @param spec[in]
2393  *   SFC flow specification.
2394  */
2395 static boolean_t
2396 sfc_flow_is_match_flags_exception(struct sfc_filter *filter,
2397 				  struct sfc_flow_spec *spec)
2398 {
2399 	unsigned int i;
2400 	uint16_t ether_type;
2401 	uint8_t ip_proto;
2402 	efx_filter_match_flags_t match_flags;
2403 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2404 
2405 	for (i = 0; i < spec_filter->count; i++) {
2406 		match_flags = spec_filter->filters[i].efs_match_flags;
2407 
2408 		if (sfc_flow_is_match_with_vids(match_flags,
2409 						EFX_FILTER_MATCH_ETHER_TYPE) ||
2410 		    sfc_flow_is_match_with_vids(match_flags,
2411 						EFX_FILTER_MATCH_ETHER_TYPE |
2412 						EFX_FILTER_MATCH_LOC_MAC)) {
2413 			ether_type = spec_filter->filters[i].efs_ether_type;
2414 			if (filter->supports_ip_proto_or_addr_filter &&
2415 			    (ether_type == EFX_ETHER_TYPE_IPV4 ||
2416 			     ether_type == EFX_ETHER_TYPE_IPV6))
2417 				return B_TRUE;
2418 		} else if (sfc_flow_is_match_with_vids(match_flags,
2419 				EFX_FILTER_MATCH_ETHER_TYPE |
2420 				EFX_FILTER_MATCH_IP_PROTO) ||
2421 			   sfc_flow_is_match_with_vids(match_flags,
2422 				EFX_FILTER_MATCH_ETHER_TYPE |
2423 				EFX_FILTER_MATCH_IP_PROTO |
2424 				EFX_FILTER_MATCH_LOC_MAC)) {
2425 			ip_proto = spec_filter->filters[i].efs_ip_proto;
2426 			if (filter->supports_rem_or_local_port_filter &&
2427 			    (ip_proto == EFX_IPPROTO_TCP ||
2428 			     ip_proto == EFX_IPPROTO_UDP))
2429 				return B_TRUE;
2430 		}
2431 	}
2432 
2433 	return B_FALSE;
2434 }
2435 
2436 static int
2437 sfc_flow_validate_match_flags(struct sfc_adapter *sa,
2438 			      struct rte_flow *flow,
2439 			      struct rte_flow_error *error)
2440 {
2441 	struct sfc_flow_spec *spec = &flow->spec;
2442 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2443 	efx_filter_spec_t *spec_tmpl = &spec_filter->template;
2444 	efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
2445 	int rc;
2446 
2447 	/* Initialize the first filter spec with template */
2448 	spec_filter->filters[0] = *spec_tmpl;
2449 	spec_filter->count = 1;
2450 
2451 	if (!sfc_filter_is_match_supported(sa, match_flags)) {
2452 		rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
2453 		if (rc != 0)
2454 			return rc;
2455 	}
2456 
2457 	if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) {
2458 		rte_flow_error_set(error, ENOTSUP,
2459 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2460 			"The flow rule pattern is unsupported");
2461 		return -rte_errno;
2462 	}
2463 
2464 	return 0;
2465 }
2466 
2467 static int
2468 sfc_flow_parse_rte_to_filter(struct rte_eth_dev *dev,
2469 			     const struct rte_flow_item pattern[],
2470 			     const struct rte_flow_action actions[],
2471 			     struct rte_flow *flow,
2472 			     struct rte_flow_error *error)
2473 {
2474 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2475 	struct sfc_flow_spec *spec = &flow->spec;
2476 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2477 	struct sfc_flow_parse_ctx ctx;
2478 	int rc;
2479 
2480 	ctx.type = SFC_FLOW_PARSE_CTX_FILTER;
2481 	ctx.filter = &spec_filter->template;
2482 
2483 	rc = sfc_flow_parse_pattern(sfc_flow_items, RTE_DIM(sfc_flow_items),
2484 				    pattern, &ctx, error);
2485 	if (rc != 0)
2486 		goto fail_bad_value;
2487 
2488 	rc = sfc_flow_parse_actions(sa, actions, flow, error);
2489 	if (rc != 0)
2490 		goto fail_bad_value;
2491 
2492 	rc = sfc_flow_validate_match_flags(sa, flow, error);
2493 	if (rc != 0)
2494 		goto fail_bad_value;
2495 
2496 	return 0;
2497 
2498 fail_bad_value:
2499 	return rc;
2500 }
2501 
2502 static int
2503 sfc_flow_parse_rte_to_mae(struct rte_eth_dev *dev,
2504 			  const struct rte_flow_item pattern[],
2505 			  const struct rte_flow_action actions[],
2506 			  struct rte_flow *flow,
2507 			  struct rte_flow_error *error)
2508 {
2509 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2510 	struct sfc_flow_spec *spec = &flow->spec;
2511 	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
2512 	int rc;
2513 
2514 	rc = sfc_mae_rule_parse_pattern(sa, pattern, spec_mae, error);
2515 	if (rc != 0)
2516 		return rc;
2517 
2518 	rc = sfc_mae_rule_parse_actions(sa, actions, spec_mae, error);
2519 	if (rc != 0)
2520 		return rc;
2521 
2522 	return 0;
2523 }
2524 
2525 static int
2526 sfc_flow_parse(struct rte_eth_dev *dev,
2527 	       const struct rte_flow_attr *attr,
2528 	       const struct rte_flow_item pattern[],
2529 	       const struct rte_flow_action actions[],
2530 	       struct rte_flow *flow,
2531 	       struct rte_flow_error *error)
2532 {
2533 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2534 	const struct sfc_flow_ops_by_spec *ops;
2535 	int rc;
2536 
2537 	rc = sfc_flow_parse_attr(sa, attr, flow, error);
2538 	if (rc != 0)
2539 		return rc;
2540 
2541 	ops = sfc_flow_get_ops_by_spec(flow);
2542 	if (ops == NULL || ops->parse == NULL) {
2543 		rte_flow_error_set(error, ENOTSUP,
2544 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2545 				   "No backend to handle this flow");
2546 		return -rte_errno;
2547 	}
2548 
2549 	return ops->parse(dev, pattern, actions, flow, error);
2550 }
2551 
2552 static struct rte_flow *
2553 sfc_flow_zmalloc(struct rte_flow_error *error)
2554 {
2555 	struct rte_flow *flow;
2556 
2557 	flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
2558 	if (flow == NULL) {
2559 		rte_flow_error_set(error, ENOMEM,
2560 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2561 				   "Failed to allocate memory");
2562 	}
2563 
2564 	return flow;
2565 }
2566 
2567 static void
2568 sfc_flow_free(struct sfc_adapter *sa, struct rte_flow *flow)
2569 {
2570 	const struct sfc_flow_ops_by_spec *ops;
2571 
2572 	ops = sfc_flow_get_ops_by_spec(flow);
2573 	if (ops != NULL && ops->cleanup != NULL)
2574 		ops->cleanup(sa, flow);
2575 
2576 	rte_free(flow);
2577 }
2578 
2579 static int
2580 sfc_flow_insert(struct sfc_adapter *sa, struct rte_flow *flow,
2581 		struct rte_flow_error *error)
2582 {
2583 	const struct sfc_flow_ops_by_spec *ops;
2584 	int rc;
2585 
2586 	ops = sfc_flow_get_ops_by_spec(flow);
2587 	if (ops == NULL || ops->insert == NULL) {
2588 		rte_flow_error_set(error, ENOTSUP,
2589 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2590 				   "No backend to handle this flow");
2591 		return rte_errno;
2592 	}
2593 
2594 	rc = ops->insert(sa, flow);
2595 	if (rc != 0) {
2596 		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2597 				   NULL, "Failed to insert the flow rule");
2598 	}
2599 
2600 	return rc;
2601 }
2602 
2603 static int
2604 sfc_flow_remove(struct sfc_adapter *sa, struct rte_flow *flow,
2605 		struct rte_flow_error *error)
2606 {
2607 	const struct sfc_flow_ops_by_spec *ops;
2608 	int rc;
2609 
2610 	ops = sfc_flow_get_ops_by_spec(flow);
2611 	if (ops == NULL || ops->remove == NULL) {
2612 		rte_flow_error_set(error, ENOTSUP,
2613 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2614 				   "No backend to handle this flow");
2615 		return rte_errno;
2616 	}
2617 
2618 	rc = ops->remove(sa, flow);
2619 	if (rc != 0) {
2620 		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2621 				   NULL, "Failed to remove the flow rule");
2622 	}
2623 
2624 	return rc;
2625 }
2626 
2627 static int
2628 sfc_flow_verify(struct sfc_adapter *sa, struct rte_flow *flow,
2629 		struct rte_flow_error *error)
2630 {
2631 	const struct sfc_flow_ops_by_spec *ops;
2632 	int rc = 0;
2633 
2634 	ops = sfc_flow_get_ops_by_spec(flow);
2635 	if (ops == NULL) {
2636 		rte_flow_error_set(error, ENOTSUP,
2637 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2638 				   "No backend to handle this flow");
2639 		return -rte_errno;
2640 	}
2641 
2642 	if (ops->verify != NULL) {
2643 		SFC_ASSERT(sfc_adapter_is_locked(sa));
2644 		rc = ops->verify(sa, flow);
2645 	}
2646 
2647 	if (rc != 0) {
2648 		rte_flow_error_set(error, rc,
2649 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2650 			"Failed to verify flow validity with FW");
2651 		return -rte_errno;
2652 	}
2653 
2654 	return 0;
2655 }
2656 
2657 static int
2658 sfc_flow_validate(struct rte_eth_dev *dev,
2659 		  const struct rte_flow_attr *attr,
2660 		  const struct rte_flow_item pattern[],
2661 		  const struct rte_flow_action actions[],
2662 		  struct rte_flow_error *error)
2663 {
2664 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2665 	struct rte_flow *flow;
2666 	int rc;
2667 
2668 	flow = sfc_flow_zmalloc(error);
2669 	if (flow == NULL)
2670 		return -rte_errno;
2671 
2672 	sfc_adapter_lock(sa);
2673 
2674 	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2675 	if (rc == 0)
2676 		rc = sfc_flow_verify(sa, flow, error);
2677 
2678 	sfc_flow_free(sa, flow);
2679 
2680 	sfc_adapter_unlock(sa);
2681 
2682 	return rc;
2683 }
2684 
2685 static struct rte_flow *
2686 sfc_flow_create(struct rte_eth_dev *dev,
2687 		const struct rte_flow_attr *attr,
2688 		const struct rte_flow_item pattern[],
2689 		const struct rte_flow_action actions[],
2690 		struct rte_flow_error *error)
2691 {
2692 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2693 	struct rte_flow *flow = NULL;
2694 	int rc;
2695 
2696 	flow = sfc_flow_zmalloc(error);
2697 	if (flow == NULL)
2698 		goto fail_no_mem;
2699 
2700 	sfc_adapter_lock(sa);
2701 
2702 	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2703 	if (rc != 0)
2704 		goto fail_bad_value;
2705 
2706 	TAILQ_INSERT_TAIL(&sa->flow_list, flow, entries);
2707 
2708 	if (sa->state == SFC_ADAPTER_STARTED) {
2709 		rc = sfc_flow_insert(sa, flow, error);
2710 		if (rc != 0)
2711 			goto fail_flow_insert;
2712 	}
2713 
2714 	sfc_adapter_unlock(sa);
2715 
2716 	return flow;
2717 
2718 fail_flow_insert:
2719 	TAILQ_REMOVE(&sa->flow_list, flow, entries);
2720 
2721 fail_bad_value:
2722 	sfc_flow_free(sa, flow);
2723 	sfc_adapter_unlock(sa);
2724 
2725 fail_no_mem:
2726 	return NULL;
2727 }
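
/*
 * Applications reach the callbacks above through the generic rte_flow
 * API; a minimal sketch (port_id, attr, pattern and actions are assumed
 * to be defined by the caller):
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *handle;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		handle = rte_flow_create(port_id, &attr, pattern,
 *					 actions, &err);
 */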
2728 
2729 static int
2730 sfc_flow_destroy(struct rte_eth_dev *dev,
2731 		 struct rte_flow *flow,
2732 		 struct rte_flow_error *error)
2733 {
2734 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2735 	struct rte_flow *flow_ptr;
2736 	int rc = EINVAL;
2737 
2738 	sfc_adapter_lock(sa);
2739 
2740 	TAILQ_FOREACH(flow_ptr, &sa->flow_list, entries) {
2741 		if (flow_ptr == flow)
2742 			rc = 0;
2743 	}
2744 	if (rc != 0) {
2745 		rte_flow_error_set(error, rc,
2746 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2747 				   "Failed to find flow rule to destroy");
2748 		goto fail_bad_value;
2749 	}
2750 
2751 	if (sa->state == SFC_ADAPTER_STARTED)
2752 		rc = sfc_flow_remove(sa, flow, error);
2753 
2754 	TAILQ_REMOVE(&sa->flow_list, flow, entries);
2755 	sfc_flow_free(sa, flow);
2756 
2757 fail_bad_value:
2758 	sfc_adapter_unlock(sa);
2759 
2760 	return -rc;
2761 }
2762 
2763 static int
2764 sfc_flow_flush(struct rte_eth_dev *dev,
2765 	       struct rte_flow_error *error)
2766 {
2767 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2768 	struct rte_flow *flow;
2769 	int ret = 0;
2770 
2771 	sfc_adapter_lock(sa);
2772 
2773 	while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
2774 		if (sa->state == SFC_ADAPTER_STARTED) {
2775 			int rc;
2776 
2777 			rc = sfc_flow_remove(sa, flow, error);
2778 			if (rc != 0)
2779 				ret = rc;
2780 		}
2781 
2782 		TAILQ_REMOVE(&sa->flow_list, flow, entries);
2783 		sfc_flow_free(sa, flow);
2784 	}
2785 
2786 	sfc_adapter_unlock(sa);
2787 
2788 	return -ret;
2789 }
2790 
2791 static int
2792 sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
2793 		 struct rte_flow_error *error)
2794 {
2795 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2796 	int ret = 0;
2797 
2798 	sfc_adapter_lock(sa);
2799 	if (sa->state != SFC_ADAPTER_INITIALIZED) {
2800 		rte_flow_error_set(error, EBUSY,
2801 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2802 				   NULL, "please close the port first");
2803 		ret = -rte_errno;
2804 	} else {
2805 		sfc_sa2shared(sa)->isolated = (enable) ? B_TRUE : B_FALSE;
2806 	}
2807 	sfc_adapter_unlock(sa);
2808 
2809 	return ret;
2810 }
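
/*
 * Isolated mode is toggled via rte_flow_isolate() and, as enforced
 * above, only in the SFC_ADAPTER_INITIALIZED state; a sketch with a
 * hypothetical port_id:
 *
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_isolate(port_id, 1, &err) != 0)
 *		printf("isolate: %s\n", err.message ? err.message : "?");
 */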
2811 
2812 const struct rte_flow_ops sfc_flow_ops = {
2813 	.validate = sfc_flow_validate,
2814 	.create = sfc_flow_create,
2815 	.destroy = sfc_flow_destroy,
2816 	.flush = sfc_flow_flush,
2817 	.query = NULL,
2818 	.isolate = sfc_flow_isolate,
2819 };
2820 
2821 void
2822 sfc_flow_init(struct sfc_adapter *sa)
2823 {
2824 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2825 
2826 	TAILQ_INIT(&sa->flow_list);
2827 }
2828 
2829 void
2830 sfc_flow_fini(struct sfc_adapter *sa)
2831 {
2832 	struct rte_flow *flow;
2833 
2834 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2835 
2836 	while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
2837 		TAILQ_REMOVE(&sa->flow_list, flow, entries);
2838 		sfc_flow_free(sa, flow);
2839 	}
2840 }
2841 
2842 void
2843 sfc_flow_stop(struct sfc_adapter *sa)
2844 {
2845 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
2846 	struct sfc_rss *rss = &sas->rss;
2847 	struct rte_flow *flow;
2848 
2849 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2850 
2851 	TAILQ_FOREACH(flow, &sa->flow_list, entries)
2852 		sfc_flow_remove(sa, flow, NULL);
2853 
2854 	if (rss->dummy_rss_context != EFX_RSS_CONTEXT_DEFAULT) {
2855 		efx_rx_scale_context_free(sa->nic, rss->dummy_rss_context);
2856 		rss->dummy_rss_context = EFX_RSS_CONTEXT_DEFAULT;
2857 	}
2858 
2859 	/*
2860 	 * MAE counter service is not stopped on flow rule remove to avoid
2861 	 * extra work. Make sure that it is stopped here.
2862 	 */
2863 	sfc_mae_counter_stop(sa);
2864 }
2865 
2866 int
2867 sfc_flow_start(struct sfc_adapter *sa)
2868 {
2869 	struct rte_flow *flow;
2870 	int rc = 0;
2871 
2872 	sfc_log_init(sa, "entry");
2873 
2874 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2875 
2876 	TAILQ_FOREACH(flow, &sa->flow_list, entries) {
2877 		rc = sfc_flow_insert(sa, flow, NULL);
2878 		if (rc != 0)
2879 			goto fail_bad_flow;
2880 	}
2881 
2882 	sfc_log_init(sa, "done");
2883 
2884 fail_bad_flow:
2885 	return rc;
2886 }
2887