xref: /dpdk/drivers/net/sfc/sfc_flow.c (revision 89b5642d0d45c22c0ceab57efe3fab3b49ff4324)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright(c) 2019-2021 Xilinx, Inc.
4  * Copyright(c) 2017-2019 Solarflare Communications Inc.
5  *
6  * This software was jointly developed between OKTET Labs (under contract
7  * for Solarflare) and Solarflare Communications, Inc.
8  */
9 
10 #include <stdbool.h>
11 
12 #include <rte_byteorder.h>
13 #include <rte_tailq.h>
14 #include <rte_common.h>
15 #include <ethdev_driver.h>
16 #include <rte_ether.h>
17 #include <rte_flow.h>
18 #include <rte_flow_driver.h>
19 
20 #include "efx.h"
21 
22 #include "sfc.h"
23 #include "sfc_debug.h"
24 #include "sfc_rx.h"
25 #include "sfc_filter.h"
26 #include "sfc_flow.h"
27 #include "sfc_flow_rss.h"
28 #include "sfc_flow_tunnel.h"
29 #include "sfc_log.h"
30 #include "sfc_dp_rx.h"
31 #include "sfc_mae_counter.h"
32 #include "sfc_switch.h"
33 
34 struct sfc_flow_ops_by_spec {
35 	sfc_flow_parse_cb_t	*parse;
36 	sfc_flow_verify_cb_t	*verify;
37 	sfc_flow_cleanup_cb_t	*cleanup;
38 	sfc_flow_insert_cb_t	*insert;
39 	sfc_flow_remove_cb_t	*remove;
40 	sfc_flow_query_cb_t	*query;
41 };
42 
43 static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter;
44 static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_mae;
45 static sfc_flow_insert_cb_t sfc_flow_filter_insert;
46 static sfc_flow_remove_cb_t sfc_flow_filter_remove;
47 static sfc_flow_cleanup_cb_t sfc_flow_cleanup;
48 
49 static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = {
50 	.parse = sfc_flow_parse_rte_to_filter,
51 	.verify = NULL,
52 	.cleanup = sfc_flow_cleanup,
53 	.insert = sfc_flow_filter_insert,
54 	.remove = sfc_flow_filter_remove,
55 	.query = NULL,
56 };
57 
58 static const struct sfc_flow_ops_by_spec sfc_flow_ops_mae = {
59 	.parse = sfc_flow_parse_rte_to_mae,
60 	.verify = sfc_mae_flow_verify,
61 	.cleanup = sfc_mae_flow_cleanup,
62 	.insert = sfc_mae_flow_insert,
63 	.remove = sfc_mae_flow_remove,
64 	.query = sfc_mae_flow_query,
65 };
66 
67 static const struct sfc_flow_ops_by_spec *
68 sfc_flow_get_ops_by_spec(struct rte_flow *flow)
69 {
70 	struct sfc_flow_spec *spec = &flow->spec;
71 	const struct sfc_flow_ops_by_spec *ops = NULL;
72 
73 	switch (spec->type) {
74 	case SFC_FLOW_SPEC_FILTER:
75 		ops = &sfc_flow_ops_filter;
76 		break;
77 	case SFC_FLOW_SPEC_MAE:
78 		ops = &sfc_flow_ops_mae;
79 		break;
80 	default:
81 		SFC_ASSERT(false);
82 		break;
83 	}
84 
85 	return ops;
86 }
87 
88 /*
89  * Currently, the filter-based (VNIC) flow API is implemented so that
90  * each flow rule is converted to one or more hardware filters.
91  * All elements of a flow rule (attributes, pattern items, actions)
92  * correspond to one or more fields in the efx_filter_spec_s structure
93  * that describes the hardware filter.
94  * If a required field is left unset in the flow rule, several
95  * filter copies are created to cover all possible values
96  * of that field.
97  */
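/*
 * For example (illustrative, not exhaustive): if a rule does not match on
 * the destination MAC address but the hardware requires such a match, the
 * specification is duplicated with EFX_FILTER_MATCH_UNKNOWN_UCAST_DST and
 * EFX_FILTER_MATCH_UNKNOWN_MCAST_DST so that both unicast and multicast
 * destinations are covered (see sfc_flow_set_unknown_dst_flags() below).
 */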
98 
99 static sfc_flow_item_parse sfc_flow_parse_void;
100 static sfc_flow_item_parse sfc_flow_parse_eth;
101 static sfc_flow_item_parse sfc_flow_parse_vlan;
102 static sfc_flow_item_parse sfc_flow_parse_ipv4;
103 static sfc_flow_item_parse sfc_flow_parse_ipv6;
104 static sfc_flow_item_parse sfc_flow_parse_tcp;
105 static sfc_flow_item_parse sfc_flow_parse_udp;
106 static sfc_flow_item_parse sfc_flow_parse_vxlan;
107 static sfc_flow_item_parse sfc_flow_parse_geneve;
108 static sfc_flow_item_parse sfc_flow_parse_nvgre;
109 static sfc_flow_item_parse sfc_flow_parse_pppoex;
110 
111 typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
112 				     unsigned int filters_count_for_one_val,
113 				     struct rte_flow_error *error);
114 
115 typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
116 					efx_filter_spec_t *spec,
117 					struct sfc_filter *filter);
118 
119 struct sfc_flow_copy_flag {
120 	/* EFX filter specification match flag */
121 	efx_filter_match_flags_t flag;
122 	/* Number of values of corresponding field */
123 	unsigned int vals_count;
124 	/* Function to set values in specifications */
125 	sfc_flow_spec_set_vals *set_vals;
126 	/*
127 	 * Function to check that the specification is suitable
128 	 * for adding this match flag
129 	 */
130 	sfc_flow_spec_check *spec_check;
131 };
132 
133 static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
134 static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
135 static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
136 static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
137 static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
138 static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag;
139 static sfc_flow_spec_check sfc_flow_check_outer_vid_flag;
140 
141 static boolean_t
142 sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
143 {
144 	uint8_t sum = 0;
145 	unsigned int i;
146 
147 	for (i = 0; i < size; i++)
148 		sum |= buf[i];
149 
150 	return (sum == 0) ? B_TRUE : B_FALSE;
151 }
152 
153 /*
154  * Validate the item and prepare the spec and mask structures for parsing
155  */
156 int
157 sfc_flow_parse_init(const struct rte_flow_item *item,
158 		    const void **spec_ptr,
159 		    const void **mask_ptr,
160 		    const void *supp_mask,
161 		    const void *def_mask,
162 		    unsigned int size,
163 		    struct rte_flow_error *error)
164 {
165 	const uint8_t *spec;
166 	const uint8_t *mask;
167 	const uint8_t *last;
168 	uint8_t supp;
169 	unsigned int i;
170 
171 	if (item == NULL) {
172 		rte_flow_error_set(error, EINVAL,
173 				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
174 				   "NULL item");
175 		return -rte_errno;
176 	}
177 
178 	if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
179 		rte_flow_error_set(error, EINVAL,
180 				   RTE_FLOW_ERROR_TYPE_ITEM, item,
181 				   "Mask or last is set without spec");
182 		return -rte_errno;
183 	}
184 
185 	/*
186 	 * If "mask" is not set, the default mask is used,
187 	 * but if the default mask is NULL, "mask" must be set
188 	 */
189 	if (item->mask == NULL) {
190 		if (def_mask == NULL) {
191 			rte_flow_error_set(error, EINVAL,
192 				RTE_FLOW_ERROR_TYPE_ITEM, NULL,
193 				"Mask should be specified");
194 			return -rte_errno;
195 		}
196 
197 		mask = def_mask;
198 	} else {
199 		mask = item->mask;
200 	}
201 
202 	spec = item->spec;
203 	last = item->last;
204 
205 	if (spec == NULL)
206 		goto exit;
207 
208 	/*
209 	 * If field values in "last" are either 0 or equal to the corresponding
210 	 * values in "spec" then they are ignored
211 	 */
212 	if (last != NULL &&
213 	    !sfc_flow_is_zero(last, size) &&
214 	    memcmp(last, spec, size) != 0) {
215 		rte_flow_error_set(error, ENOTSUP,
216 				   RTE_FLOW_ERROR_TYPE_ITEM, item,
217 				   "Ranging is not supported");
218 		return -rte_errno;
219 	}
220 
221 	if (supp_mask == NULL) {
222 		rte_flow_error_set(error, EINVAL,
223 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
224 			"Supported mask for item should be specified");
225 		return -rte_errno;
226 	}
227 
228 	/* Check that mask does not ask for more match than supp_mask */
229 	for (i = 0; i < size; i++) {
230 		supp = ((const uint8_t *)supp_mask)[i];
231 
232 		if (~supp & mask[i]) {
233 			rte_flow_error_set(error, ENOTSUP,
234 					   RTE_FLOW_ERROR_TYPE_ITEM, item,
235 					   "Item's field is not supported");
236 			return -rte_errno;
237 		}
238 	}
239 
240 exit:
241 	*spec_ptr = spec;
242 	*mask_ptr = mask;
243 	return 0;
244 }
245 
246 /*
247  * Protocol parsers.
248  * Partial masking is not supported, so masks in items must be either
249  * full or empty (zeroed) and may be set only for the supported fields
250  * listed in supp_mask.
251  */
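/*
 * Illustrative example of the rule above: an IPv4 item whose
 * mask.hdr.src_addr is 0xffffffff (full) or 0 (empty) is accepted, whereas
 * a partial mask such as 0xffffff00 (/24) takes the "Bad mask" error path
 * of the corresponding parser.
 */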
252 
253 static int
254 sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
255 		    __rte_unused struct sfc_flow_parse_ctx *parse_ctx,
256 		    __rte_unused struct rte_flow_error *error)
257 {
258 	return 0;
259 }
260 
261 /**
262  * Convert Ethernet item to EFX filter specification.
263  *
264  * @param item[in]
265  *   Item specification. The outer frame specification may comprise only
266  *   source/destination addresses and the EtherType field. The inner frame
267  *   specification may contain the destination address only. The
268  *   individual/group (I/G) bit mask is supported, as well as empty and full
269  *   masks. If the mask is NULL, the default mask is used. Ranging is not supported.
270  * @param efx_spec[in, out]
271  *   EFX filter specification to update.
272  * @param[out] error
273  *   Perform verbose error reporting if not NULL.
274  */
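/*
 * A minimal example item accepted by this parser (hypothetical values):
 *
 *	static const struct rte_flow_item_eth eth_spec = {
 *		.hdr.dst_addr.addr_bytes = { 0x00, 0x00, 0x5e, 0x00, 0x53, 0x01 },
 *	};
 *	static const struct rte_flow_item_eth eth_mask = {
 *		.hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
 *	};
 *
 * With a fully masked destination address, the parser sets
 * EFX_FILTER_MATCH_LOC_MAC (or EFX_FILTER_MATCH_IFRM_LOC_MAC for the inner
 * frame) and copies the address into the EFX specification.
 */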
275 static int
276 sfc_flow_parse_eth(const struct rte_flow_item *item,
277 		   struct sfc_flow_parse_ctx *parse_ctx,
278 		   struct rte_flow_error *error)
279 {
280 	int rc;
281 	efx_filter_spec_t *efx_spec = parse_ctx->filter;
282 	const struct rte_flow_item_eth *spec = NULL;
283 	const struct rte_flow_item_eth *mask = NULL;
284 	const struct rte_flow_item_eth supp_mask = {
285 		.hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
286 		.hdr.src_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
287 		.hdr.ether_type = 0xffff,
288 	};
289 	const struct rte_flow_item_eth ifrm_supp_mask = {
290 		.hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
291 	};
292 	const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
293 		0x01, 0x00, 0x00, 0x00, 0x00, 0x00
294 	};
295 	const struct rte_flow_item_eth *supp_mask_p;
296 	const struct rte_flow_item_eth *def_mask_p;
297 	uint8_t *loc_mac = NULL;
298 	boolean_t is_ifrm = (efx_spec->efs_encap_type !=
299 		EFX_TUNNEL_PROTOCOL_NONE);
300 
301 	if (is_ifrm) {
302 		supp_mask_p = &ifrm_supp_mask;
303 		def_mask_p = &ifrm_supp_mask;
304 		loc_mac = efx_spec->efs_ifrm_loc_mac;
305 	} else {
306 		supp_mask_p = &supp_mask;
307 		def_mask_p = &rte_flow_item_eth_mask;
308 		loc_mac = efx_spec->efs_loc_mac;
309 	}
310 
311 	rc = sfc_flow_parse_init(item,
312 				 (const void **)&spec,
313 				 (const void **)&mask,
314 				 supp_mask_p, def_mask_p,
315 				 sizeof(struct rte_flow_item_eth),
316 				 error);
317 	if (rc != 0)
318 		return rc;
319 
320 	/* If "spec" is not set, could be any Ethernet */
321 	if (spec == NULL)
322 		return 0;
323 
324 	if (rte_is_same_ether_addr(&mask->hdr.dst_addr, &supp_mask.hdr.dst_addr)) {
325 		efx_spec->efs_match_flags |= is_ifrm ?
326 			EFX_FILTER_MATCH_IFRM_LOC_MAC :
327 			EFX_FILTER_MATCH_LOC_MAC;
328 		rte_memcpy(loc_mac, spec->hdr.dst_addr.addr_bytes,
329 			   EFX_MAC_ADDR_LEN);
330 	} else if (memcmp(mask->hdr.dst_addr.addr_bytes, ig_mask,
331 			  EFX_MAC_ADDR_LEN) == 0) {
332 		if (rte_is_unicast_ether_addr(&spec->hdr.dst_addr))
333 			efx_spec->efs_match_flags |= is_ifrm ?
334 				EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
335 				EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
336 		else
337 			efx_spec->efs_match_flags |= is_ifrm ?
338 				EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
339 				EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
340 	} else if (!rte_is_zero_ether_addr(&mask->hdr.dst_addr)) {
341 		goto fail_bad_mask;
342 	}
343 
344 	/*
345 	 * ifrm_supp_mask ensures that the source address and
346 	 * EtherType masks are equal to zero in the inner frame,
347 	 * so these fields are filled in only for the outer frame
348 	 */
349 	if (rte_is_same_ether_addr(&mask->hdr.src_addr, &supp_mask.hdr.src_addr)) {
350 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
351 		rte_memcpy(efx_spec->efs_rem_mac, spec->hdr.src_addr.addr_bytes,
352 			   EFX_MAC_ADDR_LEN);
353 	} else if (!rte_is_zero_ether_addr(&mask->hdr.src_addr)) {
354 		goto fail_bad_mask;
355 	}
356 
357 	/*
358 	 * Ether type is in big-endian byte order in item and
359 	 * in little-endian in efx_spec, so byte swap is used
360 	 */
361 	if (mask->hdr.ether_type == supp_mask.hdr.ether_type) {
362 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
363 		efx_spec->efs_ether_type = rte_bswap16(spec->hdr.ether_type);
364 	} else if (mask->hdr.ether_type != 0) {
365 		goto fail_bad_mask;
366 	}
367 
368 	return 0;
369 
370 fail_bad_mask:
371 	rte_flow_error_set(error, EINVAL,
372 			   RTE_FLOW_ERROR_TYPE_ITEM, item,
373 			   "Bad mask in the ETH pattern item");
374 	return -rte_errno;
375 }
376 
377 /**
378  * Convert VLAN item to EFX filter specification.
379  *
380  * @param item[in]
381  *   Item specification. Only the VID and inner EtherType fields are supported.
382  *   The mask cannot be NULL. Ranging is not supported.
383  * @param efx_spec[in, out]
384  *   EFX filter specification to update.
385  * @param[out] error
386  *   Perform verbose error reporting if not NULL.
387  */
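/*
 * A minimal example item accepted by this parser (hypothetical values):
 *
 *	static const struct rte_flow_item_vlan vlan_spec = {
 *		.hdr.vlan_tci = RTE_BE16(0x0123),
 *	};
 *	static const struct rte_flow_item_vlan vlan_mask = {
 *		.hdr.vlan_tci = RTE_BE16(0x0fff),
 *	};
 *
 * The TCI mask must cover exactly the VID bits (0x0fff), and the mask
 * cannot be omitted because no default mask is passed to
 * sfc_flow_parse_init().
 */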
388 static int
389 sfc_flow_parse_vlan(const struct rte_flow_item *item,
390 		    struct sfc_flow_parse_ctx *parse_ctx,
391 		    struct rte_flow_error *error)
392 {
393 	int rc;
394 	uint16_t vid;
395 	efx_filter_spec_t *efx_spec = parse_ctx->filter;
396 	const struct rte_flow_item_vlan *spec = NULL;
397 	const struct rte_flow_item_vlan *mask = NULL;
398 	const struct rte_flow_item_vlan supp_mask = {
399 		.hdr.vlan_tci = rte_cpu_to_be_16(RTE_ETH_VLAN_ID_MAX),
400 		.hdr.eth_proto = RTE_BE16(0xffff),
401 	};
402 
403 	rc = sfc_flow_parse_init(item,
404 				 (const void **)&spec,
405 				 (const void **)&mask,
406 				 &supp_mask,
407 				 NULL,
408 				 sizeof(struct rte_flow_item_vlan),
409 				 error);
410 	if (rc != 0)
411 		return rc;
412 
413 	/*
414 	 * VID is in big-endian byte order in item and
415 	 * in little-endian in efx_spec, so byte swap is used.
416 	 * If two VLAN items are included, the first matches
417 	 * the outer tag and the next matches the inner tag.
418 	 */
419 	if (mask->hdr.vlan_tci == supp_mask.hdr.vlan_tci) {
420 		/* Apply mask to keep VID only */
421 		vid = rte_bswap16(spec->hdr.vlan_tci & mask->hdr.vlan_tci);
422 
423 		if (!(efx_spec->efs_match_flags &
424 		      EFX_FILTER_MATCH_OUTER_VID)) {
425 			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
426 			efx_spec->efs_outer_vid = vid;
427 		} else if (!(efx_spec->efs_match_flags &
428 			     EFX_FILTER_MATCH_INNER_VID)) {
429 			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
430 			efx_spec->efs_inner_vid = vid;
431 		} else {
432 			rte_flow_error_set(error, EINVAL,
433 					   RTE_FLOW_ERROR_TYPE_ITEM, item,
434 					   "More than two VLAN items");
435 			return -rte_errno;
436 		}
437 	} else {
438 		rte_flow_error_set(error, EINVAL,
439 				   RTE_FLOW_ERROR_TYPE_ITEM, item,
440 				   "VLAN ID in TCI match is required");
441 		return -rte_errno;
442 	}
443 
444 	if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
445 		rte_flow_error_set(error, EINVAL,
446 				   RTE_FLOW_ERROR_TYPE_ITEM, item,
447 				   "VLAN TPID matching is not supported");
448 		return -rte_errno;
449 	}
450 	if (mask->hdr.eth_proto == supp_mask.hdr.eth_proto) {
451 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
452 		efx_spec->efs_ether_type = rte_bswap16(spec->hdr.eth_proto);
453 	} else if (mask->hdr.eth_proto) {
454 		rte_flow_error_set(error, EINVAL,
455 				   RTE_FLOW_ERROR_TYPE_ITEM, item,
456 				   "Bad mask for VLAN inner type");
457 		return -rte_errno;
458 	}
459 
460 	return 0;
461 }
462 
463 /**
464  * Convert IPv4 item to EFX filter specification.
465  *
466  * @param item[in]
467  *   Item specification. Only source and destination addresses and
468  *   protocol fields are supported. If the mask is NULL, default
469  *   mask will be used. Ranging is not supported.
470  * @param efx_spec[in, out]
471  *   EFX filter specification to update.
472  * @param[out] error
473  *   Perform verbose error reporting if not NULL.
474  */
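/*
 * A minimal example item accepted by this parser (hypothetical values):
 *
 *	static const struct rte_flow_item_ipv4 ipv4_spec = {
 *		.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 0, 2, 1)),
 *	};
 *	static const struct rte_flow_item_ipv4 ipv4_mask = {
 *		.hdr.dst_addr = RTE_BE32(UINT32_MAX),
 *	};
 *
 * A fully masked destination address sets EFX_FILTER_MATCH_LOC_HOST;
 * partial (prefix) masks take the "Bad mask" error path.
 */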
475 static int
476 sfc_flow_parse_ipv4(const struct rte_flow_item *item,
477 		    struct sfc_flow_parse_ctx *parse_ctx,
478 		    struct rte_flow_error *error)
479 {
480 	int rc;
481 	efx_filter_spec_t *efx_spec = parse_ctx->filter;
482 	const struct rte_flow_item_ipv4 *spec = NULL;
483 	const struct rte_flow_item_ipv4 *mask = NULL;
484 	const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
485 	const struct rte_flow_item_ipv4 supp_mask = {
486 		.hdr = {
487 			.src_addr = 0xffffffff,
488 			.dst_addr = 0xffffffff,
489 			.next_proto_id = 0xff,
490 		}
491 	};
492 
493 	rc = sfc_flow_parse_init(item,
494 				 (const void **)&spec,
495 				 (const void **)&mask,
496 				 &supp_mask,
497 				 &rte_flow_item_ipv4_mask,
498 				 sizeof(struct rte_flow_item_ipv4),
499 				 error);
500 	if (rc != 0)
501 		return rc;
502 
503 	/*
504 	 * Filtering by IPv4 source and destination addresses requires
505 	 * the appropriate ETHER_TYPE in hardware filters
506 	 */
507 	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
508 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
509 		efx_spec->efs_ether_type = ether_type_ipv4;
510 	} else if (efx_spec->efs_ether_type != ether_type_ipv4) {
511 		rte_flow_error_set(error, EINVAL,
512 			RTE_FLOW_ERROR_TYPE_ITEM, item,
513 			"Ethertype in pattern with IPV4 item should be appropriate");
514 		return -rte_errno;
515 	}
516 
517 	if (spec == NULL)
518 		return 0;
519 
520 	/*
521 	 * IPv4 addresses are in big-endian byte order in item and in
522 	 * efx_spec
523 	 */
524 	if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
525 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
526 		efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
527 	} else if (mask->hdr.src_addr != 0) {
528 		goto fail_bad_mask;
529 	}
530 
531 	if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
532 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
533 		efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
534 	} else if (mask->hdr.dst_addr != 0) {
535 		goto fail_bad_mask;
536 	}
537 
538 	if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
539 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
540 		efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
541 	} else if (mask->hdr.next_proto_id != 0) {
542 		goto fail_bad_mask;
543 	}
544 
545 	return 0;
546 
547 fail_bad_mask:
548 	rte_flow_error_set(error, EINVAL,
549 			   RTE_FLOW_ERROR_TYPE_ITEM, item,
550 			   "Bad mask in the IPV4 pattern item");
551 	return -rte_errno;
552 }
553 
554 /**
555  * Convert IPv6 item to EFX filter specification.
556  *
557  * @param item[in]
558  *   Item specification. Only source and destination addresses and
559  *   next header fields are supported. If the mask is NULL, default
560  *   mask will be used. Ranging is not supported.
561  * @param efx_spec[in, out]
562  *   EFX filter specification to update.
563  * @param[out] error
564  *   Perform verbose error reporting if not NULL.
565  */
566 static int
567 sfc_flow_parse_ipv6(const struct rte_flow_item *item,
568 		    struct sfc_flow_parse_ctx *parse_ctx,
569 		    struct rte_flow_error *error)
570 {
571 	int rc;
572 	efx_filter_spec_t *efx_spec = parse_ctx->filter;
573 	const struct rte_flow_item_ipv6 *spec = NULL;
574 	const struct rte_flow_item_ipv6 *mask = NULL;
575 	const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
576 	const struct rte_flow_item_ipv6 supp_mask = {
577 		.hdr = {
578 			.src_addr = RTE_IPV6_MASK_FULL,
579 			.dst_addr = RTE_IPV6_MASK_FULL,
580 			.proto = 0xff,
581 		}
582 	};
583 
584 	rc = sfc_flow_parse_init(item,
585 				 (const void **)&spec,
586 				 (const void **)&mask,
587 				 &supp_mask,
588 				 &rte_flow_item_ipv6_mask,
589 				 sizeof(struct rte_flow_item_ipv6),
590 				 error);
591 	if (rc != 0)
592 		return rc;
593 
594 	/*
595 	 * Filtering by IPv6 source and destination addresses requires
596 	 * the appropriate ETHER_TYPE in hardware filters
597 	 */
598 	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
599 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
600 		efx_spec->efs_ether_type = ether_type_ipv6;
601 	} else if (efx_spec->efs_ether_type != ether_type_ipv6) {
602 		rte_flow_error_set(error, EINVAL,
603 			RTE_FLOW_ERROR_TYPE_ITEM, item,
604 			"Ethertype in pattern with IPV6 item should be appropriate");
605 		return -rte_errno;
606 	}
607 
608 	if (spec == NULL)
609 		return 0;
610 
611 	/*
612 	 * IPv6 addresses are in big-endian byte order in item and in
613 	 * efx_spec
614 	 */
615 	if (memcmp(&mask->hdr.src_addr, &supp_mask.hdr.src_addr,
616 		   sizeof(mask->hdr.src_addr)) == 0) {
617 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
618 
619 		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
620 				 sizeof(spec->hdr.src_addr));
621 		rte_memcpy(&efx_spec->efs_rem_host, &spec->hdr.src_addr,
622 			   sizeof(efx_spec->efs_rem_host));
623 	} else if (!sfc_flow_is_zero(mask->hdr.src_addr.a,
624 				     sizeof(mask->hdr.src_addr))) {
625 		goto fail_bad_mask;
626 	}
627 
628 	if (memcmp(&mask->hdr.dst_addr, &supp_mask.hdr.dst_addr,
629 		   sizeof(mask->hdr.dst_addr)) == 0) {
630 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
631 
632 		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
633 				 sizeof(spec->hdr.dst_addr));
634 		rte_memcpy(&efx_spec->efs_loc_host, &spec->hdr.dst_addr,
635 			   sizeof(efx_spec->efs_loc_host));
636 	} else if (!sfc_flow_is_zero(mask->hdr.dst_addr.a,
637 				     sizeof(mask->hdr.dst_addr))) {
638 		goto fail_bad_mask;
639 	}
640 
641 	if (mask->hdr.proto == supp_mask.hdr.proto) {
642 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
643 		efx_spec->efs_ip_proto = spec->hdr.proto;
644 	} else if (mask->hdr.proto != 0) {
645 		goto fail_bad_mask;
646 	}
647 
648 	return 0;
649 
650 fail_bad_mask:
651 	rte_flow_error_set(error, EINVAL,
652 			   RTE_FLOW_ERROR_TYPE_ITEM, item,
653 			   "Bad mask in the IPV6 pattern item");
654 	return -rte_errno;
655 }
656 
657 /**
658  * Convert TCP item to EFX filter specification.
659  *
660  * @param item[in]
661  *   Item specification. Only the source and destination port fields
662  *   are supported. If the mask is NULL, default mask will be used.
663  *   Ranging is not supported.
664  * @param efx_spec[in, out]
665  *   EFX filter specification to update.
666  * @param[out] error
667  *   Perform verbose error reporting if not NULL.
668  */
669 static int
670 sfc_flow_parse_tcp(const struct rte_flow_item *item,
671 		   struct sfc_flow_parse_ctx *parse_ctx,
672 		   struct rte_flow_error *error)
673 {
674 	int rc;
675 	efx_filter_spec_t *efx_spec = parse_ctx->filter;
676 	const struct rte_flow_item_tcp *spec = NULL;
677 	const struct rte_flow_item_tcp *mask = NULL;
678 	const struct rte_flow_item_tcp supp_mask = {
679 		.hdr = {
680 			.src_port = 0xffff,
681 			.dst_port = 0xffff,
682 		}
683 	};
684 
685 	rc = sfc_flow_parse_init(item,
686 				 (const void **)&spec,
687 				 (const void **)&mask,
688 				 &supp_mask,
689 				 &rte_flow_item_tcp_mask,
690 				 sizeof(struct rte_flow_item_tcp),
691 				 error);
692 	if (rc != 0)
693 		return rc;
694 
695 	/*
696 	 * Filtering by TCP source and destination ports requires
697 	 * the appropriate IP_PROTO in hardware filters
698 	 */
699 	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
700 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
701 		efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
702 	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
703 		rte_flow_error_set(error, EINVAL,
704 			RTE_FLOW_ERROR_TYPE_ITEM, item,
705 			"IP proto in pattern with TCP item should be appropriate");
706 		return -rte_errno;
707 	}
708 
709 	if (spec == NULL)
710 		return 0;
711 
712 	/*
713 	 * Source and destination ports are in big-endian byte order in item and
714 	 * in little-endian in efx_spec, so byte swap is used
715 	 */
716 	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
717 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
718 		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
719 	} else if (mask->hdr.src_port != 0) {
720 		goto fail_bad_mask;
721 	}
722 
723 	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
724 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
725 		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
726 	} else if (mask->hdr.dst_port != 0) {
727 		goto fail_bad_mask;
728 	}
729 
730 	return 0;
731 
732 fail_bad_mask:
733 	rte_flow_error_set(error, EINVAL,
734 			   RTE_FLOW_ERROR_TYPE_ITEM, item,
735 			   "Bad mask in the TCP pattern item");
736 	return -rte_errno;
737 }
738 
739 /**
740  * Convert UDP item to EFX filter specification.
741  *
742  * @param item[in]
743  *   Item specification. Only the source and destination port fields
744  *   are supported. If the mask is NULL, default mask will be used.
745  *   Ranging is not supported.
746  * @param efx_spec[in, out]
747  *   EFX filter specification to update.
748  * @param[out] error
749  *   Perform verbose error reporting if not NULL.
750  */
751 static int
752 sfc_flow_parse_udp(const struct rte_flow_item *item,
753 		   struct sfc_flow_parse_ctx *parse_ctx,
754 		   struct rte_flow_error *error)
755 {
756 	int rc;
757 	efx_filter_spec_t *efx_spec = parse_ctx->filter;
758 	const struct rte_flow_item_udp *spec = NULL;
759 	const struct rte_flow_item_udp *mask = NULL;
760 	const struct rte_flow_item_udp supp_mask = {
761 		.hdr = {
762 			.src_port = 0xffff,
763 			.dst_port = 0xffff,
764 		}
765 	};
766 
767 	rc = sfc_flow_parse_init(item,
768 				 (const void **)&spec,
769 				 (const void **)&mask,
770 				 &supp_mask,
771 				 &rte_flow_item_udp_mask,
772 				 sizeof(struct rte_flow_item_udp),
773 				 error);
774 	if (rc != 0)
775 		return rc;
776 
777 	/*
778 	 * Filtering by UDP source and destination ports requires
779 	 * the appropriate IP_PROTO in hardware filters
780 	 */
781 	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
782 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
783 		efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
784 	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
785 		rte_flow_error_set(error, EINVAL,
786 			RTE_FLOW_ERROR_TYPE_ITEM, item,
787 			"IP proto in pattern with UDP item should be appropriate");
788 		return -rte_errno;
789 	}
790 
791 	if (spec == NULL)
792 		return 0;
793 
794 	/*
795 	 * Source and destination ports are in big-endian byte order in item and
796 	 * in little-endian in efx_spec, so byte swap is used
797 	 */
798 	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
799 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
800 		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
801 	} else if (mask->hdr.src_port != 0) {
802 		goto fail_bad_mask;
803 	}
804 
805 	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
806 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
807 		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
808 	} else if (mask->hdr.dst_port != 0) {
809 		goto fail_bad_mask;
810 	}
811 
812 	return 0;
813 
814 fail_bad_mask:
815 	rte_flow_error_set(error, EINVAL,
816 			   RTE_FLOW_ERROR_TYPE_ITEM, item,
817 			   "Bad mask in the UDP pattern item");
818 	return -rte_errno;
819 }
820 
821 /*
822  * Filters for encapsulated packets match based on the EtherType and IP
823  * protocol in the outer frame.
824  */
825 static int
826 sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
827 					efx_filter_spec_t *efx_spec,
828 					uint8_t ip_proto,
829 					struct rte_flow_error *error)
830 {
831 	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
832 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
833 		efx_spec->efs_ip_proto = ip_proto;
834 	} else if (efx_spec->efs_ip_proto != ip_proto) {
835 		switch (ip_proto) {
836 		case EFX_IPPROTO_UDP:
837 			rte_flow_error_set(error, EINVAL,
838 				RTE_FLOW_ERROR_TYPE_ITEM, item,
839 				"Outer IP header protocol must be UDP "
840 				"in VxLAN/GENEVE pattern");
841 			return -rte_errno;
842 
843 		case EFX_IPPROTO_GRE:
844 			rte_flow_error_set(error, EINVAL,
845 				RTE_FLOW_ERROR_TYPE_ITEM, item,
846 				"Outer IP header protocol must be GRE "
847 				"in NVGRE pattern");
848 			return -rte_errno;
849 
850 		default:
851 			rte_flow_error_set(error, EINVAL,
852 				RTE_FLOW_ERROR_TYPE_ITEM, item,
853 				"Only VxLAN/GENEVE/NVGRE tunneling patterns "
854 				"are supported");
855 			return -rte_errno;
856 		}
857 	}
858 
859 	if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
860 	    efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
861 	    efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
862 		rte_flow_error_set(error, EINVAL,
863 			RTE_FLOW_ERROR_TYPE_ITEM, item,
864 			"Outer frame EtherType in pattern with tunneling "
865 			"must be IPv4 or IPv6");
866 		return -rte_errno;
867 	}
868 
869 	return 0;
870 }
871 
872 static int
873 sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
874 				  const uint8_t *vni_or_vsid_val,
875 				  const uint8_t *vni_or_vsid_mask,
876 				  const struct rte_flow_item *item,
877 				  struct rte_flow_error *error)
878 {
879 	const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
880 		0xff, 0xff, 0xff
881 	};
882 
883 	if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
884 		   EFX_VNI_OR_VSID_LEN) == 0) {
885 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
886 		rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
887 			   EFX_VNI_OR_VSID_LEN);
888 	} else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
889 		rte_flow_error_set(error, EINVAL,
890 				   RTE_FLOW_ERROR_TYPE_ITEM, item,
891 				   "Unsupported VNI/VSID mask");
892 		return -rte_errno;
893 	}
894 
895 	return 0;
896 }
897 
898 /**
899  * Convert VXLAN item to EFX filter specification.
900  *
901  * @param item[in]
902  *   Item specification. Only VXLAN network identifier field is supported.
903  *   If the mask is NULL, default mask will be used.
904  *   Ranging is not supported.
905  * @param efx_spec[in, out]
906  *   EFX filter specification to update.
907  * @param[out] error
908  *   Perform verbose error reporting if not NULL.
909  */
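/*
 * A minimal example item accepted by this parser (hypothetical values):
 *
 *	static const struct rte_flow_item_vxlan vxlan_spec = {
 *		.hdr.vni = { 0x12, 0x34, 0x56 },
 *	};
 *
 * With the default (full) VNI mask, EFX_FILTER_MATCH_VNI_OR_VSID is set
 * and the three VNI bytes are copied into the EFX specification.
 */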
910 static int
911 sfc_flow_parse_vxlan(const struct rte_flow_item *item,
912 		     struct sfc_flow_parse_ctx *parse_ctx,
913 		     struct rte_flow_error *error)
914 {
915 	int rc;
916 	efx_filter_spec_t *efx_spec = parse_ctx->filter;
917 	const struct rte_flow_item_vxlan *spec = NULL;
918 	const struct rte_flow_item_vxlan *mask = NULL;
919 	const struct rte_flow_item_vxlan supp_mask = {
920 		.hdr.vni = { 0xff, 0xff, 0xff }
921 	};
922 
923 	rc = sfc_flow_parse_init(item,
924 				 (const void **)&spec,
925 				 (const void **)&mask,
926 				 &supp_mask,
927 				 &rte_flow_item_vxlan_mask,
928 				 sizeof(struct rte_flow_item_vxlan),
929 				 error);
930 	if (rc != 0)
931 		return rc;
932 
933 	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
934 						     EFX_IPPROTO_UDP, error);
935 	if (rc != 0)
936 		return rc;
937 
938 	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
939 	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
940 
941 	if (spec == NULL)
942 		return 0;
943 
944 	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->hdr.vni,
945 					       mask->hdr.vni, item, error);
946 
947 	return rc;
948 }
949 
950 /**
951  * Convert GENEVE item to EFX filter specification.
952  *
953  * @param item[in]
954  *   Item specification. Only the Virtual Network Identifier and protocol
955  *   type fields are supported, and the protocol type may only be Ethernet (0x6558).
956  *   If the mask is NULL, default mask will be used.
957  *   Ranging is not supported.
958  * @param efx_spec[in, out]
959  *   EFX filter specification to update.
960  * @param[out] error
961  *   Perform verbose error reporting if not NULL.
962  */
963 static int
964 sfc_flow_parse_geneve(const struct rte_flow_item *item,
965 		      struct sfc_flow_parse_ctx *parse_ctx,
966 		      struct rte_flow_error *error)
967 {
968 	int rc;
969 	efx_filter_spec_t *efx_spec = parse_ctx->filter;
970 	const struct rte_flow_item_geneve *spec = NULL;
971 	const struct rte_flow_item_geneve *mask = NULL;
972 	const struct rte_flow_item_geneve supp_mask = {
973 		.protocol = RTE_BE16(0xffff),
974 		.vni = { 0xff, 0xff, 0xff }
975 	};
976 
977 	rc = sfc_flow_parse_init(item,
978 				 (const void **)&spec,
979 				 (const void **)&mask,
980 				 &supp_mask,
981 				 &rte_flow_item_geneve_mask,
982 				 sizeof(struct rte_flow_item_geneve),
983 				 error);
984 	if (rc != 0)
985 		return rc;
986 
987 	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
988 						     EFX_IPPROTO_UDP, error);
989 	if (rc != 0)
990 		return rc;
991 
992 	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
993 	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
994 
995 	if (spec == NULL)
996 		return 0;
997 
998 	if (mask->protocol == supp_mask.protocol) {
999 		if (spec->protocol != rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
1000 			rte_flow_error_set(error, EINVAL,
1001 				RTE_FLOW_ERROR_TYPE_ITEM, item,
1002 				"GENEVE encap. protocol must be Ethernet "
1003 				"(0x6558) in the GENEVE pattern item");
1004 			return -rte_errno;
1005 		}
1006 	} else if (mask->protocol != 0) {
1007 		rte_flow_error_set(error, EINVAL,
1008 			RTE_FLOW_ERROR_TYPE_ITEM, item,
1009 			"Unsupported mask for GENEVE encap. protocol");
1010 		return -rte_errno;
1011 	}
1012 
1013 	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
1014 					       mask->vni, item, error);
1015 
1016 	return rc;
1017 }
1018 
1019 /**
1020  * Convert NVGRE item to EFX filter specification.
1021  *
1022  * @param item[in]
1023  *   Item specification. Only virtual subnet ID field is supported.
1024  *   If the mask is NULL, default mask will be used.
1025  *   Ranging is not supported.
1026  * @param efx_spec[in, out]
1027  *   EFX filter specification to update.
1028  * @param[out] error
1029  *   Perform verbose error reporting if not NULL.
1030  */
1031 static int
1032 sfc_flow_parse_nvgre(const struct rte_flow_item *item,
1033 		     struct sfc_flow_parse_ctx *parse_ctx,
1034 		     struct rte_flow_error *error)
1035 {
1036 	int rc;
1037 	efx_filter_spec_t *efx_spec = parse_ctx->filter;
1038 	const struct rte_flow_item_nvgre *spec = NULL;
1039 	const struct rte_flow_item_nvgre *mask = NULL;
1040 	const struct rte_flow_item_nvgre supp_mask = {
1041 		.tni = { 0xff, 0xff, 0xff }
1042 	};
1043 
1044 	rc = sfc_flow_parse_init(item,
1045 				 (const void **)&spec,
1046 				 (const void **)&mask,
1047 				 &supp_mask,
1048 				 &rte_flow_item_nvgre_mask,
1049 				 sizeof(struct rte_flow_item_nvgre),
1050 				 error);
1051 	if (rc != 0)
1052 		return rc;
1053 
1054 	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
1055 						     EFX_IPPROTO_GRE, error);
1056 	if (rc != 0)
1057 		return rc;
1058 
1059 	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
1060 	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
1061 
1062 	if (spec == NULL)
1063 		return 0;
1064 
1065 	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
1066 					       mask->tni, item, error);
1067 
1068 	return rc;
1069 }
1070 
1071 /**
1072  * Convert PPPoEx item to EFX filter specification.
1073  *
1074  * @param item[in]
1075  *   Item specification.
1076  *   Matching on PPPoEx fields is not supported.
1077  *   This item can only be used to set or validate the EtherType filter.
1078  *   Only zero masks are allowed.
1079  *   Ranging is not supported.
1080  * @param efx_spec[in, out]
1081  *   EFX filter specification to update.
1082  * @param[out] error
1083  *   Perform verbose error reporting if not NULL.
1084  */
1085 static int
1086 sfc_flow_parse_pppoex(const struct rte_flow_item *item,
1087 		      struct sfc_flow_parse_ctx *parse_ctx,
1088 		      struct rte_flow_error *error)
1089 {
1090 	efx_filter_spec_t *efx_spec = parse_ctx->filter;
1091 	const struct rte_flow_item_pppoe *spec = NULL;
1092 	const struct rte_flow_item_pppoe *mask = NULL;
1093 	const struct rte_flow_item_pppoe supp_mask = {};
1094 	const struct rte_flow_item_pppoe def_mask = {};
1095 	uint16_t ether_type;
1096 	int rc;
1097 
1098 	rc = sfc_flow_parse_init(item,
1099 				 (const void **)&spec,
1100 				 (const void **)&mask,
1101 				 &supp_mask,
1102 				 &def_mask,
1103 				 sizeof(struct rte_flow_item_pppoe),
1104 				 error);
1105 	if (rc != 0)
1106 		return rc;
1107 
1108 	if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED)
1109 		ether_type = RTE_ETHER_TYPE_PPPOE_DISCOVERY;
1110 	else
1111 		ether_type = RTE_ETHER_TYPE_PPPOE_SESSION;
1112 
1113 	if ((efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) != 0) {
1114 		if (efx_spec->efs_ether_type != ether_type) {
1115 			rte_flow_error_set(error, EINVAL,
1116 					   RTE_FLOW_ERROR_TYPE_ITEM, item,
1117 					   "Invalid EtherType for a PPPoE flow item");
1118 			return -rte_errno;
1119 		}
1120 	} else {
1121 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
1122 		efx_spec->efs_ether_type = ether_type;
1123 	}
1124 
1125 	return 0;
1126 }
1127 
1128 static const struct sfc_flow_item sfc_flow_items[] = {
1129 	{
1130 		.type = RTE_FLOW_ITEM_TYPE_VOID,
1131 		.name = "VOID",
1132 		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1133 		.layer = SFC_FLOW_ITEM_ANY_LAYER,
1134 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1135 		.parse = sfc_flow_parse_void,
1136 	},
1137 	{
1138 		.type = RTE_FLOW_ITEM_TYPE_ETH,
1139 		.name = "ETH",
1140 		.prev_layer = SFC_FLOW_ITEM_START_LAYER,
1141 		.layer = SFC_FLOW_ITEM_L2,
1142 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1143 		.parse = sfc_flow_parse_eth,
1144 	},
1145 	{
1146 		.type = RTE_FLOW_ITEM_TYPE_VLAN,
1147 		.name = "VLAN",
1148 		.prev_layer = SFC_FLOW_ITEM_L2,
1149 		.layer = SFC_FLOW_ITEM_L2,
1150 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1151 		.parse = sfc_flow_parse_vlan,
1152 	},
1153 	{
1154 		.type = RTE_FLOW_ITEM_TYPE_PPPOED,
1155 		.name = "PPPOED",
1156 		.prev_layer = SFC_FLOW_ITEM_L2,
1157 		.layer = SFC_FLOW_ITEM_L2,
1158 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1159 		.parse = sfc_flow_parse_pppoex,
1160 	},
1161 	{
1162 		.type = RTE_FLOW_ITEM_TYPE_PPPOES,
1163 		.name = "PPPOES",
1164 		.prev_layer = SFC_FLOW_ITEM_L2,
1165 		.layer = SFC_FLOW_ITEM_L2,
1166 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1167 		.parse = sfc_flow_parse_pppoex,
1168 	},
1169 	{
1170 		.type = RTE_FLOW_ITEM_TYPE_IPV4,
1171 		.name = "IPV4",
1172 		.prev_layer = SFC_FLOW_ITEM_L2,
1173 		.layer = SFC_FLOW_ITEM_L3,
1174 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1175 		.parse = sfc_flow_parse_ipv4,
1176 	},
1177 	{
1178 		.type = RTE_FLOW_ITEM_TYPE_IPV6,
1179 		.name = "IPV6",
1180 		.prev_layer = SFC_FLOW_ITEM_L2,
1181 		.layer = SFC_FLOW_ITEM_L3,
1182 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1183 		.parse = sfc_flow_parse_ipv6,
1184 	},
1185 	{
1186 		.type = RTE_FLOW_ITEM_TYPE_TCP,
1187 		.name = "TCP",
1188 		.prev_layer = SFC_FLOW_ITEM_L3,
1189 		.layer = SFC_FLOW_ITEM_L4,
1190 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1191 		.parse = sfc_flow_parse_tcp,
1192 	},
1193 	{
1194 		.type = RTE_FLOW_ITEM_TYPE_UDP,
1195 		.name = "UDP",
1196 		.prev_layer = SFC_FLOW_ITEM_L3,
1197 		.layer = SFC_FLOW_ITEM_L4,
1198 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1199 		.parse = sfc_flow_parse_udp,
1200 	},
1201 	{
1202 		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
1203 		.name = "VXLAN",
1204 		.prev_layer = SFC_FLOW_ITEM_L4,
1205 		.layer = SFC_FLOW_ITEM_START_LAYER,
1206 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1207 		.parse = sfc_flow_parse_vxlan,
1208 	},
1209 	{
1210 		.type = RTE_FLOW_ITEM_TYPE_GENEVE,
1211 		.name = "GENEVE",
1212 		.prev_layer = SFC_FLOW_ITEM_L4,
1213 		.layer = SFC_FLOW_ITEM_START_LAYER,
1214 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1215 		.parse = sfc_flow_parse_geneve,
1216 	},
1217 	{
1218 		.type = RTE_FLOW_ITEM_TYPE_NVGRE,
1219 		.name = "NVGRE",
1220 		.prev_layer = SFC_FLOW_ITEM_L3,
1221 		.layer = SFC_FLOW_ITEM_START_LAYER,
1222 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1223 		.parse = sfc_flow_parse_nvgre,
1224 	},
1225 };
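/*
 * The prev_layer/layer fields above enforce item ordering. For example,
 * the pattern ETH / IPV4 / UDP / VXLAN / ETH is accepted (VXLAN resets
 * the layer to the start of the inner frame), whereas ETH / UDP is
 * rejected because UDP expects an L3 item before it. Leading layers may
 * be omitted, so a pattern starting directly with IPV4 is also accepted.
 */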
1226 
1227 /*
1228  * Protocol-independent flow API support
1229  */
1230 static int
1231 sfc_flow_parse_attr(struct sfc_adapter *sa,
1232 		    const struct rte_flow_attr *attr,
1233 		    struct rte_flow *flow,
1234 		    struct rte_flow_error *error)
1235 {
1236 	struct sfc_flow_spec *spec = &flow->spec;
1237 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1238 	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
1239 	struct sfc_mae *mae = &sa->mae;
1240 
1241 	if (attr == NULL) {
1242 		rte_flow_error_set(error, EINVAL,
1243 				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1244 				   "NULL attribute");
1245 		return -rte_errno;
1246 	}
1247 	if (attr->group != 0) {
1248 		rte_flow_error_set(error, ENOTSUP,
1249 				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
1250 				   "Groups are not supported");
1251 		return -rte_errno;
1252 	}
1253 	if (attr->egress != 0 && attr->transfer == 0) {
1254 		rte_flow_error_set(error, ENOTSUP,
1255 				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
1256 				   "Egress is not supported");
1257 		return -rte_errno;
1258 	}
1259 	if (attr->ingress == 0 && attr->transfer == 0) {
1260 		rte_flow_error_set(error, ENOTSUP,
1261 				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
1262 				   "Ingress is compulsory");
1263 		return -rte_errno;
1264 	}
1265 	if (attr->transfer == 0) {
1266 		if (attr->priority != 0) {
1267 			rte_flow_error_set(error, ENOTSUP,
1268 					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1269 					   attr, "Priorities are unsupported");
1270 			return -rte_errno;
1271 		}
1272 		spec->type = SFC_FLOW_SPEC_FILTER;
1273 		spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX;
1274 		spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
1275 		spec_filter->template.efs_priority = EFX_FILTER_PRI_MANUAL;
1276 	} else {
1277 		if (mae->status != SFC_MAE_STATUS_ADMIN) {
1278 			rte_flow_error_set(error, ENOTSUP,
1279 					   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1280 					   attr, "Transfer is not supported");
1281 			return -rte_errno;
1282 		}
1283 		if (attr->priority > mae->nb_action_rule_prios_max) {
1284 			rte_flow_error_set(error, ENOTSUP,
1285 					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1286 					   attr, "Unsupported priority level");
1287 			return -rte_errno;
1288 		}
1289 		spec->type = SFC_FLOW_SPEC_MAE;
1290 		spec_mae->priority = attr->priority;
1291 		spec_mae->action_rule = NULL;
1292 	}
1293 
1294 	return 0;
1295 }
1296 
1297 /* Get item from array sfc_flow_items */
1298 static const struct sfc_flow_item *
1299 sfc_flow_get_item(const struct sfc_flow_item *items,
1300 		  unsigned int nb_items,
1301 		  enum rte_flow_item_type type)
1302 {
1303 	unsigned int i;
1304 
1305 	for (i = 0; i < nb_items; i++)
1306 		if (items[i].type == type)
1307 			return &items[i];
1308 
1309 	return NULL;
1310 }
1311 
1312 int
1313 sfc_flow_parse_pattern(struct sfc_adapter *sa,
1314 		       const struct sfc_flow_item *flow_items,
1315 		       unsigned int nb_flow_items,
1316 		       const struct rte_flow_item pattern[],
1317 		       struct sfc_flow_parse_ctx *parse_ctx,
1318 		       struct rte_flow_error *error)
1319 {
1320 	int rc;
1321 	unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
1322 	boolean_t is_ifrm = B_FALSE;
1323 	const struct sfc_flow_item *item;
1324 
1325 	if (pattern == NULL) {
1326 		rte_flow_error_set(error, EINVAL,
1327 				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
1328 				   "NULL pattern");
1329 		return -rte_errno;
1330 	}
1331 
1332 	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
1333 		item = sfc_flow_get_item(flow_items, nb_flow_items,
1334 					 pattern->type);
1335 		if (item == NULL) {
1336 			rte_flow_error_set(error, ENOTSUP,
1337 					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1338 					   "Unsupported pattern item");
1339 			return -rte_errno;
1340 		}
1341 
1342 		/*
1343 		 * Omitting one or several protocol layers at the beginning
1344 		 * of the pattern is supported
1345 		 */
1346 		if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
1347 		    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
1348 		    item->prev_layer != prev_layer) {
1349 			rte_flow_error_set(error, ENOTSUP,
1350 					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1351 					   "Unexpected sequence of pattern items");
1352 			return -rte_errno;
1353 		}
1354 
1355 		/*
1356 		 * Allow only VOID and ETH pattern items in the inner frame.
1357 		 * Also check that there is only one tunneling protocol.
1358 		 */
1359 		switch (item->type) {
1360 		case RTE_FLOW_ITEM_TYPE_VOID:
1361 		case RTE_FLOW_ITEM_TYPE_ETH:
1362 			break;
1363 
1364 		case RTE_FLOW_ITEM_TYPE_VXLAN:
1365 		case RTE_FLOW_ITEM_TYPE_GENEVE:
1366 		case RTE_FLOW_ITEM_TYPE_NVGRE:
1367 			if (is_ifrm) {
1368 				rte_flow_error_set(error, EINVAL,
1369 					RTE_FLOW_ERROR_TYPE_ITEM,
1370 					pattern,
1371 					"More than one tunneling protocol");
1372 				return -rte_errno;
1373 			}
1374 			is_ifrm = B_TRUE;
1375 			break;
1376 
1377 		default:
1378 			if (parse_ctx->type == SFC_FLOW_PARSE_CTX_FILTER &&
1379 			    is_ifrm) {
1380 				rte_flow_error_set(error, EINVAL,
1381 					RTE_FLOW_ERROR_TYPE_ITEM,
1382 					pattern,
1383 					"There is an unsupported pattern item "
1384 					"in the inner frame");
1385 				return -rte_errno;
1386 			}
1387 			break;
1388 		}
1389 
1390 		if (parse_ctx->type != item->ctx_type) {
1391 			rte_flow_error_set(error, EINVAL,
1392 					RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1393 					"Parse context type mismatch");
1394 			return -rte_errno;
1395 		}
1396 
1397 		rc = item->parse(pattern, parse_ctx, error);
1398 		if (rc != 0) {
1399 			sfc_err(sa, "failed to parse item %s: %s",
1400 				item->name, strerror(-rc));
1401 			return rc;
1402 		}
1403 
1404 		if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
1405 			prev_layer = item->layer;
1406 	}
1407 
1408 	return 0;
1409 }
1410 
1411 static int
1412 sfc_flow_parse_queue(struct sfc_adapter *sa,
1413 		     const struct rte_flow_action_queue *queue,
1414 		     struct rte_flow *flow)
1415 {
1416 	struct sfc_flow_spec *spec = &flow->spec;
1417 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1418 	struct sfc_rxq *rxq;
1419 	struct sfc_rxq_info *rxq_info;
1420 
1421 	if (queue->index >= sfc_sa2shared(sa)->ethdev_rxq_count)
1422 		return -EINVAL;
1423 
1424 	rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, queue->index);
1425 	spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index;
1426 
1427 	rxq_info = &sfc_sa2shared(sa)->rxq_info[queue->index];
1428 
1429 	if ((rxq_info->rxq_flags & SFC_RXQ_FLAG_RSS_HASH) != 0) {
1430 		struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
1431 		struct sfc_rss *ethdev_rss = &sas->rss;
1432 
1433 		spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX_RSS;
1434 		spec_filter->rss_ctx = &ethdev_rss->dummy_ctx;
1435 	}
1436 
1437 	return 0;
1438 }
1439 
1440 static int
1441 sfc_flow_parse_rss(struct sfc_adapter *sa,
1442 		   const struct rte_flow_action_rss *action_rss,
1443 		   struct rte_flow *flow)
1444 {
1445 	struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1446 	struct sfc_flow_rss_conf conf;
1447 	uint16_t sw_qid_min;
1448 	struct sfc_rxq *rxq;
1449 	int rc;
1450 
1451 	spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX_RSS;
1452 
1453 	rc = sfc_flow_rss_parse_conf(sa, action_rss, &conf, &sw_qid_min);
1454 	if (rc != 0)
1455 		return -rc;
1456 
1457 	rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, sw_qid_min);
1458 	spec_filter->template.efs_dmaq_id = rxq->hw_index;
1459 
1460 	spec_filter->rss_ctx = sfc_flow_rss_ctx_reuse(sa, &conf, sw_qid_min,
1461 						      action_rss->queue);
1462 	if (spec_filter->rss_ctx != NULL)
1463 		return 0;
1464 
1465 	rc = sfc_flow_rss_ctx_add(sa, &conf, sw_qid_min, action_rss->queue,
1466 				  &spec_filter->rss_ctx);
1467 	if (rc != 0)
1468 		return -rc;
1469 
1470 	return 0;
1471 }
1472 
1473 static int
1474 sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
1475 		    unsigned int filters_count)
1476 {
1477 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1478 	unsigned int i;
1479 	int ret = 0;
1480 
1481 	for (i = 0; i < filters_count; i++) {
1482 		int rc;
1483 
1484 		rc = efx_filter_remove(sa->nic, &spec_filter->filters[i]);
1485 		if (ret == 0 && rc != 0) {
1486 			sfc_err(sa, "failed to remove filter specification "
1487 				"(rc = %d)", rc);
1488 			ret = rc;
1489 		}
1490 	}
1491 
1492 	return ret;
1493 }
1494 
1495 static int
1496 sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1497 {
1498 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1499 	unsigned int i;
1500 	int rc = 0;
1501 
1502 	for (i = 0; i < spec_filter->count; i++) {
1503 		rc = efx_filter_insert(sa->nic, &spec_filter->filters[i]);
1504 		if (rc != 0) {
1505 			sfc_flow_spec_flush(sa, spec, i);
1506 			break;
1507 		}
1508 	}
1509 
1510 	return rc;
1511 }
1512 
1513 static int
1514 sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1515 {
1516 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1517 
1518 	return sfc_flow_spec_flush(sa, spec, spec_filter->count);
1519 }
1520 
1521 static int
1522 sfc_flow_filter_insert(struct sfc_adapter *sa,
1523 		       struct rte_flow *flow)
1524 {
1525 	struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1526 	struct sfc_flow_rss_ctx *rss_ctx = spec_filter->rss_ctx;
1527 	int rc = 0;
1528 
1529 	rc = sfc_flow_rss_ctx_program(sa, rss_ctx);
1530 	if (rc != 0)
1531 		goto fail_rss_ctx_program;
1532 
1533 	if (rss_ctx != NULL) {
1534 		unsigned int i;
1535 
1536 		/*
1537 		 * At this point, fully elaborated filter specifications
1538 		 * have been produced from the template. To make sure that
1539 		 * RSS behaviour is consistent between them, set the same
1540 		 * RSS context value everywhere.
1541 		 */
1542 		for (i = 0; i < spec_filter->count; i++) {
1543 			efx_filter_spec_t *spec = &spec_filter->filters[i];
1544 
1545 			spec->efs_rss_context = rss_ctx->nic_handle;
1546 		}
1547 	}
1548 
1549 	rc = sfc_flow_spec_insert(sa, &flow->spec);
1550 	if (rc != 0)
1551 		goto fail_filter_insert;
1552 
1553 	return 0;
1554 
1555 fail_filter_insert:
1556 	sfc_flow_rss_ctx_terminate(sa, rss_ctx);
1557 
1558 fail_rss_ctx_program:
1559 	return rc;
1560 }
1561 
1562 static int
1563 sfc_flow_filter_remove(struct sfc_adapter *sa,
1564 		       struct rte_flow *flow)
1565 {
1566 	struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1567 	int rc = 0;
1568 
1569 	rc = sfc_flow_spec_remove(sa, &flow->spec);
1570 	if (rc != 0)
1571 		return rc;
1572 
1573 	sfc_flow_rss_ctx_terminate(sa, spec_filter->rss_ctx);
1574 
1575 	return 0;
1576 }
1577 
1578 static int
1579 sfc_flow_parse_mark(struct sfc_adapter *sa,
1580 		    const struct rte_flow_action_mark *mark,
1581 		    struct rte_flow *flow)
1582 {
1583 	struct sfc_flow_spec *spec = &flow->spec;
1584 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1585 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
1586 	uint32_t mark_max;
1587 
1588 	mark_max = encp->enc_filter_action_mark_max;
1589 	if (sfc_ft_is_active(sa))
1590 		mark_max = RTE_MIN(mark_max, SFC_FT_USER_MARK_MASK);
1591 
1592 	if (mark == NULL || mark->id > mark_max)
1593 		return EINVAL;
1594 
1595 	spec_filter->template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
1596 	spec_filter->template.efs_mark = mark->id;
1597 
1598 	return 0;
1599 }
1600 
1601 static int
1602 sfc_flow_parse_actions(struct sfc_adapter *sa,
1603 		       const struct rte_flow_action actions[],
1604 		       struct rte_flow *flow,
1605 		       struct rte_flow_error *error)
1606 {
1607 	int rc;
1608 	struct sfc_flow_spec *spec = &flow->spec;
1609 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1610 	const unsigned int dp_rx_features = sa->priv.dp_rx->features;
1611 	const uint64_t rx_metadata = sa->negotiated_rx_metadata;
1612 	uint32_t actions_set = 0;
1613 	const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
1614 					   (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
1615 					   (1UL << RTE_FLOW_ACTION_TYPE_DROP);
1616 	const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) |
1617 					   (1UL << RTE_FLOW_ACTION_TYPE_FLAG);
1618 
1619 	if (actions == NULL) {
1620 		rte_flow_error_set(error, EINVAL,
1621 				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
1622 				   "NULL actions");
1623 		return -rte_errno;
1624 	}
1625 
1626 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1627 		switch (actions->type) {
1628 		case RTE_FLOW_ACTION_TYPE_VOID:
1629 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID,
1630 					       actions_set);
1631 			break;
1632 
1633 		case RTE_FLOW_ACTION_TYPE_QUEUE:
1634 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE,
1635 					       actions_set);
1636 			if ((actions_set & fate_actions_mask) != 0)
1637 				goto fail_fate_actions;
1638 
1639 			rc = sfc_flow_parse_queue(sa, actions->conf, flow);
1640 			if (rc != 0) {
1641 				rte_flow_error_set(error, EINVAL,
1642 					RTE_FLOW_ERROR_TYPE_ACTION, actions,
1643 					"Bad QUEUE action");
1644 				return -rte_errno;
1645 			}
1646 			break;
1647 
1648 		case RTE_FLOW_ACTION_TYPE_RSS:
1649 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS,
1650 					       actions_set);
1651 			if ((actions_set & fate_actions_mask) != 0)
1652 				goto fail_fate_actions;
1653 
1654 			rc = sfc_flow_parse_rss(sa, actions->conf, flow);
1655 			if (rc != 0) {
1656 				rte_flow_error_set(error, -rc,
1657 					RTE_FLOW_ERROR_TYPE_ACTION, actions,
1658 					"Bad RSS action");
1659 				return -rte_errno;
1660 			}
1661 			break;
1662 
1663 		case RTE_FLOW_ACTION_TYPE_DROP:
1664 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
1665 					       actions_set);
1666 			if ((actions_set & fate_actions_mask) != 0)
1667 				goto fail_fate_actions;
1668 
1669 			spec_filter->template.efs_dmaq_id =
1670 				EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1671 			break;
1672 
1673 		case RTE_FLOW_ACTION_TYPE_FLAG:
1674 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
1675 					       actions_set);
1676 			if ((actions_set & mark_actions_mask) != 0)
1677 				goto fail_actions_overlap;
1678 
1679 			if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) {
1680 				rte_flow_error_set(error, ENOTSUP,
1681 					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1682 					"FLAG action is not supported on the current Rx datapath");
1683 				return -rte_errno;
1684 			} else if ((rx_metadata &
1685 				    RTE_ETH_RX_METADATA_USER_FLAG) == 0) {
1686 				rte_flow_error_set(error, ENOTSUP,
1687 					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1688 					"flag delivery has not been negotiated");
1689 				return -rte_errno;
1690 			}
1691 
1692 			spec_filter->template.efs_flags |=
1693 				EFX_FILTER_FLAG_ACTION_FLAG;
1694 			break;
1695 
1696 		case RTE_FLOW_ACTION_TYPE_MARK:
1697 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
1698 					       actions_set);
1699 			if ((actions_set & mark_actions_mask) != 0)
1700 				goto fail_actions_overlap;
1701 
1702 			if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) {
1703 				rte_flow_error_set(error, ENOTSUP,
1704 					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1705 					"MARK action is not supported on the current Rx datapath");
1706 				return -rte_errno;
1707 			} else if ((rx_metadata &
1708 				    RTE_ETH_RX_METADATA_USER_MARK) == 0) {
1709 				rte_flow_error_set(error, ENOTSUP,
1710 					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1711 					"mark delivery has not been negotiated");
1712 				return -rte_errno;
1713 			}
1714 
1715 			rc = sfc_flow_parse_mark(sa, actions->conf, flow);
1716 			if (rc != 0) {
1717 				rte_flow_error_set(error, rc,
1718 					RTE_FLOW_ERROR_TYPE_ACTION, actions,
1719 					"Bad MARK action");
1720 				return -rte_errno;
1721 			}
1722 			break;
1723 
1724 		default:
1725 			rte_flow_error_set(error, ENOTSUP,
1726 					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
1727 					   "Action is not supported");
1728 			return -rte_errno;
1729 		}
1730 
1731 		actions_set |= (1UL << actions->type);
1732 	}
1733 
1734 	/* When fate is unknown, drop traffic. */
1735 	if ((actions_set & fate_actions_mask) == 0) {
1736 		spec_filter->template.efs_dmaq_id =
1737 			EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1738 	}
1739 
1740 	return 0;
1741 
1742 fail_fate_actions:
1743 	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1744 			   "Cannot combine several fate-deciding actions, "
1745 			   "choose between QUEUE, RSS or DROP");
1746 	return -rte_errno;
1747 
1748 fail_actions_overlap:
1749 	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1750 			   "Overlapping actions are not supported");
1751 	return -rte_errno;
1752 }
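/*
 * Illustrative examples of accepted action lists: "QUEUE", "DROP",
 * "RSS + MARK" (subject to the Rx datapath and metadata negotiation
 * checks above). Combining two fate-deciding actions (e.g. QUEUE + DROP)
 * or two marking actions (MARK + FLAG) is rejected, and a rule with no
 * fate-deciding action at all drops the matched traffic.
 */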
1753 
1754 /**
1755  * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
1756  * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same
1757  * specifications after copying.
1758  *
1759  * @param spec[in, out]
1760  *   SFC flow specification to update.
1761  * @param filters_count_for_one_val[in]
1762  *   How many specifications should have the same match flag, i.e. the
1763  *   number of specifications before copying.
1764  * @param error[out]
1765  *   Perform verbose error reporting if not NULL.
1766  */
1767 static int
1768 sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
1769 			       unsigned int filters_count_for_one_val,
1770 			       struct rte_flow_error *error)
1771 {
1772 	unsigned int i;
1773 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1774 	static const efx_filter_match_flags_t vals[] = {
1775 		EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
1776 		EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
1777 	};
1778 
1779 	if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1780 		rte_flow_error_set(error, EINVAL,
1781 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1782 			"Number of specifications is incorrect while copying "
1783 			"by unknown destination flags");
1784 		return -rte_errno;
1785 	}
1786 
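	/*
	 * Illustration: with filters_count_for_one_val == 2 and count == 4,
	 * filters[0..1] get EFX_FILTER_MATCH_UNKNOWN_UCAST_DST and
	 * filters[2..3] get EFX_FILTER_MATCH_UNKNOWN_MCAST_DST. The sibling
	 * helpers below for EtherType and inner frame destination flags
	 * follow the same partitioning scheme.
	 */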
1787 	for (i = 0; i < spec_filter->count; i++) {
1788 		/* The check above ensures that divisor can't be zero here */
1789 		spec_filter->filters[i].efs_match_flags |=
1790 			vals[i / filters_count_for_one_val];
1791 	}
1792 
1793 	return 0;
1794 }
1795 
1796 /**
1797  * Check that the list of supported filters has a filter with
1798  * the EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag in place of
1799  * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since such a filter
1800  * will also be inserted.
1801  *
1803  * @param match[in]
1804  *   The match flags of filter.
1805  * @param spec[in]
1806  *   Specification to be supplemented.
1807  * @param filter[in]
1808  *   SFC filter with list of supported filters.
1809  */
1810 static boolean_t
1811 sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
1812 				 __rte_unused efx_filter_spec_t *spec,
1813 				 struct sfc_filter *filter)
1814 {
1815 	unsigned int i;
1816 	efx_filter_match_flags_t match_mcast_dst;
1817 
1818 	match_mcast_dst =
1819 		(match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
1820 		EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
1821 	for (i = 0; i < filter->supported_match_num; i++) {
1822 		if (match_mcast_dst == filter->supported_match[i])
1823 			return B_TRUE;
1824 	}
1825 
1826 	return B_FALSE;
1827 }
1828 
1829 /**
1830  * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and assign the
1831  * EFX_ETHER_TYPE_IPV4 and EFX_ETHER_TYPE_IPV6 values to the
1832  * corresponding field across the specification copies.
1833  *
1834  * @param spec[in, out]
1835  *   SFC flow specification to update.
1836  * @param filters_count_for_one_val[in]
1837  *   How many specifications should share the same EtherType value;
1838  *   this is the number of specifications before copying.
1839  * @param error[out]
1840  *   Perform verbose error reporting if not NULL.
1841  */
1842 static int
1843 sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
1844 			unsigned int filters_count_for_one_val,
1845 			struct rte_flow_error *error)
1846 {
1847 	unsigned int i;
1848 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1849 	static const uint16_t vals[] = {
1850 		EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
1851 	};
1852 
1853 	if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1854 		rte_flow_error_set(error, EINVAL,
1855 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1856 			"Number of specifications is incorrect "
1857 			"while copying by Ethertype");
1858 		return -rte_errno;
1859 	}
1860 
1861 	for (i = 0; i < spec_filter->count; i++) {
1862 		spec_filter->filters[i].efs_match_flags |=
1863 			EFX_FILTER_MATCH_ETHER_TYPE;
1864 
1865 		/*
1866 		 * The check above ensures that
1867 		 * filters_count_for_one_val is not 0
1868 		 */
1869 		spec_filter->filters[i].efs_ether_type =
1870 			vals[i / filters_count_for_one_val];
1871 	}
1872 
1873 	return 0;
1874 }
1875 
1876 /**
1877  * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0
1878  * across the specification copies.
1879  *
1880  * @param spec[in, out]
1881  *   SFC flow specification to update.
1882  * @param filters_count_for_one_val[in]
1883  *   How many specifications should share the same match flag value;
1884  *   this is the number of specifications before copying.
1885  * @param error[out]
1886  *   Perform verbose error reporting if not NULL.
1887  */
1888 static int
1889 sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec,
1890 			    unsigned int filters_count_for_one_val,
1891 			    struct rte_flow_error *error)
1892 {
1893 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1894 	unsigned int i;
1895 
1896 	if (filters_count_for_one_val != spec_filter->count) {
1897 		rte_flow_error_set(error, EINVAL,
1898 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1899 			"Number of specifications is incorrect "
1900 			"while copying by outer VLAN ID");
1901 		return -rte_errno;
1902 	}
1903 
1904 	for (i = 0; i < spec_filter->count; i++) {
1905 		spec_filter->filters[i].efs_match_flags |=
1906 			EFX_FILTER_MATCH_OUTER_VID;
1907 
1908 		spec_filter->filters[i].efs_outer_vid = 0;
1909 	}
1910 
1911 	return 0;
1912 }
1913 
1914 /**
1915  * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
1916  * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same
1917  * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags across the
1918  * specification copies.
1919  * @param spec[in, out]
1920  *   SFC flow specification to update.
1921  * @param filters_count_for_one_val[in]
1922  *   How many specifications should share the same match flag value;
1923  *   this is the number of specifications before copying.
1924  * @param error[out]
1925  *   Perform verbose error reporting if not NULL.
1926  */
1927 static int
1928 sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
1929 				    unsigned int filters_count_for_one_val,
1930 				    struct rte_flow_error *error)
1931 {
1932 	unsigned int i;
1933 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1934 	static const efx_filter_match_flags_t vals[] = {
1935 		EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
1936 		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
1937 	};
1938 
1939 	if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1940 		rte_flow_error_set(error, EINVAL,
1941 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1942 			"Number of specifications is incorrect while copying "
1943 			"by inner frame unknown destination flags");
1944 		return -rte_errno;
1945 	}
1946 
1947 	for (i = 0; i < spec_filter->count; i++) {
1948 		/* The check above ensures that divisor can't be zero here */
1949 		spec_filter->filters[i].efs_match_flags |=
1950 			vals[i / filters_count_for_one_val];
1951 	}
1952 
1953 	return 0;
1954 }
1955 
1956 /**
1957  * Check that the following conditions are met:
1958  * - the specification corresponds to a filter for encapsulated traffic
1959  * - the list of supported filters has a filter
1960  *   with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
1961  *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
1962  *   be inserted.
1963  *
1964  * @param match[in]
1965  *   The match flags of filter.
1966  * @param spec[in]
1967  *   Specification to be supplemented.
1968  * @param filter[in]
1969  *   SFC filter with list of supported filters.
1970  */
1971 static boolean_t
1972 sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
1973 				      efx_filter_spec_t *spec,
1974 				      struct sfc_filter *filter)
1975 {
1976 	unsigned int i;
1977 	efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
1978 	efx_filter_match_flags_t match_mcast_dst;
1979 
1980 	if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
1981 		return B_FALSE;
1982 
1983 	match_mcast_dst =
1984 		(match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
1985 		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
1986 	for (i = 0; i < filter->supported_match_num; i++) {
1987 		if (match_mcast_dst == filter->supported_match[i])
1988 			return B_TRUE;
1989 	}
1990 
1991 	return B_FALSE;
1992 }
1993 
1994 /**
1995  * Check that the list of supported filters has a filter that differs
1996  * from @p match only in that it lacks the EFX_FILTER_MATCH_OUTER_VID
1997  * flag. In this case, that filter will be used and the
1998  * EFX_FILTER_MATCH_OUTER_VID flag is not needed.
1999  *
2000  * @param match[in]
2001  *   The match flags of filter.
2002  * @param spec[in]
2003  *   Specification to be supplemented.
2004  * @param filter[in]
2005  *   SFC filter with list of supported filters.
2006  */
2007 static boolean_t
2008 sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match,
2009 			      __rte_unused efx_filter_spec_t *spec,
2010 			      struct sfc_filter *filter)
2011 {
2012 	unsigned int i;
2013 	efx_filter_match_flags_t match_without_vid =
2014 		match & ~EFX_FILTER_MATCH_OUTER_VID;
2015 
2016 	for (i = 0; i < filter->supported_match_num; i++) {
2017 		if (match_without_vid == filter->supported_match[i])
2018 			return B_FALSE;
2019 	}
2020 
2021 	return B_TRUE;
2022 }
2023 
2024 /*
2025  * Match flags that can be automatically added to filters.
2026  * Selecting the last minimum when searching for the copy flag ensures that the
2027  * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has a higher priority than
2028  * EFX_FILTER_MATCH_ETHER_TYPE. This is because the filter
2029  * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of supported
2030  * filters.
2031  */
2032 static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
2033 	{
2034 		.flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
2035 		.vals_count = 2,
2036 		.set_vals = sfc_flow_set_unknown_dst_flags,
2037 		.spec_check = sfc_flow_check_unknown_dst_flags,
2038 	},
2039 	{
2040 		.flag = EFX_FILTER_MATCH_ETHER_TYPE,
2041 		.vals_count = 2,
2042 		.set_vals = sfc_flow_set_ethertypes,
2043 		.spec_check = NULL,
2044 	},
2045 	{
2046 		.flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
2047 		.vals_count = 2,
2048 		.set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
2049 		.spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
2050 	},
2051 	{
2052 		.flag = EFX_FILTER_MATCH_OUTER_VID,
2053 		.vals_count = 1,
2054 		.set_vals = sfc_flow_set_outer_vid_flag,
2055 		.spec_check = sfc_flow_check_outer_vid_flag,
2056 	},
2057 };
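
/*
 * Illustrative example: if a flow rule template lacks both
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST and EFX_FILTER_MATCH_ETHER_TYPE
 * relative to a supported filter, covering them requires
 * 2 * 2 == 4 specification copies: each missing flag contributes its
 * vals_count as a multiplier (capped by SF_FLOW_SPEC_NB_FILTERS_MAX).
 */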
2058 
2059 /* Get item from array sfc_flow_copy_flags */
2060 static const struct sfc_flow_copy_flag *
2061 sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
2062 {
2063 	unsigned int i;
2064 
2065 	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2066 		if (sfc_flow_copy_flags[i].flag == flag)
2067 			return &sfc_flow_copy_flags[i];
2068 	}
2069 
2070 	return NULL;
2071 }
2072 
2073 /**
2074  * Make copies of the specifications, then set the match flag and the
2075  * values of the corresponding field in them.
2076  *
2077  * @param spec[in, out]
2078  *   SFC flow specification to update.
2079  * @param flag[in]
2080  *   The match flag to add.
2081  * @param error[out]
2082  *   Perform verbose error reporting if not NULL.
2083  */
2084 static int
2085 sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
2086 			     efx_filter_match_flags_t flag,
2087 			     struct rte_flow_error *error)
2088 {
2089 	unsigned int i;
2090 	unsigned int new_filters_count;
2091 	unsigned int filters_count_for_one_val;
2092 	const struct sfc_flow_copy_flag *copy_flag;
2093 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2094 	int rc;
2095 
2096 	copy_flag = sfc_flow_get_copy_flag(flag);
2097 	if (copy_flag == NULL) {
2098 		rte_flow_error_set(error, ENOTSUP,
2099 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2100 				   "Unsupported spec field for copying");
2101 		return -rte_errno;
2102 	}
2103 
2104 	new_filters_count = spec_filter->count * copy_flag->vals_count;
2105 	if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
2106 		rte_flow_error_set(error, EINVAL,
2107 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2108 			"Too many EFX specifications in the flow rule");
2109 		return -rte_errno;
2110 	}
2111 
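	/*
	 * The original block of filter specifications is repeated below;
	 * the set_vals() callback then assigns a distinct value to each
	 * block of filters_count_for_one_val copies.
	 */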
2112 	/* Copy filters specifications */
2113 	for (i = spec_filter->count; i < new_filters_count; i++) {
2114 		spec_filter->filters[i] =
2115 			spec_filter->filters[i - spec_filter->count];
2116 	}
2117 
2118 	filters_count_for_one_val = spec_filter->count;
2119 	spec_filter->count = new_filters_count;
2120 
2121 	rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
2122 	if (rc != 0)
2123 		return rc;
2124 
2125 	return 0;
2126 }
2127 
2128 /**
2129  * Check that the given set of match flags missing in the original filter spec
2130  * could be covered by adding spec copies which specify the corresponding
2131  * flags and packet field values to match.
2132  *
2133  * @param miss_flags[in]
2134  *   Flags that the filter spec lacks relative to the supported filter.
2135  * @param spec[in]
2136  *   Specification to be supplemented.
2137  * @param filter[in]
2138  *   SFC filter.
2139  *
2140  * @return
2141  *   Number of specifications after copying, or 0 if the flags cannot be added.
2142  */
2143 static unsigned int
2144 sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
2145 			     efx_filter_spec_t *spec,
2146 			     struct sfc_filter *filter)
2147 {
2148 	unsigned int i;
2149 	efx_filter_match_flags_t copy_flags = 0;
2150 	efx_filter_match_flags_t flag;
2151 	efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
2152 	sfc_flow_spec_check *check;
2153 	unsigned int multiplier = 1;
2154 
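	/*
	 * A missing flag can be added automatically only if it is listed in
	 * sfc_flow_copy_flags and its spec_check callback (if any) accepts
	 * the spec; each such flag multiplies the number of required copies
	 * by its vals_count.
	 */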
2155 	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2156 		flag = sfc_flow_copy_flags[i].flag;
2157 		check = sfc_flow_copy_flags[i].spec_check;
2158 		if ((flag & miss_flags) == flag) {
2159 			if (check != NULL && (!check(match, spec, filter)))
2160 				continue;
2161 
2162 			copy_flags |= flag;
2163 			multiplier *= sfc_flow_copy_flags[i].vals_count;
2164 		}
2165 	}
2166 
2167 	if (copy_flags == miss_flags)
2168 		return multiplier;
2169 
2170 	return 0;
2171 }
2172 
2173 /**
2174  * Attempt to supplement the specification template up to a minimally
2175  * supported set of match flags. To do this, the specifications are
2176  * copied and filled in with the values of the fields that correspond
2177  * to the missing flags.
2178  * The necessary and sufficient filter is built from the fewest
2179  * copies needed to cover the minimally required set
2180  * of flags.
2181  *
2182  * @param sa[in]
2183  *   SFC adapter.
2184  * @param spec[in, out]
2185  *   SFC flow specification to update.
2186  * @param error[out]
2187  *   Perform verbose error reporting if not NULL.
2188  */
2189 static int
2190 sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
2191 			       struct sfc_flow_spec *spec,
2192 			       struct rte_flow_error *error)
2193 {
2194 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2195 	struct sfc_filter *filter = &sa->filter;
2196 	efx_filter_match_flags_t miss_flags;
2197 	efx_filter_match_flags_t min_miss_flags = 0;
2198 	efx_filter_match_flags_t match;
2199 	unsigned int min_multiplier = UINT_MAX;
2200 	unsigned int multiplier;
2201 	unsigned int i;
2202 	int rc;
2203 
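	/*
	 * Among the supported sets of match flags that include all flags
	 * from the template, pick the one whose missing flags can be
	 * covered with the fewest specification copies.
	 */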
2204 	match = spec_filter->template.efs_match_flags;
2205 	for (i = 0; i < filter->supported_match_num; i++) {
2206 		if ((match & filter->supported_match[i]) == match) {
2207 			miss_flags = filter->supported_match[i] & (~match);
2208 			multiplier = sfc_flow_check_missing_flags(miss_flags,
2209 				&spec_filter->template, filter);
2210 			if (multiplier > 0) {
2211 				if (multiplier <= min_multiplier) {
2212 					min_multiplier = multiplier;
2213 					min_miss_flags = miss_flags;
2214 				}
2215 			}
2216 		}
2217 	}
2218 
2219 	if (min_multiplier == UINT_MAX) {
2220 		rte_flow_error_set(error, ENOTSUP,
2221 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2222 				   "The flow rule pattern is unsupported");
2223 		return -rte_errno;
2224 	}
2225 
2226 	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2227 		efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;
2228 
2229 		if ((flag & min_miss_flags) == flag) {
2230 			rc = sfc_flow_spec_add_match_flag(spec, flag, error);
2231 			if (rc != 0)
2232 				return rc;
2233 		}
2234 	}
2235 
2236 	return 0;
2237 }
2238 
2239 /**
2240  * Check that set of match flags is referred to by a filter. Filter is
2241  * Check that the set of match flags corresponds to the filter pattern.
2242  * The pattern may additionally be extended with the OUTER_VID flag, or
2243  * with both the OUTER_VID and INNER_VID flags.
2244  * @param match_flags[in]
2245  *   Set of match flags.
2246  * @param flags_pattern[in]
2247  *   Pattern of filter match flags.
2248  */
2249 static boolean_t
2250 sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
2251 			    efx_filter_match_flags_t flags_pattern)
2252 {
2253 	if ((match_flags & flags_pattern) != flags_pattern)
2254 		return B_FALSE;
2255 
2256 	switch (match_flags & ~flags_pattern) {
2257 	case 0:
2258 	case EFX_FILTER_MATCH_OUTER_VID:
2259 	case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
2260 		return B_TRUE;
2261 	default:
2262 		return B_FALSE;
2263 	}
2264 }
2265 
2266 /**
2267  * Check whether the spec maps to a hardware filter which is known to be
2268  * ineffective despite being valid.
2269  *
2270  * @param filter[in]
2271  *   SFC filter with list of supported filters.
2272  * @param spec[in]
2273  *   SFC flow specification.
2274  */
2275 static boolean_t
2276 sfc_flow_is_match_flags_exception(struct sfc_filter *filter,
2277 				  struct sfc_flow_spec *spec)
2278 {
2279 	unsigned int i;
2280 	uint16_t ether_type;
2281 	uint8_t ip_proto;
2282 	efx_filter_match_flags_t match_flags;
2283 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2284 
2285 	for (i = 0; i < spec_filter->count; i++) {
2286 		match_flags = spec_filter->filters[i].efs_match_flags;
2287 
2288 		if (sfc_flow_is_match_with_vids(match_flags,
2289 						EFX_FILTER_MATCH_ETHER_TYPE) ||
2290 		    sfc_flow_is_match_with_vids(match_flags,
2291 						EFX_FILTER_MATCH_ETHER_TYPE |
2292 						EFX_FILTER_MATCH_LOC_MAC)) {
2293 			ether_type = spec_filter->filters[i].efs_ether_type;
2294 			if (filter->supports_ip_proto_or_addr_filter &&
2295 			    (ether_type == EFX_ETHER_TYPE_IPV4 ||
2296 			     ether_type == EFX_ETHER_TYPE_IPV6))
2297 				return B_TRUE;
2298 		} else if (sfc_flow_is_match_with_vids(match_flags,
2299 				EFX_FILTER_MATCH_ETHER_TYPE |
2300 				EFX_FILTER_MATCH_IP_PROTO) ||
2301 			   sfc_flow_is_match_with_vids(match_flags,
2302 				EFX_FILTER_MATCH_ETHER_TYPE |
2303 				EFX_FILTER_MATCH_IP_PROTO |
2304 				EFX_FILTER_MATCH_LOC_MAC)) {
2305 			ip_proto = spec_filter->filters[i].efs_ip_proto;
2306 			if (filter->supports_rem_or_local_port_filter &&
2307 			    (ip_proto == EFX_IPPROTO_TCP ||
2308 			     ip_proto == EFX_IPPROTO_UDP))
2309 				return B_TRUE;
2310 		}
2311 	}
2312 
2313 	return B_FALSE;
2314 }
2315 
2316 static int
2317 sfc_flow_validate_match_flags(struct sfc_adapter *sa,
2318 			      struct rte_flow *flow,
2319 			      struct rte_flow_error *error)
2320 {
2321 	struct sfc_flow_spec *spec = &flow->spec;
2322 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2323 	efx_filter_spec_t *spec_tmpl = &spec_filter->template;
2324 	efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
2325 	int rc;
2326 
2327 	/* Initialize the first filter spec with template */
2328 	spec_filter->filters[0] = *spec_tmpl;
2329 	spec_filter->count = 1;
2330 
2331 	if (!sfc_filter_is_match_supported(sa, match_flags)) {
2332 		rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
2333 		if (rc != 0)
2334 			return rc;
2335 	}
2336 
2337 	if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) {
2338 		rte_flow_error_set(error, ENOTSUP,
2339 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2340 			"The flow rule pattern is unsupported");
2341 		return -rte_errno;
2342 	}
2343 
2344 	return 0;
2345 }
2346 
2347 static int
2348 sfc_flow_parse_rte_to_filter(struct rte_eth_dev *dev,
2349 			     const struct rte_flow_item pattern[],
2350 			     const struct rte_flow_action actions[],
2351 			     struct rte_flow *flow,
2352 			     struct rte_flow_error *error)
2353 {
2354 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2355 	struct sfc_flow_spec *spec = &flow->spec;
2356 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2357 	struct sfc_flow_parse_ctx ctx;
2358 	int rc;
2359 
2360 	ctx.type = SFC_FLOW_PARSE_CTX_FILTER;
2361 	ctx.filter = &spec_filter->template;
2362 
2363 	rc = sfc_flow_parse_pattern(sa, sfc_flow_items, RTE_DIM(sfc_flow_items),
2364 				    pattern, &ctx, error);
2365 	if (rc != 0)
2366 		goto fail_bad_value;
2367 
2368 	rc = sfc_flow_parse_actions(sa, actions, flow, error);
2369 	if (rc != 0)
2370 		goto fail_bad_value;
2371 
2372 	rc = sfc_flow_validate_match_flags(sa, flow, error);
2373 	if (rc != 0)
2374 		goto fail_bad_value;
2375 
2376 	return 0;
2377 
2378 fail_bad_value:
2379 	return rc;
2380 }
2381 
2382 static int
2383 sfc_flow_parse_rte_to_mae(struct rte_eth_dev *dev,
2384 			  const struct rte_flow_item pattern[],
2385 			  const struct rte_flow_action actions[],
2386 			  struct rte_flow *flow,
2387 			  struct rte_flow_error *error)
2388 {
2389 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2390 
2391 	return sfc_mae_rule_parse(sa, pattern, actions, flow, error);
2392 }
2393 
2394 static int
2395 sfc_flow_parse(struct rte_eth_dev *dev,
2396 	       const struct rte_flow_attr *attr,
2397 	       const struct rte_flow_item pattern[],
2398 	       const struct rte_flow_action actions[],
2399 	       struct rte_flow *flow,
2400 	       struct rte_flow_error *error)
2401 {
2402 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2403 	const struct sfc_flow_ops_by_spec *ops;
2404 	int rc;
2405 
2406 	rc = sfc_flow_parse_attr(sa, attr, flow, error);
2407 	if (rc != 0)
2408 		return rc;
2409 
2410 	ops = sfc_flow_get_ops_by_spec(flow);
2411 	if (ops == NULL || ops->parse == NULL) {
2412 		rte_flow_error_set(error, ENOTSUP,
2413 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2414 				   "No backend to handle this flow");
2415 		return -rte_errno;
2416 	}
2417 
2418 	return ops->parse(dev, pattern, actions, flow, error);
2419 }
2420 
2421 static struct rte_flow *
2422 sfc_flow_zmalloc(struct rte_flow_error *error)
2423 {
2424 	struct rte_flow *flow;
2425 
2426 	flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
2427 	if (flow == NULL) {
2428 		rte_flow_error_set(error, ENOMEM,
2429 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2430 				   "Failed to allocate memory");
2431 	}
2432 
2433 	return flow;
2434 }
2435 
2436 static void
2437 sfc_flow_free(struct sfc_adapter *sa, struct rte_flow *flow)
2438 {
2439 	const struct sfc_flow_ops_by_spec *ops;
2440 
2441 	ops = sfc_flow_get_ops_by_spec(flow);
2442 	if (ops != NULL && ops->cleanup != NULL)
2443 		ops->cleanup(sa, flow);
2444 
2445 	rte_free(flow);
2446 }
2447 
2448 static int
2449 sfc_flow_insert(struct sfc_adapter *sa, struct rte_flow *flow,
2450 		struct rte_flow_error *error)
2451 {
2452 	const struct sfc_flow_ops_by_spec *ops;
2453 	int rc;
2454 
2455 	ops = sfc_flow_get_ops_by_spec(flow);
2456 	if (ops == NULL || ops->insert == NULL) {
2457 		rte_flow_error_set(error, ENOTSUP,
2458 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2459 				   "No backend to handle this flow");
2460 		return rte_errno;
2461 	}
2462 
2463 	rc = ops->insert(sa, flow);
2464 	if (rc != 0) {
2465 		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2466 				   NULL, "Failed to insert the flow rule");
2467 	}
2468 
2469 	return rc;
2470 }
2471 
2472 static int
2473 sfc_flow_remove(struct sfc_adapter *sa, struct rte_flow *flow,
2474 		struct rte_flow_error *error)
2475 {
2476 	const struct sfc_flow_ops_by_spec *ops;
2477 	int rc;
2478 
2479 	ops = sfc_flow_get_ops_by_spec(flow);
2480 	if (ops == NULL || ops->remove == NULL) {
2481 		rte_flow_error_set(error, ENOTSUP,
2482 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2483 				   "No backend to handle this flow");
2484 		return rte_errno;
2485 	}
2486 
2487 	rc = ops->remove(sa, flow);
2488 	if (rc != 0) {
2489 		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2490 				   NULL, "Failed to remove the flow rule");
2491 	}
2492 
2493 	return rc;
2494 }
2495 
2496 static int
2497 sfc_flow_verify(struct sfc_adapter *sa, struct rte_flow *flow,
2498 		struct rte_flow_error *error)
2499 {
2500 	const struct sfc_flow_ops_by_spec *ops;
2501 	int rc = 0;
2502 
2503 	ops = sfc_flow_get_ops_by_spec(flow);
2504 	if (ops == NULL) {
2505 		rte_flow_error_set(error, ENOTSUP,
2506 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2507 				   "No backend to handle this flow");
2508 		return -rte_errno;
2509 	}
2510 
2511 	if (ops->verify != NULL) {
2512 		SFC_ASSERT(sfc_adapter_is_locked(sa));
2513 		rc = ops->verify(sa, flow);
2514 	}
2515 
2516 	if (rc != 0) {
2517 		rte_flow_error_set(error, rc,
2518 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2519 			"Failed to verify flow validity with FW");
2520 		return -rte_errno;
2521 	}
2522 
2523 	return 0;
2524 }
2525 
2526 static int
2527 sfc_flow_validate(struct rte_eth_dev *dev,
2528 		  const struct rte_flow_attr *attr,
2529 		  const struct rte_flow_item pattern[],
2530 		  const struct rte_flow_action actions[],
2531 		  struct rte_flow_error *error)
2532 {
2533 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2534 	struct rte_flow *flow;
2535 	int rc;
2536 
2537 	flow = sfc_flow_zmalloc(error);
2538 	if (flow == NULL)
2539 		return -rte_errno;
2540 
2541 	sfc_adapter_lock(sa);
2542 
2543 	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2544 	if (rc == 0)
2545 		rc = sfc_flow_verify(sa, flow, error);
2546 
2547 	sfc_flow_free(sa, flow);
2548 
2549 	sfc_adapter_unlock(sa);
2550 
2551 	return rc;
2552 }
2553 
2554 static struct rte_flow *
2555 sfc_flow_create(struct rte_eth_dev *dev,
2556 		const struct rte_flow_attr *attr,
2557 		const struct rte_flow_item pattern[],
2558 		const struct rte_flow_action actions[],
2559 		struct rte_flow_error *error)
2560 {
2561 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2562 	struct rte_flow *flow;
2563 
2564 	sfc_adapter_lock(sa);
2565 	flow = sfc_flow_create_locked(sa, false, attr, pattern, actions, error);
2566 	sfc_adapter_unlock(sa);
2567 
2568 	return flow;
2569 }
2570 
2571 struct rte_flow *
2572 sfc_flow_create_locked(struct sfc_adapter *sa, bool internal,
2573 		       const struct rte_flow_attr *attr,
2574 		       const struct rte_flow_item pattern[],
2575 		       const struct rte_flow_action actions[],
2576 		       struct rte_flow_error *error)
2577 {
2578 	struct rte_flow *flow = NULL;
2579 	int rc;
2580 
2581 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2582 
2583 	flow = sfc_flow_zmalloc(error);
2584 	if (flow == NULL)
2585 		goto fail_no_mem;
2586 
2587 	flow->internal = internal;
2588 
2589 	rc = sfc_flow_parse(sa->eth_dev, attr, pattern, actions, flow, error);
2590 	if (rc != 0)
2591 		goto fail_bad_value;
2592 
2593 	TAILQ_INSERT_TAIL(&sa->flow_list, flow, entries);
2594 
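	/*
	 * Internal flows are inserted right away; other flows are inserted
	 * only if the adapter has been started, otherwise insertion is
	 * deferred until sfc_flow_start().
	 */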
2595 	if (flow->internal || sa->state == SFC_ETHDEV_STARTED) {
2596 		rc = sfc_flow_insert(sa, flow, error);
2597 		if (rc != 0)
2598 			goto fail_flow_insert;
2599 	}
2600 
2601 	return flow;
2602 
2603 fail_flow_insert:
2604 	TAILQ_REMOVE(&sa->flow_list, flow, entries);
2605 
2606 fail_bad_value:
2607 	sfc_flow_free(sa, flow);
2608 
2609 fail_no_mem:
2610 	return NULL;
2611 }
2612 
2613 static int
2614 sfc_flow_destroy(struct rte_eth_dev *dev,
2615 		 struct rte_flow *flow,
2616 		 struct rte_flow_error *error)
2617 {
2618 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2619 	int rc;
2620 
2621 	sfc_adapter_lock(sa);
2622 	rc = sfc_flow_destroy_locked(sa, flow, error);
2623 	sfc_adapter_unlock(sa);
2624 
2625 	return rc;
2626 }
2627 
2628 int
2629 sfc_flow_destroy_locked(struct sfc_adapter *sa, struct rte_flow *flow,
2630 			struct rte_flow_error *error)
2631 {
2632 	struct rte_flow *flow_ptr;
2633 	int rc = EINVAL;
2634 
2635 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2636 
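	/* Make sure that the flow handle was created on this port. */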
2637 	TAILQ_FOREACH(flow_ptr, &sa->flow_list, entries) {
2638 		if (flow_ptr == flow)
2639 			rc = 0;
2640 	}
2641 	if (rc != 0) {
2642 		rte_flow_error_set(error, rc,
2643 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2644 				   "Failed to find flow rule to destroy");
2645 		goto fail_bad_value;
2646 	}
2647 
2648 	if (flow->internal || sa->state == SFC_ETHDEV_STARTED)
2649 		rc = sfc_flow_remove(sa, flow, error);
2650 
2651 	TAILQ_REMOVE(&sa->flow_list, flow, entries);
2652 	sfc_flow_free(sa, flow);
2653 
2654 fail_bad_value:
2655 	return -rc;
2656 }
2657 
2658 static int
2659 sfc_flow_flush(struct rte_eth_dev *dev,
2660 	       struct rte_flow_error *error)
2661 {
2662 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2663 	struct rte_flow *flow;
2664 	int ret = 0;
2665 	void *tmp;
2666 
2667 	sfc_adapter_lock(sa);
2668 
2669 	RTE_TAILQ_FOREACH_SAFE(flow, &sa->flow_list, entries, tmp) {
2670 		if (flow->internal)
2671 			continue;
2672 
2673 		if (sa->state == SFC_ETHDEV_STARTED) {
2674 			int rc;
2675 
2676 			rc = sfc_flow_remove(sa, flow, error);
2677 			if (rc != 0)
2678 				ret = rc;
2679 		}
2680 
2681 		TAILQ_REMOVE(&sa->flow_list, flow, entries);
2682 		sfc_flow_free(sa, flow);
2683 	}
2684 
2685 	sfc_adapter_unlock(sa);
2686 
2687 	return -ret;
2688 }
2689 
2690 static int
2691 sfc_flow_query(struct rte_eth_dev *dev,
2692 	       struct rte_flow *flow,
2693 	       const struct rte_flow_action *action,
2694 	       void *data,
2695 	       struct rte_flow_error *error)
2696 {
2697 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2698 	const struct sfc_flow_ops_by_spec *ops;
2699 	int ret;
2700 
2701 	sfc_adapter_lock(sa);
2702 
2703 	ops = sfc_flow_get_ops_by_spec(flow);
2704 	if (ops == NULL || ops->query == NULL) {
2705 		ret = rte_flow_error_set(error, ENOTSUP,
2706 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2707 			"No backend to handle this flow");
2708 		goto fail_no_backend;
2709 	}
2710 
2711 	if (sa->state != SFC_ETHDEV_STARTED) {
2712 		ret = rte_flow_error_set(error, EINVAL,
2713 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2714 			"Can't query the flow: the adapter is not started");
2715 		goto fail_not_started;
2716 	}
2717 
2718 	ret = ops->query(dev, flow, action, data, error);
2719 	if (ret != 0)
2720 		goto fail_query;
2721 
2722 	sfc_adapter_unlock(sa);
2723 
2724 	return 0;
2725 
2726 fail_query:
2727 fail_not_started:
2728 fail_no_backend:
2729 	sfc_adapter_unlock(sa);
2730 	return ret;
2731 }
2732 
2733 static int
2734 sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
2735 		 struct rte_flow_error *error)
2736 {
2737 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2738 	int ret = 0;
2739 
2740 	sfc_adapter_lock(sa);
2741 	if (sa->state != SFC_ETHDEV_INITIALIZED) {
2742 		rte_flow_error_set(error, EBUSY,
2743 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2744 				   NULL, "please close the port first");
2745 		ret = -rte_errno;
2746 	} else {
2747 		sfc_sa2shared(sa)->isolated = (enable) ? B_TRUE : B_FALSE;
2748 	}
2749 	sfc_adapter_unlock(sa);
2750 
2751 	return ret;
2752 }
2753 
2754 static int
2755 sfc_flow_pick_transfer_proxy(struct rte_eth_dev *dev,
2756 			     uint16_t *transfer_proxy_port,
2757 			     struct rte_flow_error *error)
2758 {
2759 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2760 	int ret;
2761 
2762 	ret = sfc_mae_get_switch_domain_admin(sa->mae.switch_domain_id,
2763 					      transfer_proxy_port);
2764 	if (ret != 0) {
2765 		return rte_flow_error_set(error, ret,
2766 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2767 					  NULL, NULL);
2768 	}
2769 
2770 	return 0;
2771 }
2772 
2773 static struct rte_flow_action_handle *
2774 sfc_flow_action_handle_create(struct rte_eth_dev *dev,
2775 			      const struct rte_flow_indir_action_conf *conf,
2776 			      const struct rte_flow_action *action,
2777 			      struct rte_flow_error *error)
2778 {
2779 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2780 	struct rte_flow_action_handle *handle;
2781 	int ret;
2782 
2783 	if (!conf->transfer) {
2784 		rte_flow_error_set(error, ENOTSUP,
2785 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2786 				   "non-transfer domain does not support indirect actions");
2787 		return NULL;
2788 	}
2789 
2790 	if (conf->ingress || conf->egress) {
2791 		rte_flow_error_set(error, EINVAL,
2792 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2793 				   NULL, "cannot combine ingress/egress with transfer");
2794 		return NULL;
2795 	}
2796 
2797 	handle = rte_zmalloc("sfc_rte_flow_action_handle", sizeof(*handle), 0);
2798 	if (handle == NULL) {
2799 		rte_flow_error_set(error, ENOMEM,
2800 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2801 				   "failed to allocate memory");
2802 		return NULL;
2803 	}
2804 
2805 	sfc_adapter_lock(sa);
2806 
2807 	ret = sfc_mae_indir_action_create(sa, action, handle, error);
2808 	if (ret != 0) {
2809 		sfc_adapter_unlock(sa);
2810 		rte_free(handle);
2811 		return NULL;
2812 	}
2813 
2814 	TAILQ_INSERT_TAIL(&sa->flow_indir_actions, handle, entries);
2815 
2816 	handle->transfer = (bool)conf->transfer;
2817 
2818 	sfc_adapter_unlock(sa);
2819 
2820 	return handle;
2821 }
2822 
2823 static int
2824 sfc_flow_action_handle_destroy(struct rte_eth_dev *dev,
2825 			       struct rte_flow_action_handle *handle,
2826 			       struct rte_flow_error *error)
2827 {
2828 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2829 	struct rte_flow_action_handle *entry;
2830 	int rc = EINVAL;
2831 
2832 	sfc_adapter_lock(sa);
2833 
2834 	TAILQ_FOREACH(entry, &sa->flow_indir_actions, entries) {
2835 		if (entry != handle)
2836 			continue;
2837 
2838 		if (entry->transfer) {
2839 			rc = sfc_mae_indir_action_destroy(sa, handle,
2840 							  error);
2841 			if (rc != 0)
2842 				goto exit;
2843 		} else {
2844 			SFC_ASSERT(B_FALSE);
2845 		}
2846 
2847 		TAILQ_REMOVE(&sa->flow_indir_actions, entry, entries);
2848 		rte_free(entry);
2849 		goto exit;
2850 	}
2851 
2852 	rc = rte_flow_error_set(error, ENOENT,
2853 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2854 				"indirect action handle not found");
2855 
2856 exit:
2857 	sfc_adapter_unlock(sa);
2858 	return rc;
2859 }
2860 
2861 static int
2862 sfc_flow_action_handle_update(struct rte_eth_dev *dev,
2863 			      struct rte_flow_action_handle *handle,
2864 			      const void *update, struct rte_flow_error *error)
2865 {
2866 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2867 	struct rte_flow_action_handle *entry;
2868 	int rc = EINVAL;
2869 
2870 	sfc_adapter_lock(sa);
2871 
2872 	TAILQ_FOREACH(entry, &sa->flow_indir_actions, entries) {
2873 		if (entry != handle)
2874 			continue;
2875 
2876 		if (entry->transfer) {
2877 			rc = sfc_mae_indir_action_update(sa, handle,
2878 							 update, error);
2879 		} else {
2880 			SFC_ASSERT(B_FALSE);
2881 		}
2882 
2883 		goto exit;
2884 	}
2885 
2886 	rc = rte_flow_error_set(error, ENOENT,
2887 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2888 				"indirect action handle not found");
2889 
2890 exit:
2891 	sfc_adapter_unlock(sa);
2892 	return rc;
2893 }
2894 
2895 static int
2896 sfc_flow_action_handle_query(struct rte_eth_dev *dev,
2897 			     const struct rte_flow_action_handle *handle,
2898 			     void *data, struct rte_flow_error *error)
2899 {
2900 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2901 	struct rte_flow_action_handle *entry;
2902 	int rc = EINVAL;
2903 
2904 	sfc_adapter_lock(sa);
2905 
2906 	TAILQ_FOREACH(entry, &sa->flow_indir_actions, entries) {
2907 		if (entry != handle)
2908 			continue;
2909 
2910 		if (entry->transfer) {
2911 			rc = sfc_mae_indir_action_query(sa, handle,
2912 							data, error);
2913 		} else {
2914 			SFC_ASSERT(B_FALSE);
2915 		}
2916 
2917 		goto exit;
2918 	}
2919 
2920 	rc = rte_flow_error_set(error, ENOENT,
2921 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2922 				"indirect action handle not found");
2923 
2924 exit:
2925 	sfc_adapter_unlock(sa);
2926 	return rc;
2927 }
2928 
2929 const struct rte_flow_ops sfc_flow_ops = {
2930 	.validate = sfc_flow_validate,
2931 	.create = sfc_flow_create,
2932 	.destroy = sfc_flow_destroy,
2933 	.flush = sfc_flow_flush,
2934 	.query = sfc_flow_query,
2935 	.isolate = sfc_flow_isolate,
2936 	.action_handle_create = sfc_flow_action_handle_create,
2937 	.action_handle_destroy = sfc_flow_action_handle_destroy,
2938 	.action_handle_update = sfc_flow_action_handle_update,
2939 	.action_handle_query = sfc_flow_action_handle_query,
2940 	.tunnel_decap_set = sfc_ft_decap_set,
2941 	.tunnel_match = sfc_ft_match,
2942 	.tunnel_action_decap_release = sfc_ft_action_decap_release,
2943 	.tunnel_item_release = sfc_ft_item_release,
2944 	.get_restore_info = sfc_ft_get_restore_info,
2945 	.pick_transfer_proxy = sfc_flow_pick_transfer_proxy,
2946 };
2947 
2948 void
2949 sfc_flow_init(struct sfc_adapter *sa)
2950 {
2951 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2952 
2953 	TAILQ_INIT(&sa->flow_indir_actions);
2954 	TAILQ_INIT(&sa->flow_list);
2955 }
2956 
2957 void
2958 sfc_flow_fini(struct sfc_adapter *sa)
2959 {
2960 	struct rte_flow *flow;
2961 	void *tmp;
2962 
2963 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2964 
2965 	RTE_TAILQ_FOREACH_SAFE(flow, &sa->flow_list, entries, tmp) {
2966 		if (flow->internal)
2967 			continue;
2968 
2969 		TAILQ_REMOVE(&sa->flow_list, flow, entries);
2970 		sfc_flow_free(sa, flow);
2971 	}
2972 }
2973 
2974 void
2975 sfc_flow_stop(struct sfc_adapter *sa)
2976 {
2977 	struct rte_flow *flow;
2978 
2979 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2980 
2981 	TAILQ_FOREACH(flow, &sa->flow_list, entries) {
2982 		if (!flow->internal)
2983 			sfc_flow_remove(sa, flow, NULL);
2984 	}
2985 
2986 	/*
2987 	 * MAE counter service is not stopped on flow rule remove to avoid
2988 	 * extra work. Make sure that it is stopped here.
2989 	 */
2990 	sfc_mae_counter_stop(sa);
2991 }
2992 
2993 int
2994 sfc_flow_start(struct sfc_adapter *sa)
2995 {
2996 	struct rte_flow *flow;
2997 	int rc = 0;
2998 
2999 	sfc_log_init(sa, "entry");
3000 
3001 	SFC_ASSERT(sfc_adapter_is_locked(sa));
3002 
3003 	sfc_ft_counters_reset(sa);
3004 
3005 	TAILQ_FOREACH(flow, &sa->flow_list, entries) {
3006 		if (flow->internal)
3007 			continue;
3008 
3009 		rc = sfc_flow_insert(sa, flow, NULL);
3010 		if (rc != 0)
3011 			goto fail_bad_flow;
3012 	}
3013 
3014 	sfc_log_init(sa, "done");
3015 
3016 fail_bad_flow:
3017 	return rc;
3018 }
3019 
3020 static void
3021 sfc_flow_cleanup(struct sfc_adapter *sa, struct rte_flow *flow)
3022 {
3023 	if (flow == NULL)
3024 		return;
3025 
3026 	sfc_flow_rss_ctx_del(sa, flow->spec.filter.rss_ctx);
3027 }
3028