xref: /dpdk/drivers/net/sfc/sfc_flow.c (revision 7917b0d38e92e8b9ec5a870415b791420e10f11a)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright(c) 2019-2021 Xilinx, Inc.
4  * Copyright(c) 2017-2019 Solarflare Communications Inc.
5  *
6  * This software was jointly developed between OKTET Labs (under contract
7  * for Solarflare) and Solarflare Communications, Inc.
8  */
9 
10 #include <stdbool.h>
11 
12 #include <rte_byteorder.h>
13 #include <rte_tailq.h>
14 #include <rte_common.h>
15 #include <ethdev_driver.h>
16 #include <rte_ether.h>
17 #include <rte_flow.h>
18 #include <rte_flow_driver.h>
19 
20 #include "efx.h"
21 
22 #include "sfc.h"
23 #include "sfc_debug.h"
24 #include "sfc_rx.h"
25 #include "sfc_filter.h"
26 #include "sfc_flow.h"
27 #include "sfc_flow_rss.h"
28 #include "sfc_flow_tunnel.h"
29 #include "sfc_log.h"
30 #include "sfc_dp_rx.h"
31 #include "sfc_mae_counter.h"
32 #include "sfc_switch.h"
33 
34 struct sfc_flow_ops_by_spec {
35 	sfc_flow_parse_cb_t	*parse;
36 	sfc_flow_verify_cb_t	*verify;
37 	sfc_flow_cleanup_cb_t	*cleanup;
38 	sfc_flow_insert_cb_t	*insert;
39 	sfc_flow_remove_cb_t	*remove;
40 	sfc_flow_query_cb_t	*query;
41 };
42 
43 static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter;
44 static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_mae;
45 static sfc_flow_insert_cb_t sfc_flow_filter_insert;
46 static sfc_flow_remove_cb_t sfc_flow_filter_remove;
47 static sfc_flow_cleanup_cb_t sfc_flow_cleanup;
48 
49 static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = {
50 	.parse = sfc_flow_parse_rte_to_filter,
51 	.verify = NULL,
52 	.cleanup = sfc_flow_cleanup,
53 	.insert = sfc_flow_filter_insert,
54 	.remove = sfc_flow_filter_remove,
55 	.query = NULL,
56 };
57 
58 static const struct sfc_flow_ops_by_spec sfc_flow_ops_mae = {
59 	.parse = sfc_flow_parse_rte_to_mae,
60 	.verify = sfc_mae_flow_verify,
61 	.cleanup = sfc_mae_flow_cleanup,
62 	.insert = sfc_mae_flow_insert,
63 	.remove = sfc_mae_flow_remove,
64 	.query = sfc_mae_flow_query,
65 };
66 
67 static const struct sfc_flow_ops_by_spec *
68 sfc_flow_get_ops_by_spec(struct rte_flow *flow)
69 {
70 	struct sfc_flow_spec *spec = &flow->spec;
71 	const struct sfc_flow_ops_by_spec *ops = NULL;
72 
73 	switch (spec->type) {
74 	case SFC_FLOW_SPEC_FILTER:
75 		ops = &sfc_flow_ops_filter;
76 		break;
77 	case SFC_FLOW_SPEC_MAE:
78 		ops = &sfc_flow_ops_mae;
79 		break;
80 	default:
81 		SFC_ASSERT(false);
82 		break;
83 	}
84 
85 	return ops;
86 }
87 
88 /*
89  * Currently, the filter-based (VNIC) flow API is implemented in such a
90  * manner that each flow rule is converted to one or more hardware filters.
91  * All elements of a flow rule (attributes, pattern items, actions)
92  * correspond to one or more fields in the efx_filter_spec_s structure
93  * that describes the hardware filter.
94  * If some required field is left unset in the flow rule, then a set
95  * of filter copies is created to cover all possible values
96  * of such a field.
97  */
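
/*
 * For example, if a flow rule does not specify the destination MAC
 * address, the specification may be duplicated with the
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST and
 * EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags set (see
 * sfc_flow_set_unknown_dst_flags() below) so that both unicast and
 * multicast destinations are covered.
 */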
98 
99 static sfc_flow_item_parse sfc_flow_parse_void;
100 static sfc_flow_item_parse sfc_flow_parse_eth;
101 static sfc_flow_item_parse sfc_flow_parse_vlan;
102 static sfc_flow_item_parse sfc_flow_parse_ipv4;
103 static sfc_flow_item_parse sfc_flow_parse_ipv6;
104 static sfc_flow_item_parse sfc_flow_parse_tcp;
105 static sfc_flow_item_parse sfc_flow_parse_udp;
106 static sfc_flow_item_parse sfc_flow_parse_vxlan;
107 static sfc_flow_item_parse sfc_flow_parse_geneve;
108 static sfc_flow_item_parse sfc_flow_parse_nvgre;
109 static sfc_flow_item_parse sfc_flow_parse_pppoex;
110 
111 typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
112 				     unsigned int filters_count_for_one_val,
113 				     struct rte_flow_error *error);
114 
115 typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
116 					efx_filter_spec_t *spec,
117 					struct sfc_filter *filter);
118 
119 struct sfc_flow_copy_flag {
120 	/* EFX filter specification match flag */
121 	efx_filter_match_flags_t flag;
122 	/* Number of values of corresponding field */
123 	unsigned int vals_count;
124 	/* Function to set values in specifications */
125 	sfc_flow_spec_set_vals *set_vals;
126 	/*
127 	 * Function to check that the specification is suitable
128 	 * for adding this match flag
129 	 */
130 	sfc_flow_spec_check *spec_check;
131 };
132 
133 static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
134 static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
135 static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
136 static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
137 static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
138 static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag;
139 static sfc_flow_spec_check sfc_flow_check_outer_vid_flag;
140 
141 static boolean_t
142 sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
143 {
144 	uint8_t sum = 0;
145 	unsigned int i;
146 
147 	for (i = 0; i < size; i++)
148 		sum |= buf[i];
149 
150 	return (sum == 0) ? B_TRUE : B_FALSE;
151 }
152 
153 /*
154  * Validate the item and prepare the spec and mask structures for parsing
155  */
156 int
157 sfc_flow_parse_init(const struct rte_flow_item *item,
158 		    const void **spec_ptr,
159 		    const void **mask_ptr,
160 		    const void *supp_mask,
161 		    const void *def_mask,
162 		    unsigned int size,
163 		    struct rte_flow_error *error)
164 {
165 	const uint8_t *spec;
166 	const uint8_t *mask;
167 	const uint8_t *last;
168 	uint8_t supp;
169 	unsigned int i;
170 
171 	if (item == NULL) {
172 		rte_flow_error_set(error, EINVAL,
173 				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
174 				   "NULL item");
175 		return -rte_errno;
176 	}
177 
178 	if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
179 		rte_flow_error_set(error, EINVAL,
180 				   RTE_FLOW_ERROR_TYPE_ITEM, item,
181 				   "Mask or last is set without spec");
182 		return -rte_errno;
183 	}
184 
185 	/*
186 	 * If "mask" is not set, the default mask is used,
187 	 * but if the default mask is NULL, "mask" must be set
188 	 */
189 	if (item->mask == NULL) {
190 		if (def_mask == NULL) {
191 			rte_flow_error_set(error, EINVAL,
192 				RTE_FLOW_ERROR_TYPE_ITEM, NULL,
193 				"Mask should be specified");
194 			return -rte_errno;
195 		}
196 
197 		mask = def_mask;
198 	} else {
199 		mask = item->mask;
200 	}
201 
202 	spec = item->spec;
203 	last = item->last;
204 
205 	if (spec == NULL)
206 		goto exit;
207 
208 	/*
209 	 * If field values in "last" are either 0 or equal to the corresponding
210 	 * values in "spec" then they are ignored
211 	 */
212 	if (last != NULL &&
213 	    !sfc_flow_is_zero(last, size) &&
214 	    memcmp(last, spec, size) != 0) {
215 		rte_flow_error_set(error, ENOTSUP,
216 				   RTE_FLOW_ERROR_TYPE_ITEM, item,
217 				   "Ranging is not supported");
218 		return -rte_errno;
219 	}
220 
221 	if (supp_mask == NULL) {
222 		rte_flow_error_set(error, EINVAL,
223 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
224 			"Supported mask for item should be specified");
225 		return -rte_errno;
226 	}
227 
228 	/* Check that mask does not ask for more match than supp_mask */
229 	for (i = 0; i < size; i++) {
230 		supp = ((const uint8_t *)supp_mask)[i];
231 
232 		if (~supp & mask[i]) {
233 			rte_flow_error_set(error, ENOTSUP,
234 					   RTE_FLOW_ERROR_TYPE_ITEM, item,
235 					   "Item's field is not supported");
236 			return -rte_errno;
237 		}
238 	}
239 
240 exit:
241 	*spec_ptr = spec;
242 	*mask_ptr = mask;
243 	return 0;
244 }
245 
246 /*
247  * Protocol parsers.
248  * Arbitrary masking is not supported, so masks in items must be either
249  * full or empty (zeroed) and set only for the supported fields which
250  * are specified in the supp_mask.
251  */
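
/*
 * For example, an IPv4 item with an hdr.src_addr mask of 0xffffffff (full)
 * or 0 (empty) is accepted, whereas a prefix mask such as 0xffffff00 is
 * rejected ("Bad mask in the IPV4 pattern item").
 */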
252 
253 static int
254 sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
255 		    __rte_unused struct sfc_flow_parse_ctx *parse_ctx,
256 		    __rte_unused struct rte_flow_error *error)
257 {
258 	return 0;
259 }
260 
261 /**
262  * Convert Ethernet item to EFX filter specification.
263  *
264  * @param item[in]
265  *   Item specification. Outer frame specification may only comprise
266  *   source/destination addresses and Ethertype field.
267  *   Inner frame specification may contain destination address only.
268  *   Individual/group masks are supported, as well as empty and full masks.
269  *   If the mask is NULL, the default mask is used. Ranging is not supported.
270  * @param efx_spec[in, out]
271  *   EFX filter specification to update.
272  * @param[out] error
273  *   Perform verbose error reporting if not NULL.
274  */
275 static int
276 sfc_flow_parse_eth(const struct rte_flow_item *item,
277 		   struct sfc_flow_parse_ctx *parse_ctx,
278 		   struct rte_flow_error *error)
279 {
280 	int rc;
281 	efx_filter_spec_t *efx_spec = parse_ctx->filter;
282 	const struct rte_flow_item_eth *spec = NULL;
283 	const struct rte_flow_item_eth *mask = NULL;
284 	const struct rte_flow_item_eth supp_mask = {
285 		.hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
286 		.hdr.src_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
287 		.hdr.ether_type = 0xffff,
288 	};
289 	const struct rte_flow_item_eth ifrm_supp_mask = {
290 		.hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
291 	};
292 	const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
293 		0x01, 0x00, 0x00, 0x00, 0x00, 0x00
294 	};
295 	const struct rte_flow_item_eth *supp_mask_p;
296 	const struct rte_flow_item_eth *def_mask_p;
297 	uint8_t *loc_mac = NULL;
298 	boolean_t is_ifrm = (efx_spec->efs_encap_type !=
299 		EFX_TUNNEL_PROTOCOL_NONE);
300 
301 	if (is_ifrm) {
302 		supp_mask_p = &ifrm_supp_mask;
303 		def_mask_p = &ifrm_supp_mask;
304 		loc_mac = efx_spec->efs_ifrm_loc_mac;
305 	} else {
306 		supp_mask_p = &supp_mask;
307 		def_mask_p = &rte_flow_item_eth_mask;
308 		loc_mac = efx_spec->efs_loc_mac;
309 	}
310 
311 	rc = sfc_flow_parse_init(item,
312 				 (const void **)&spec,
313 				 (const void **)&mask,
314 				 supp_mask_p, def_mask_p,
315 				 sizeof(struct rte_flow_item_eth),
316 				 error);
317 	if (rc != 0)
318 		return rc;
319 
320 	/* If "spec" is not set, could be any Ethernet */
321 	if (spec == NULL)
322 		return 0;
323 
324 	if (rte_is_same_ether_addr(&mask->hdr.dst_addr, &supp_mask.hdr.dst_addr)) {
325 		efx_spec->efs_match_flags |= is_ifrm ?
326 			EFX_FILTER_MATCH_IFRM_LOC_MAC :
327 			EFX_FILTER_MATCH_LOC_MAC;
328 		rte_memcpy(loc_mac, spec->hdr.dst_addr.addr_bytes,
329 			   EFX_MAC_ADDR_LEN);
330 	} else if (memcmp(mask->hdr.dst_addr.addr_bytes, ig_mask,
331 			  EFX_MAC_ADDR_LEN) == 0) {
332 		if (rte_is_unicast_ether_addr(&spec->hdr.dst_addr))
333 			efx_spec->efs_match_flags |= is_ifrm ?
334 				EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
335 				EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
336 		else
337 			efx_spec->efs_match_flags |= is_ifrm ?
338 				EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
339 				EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
340 	} else if (!rte_is_zero_ether_addr(&mask->hdr.dst_addr)) {
341 		goto fail_bad_mask;
342 	}
343 
344 	/*
345 	 * ifrm_supp_mask ensures that the source address and
346 	 * ethertype masks are equal to zero in inner frame,
347 	 * so these fields are filled in only for the outer frame
348 	 */
349 	if (rte_is_same_ether_addr(&mask->hdr.src_addr, &supp_mask.hdr.src_addr)) {
350 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
351 		rte_memcpy(efx_spec->efs_rem_mac, spec->hdr.src_addr.addr_bytes,
352 			   EFX_MAC_ADDR_LEN);
353 	} else if (!rte_is_zero_ether_addr(&mask->hdr.src_addr)) {
354 		goto fail_bad_mask;
355 	}
356 
357 	/*
358 	 * Ether type is in big-endian byte order in item and
359 	 * in little-endian in efx_spec, so byte swap is used
360 	 */
361 	if (mask->hdr.ether_type == supp_mask.hdr.ether_type) {
362 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
363 		efx_spec->efs_ether_type = rte_bswap16(spec->hdr.ether_type);
364 	} else if (mask->hdr.ether_type != 0) {
365 		goto fail_bad_mask;
366 	}
367 
368 	return 0;
369 
370 fail_bad_mask:
371 	rte_flow_error_set(error, EINVAL,
372 			   RTE_FLOW_ERROR_TYPE_ITEM, item,
373 			   "Bad mask in the ETH pattern item");
374 	return -rte_errno;
375 }
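
/*
 * For example, in the outer frame a fully-masked destination address adds
 * an exact EFX_FILTER_MATCH_LOC_MAC match, whereas a destination address
 * mask of 01:00:00:00:00:00 (the individual/group bit only) results in
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST or EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
 * depending on whether the address in the spec is unicast or multicast.
 */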
376 
377 /**
378  * Convert VLAN item to EFX filter specification.
379  *
380  * @param item[in]
381  *   Item specification. Only the VID and inner EtherType fields are
382  *   supported. The mask cannot be NULL. Ranging is not supported.
383  * @param efx_spec[in, out]
384  *   EFX filter specification to update.
385  * @param[out] error
386  *   Perform verbose error reporting if not NULL.
387  */
388 static int
389 sfc_flow_parse_vlan(const struct rte_flow_item *item,
390 		    struct sfc_flow_parse_ctx *parse_ctx,
391 		    struct rte_flow_error *error)
392 {
393 	int rc;
394 	uint16_t vid;
395 	efx_filter_spec_t *efx_spec = parse_ctx->filter;
396 	const struct rte_flow_item_vlan *spec = NULL;
397 	const struct rte_flow_item_vlan *mask = NULL;
398 	const struct rte_flow_item_vlan supp_mask = {
399 		.hdr.vlan_tci = rte_cpu_to_be_16(RTE_ETH_VLAN_ID_MAX),
400 		.hdr.eth_proto = RTE_BE16(0xffff),
401 	};
402 
403 	rc = sfc_flow_parse_init(item,
404 				 (const void **)&spec,
405 				 (const void **)&mask,
406 				 &supp_mask,
407 				 NULL,
408 				 sizeof(struct rte_flow_item_vlan),
409 				 error);
410 	if (rc != 0)
411 		return rc;
412 
413 	/*
414 	 * VID is in big-endian byte order in item and
415 	 * in little-endian in efx_spec, so byte swap is used.
416 	 * If two VLAN items are included, the first matches
417 	 * the outer tag and the next matches the inner tag.
418 	 */
419 	if (mask->hdr.vlan_tci == supp_mask.hdr.vlan_tci) {
420 		/* Apply mask to keep VID only */
421 		vid = rte_bswap16(spec->hdr.vlan_tci & mask->hdr.vlan_tci);
422 
423 		if (!(efx_spec->efs_match_flags &
424 		      EFX_FILTER_MATCH_OUTER_VID)) {
425 			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
426 			efx_spec->efs_outer_vid = vid;
427 		} else if (!(efx_spec->efs_match_flags &
428 			     EFX_FILTER_MATCH_INNER_VID)) {
429 			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
430 			efx_spec->efs_inner_vid = vid;
431 		} else {
432 			rte_flow_error_set(error, EINVAL,
433 					   RTE_FLOW_ERROR_TYPE_ITEM, item,
434 					   "More than two VLAN items");
435 			return -rte_errno;
436 		}
437 	} else {
438 		rte_flow_error_set(error, EINVAL,
439 				   RTE_FLOW_ERROR_TYPE_ITEM, item,
440 				   "VLAN ID in TCI match is required");
441 		return -rte_errno;
442 	}
443 
444 	if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
445 		rte_flow_error_set(error, EINVAL,
446 				   RTE_FLOW_ERROR_TYPE_ITEM, item,
447 				   "VLAN TPID matching is not supported");
448 		return -rte_errno;
449 	}
450 	if (mask->hdr.eth_proto == supp_mask.hdr.eth_proto) {
451 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
452 		efx_spec->efs_ether_type = rte_bswap16(spec->hdr.eth_proto);
453 	} else if (mask->hdr.eth_proto) {
454 		rte_flow_error_set(error, EINVAL,
455 				   RTE_FLOW_ERROR_TYPE_ITEM, item,
456 				   "Bad mask for VLAN inner type");
457 		return -rte_errno;
458 	}
459 
460 	return 0;
461 }
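
/*
 * For example, a pattern with two VLAN items matches double-tagged
 * (QinQ) traffic: the first item fills in efs_outer_vid and the second
 * one fills in efs_inner_vid; a third VLAN item is rejected.
 */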
462 
463 /**
464  * Convert IPv4 item to EFX filter specification.
465  *
466  * @param item[in]
467  *   Item specification. Only source and destination addresses and
468  *   protocol fields are supported. If the mask is NULL, default
469  *   mask will be used. Ranging is not supported.
470  * @param efx_spec[in, out]
471  *   EFX filter specification to update.
472  * @param[out] error
473  *   Perform verbose error reporting if not NULL.
474  */
475 static int
476 sfc_flow_parse_ipv4(const struct rte_flow_item *item,
477 		    struct sfc_flow_parse_ctx *parse_ctx,
478 		    struct rte_flow_error *error)
479 {
480 	int rc;
481 	efx_filter_spec_t *efx_spec = parse_ctx->filter;
482 	const struct rte_flow_item_ipv4 *spec = NULL;
483 	const struct rte_flow_item_ipv4 *mask = NULL;
484 	const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
485 	const struct rte_flow_item_ipv4 supp_mask = {
486 		.hdr = {
487 			.src_addr = 0xffffffff,
488 			.dst_addr = 0xffffffff,
489 			.next_proto_id = 0xff,
490 		}
491 	};
492 
493 	rc = sfc_flow_parse_init(item,
494 				 (const void **)&spec,
495 				 (const void **)&mask,
496 				 &supp_mask,
497 				 &rte_flow_item_ipv4_mask,
498 				 sizeof(struct rte_flow_item_ipv4),
499 				 error);
500 	if (rc != 0)
501 		return rc;
502 
503 	/*
504 	 * Filtering by IPv4 source and destination addresses requires
505 	 * the appropriate ETHER_TYPE in hardware filters
506 	 */
507 	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
508 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
509 		efx_spec->efs_ether_type = ether_type_ipv4;
510 	} else if (efx_spec->efs_ether_type != ether_type_ipv4) {
511 		rte_flow_error_set(error, EINVAL,
512 			RTE_FLOW_ERROR_TYPE_ITEM, item,
513 			"Ethertype in pattern with IPV4 item should be appropriate");
514 		return -rte_errno;
515 	}
516 
517 	if (spec == NULL)
518 		return 0;
519 
520 	/*
521 	 * IPv4 addresses are in big-endian byte order in item and in
522 	 * efx_spec
523 	 */
524 	if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
525 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
526 		efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
527 	} else if (mask->hdr.src_addr != 0) {
528 		goto fail_bad_mask;
529 	}
530 
531 	if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
532 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
533 		efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
534 	} else if (mask->hdr.dst_addr != 0) {
535 		goto fail_bad_mask;
536 	}
537 
538 	if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
539 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
540 		efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
541 	} else if (mask->hdr.next_proto_id != 0) {
542 		goto fail_bad_mask;
543 	}
544 
545 	return 0;
546 
547 fail_bad_mask:
548 	rte_flow_error_set(error, EINVAL,
549 			   RTE_FLOW_ERROR_TYPE_ITEM, item,
550 			   "Bad mask in the IPV4 pattern item");
551 	return -rte_errno;
552 }
553 
554 /**
555  * Convert IPv6 item to EFX filter specification.
556  *
557  * @param item[in]
558  *   Item specification. Only source and destination addresses and
559  *   next header fields are supported. If the mask is NULL, default
560  *   mask will be used. Ranging is not supported.
561  * @param efx_spec[in, out]
562  *   EFX filter specification to update.
563  * @param[out] error
564  *   Perform verbose error reporting if not NULL.
565  */
566 static int
567 sfc_flow_parse_ipv6(const struct rte_flow_item *item,
568 		    struct sfc_flow_parse_ctx *parse_ctx,
569 		    struct rte_flow_error *error)
570 {
571 	int rc;
572 	efx_filter_spec_t *efx_spec = parse_ctx->filter;
573 	const struct rte_flow_item_ipv6 *spec = NULL;
574 	const struct rte_flow_item_ipv6 *mask = NULL;
575 	const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
576 	const struct rte_flow_item_ipv6 supp_mask = {
577 		.hdr = {
578 			.src_addr = { 0xff, 0xff, 0xff, 0xff,
579 				      0xff, 0xff, 0xff, 0xff,
580 				      0xff, 0xff, 0xff, 0xff,
581 				      0xff, 0xff, 0xff, 0xff },
582 			.dst_addr = { 0xff, 0xff, 0xff, 0xff,
583 				      0xff, 0xff, 0xff, 0xff,
584 				      0xff, 0xff, 0xff, 0xff,
585 				      0xff, 0xff, 0xff, 0xff },
586 			.proto = 0xff,
587 		}
588 	};
589 
590 	rc = sfc_flow_parse_init(item,
591 				 (const void **)&spec,
592 				 (const void **)&mask,
593 				 &supp_mask,
594 				 &rte_flow_item_ipv6_mask,
595 				 sizeof(struct rte_flow_item_ipv6),
596 				 error);
597 	if (rc != 0)
598 		return rc;
599 
600 	/*
601 	 * Filtering by IPv6 source and destination addresses requires
602 	 * the appropriate ETHER_TYPE in hardware filters
603 	 */
604 	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
605 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
606 		efx_spec->efs_ether_type = ether_type_ipv6;
607 	} else if (efx_spec->efs_ether_type != ether_type_ipv6) {
608 		rte_flow_error_set(error, EINVAL,
609 			RTE_FLOW_ERROR_TYPE_ITEM, item,
610 			"Ethertype in pattern with IPV6 item should be appropriate");
611 		return -rte_errno;
612 	}
613 
614 	if (spec == NULL)
615 		return 0;
616 
617 	/*
618 	 * IPv6 addresses are in big-endian byte order in item and in
619 	 * efx_spec
620 	 */
621 	if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
622 		   sizeof(mask->hdr.src_addr)) == 0) {
623 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
624 
625 		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
626 				 sizeof(spec->hdr.src_addr));
627 		rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
628 			   sizeof(efx_spec->efs_rem_host));
629 	} else if (!sfc_flow_is_zero(mask->hdr.src_addr,
630 				     sizeof(mask->hdr.src_addr))) {
631 		goto fail_bad_mask;
632 	}
633 
634 	if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
635 		   sizeof(mask->hdr.dst_addr)) == 0) {
636 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
637 
638 		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
639 				 sizeof(spec->hdr.dst_addr));
640 		rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
641 			   sizeof(efx_spec->efs_loc_host));
642 	} else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
643 				     sizeof(mask->hdr.dst_addr))) {
644 		goto fail_bad_mask;
645 	}
646 
647 	if (mask->hdr.proto == supp_mask.hdr.proto) {
648 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
649 		efx_spec->efs_ip_proto = spec->hdr.proto;
650 	} else if (mask->hdr.proto != 0) {
651 		goto fail_bad_mask;
652 	}
653 
654 	return 0;
655 
656 fail_bad_mask:
657 	rte_flow_error_set(error, EINVAL,
658 			   RTE_FLOW_ERROR_TYPE_ITEM, item,
659 			   "Bad mask in the IPV6 pattern item");
660 	return -rte_errno;
661 }
662 
663 /**
664  * Convert TCP item to EFX filter specification.
665  *
666  * @param item[in]
667  *   Item specification. Only source and destination ports fields
668  *   are supported. If the mask is NULL, default mask will be used.
669  *   Ranging is not supported.
670  * @param efx_spec[in, out]
671  *   EFX filter specification to update.
672  * @param[out] error
673  *   Perform verbose error reporting if not NULL.
674  */
675 static int
676 sfc_flow_parse_tcp(const struct rte_flow_item *item,
677 		   struct sfc_flow_parse_ctx *parse_ctx,
678 		   struct rte_flow_error *error)
679 {
680 	int rc;
681 	efx_filter_spec_t *efx_spec = parse_ctx->filter;
682 	const struct rte_flow_item_tcp *spec = NULL;
683 	const struct rte_flow_item_tcp *mask = NULL;
684 	const struct rte_flow_item_tcp supp_mask = {
685 		.hdr = {
686 			.src_port = 0xffff,
687 			.dst_port = 0xffff,
688 		}
689 	};
690 
691 	rc = sfc_flow_parse_init(item,
692 				 (const void **)&spec,
693 				 (const void **)&mask,
694 				 &supp_mask,
695 				 &rte_flow_item_tcp_mask,
696 				 sizeof(struct rte_flow_item_tcp),
697 				 error);
698 	if (rc != 0)
699 		return rc;
700 
701 	/*
702 	 * Filtering by TCP source and destination ports requires
703 	 * the appropriate IP_PROTO in hardware filters
704 	 */
705 	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
706 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
707 		efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
708 	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
709 		rte_flow_error_set(error, EINVAL,
710 			RTE_FLOW_ERROR_TYPE_ITEM, item,
711 			"IP proto in pattern with TCP item should be appropriate");
712 		return -rte_errno;
713 	}
714 
715 	if (spec == NULL)
716 		return 0;
717 
718 	/*
719 	 * Source and destination ports are in big-endian byte order in item and
720 	 * in little-endian in efx_spec, so byte swap is used
721 	 */
722 	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
723 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
724 		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
725 	} else if (mask->hdr.src_port != 0) {
726 		goto fail_bad_mask;
727 	}
728 
729 	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
730 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
731 		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
732 	} else if (mask->hdr.dst_port != 0) {
733 		goto fail_bad_mask;
734 	}
735 
736 	return 0;
737 
738 fail_bad_mask:
739 	rte_flow_error_set(error, EINVAL,
740 			   RTE_FLOW_ERROR_TYPE_ITEM, item,
741 			   "Bad mask in the TCP pattern item");
742 	return -rte_errno;
743 }
744 
745 /**
746  * Convert UDP item to EFX filter specification.
747  *
748  * @param item[in]
749  *   Item specification. Only source and destination ports fields
750  *   are supported. If the mask is NULL, default mask will be used.
751  *   Ranging is not supported.
752  * @param efx_spec[in, out]
753  *   EFX filter specification to update.
754  * @param[out] error
755  *   Perform verbose error reporting if not NULL.
756  */
757 static int
758 sfc_flow_parse_udp(const struct rte_flow_item *item,
759 		   struct sfc_flow_parse_ctx *parse_ctx,
760 		   struct rte_flow_error *error)
761 {
762 	int rc;
763 	efx_filter_spec_t *efx_spec = parse_ctx->filter;
764 	const struct rte_flow_item_udp *spec = NULL;
765 	const struct rte_flow_item_udp *mask = NULL;
766 	const struct rte_flow_item_udp supp_mask = {
767 		.hdr = {
768 			.src_port = 0xffff,
769 			.dst_port = 0xffff,
770 		}
771 	};
772 
773 	rc = sfc_flow_parse_init(item,
774 				 (const void **)&spec,
775 				 (const void **)&mask,
776 				 &supp_mask,
777 				 &rte_flow_item_udp_mask,
778 				 sizeof(struct rte_flow_item_udp),
779 				 error);
780 	if (rc != 0)
781 		return rc;
782 
783 	/*
784 	 * Filtering by UDP source and destination ports requires
785 	 * the appropriate IP_PROTO in hardware filters
786 	 */
787 	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
788 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
789 		efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
790 	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
791 		rte_flow_error_set(error, EINVAL,
792 			RTE_FLOW_ERROR_TYPE_ITEM, item,
793 			"IP proto in pattern with UDP item should be appropriate");
794 		return -rte_errno;
795 	}
796 
797 	if (spec == NULL)
798 		return 0;
799 
800 	/*
801 	 * Source and destination ports are in big-endian byte order in item and
802 	 * in little-endian in efx_spec, so byte swap is used
803 	 */
804 	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
805 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
806 		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
807 	} else if (mask->hdr.src_port != 0) {
808 		goto fail_bad_mask;
809 	}
810 
811 	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
812 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
813 		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
814 	} else if (mask->hdr.dst_port != 0) {
815 		goto fail_bad_mask;
816 	}
817 
818 	return 0;
819 
820 fail_bad_mask:
821 	rte_flow_error_set(error, EINVAL,
822 			   RTE_FLOW_ERROR_TYPE_ITEM, item,
823 			   "Bad mask in the UDP pattern item");
824 	return -rte_errno;
825 }
826 
827 /*
828  * Filters for encapsulated packets match based on the EtherType and IP
829  * protocol in the outer frame.
830  */
831 static int
832 sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
833 					efx_filter_spec_t *efx_spec,
834 					uint8_t ip_proto,
835 					struct rte_flow_error *error)
836 {
837 	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
838 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
839 		efx_spec->efs_ip_proto = ip_proto;
840 	} else if (efx_spec->efs_ip_proto != ip_proto) {
841 		switch (ip_proto) {
842 		case EFX_IPPROTO_UDP:
843 			rte_flow_error_set(error, EINVAL,
844 				RTE_FLOW_ERROR_TYPE_ITEM, item,
845 				"Outer IP header protocol must be UDP "
846 				"in VxLAN/GENEVE pattern");
847 			return -rte_errno;
848 
849 		case EFX_IPPROTO_GRE:
850 			rte_flow_error_set(error, EINVAL,
851 				RTE_FLOW_ERROR_TYPE_ITEM, item,
852 				"Outer IP header protocol must be GRE "
853 				"in NVGRE pattern");
854 			return -rte_errno;
855 
856 		default:
857 			rte_flow_error_set(error, EINVAL,
858 				RTE_FLOW_ERROR_TYPE_ITEM, item,
859 				"Only VxLAN/GENEVE/NVGRE tunneling patterns "
860 				"are supported");
861 			return -rte_errno;
862 		}
863 	}
864 
865 	if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
866 	    efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
867 	    efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
868 		rte_flow_error_set(error, EINVAL,
869 			RTE_FLOW_ERROR_TYPE_ITEM, item,
870 			"Outer frame EtherType in pattern with tunneling "
871 			"must be IPv4 or IPv6");
872 		return -rte_errno;
873 	}
874 
875 	return 0;
876 }
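
/*
 * For example, an ETH / IPV4 / UDP / VXLAN pattern passes the checks
 * above because the UDP item has already set EFX_IPPROTO_UDP, whereas
 * an ETH / IPV4 / TCP / VXLAN pattern is rejected since the outer IP
 * protocol must be UDP for VxLAN/GENEVE.
 */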
877 
878 static int
879 sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
880 				  const uint8_t *vni_or_vsid_val,
881 				  const uint8_t *vni_or_vsid_mask,
882 				  const struct rte_flow_item *item,
883 				  struct rte_flow_error *error)
884 {
885 	const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
886 		0xff, 0xff, 0xff
887 	};
888 
889 	if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
890 		   EFX_VNI_OR_VSID_LEN) == 0) {
891 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
892 		rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
893 			   EFX_VNI_OR_VSID_LEN);
894 	} else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
895 		rte_flow_error_set(error, EINVAL,
896 				   RTE_FLOW_ERROR_TYPE_ITEM, item,
897 				   "Unsupported VNI/VSID mask");
898 		return -rte_errno;
899 	}
900 
901 	return 0;
902 }
903 
904 /**
905  * Convert VXLAN item to EFX filter specification.
906  *
907  * @param item[in]
908  *   Item specification. Only VXLAN network identifier field is supported.
909  *   If the mask is NULL, default mask will be used.
910  *   Ranging is not supported.
911  * @param efx_spec[in, out]
912  *   EFX filter specification to update.
913  * @param[out] error
914  *   Perform verbose error reporting if not NULL.
915  */
916 static int
917 sfc_flow_parse_vxlan(const struct rte_flow_item *item,
918 		     struct sfc_flow_parse_ctx *parse_ctx,
919 		     struct rte_flow_error *error)
920 {
921 	int rc;
922 	efx_filter_spec_t *efx_spec = parse_ctx->filter;
923 	const struct rte_flow_item_vxlan *spec = NULL;
924 	const struct rte_flow_item_vxlan *mask = NULL;
925 	const struct rte_flow_item_vxlan supp_mask = {
926 		.hdr.vni = { 0xff, 0xff, 0xff }
927 	};
928 
929 	rc = sfc_flow_parse_init(item,
930 				 (const void **)&spec,
931 				 (const void **)&mask,
932 				 &supp_mask,
933 				 &rte_flow_item_vxlan_mask,
934 				 sizeof(struct rte_flow_item_vxlan),
935 				 error);
936 	if (rc != 0)
937 		return rc;
938 
939 	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
940 						     EFX_IPPROTO_UDP, error);
941 	if (rc != 0)
942 		return rc;
943 
944 	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
945 	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
946 
947 	if (spec == NULL)
948 		return 0;
949 
950 	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->hdr.vni,
951 					       mask->hdr.vni, item, error);
952 
953 	return rc;
954 }
955 
956 /**
957  * Convert GENEVE item to EFX filter specification.
958  *
959  * @param item[in]
960  *   Item specification. Only Virtual Network Identifier and protocol type
961  *   fields are supported, but the protocol type can only be Ethernet (0x6558).
962  *   If the mask is NULL, default mask will be used.
963  *   Ranging is not supported.
964  * @param efx_spec[in, out]
965  *   EFX filter specification to update.
966  * @param[out] error
967  *   Perform verbose error reporting if not NULL.
968  */
969 static int
970 sfc_flow_parse_geneve(const struct rte_flow_item *item,
971 		      struct sfc_flow_parse_ctx *parse_ctx,
972 		      struct rte_flow_error *error)
973 {
974 	int rc;
975 	efx_filter_spec_t *efx_spec = parse_ctx->filter;
976 	const struct rte_flow_item_geneve *spec = NULL;
977 	const struct rte_flow_item_geneve *mask = NULL;
978 	const struct rte_flow_item_geneve supp_mask = {
979 		.protocol = RTE_BE16(0xffff),
980 		.vni = { 0xff, 0xff, 0xff }
981 	};
982 
983 	rc = sfc_flow_parse_init(item,
984 				 (const void **)&spec,
985 				 (const void **)&mask,
986 				 &supp_mask,
987 				 &rte_flow_item_geneve_mask,
988 				 sizeof(struct rte_flow_item_geneve),
989 				 error);
990 	if (rc != 0)
991 		return rc;
992 
993 	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
994 						     EFX_IPPROTO_UDP, error);
995 	if (rc != 0)
996 		return rc;
997 
998 	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
999 	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
1000 
1001 	if (spec == NULL)
1002 		return 0;
1003 
1004 	if (mask->protocol == supp_mask.protocol) {
1005 		if (spec->protocol != rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
1006 			rte_flow_error_set(error, EINVAL,
1007 				RTE_FLOW_ERROR_TYPE_ITEM, item,
1008 				"GENEVE encap. protocol must be Ethernet "
1009 				"(0x6558) in the GENEVE pattern item");
1010 			return -rte_errno;
1011 		}
1012 	} else if (mask->protocol != 0) {
1013 		rte_flow_error_set(error, EINVAL,
1014 			RTE_FLOW_ERROR_TYPE_ITEM, item,
1015 			"Unsupported mask for GENEVE encap. protocol");
1016 		return -rte_errno;
1017 	}
1018 
1019 	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
1020 					       mask->vni, item, error);
1021 
1022 	return rc;
1023 }
1024 
1025 /**
1026  * Convert NVGRE item to EFX filter specification.
1027  *
1028  * @param item[in]
1029  *   Item specification. Only virtual subnet ID field is supported.
1030  *   If the mask is NULL, default mask will be used.
1031  *   Ranging is not supported.
1032  * @param efx_spec[in, out]
1033  *   EFX filter specification to update.
1034  * @param[out] error
1035  *   Perform verbose error reporting if not NULL.
1036  */
1037 static int
1038 sfc_flow_parse_nvgre(const struct rte_flow_item *item,
1039 		     struct sfc_flow_parse_ctx *parse_ctx,
1040 		     struct rte_flow_error *error)
1041 {
1042 	int rc;
1043 	efx_filter_spec_t *efx_spec = parse_ctx->filter;
1044 	const struct rte_flow_item_nvgre *spec = NULL;
1045 	const struct rte_flow_item_nvgre *mask = NULL;
1046 	const struct rte_flow_item_nvgre supp_mask = {
1047 		.tni = { 0xff, 0xff, 0xff }
1048 	};
1049 
1050 	rc = sfc_flow_parse_init(item,
1051 				 (const void **)&spec,
1052 				 (const void **)&mask,
1053 				 &supp_mask,
1054 				 &rte_flow_item_nvgre_mask,
1055 				 sizeof(struct rte_flow_item_nvgre),
1056 				 error);
1057 	if (rc != 0)
1058 		return rc;
1059 
1060 	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
1061 						     EFX_IPPROTO_GRE, error);
1062 	if (rc != 0)
1063 		return rc;
1064 
1065 	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
1066 	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
1067 
1068 	if (spec == NULL)
1069 		return 0;
1070 
1071 	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
1072 					       mask->tni, item, error);
1073 
1074 	return rc;
1075 }
1076 
1077 /**
1078  * Convert PPPoEx item to EFX filter specification.
1079  *
1080  * @param item[in]
1081  *   Item specification.
1082  *   Matching on PPPoEx fields is not supported.
1083  *   This item can only be used to set or validate the EtherType filter.
1084  *   Only zero masks are allowed.
1085  *   Ranging is not supported.
1086  * @param efx_spec[in, out]
1087  *   EFX filter specification to update.
1088  * @param[out] error
1089  *   Perform verbose error reporting if not NULL.
1090  */
1091 static int
1092 sfc_flow_parse_pppoex(const struct rte_flow_item *item,
1093 		      struct sfc_flow_parse_ctx *parse_ctx,
1094 		      struct rte_flow_error *error)
1095 {
1096 	efx_filter_spec_t *efx_spec = parse_ctx->filter;
1097 	const struct rte_flow_item_pppoe *spec = NULL;
1098 	const struct rte_flow_item_pppoe *mask = NULL;
1099 	const struct rte_flow_item_pppoe supp_mask = {};
1100 	const struct rte_flow_item_pppoe def_mask = {};
1101 	uint16_t ether_type;
1102 	int rc;
1103 
1104 	rc = sfc_flow_parse_init(item,
1105 				 (const void **)&spec,
1106 				 (const void **)&mask,
1107 				 &supp_mask,
1108 				 &def_mask,
1109 				 sizeof(struct rte_flow_item_pppoe),
1110 				 error);
1111 	if (rc != 0)
1112 		return rc;
1113 
1114 	if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED)
1115 		ether_type = RTE_ETHER_TYPE_PPPOE_DISCOVERY;
1116 	else
1117 		ether_type = RTE_ETHER_TYPE_PPPOE_SESSION;
1118 
1119 	if ((efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) != 0) {
1120 		if (efx_spec->efs_ether_type != ether_type) {
1121 			rte_flow_error_set(error, EINVAL,
1122 					   RTE_FLOW_ERROR_TYPE_ITEM, item,
1123 					   "Invalid EtherType for a PPPoE flow item");
1124 			return -rte_errno;
1125 		}
1126 	} else {
1127 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
1128 		efx_spec->efs_ether_type = ether_type;
1129 	}
1130 
1131 	return 0;
1132 }
1133 
1134 static const struct sfc_flow_item sfc_flow_items[] = {
1135 	{
1136 		.type = RTE_FLOW_ITEM_TYPE_VOID,
1137 		.name = "VOID",
1138 		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1139 		.layer = SFC_FLOW_ITEM_ANY_LAYER,
1140 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1141 		.parse = sfc_flow_parse_void,
1142 	},
1143 	{
1144 		.type = RTE_FLOW_ITEM_TYPE_ETH,
1145 		.name = "ETH",
1146 		.prev_layer = SFC_FLOW_ITEM_START_LAYER,
1147 		.layer = SFC_FLOW_ITEM_L2,
1148 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1149 		.parse = sfc_flow_parse_eth,
1150 	},
1151 	{
1152 		.type = RTE_FLOW_ITEM_TYPE_VLAN,
1153 		.name = "VLAN",
1154 		.prev_layer = SFC_FLOW_ITEM_L2,
1155 		.layer = SFC_FLOW_ITEM_L2,
1156 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1157 		.parse = sfc_flow_parse_vlan,
1158 	},
1159 	{
1160 		.type = RTE_FLOW_ITEM_TYPE_PPPOED,
1161 		.name = "PPPOED",
1162 		.prev_layer = SFC_FLOW_ITEM_L2,
1163 		.layer = SFC_FLOW_ITEM_L2,
1164 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1165 		.parse = sfc_flow_parse_pppoex,
1166 	},
1167 	{
1168 		.type = RTE_FLOW_ITEM_TYPE_PPPOES,
1169 		.name = "PPPOES",
1170 		.prev_layer = SFC_FLOW_ITEM_L2,
1171 		.layer = SFC_FLOW_ITEM_L2,
1172 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1173 		.parse = sfc_flow_parse_pppoex,
1174 	},
1175 	{
1176 		.type = RTE_FLOW_ITEM_TYPE_IPV4,
1177 		.name = "IPV4",
1178 		.prev_layer = SFC_FLOW_ITEM_L2,
1179 		.layer = SFC_FLOW_ITEM_L3,
1180 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1181 		.parse = sfc_flow_parse_ipv4,
1182 	},
1183 	{
1184 		.type = RTE_FLOW_ITEM_TYPE_IPV6,
1185 		.name = "IPV6",
1186 		.prev_layer = SFC_FLOW_ITEM_L2,
1187 		.layer = SFC_FLOW_ITEM_L3,
1188 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1189 		.parse = sfc_flow_parse_ipv6,
1190 	},
1191 	{
1192 		.type = RTE_FLOW_ITEM_TYPE_TCP,
1193 		.name = "TCP",
1194 		.prev_layer = SFC_FLOW_ITEM_L3,
1195 		.layer = SFC_FLOW_ITEM_L4,
1196 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1197 		.parse = sfc_flow_parse_tcp,
1198 	},
1199 	{
1200 		.type = RTE_FLOW_ITEM_TYPE_UDP,
1201 		.name = "UDP",
1202 		.prev_layer = SFC_FLOW_ITEM_L3,
1203 		.layer = SFC_FLOW_ITEM_L4,
1204 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1205 		.parse = sfc_flow_parse_udp,
1206 	},
1207 	{
1208 		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
1209 		.name = "VXLAN",
1210 		.prev_layer = SFC_FLOW_ITEM_L4,
1211 		.layer = SFC_FLOW_ITEM_START_LAYER,
1212 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1213 		.parse = sfc_flow_parse_vxlan,
1214 	},
1215 	{
1216 		.type = RTE_FLOW_ITEM_TYPE_GENEVE,
1217 		.name = "GENEVE",
1218 		.prev_layer = SFC_FLOW_ITEM_L4,
1219 		.layer = SFC_FLOW_ITEM_START_LAYER,
1220 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1221 		.parse = sfc_flow_parse_geneve,
1222 	},
1223 	{
1224 		.type = RTE_FLOW_ITEM_TYPE_NVGRE,
1225 		.name = "NVGRE",
1226 		.prev_layer = SFC_FLOW_ITEM_L3,
1227 		.layer = SFC_FLOW_ITEM_START_LAYER,
1228 		.ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1229 		.parse = sfc_flow_parse_nvgre,
1230 	},
1231 };
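
/*
 * For example, the layer ordering above accepts the pattern
 * ETH / IPV4 / UDP / VXLAN / ETH (VXLAN resets the layer to the start
 * so that an inner ETH item may follow), while ETH / TCP is rejected
 * because TCP requires a preceding L3 item.
 */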
1232 
1233 /*
1234  * Protocol-independent flow API support
1235  */
1236 static int
1237 sfc_flow_parse_attr(struct sfc_adapter *sa,
1238 		    const struct rte_flow_attr *attr,
1239 		    struct rte_flow *flow,
1240 		    struct rte_flow_error *error)
1241 {
1242 	struct sfc_flow_spec *spec = &flow->spec;
1243 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1244 	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
1245 	struct sfc_mae *mae = &sa->mae;
1246 
1247 	if (attr == NULL) {
1248 		rte_flow_error_set(error, EINVAL,
1249 				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1250 				   "NULL attribute");
1251 		return -rte_errno;
1252 	}
1253 	if (attr->group != 0) {
1254 		rte_flow_error_set(error, ENOTSUP,
1255 				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
1256 				   "Groups are not supported");
1257 		return -rte_errno;
1258 	}
1259 	if (attr->egress != 0 && attr->transfer == 0) {
1260 		rte_flow_error_set(error, ENOTSUP,
1261 				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
1262 				   "Egress is not supported");
1263 		return -rte_errno;
1264 	}
1265 	if (attr->ingress == 0 && attr->transfer == 0) {
1266 		rte_flow_error_set(error, ENOTSUP,
1267 				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
1268 				   "Ingress is compulsory");
1269 		return -rte_errno;
1270 	}
1271 	if (attr->transfer == 0) {
1272 		if (attr->priority != 0) {
1273 			rte_flow_error_set(error, ENOTSUP,
1274 					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1275 					   attr, "Priorities are unsupported");
1276 			return -rte_errno;
1277 		}
1278 		spec->type = SFC_FLOW_SPEC_FILTER;
1279 		spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX;
1280 		spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
1281 		spec_filter->template.efs_priority = EFX_FILTER_PRI_MANUAL;
1282 	} else {
1283 		if (mae->status != SFC_MAE_STATUS_ADMIN) {
1284 			rte_flow_error_set(error, ENOTSUP,
1285 					   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1286 					   attr, "Transfer is not supported");
1287 			return -rte_errno;
1288 		}
1289 		if (attr->priority > mae->nb_action_rule_prios_max) {
1290 			rte_flow_error_set(error, ENOTSUP,
1291 					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1292 					   attr, "Unsupported priority level");
1293 			return -rte_errno;
1294 		}
1295 		spec->type = SFC_FLOW_SPEC_MAE;
1296 		spec_mae->priority = attr->priority;
1297 		spec_mae->action_rule = NULL;
1298 	}
1299 
1300 	return 0;
1301 }
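
/*
 * For example, an attribute set with only "ingress" selects the VNIC
 * filter path (SFC_FLOW_SPEC_FILTER) and requires priority 0, whereas
 * "transfer" selects the MAE path (SFC_FLOW_SPEC_MAE), which is only
 * available on the MAE admin instance and allows priorities up to
 * mae->nb_action_rule_prios_max.
 */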
1302 
1303 /* Get item from array sfc_flow_items */
1304 static const struct sfc_flow_item *
1305 sfc_flow_get_item(const struct sfc_flow_item *items,
1306 		  unsigned int nb_items,
1307 		  enum rte_flow_item_type type)
1308 {
1309 	unsigned int i;
1310 
1311 	for (i = 0; i < nb_items; i++)
1312 		if (items[i].type == type)
1313 			return &items[i];
1314 
1315 	return NULL;
1316 }
1317 
1318 int
1319 sfc_flow_parse_pattern(struct sfc_adapter *sa,
1320 		       const struct sfc_flow_item *flow_items,
1321 		       unsigned int nb_flow_items,
1322 		       const struct rte_flow_item pattern[],
1323 		       struct sfc_flow_parse_ctx *parse_ctx,
1324 		       struct rte_flow_error *error)
1325 {
1326 	int rc;
1327 	unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
1328 	boolean_t is_ifrm = B_FALSE;
1329 	const struct sfc_flow_item *item;
1330 
1331 	if (pattern == NULL) {
1332 		rte_flow_error_set(error, EINVAL,
1333 				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
1334 				   "NULL pattern");
1335 		return -rte_errno;
1336 	}
1337 
1338 	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
1339 		item = sfc_flow_get_item(flow_items, nb_flow_items,
1340 					 pattern->type);
1341 		if (item == NULL) {
1342 			rte_flow_error_set(error, ENOTSUP,
1343 					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1344 					   "Unsupported pattern item");
1345 			return -rte_errno;
1346 		}
1347 
1348 		/*
1349 		 * Omitting one or several protocol layers at the beginning
1350 		 * of the pattern is supported
1351 		 */
1352 		if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
1353 		    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
1354 		    item->prev_layer != prev_layer) {
1355 			rte_flow_error_set(error, ENOTSUP,
1356 					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1357 					   "Unexpected sequence of pattern items");
1358 			return -rte_errno;
1359 		}
1360 
1361 		/*
1362 		 * Allow only VOID and ETH pattern items in the inner frame.
1363 		 * Also check that there is only one tunneling protocol.
1364 		 */
1365 		switch (item->type) {
1366 		case RTE_FLOW_ITEM_TYPE_VOID:
1367 		case RTE_FLOW_ITEM_TYPE_ETH:
1368 			break;
1369 
1370 		case RTE_FLOW_ITEM_TYPE_VXLAN:
1371 		case RTE_FLOW_ITEM_TYPE_GENEVE:
1372 		case RTE_FLOW_ITEM_TYPE_NVGRE:
1373 			if (is_ifrm) {
1374 				rte_flow_error_set(error, EINVAL,
1375 					RTE_FLOW_ERROR_TYPE_ITEM,
1376 					pattern,
1377 					"More than one tunneling protocol");
1378 				return -rte_errno;
1379 			}
1380 			is_ifrm = B_TRUE;
1381 			break;
1382 
1383 		default:
1384 			if (parse_ctx->type == SFC_FLOW_PARSE_CTX_FILTER &&
1385 			    is_ifrm) {
1386 				rte_flow_error_set(error, EINVAL,
1387 					RTE_FLOW_ERROR_TYPE_ITEM,
1388 					pattern,
1389 					"There is an unsupported pattern item "
1390 					"in the inner frame");
1391 				return -rte_errno;
1392 			}
1393 			break;
1394 		}
1395 
1396 		if (parse_ctx->type != item->ctx_type) {
1397 			rte_flow_error_set(error, EINVAL,
1398 					RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1399 					"Parse context type mismatch");
1400 			return -rte_errno;
1401 		}
1402 
1403 		rc = item->parse(pattern, parse_ctx, error);
1404 		if (rc != 0) {
1405 			sfc_err(sa, "failed to parse item %s: %s",
1406 				item->name, strerror(-rc));
1407 			return rc;
1408 		}
1409 
1410 		if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
1411 			prev_layer = item->layer;
1412 	}
1413 
1414 	return 0;
1415 }
1416 
1417 static int
1418 sfc_flow_parse_queue(struct sfc_adapter *sa,
1419 		     const struct rte_flow_action_queue *queue,
1420 		     struct rte_flow *flow)
1421 {
1422 	struct sfc_flow_spec *spec = &flow->spec;
1423 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1424 	struct sfc_rxq *rxq;
1425 	struct sfc_rxq_info *rxq_info;
1426 
1427 	if (queue->index >= sfc_sa2shared(sa)->ethdev_rxq_count)
1428 		return -EINVAL;
1429 
1430 	rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, queue->index);
1431 	spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index;
1432 
1433 	rxq_info = &sfc_sa2shared(sa)->rxq_info[queue->index];
1434 
1435 	if ((rxq_info->rxq_flags & SFC_RXQ_FLAG_RSS_HASH) != 0) {
1436 		struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
1437 		struct sfc_rss *ethdev_rss = &sas->rss;
1438 
1439 		spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX_RSS;
1440 		spec_filter->rss_ctx = &ethdev_rss->dummy_ctx;
1441 	}
1442 
1443 	return 0;
1444 }
1445 
1446 static int
1447 sfc_flow_parse_rss(struct sfc_adapter *sa,
1448 		   const struct rte_flow_action_rss *action_rss,
1449 		   struct rte_flow *flow)
1450 {
1451 	struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1452 	struct sfc_flow_rss_conf conf;
1453 	uint16_t sw_qid_min;
1454 	struct sfc_rxq *rxq;
1455 	int rc;
1456 
1457 	spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX_RSS;
1458 
1459 	rc = sfc_flow_rss_parse_conf(sa, action_rss, &conf, &sw_qid_min);
1460 	if (rc != 0)
1461 		return -rc;
1462 
1463 	rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, sw_qid_min);
1464 	spec_filter->template.efs_dmaq_id = rxq->hw_index;
1465 
1466 	spec_filter->rss_ctx = sfc_flow_rss_ctx_reuse(sa, &conf, sw_qid_min,
1467 						      action_rss->queue);
1468 	if (spec_filter->rss_ctx != NULL)
1469 		return 0;
1470 
1471 	rc = sfc_flow_rss_ctx_add(sa, &conf, sw_qid_min, action_rss->queue,
1472 				  &spec_filter->rss_ctx);
1473 	if (rc != 0)
1474 		return -rc;
1475 
1476 	return 0;
1477 }
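
/*
 * Note that if another flow already uses an identical RSS configuration
 * over the same queues, its context is reused (sfc_flow_rss_ctx_reuse());
 * otherwise a new context is added here and programmed to the NIC when
 * the flow is inserted (sfc_flow_filter_insert()).
 */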
1478 
1479 static int
1480 sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
1481 		    unsigned int filters_count)
1482 {
1483 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1484 	unsigned int i;
1485 	int ret = 0;
1486 
1487 	for (i = 0; i < filters_count; i++) {
1488 		int rc;
1489 
1490 		rc = efx_filter_remove(sa->nic, &spec_filter->filters[i]);
1491 		if (ret == 0 && rc != 0) {
1492 			sfc_err(sa, "failed to remove filter specification "
1493 				"(rc = %d)", rc);
1494 			ret = rc;
1495 		}
1496 	}
1497 
1498 	return ret;
1499 }
1500 
1501 static int
1502 sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1503 {
1504 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1505 	unsigned int i;
1506 	int rc = 0;
1507 
1508 	for (i = 0; i < spec_filter->count; i++) {
1509 		rc = efx_filter_insert(sa->nic, &spec_filter->filters[i]);
1510 		if (rc != 0) {
1511 			sfc_flow_spec_flush(sa, spec, i);
1512 			break;
1513 		}
1514 	}
1515 
1516 	return rc;
1517 }
1518 
1519 static int
1520 sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1521 {
1522 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1523 
1524 	return sfc_flow_spec_flush(sa, spec, spec_filter->count);
1525 }
1526 
1527 static int
1528 sfc_flow_filter_insert(struct sfc_adapter *sa,
1529 		       struct rte_flow *flow)
1530 {
1531 	struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1532 	struct sfc_flow_rss_ctx *rss_ctx = spec_filter->rss_ctx;
1533 	int rc = 0;
1534 
1535 	rc = sfc_flow_rss_ctx_program(sa, rss_ctx);
1536 	if (rc != 0)
1537 		goto fail_rss_ctx_program;
1538 
1539 	if (rss_ctx != NULL) {
1540 		unsigned int i;
1541 
1542 		/*
1543 		 * At this point, fully elaborated filter specifications
1544 		 * have been produced from the template. To make sure that
1545 		 * RSS behaviour is consistent between them, set the same
1546 		 * RSS context value everywhere.
1547 		 */
1548 		for (i = 0; i < spec_filter->count; i++) {
1549 			efx_filter_spec_t *spec = &spec_filter->filters[i];
1550 
1551 			spec->efs_rss_context = rss_ctx->nic_handle;
1552 		}
1553 	}
1554 
1555 	rc = sfc_flow_spec_insert(sa, &flow->spec);
1556 	if (rc != 0)
1557 		goto fail_filter_insert;
1558 
1559 	return 0;
1560 
1561 fail_filter_insert:
1562 	sfc_flow_rss_ctx_terminate(sa, rss_ctx);
1563 
1564 fail_rss_ctx_program:
1565 	return rc;
1566 }
1567 
1568 static int
1569 sfc_flow_filter_remove(struct sfc_adapter *sa,
1570 		       struct rte_flow *flow)
1571 {
1572 	struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1573 	int rc = 0;
1574 
1575 	rc = sfc_flow_spec_remove(sa, &flow->spec);
1576 	if (rc != 0)
1577 		return rc;
1578 
1579 	sfc_flow_rss_ctx_terminate(sa, spec_filter->rss_ctx);
1580 
1581 	return 0;
1582 }
1583 
1584 static int
1585 sfc_flow_parse_mark(struct sfc_adapter *sa,
1586 		    const struct rte_flow_action_mark *mark,
1587 		    struct rte_flow *flow)
1588 {
1589 	struct sfc_flow_spec *spec = &flow->spec;
1590 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1591 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
1592 	uint32_t mark_max;
1593 
1594 	mark_max = encp->enc_filter_action_mark_max;
1595 	if (sfc_ft_is_active(sa))
1596 		mark_max = RTE_MIN(mark_max, SFC_FT_USER_MARK_MASK);
1597 
1598 	if (mark == NULL || mark->id > mark_max)
1599 		return EINVAL;
1600 
1601 	spec_filter->template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
1602 	spec_filter->template.efs_mark = mark->id;
1603 
1604 	return 0;
1605 }
1606 
1607 static int
1608 sfc_flow_parse_actions(struct sfc_adapter *sa,
1609 		       const struct rte_flow_action actions[],
1610 		       struct rte_flow *flow,
1611 		       struct rte_flow_error *error)
1612 {
1613 	int rc;
1614 	struct sfc_flow_spec *spec = &flow->spec;
1615 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1616 	const unsigned int dp_rx_features = sa->priv.dp_rx->features;
1617 	const uint64_t rx_metadata = sa->negotiated_rx_metadata;
1618 	uint32_t actions_set = 0;
1619 	const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
1620 					   (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
1621 					   (1UL << RTE_FLOW_ACTION_TYPE_DROP);
1622 	const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) |
1623 					   (1UL << RTE_FLOW_ACTION_TYPE_FLAG);
1624 
1625 	if (actions == NULL) {
1626 		rte_flow_error_set(error, EINVAL,
1627 				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
1628 				   "NULL actions");
1629 		return -rte_errno;
1630 	}
1631 
1632 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1633 		switch (actions->type) {
1634 		case RTE_FLOW_ACTION_TYPE_VOID:
1635 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID,
1636 					       actions_set);
1637 			break;
1638 
1639 		case RTE_FLOW_ACTION_TYPE_QUEUE:
1640 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE,
1641 					       actions_set);
1642 			if ((actions_set & fate_actions_mask) != 0)
1643 				goto fail_fate_actions;
1644 
1645 			rc = sfc_flow_parse_queue(sa, actions->conf, flow);
1646 			if (rc != 0) {
1647 				rte_flow_error_set(error, EINVAL,
1648 					RTE_FLOW_ERROR_TYPE_ACTION, actions,
1649 					"Bad QUEUE action");
1650 				return -rte_errno;
1651 			}
1652 			break;
1653 
1654 		case RTE_FLOW_ACTION_TYPE_RSS:
1655 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS,
1656 					       actions_set);
1657 			if ((actions_set & fate_actions_mask) != 0)
1658 				goto fail_fate_actions;
1659 
1660 			rc = sfc_flow_parse_rss(sa, actions->conf, flow);
1661 			if (rc != 0) {
1662 				rte_flow_error_set(error, -rc,
1663 					RTE_FLOW_ERROR_TYPE_ACTION, actions,
1664 					"Bad RSS action");
1665 				return -rte_errno;
1666 			}
1667 			break;
1668 
1669 		case RTE_FLOW_ACTION_TYPE_DROP:
1670 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
1671 					       actions_set);
1672 			if ((actions_set & fate_actions_mask) != 0)
1673 				goto fail_fate_actions;
1674 
1675 			spec_filter->template.efs_dmaq_id =
1676 				EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1677 			break;
1678 
1679 		case RTE_FLOW_ACTION_TYPE_FLAG:
1680 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
1681 					       actions_set);
1682 			if ((actions_set & mark_actions_mask) != 0)
1683 				goto fail_actions_overlap;
1684 
1685 			if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) {
1686 				rte_flow_error_set(error, ENOTSUP,
1687 					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1688 					"FLAG action is not supported on the current Rx datapath");
1689 				return -rte_errno;
1690 			} else if ((rx_metadata &
1691 				    RTE_ETH_RX_METADATA_USER_FLAG) == 0) {
1692 				rte_flow_error_set(error, ENOTSUP,
1693 					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1694 					"flag delivery has not been negotiated");
1695 				return -rte_errno;
1696 			}
1697 
1698 			spec_filter->template.efs_flags |=
1699 				EFX_FILTER_FLAG_ACTION_FLAG;
1700 			break;
1701 
1702 		case RTE_FLOW_ACTION_TYPE_MARK:
1703 			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
1704 					       actions_set);
1705 			if ((actions_set & mark_actions_mask) != 0)
1706 				goto fail_actions_overlap;
1707 
1708 			if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) {
1709 				rte_flow_error_set(error, ENOTSUP,
1710 					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1711 					"MARK action is not supported on the current Rx datapath");
1712 				return -rte_errno;
1713 			} else if ((rx_metadata &
1714 				    RTE_ETH_RX_METADATA_USER_MARK) == 0) {
1715 				rte_flow_error_set(error, ENOTSUP,
1716 					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1717 					"mark delivery has not been negotiated");
1718 				return -rte_errno;
1719 			}
1720 
1721 			rc = sfc_flow_parse_mark(sa, actions->conf, flow);
1722 			if (rc != 0) {
1723 				rte_flow_error_set(error, rc,
1724 					RTE_FLOW_ERROR_TYPE_ACTION, actions,
1725 					"Bad MARK action");
1726 				return -rte_errno;
1727 			}
1728 			break;
1729 
1730 		default:
1731 			rte_flow_error_set(error, ENOTSUP,
1732 					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
1733 					   "Action is not supported");
1734 			return -rte_errno;
1735 		}
1736 
1737 		actions_set |= (1UL << actions->type);
1738 	}
1739 
1740 	/* When fate is unknown, drop traffic. */
1741 	if ((actions_set & fate_actions_mask) == 0) {
1742 		spec_filter->template.efs_dmaq_id =
1743 			EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1744 	}
1745 
1746 	return 0;
1747 
1748 fail_fate_actions:
1749 	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1750 			   "Cannot combine several fate-deciding actions, "
1751 			   "choose between QUEUE, RSS or DROP");
1752 	return -rte_errno;
1753 
1754 fail_actions_overlap:
1755 	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1756 			   "Overlapping actions are not supported");
1757 	return -rte_errno;
1758 }
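
/*
 * For example, combining a QUEUE action with a MARK action is accepted,
 * whereas combining QUEUE with RSS or DROP fails ("Cannot combine several
 * fate-deciding actions"), and combining MARK with FLAG fails as
 * overlapping actions.
 */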
1759 
1760 /**
1761  * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
1762  * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same
1763  * specifications after copying.
1764  *
1765  * @param spec[in, out]
1766  *   SFC flow specification to update.
1767  * @param filters_count_for_one_val[in]
1768  *   How many specifications should have the same match flag; this is the
1769  *   number of specifications before copying.
1770  * @param error[out]
1771  *   Perform verbose error reporting if not NULL.
1772  */
1773 static int
1774 sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
1775 			       unsigned int filters_count_for_one_val,
1776 			       struct rte_flow_error *error)
1777 {
1778 	unsigned int i;
1779 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1780 	static const efx_filter_match_flags_t vals[] = {
1781 		EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
1782 		EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
1783 	};
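	/*
	 * Copies are laid out in groups of filters_count_for_one_val
	 * specifications: e.g. with filters_count_for_one_val == 2 and
	 * spec_filter->count == 4, filters [0..1] take
	 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST and filters [2..3] take
	 * EFX_FILTER_MATCH_UNKNOWN_MCAST_DST.
	 */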
1784 
1785 	if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1786 		rte_flow_error_set(error, EINVAL,
1787 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1788 			"Number of specifications is incorrect while copying "
1789 			"by unknown destination flags");
1790 		return -rte_errno;
1791 	}
1792 
1793 	for (i = 0; i < spec_filter->count; i++) {
1794 		/* The check above ensures that divisor can't be zero here */
1795 		spec_filter->filters[i].efs_match_flags |=
1796 			vals[i / filters_count_for_one_val];
1797 	}
1798 
1799 	return 0;
1800 }
1801 
1802 /**
1803  * Check that the following condition is met:
1804  * - the list of supported filters has a filter
1805  *   with EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of
1806  *   EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also
1807  *   be inserted.
1808  *
1809  * @param match[in]
1810  *   The match flags of filter.
1811  * @param spec[in]
1812  *   Specification to be supplemented.
1813  * @param filter[in]
1814  *   SFC filter with list of supported filters.
1815  */
1816 static boolean_t
1817 sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
1818 				 __rte_unused efx_filter_spec_t *spec,
1819 				 struct sfc_filter *filter)
1820 {
1821 	unsigned int i;
1822 	efx_filter_match_flags_t match_mcast_dst;
1823 
1824 	match_mcast_dst =
1825 		(match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
1826 		EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
1827 	for (i = 0; i < filter->supported_match_num; i++) {
1828 		if (match_mcast_dst == filter->supported_match[i])
1829 			return B_TRUE;
1830 	}
1831 
1832 	return B_FALSE;
1833 }
1834 
1835 /**
1836  * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and
1837  * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same
1838  * specifications after copying.
1839  *
1840  * @param spec[in, out]
1841  *   SFC flow specification to update.
1842  * @param filters_count_for_one_val[in]
1843  *   How many specifications should have the same EtherType value; this is
1844  *   the number of specifications before copying.
1845  * @param error[out]
1846  *   Perform verbose error reporting if not NULL.
1847  */
1848 static int
1849 sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
1850 			unsigned int filters_count_for_one_val,
1851 			struct rte_flow_error *error)
1852 {
1853 	unsigned int i;
1854 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1855 	static const uint16_t vals[] = {
1856 		EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
1857 	};
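	/*
	 * Each copy pins efs_ether_type to one of the two values, so the
	 * resulting filters together cover both IPv4 and IPv6.
	 */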
1858 
1859 	if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1860 		rte_flow_error_set(error, EINVAL,
1861 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1862 			"Number of specifications is incorrect "
1863 			"while copying by Ethertype");
1864 		return -rte_errno;
1865 	}
1866 
1867 	for (i = 0; i < spec_filter->count; i++) {
1868 		spec_filter->filters[i].efs_match_flags |=
1869 			EFX_FILTER_MATCH_ETHER_TYPE;
1870 
1871 		/*
1872 		 * The check above ensures that
1873 		 * filters_count_for_one_val is not 0
1874 		 */
1875 		spec_filter->filters[i].efs_ether_type =
1876 			vals[i / filters_count_for_one_val];
1877 	}
1878 
1879 	return 0;
1880 }
1881 
1882 /**
1883  * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0
1884  * in the same specifications after copying.
1885  *
1886  * @param spec[in, out]
1887  *   SFC flow specification to update.
1888  * @param filters_count_for_one_val[in]
1889  *   How many specifications should have the same match flag; this is the
1890  *   number of specifications before copying.
1891  * @param error[out]
1892  *   Perform verbose error reporting if not NULL.
1893  */
1894 static int
1895 sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec,
1896 			    unsigned int filters_count_for_one_val,
1897 			    struct rte_flow_error *error)
1898 {
1899 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1900 	unsigned int i;
1901 
1902 	if (filters_count_for_one_val != spec_filter->count) {
1903 		rte_flow_error_set(error, EINVAL,
1904 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1905 			"Number of specifications is incorrect "
1906 			"while copying by outer VLAN ID");
1907 		return -rte_errno;
1908 	}
1909 
1910 	for (i = 0; i < spec_filter->count; i++) {
1911 		spec_filter->filters[i].efs_match_flags |=
1912 			EFX_FILTER_MATCH_OUTER_VID;
1913 
1914 		spec_filter->filters[i].efs_outer_vid = 0;
1915 	}
1916 
1917 	return 0;
1918 }
1919 
1920 /**
1921  * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
1922  * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same
1923  * specifications after copying.
1924  *
1925  * @param spec[in, out]
1926  *   SFC flow specification to update.
1927  * @param filters_count_for_one_val[in]
1928  *   How many specifications should have the same match flag; this is the
1929  *   number of specifications before copying.
1930  * @param error[out]
1931  *   Perform verbose error reporting if not NULL.
1932  */
1933 static int
1934 sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
1935 				    unsigned int filters_count_for_one_val,
1936 				    struct rte_flow_error *error)
1937 {
1938 	unsigned int i;
1939 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1940 	static const efx_filter_match_flags_t vals[] = {
1941 		EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
1942 		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
1943 	};
1944 
1945 	if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1946 		rte_flow_error_set(error, EINVAL,
1947 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1948 			"Number of specifications is incorrect while copying "
1949 			"by inner frame unknown destination flags");
1950 		return -rte_errno;
1951 	}
1952 
1953 	for (i = 0; i < spec_filter->count; i++) {
1954 		/* The check above ensures that divisor can't be zero here */
1955 		spec_filter->filters[i].efs_match_flags |=
1956 			vals[i / filters_count_for_one_val];
1957 	}
1958 
1959 	return 0;
1960 }
1961 
1962 /**
1963  * Check that the following conditions are met:
1964  * - the specification corresponds to a filter for encapsulated traffic
1965  * - the list of supported filters has a filter
1966  *   with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
1967  *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
1968  *   be inserted.
1969  *
1970  * @param match[in]
1971  *   The match flags of filter.
1972  * @param spec[in]
1973  *   Specification to be supplemented.
1974  * @param filter[in]
1975  *   SFC filter with list of supported filters.
1976  */
1977 static boolean_t
1978 sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
1979 				      efx_filter_spec_t *spec,
1980 				      struct sfc_filter *filter)
1981 {
1982 	unsigned int i;
1983 	efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
1984 	efx_filter_match_flags_t match_mcast_dst;
1985 
1986 	if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
1987 		return B_FALSE;
1988 
1989 	match_mcast_dst =
1990 		(match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
1991 		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
1992 	for (i = 0; i < filter->supported_match_num; i++) {
1993 		if (match_mcast_dst == filter->supported_match[i])
1994 			return B_TRUE;
1995 	}
1996 
1997 	return B_FALSE;
1998 }
1999 
2000 /**
2001  * Check whether the EFX_FILTER_MATCH_OUTER_VID flag has to be added:
2002  * if the list of supported filters has a filter that differs from @p match
2003  * only in that it has no EFX_FILTER_MATCH_OUTER_VID flag, that filter
2004  * will be used instead and the flag is not needed (B_FALSE is returned).
2005  *
2006  * @param match[in]
2007  *   The match flags of filter.
2008  * @param spec[in]
2009  *   Specification to be supplemented.
2010  * @param filter[in]
2011  *   SFC filter with list of supported filters.
2012  */
2013 static boolean_t
2014 sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match,
2015 			      __rte_unused efx_filter_spec_t *spec,
2016 			      struct sfc_filter *filter)
2017 {
2018 	unsigned int i;
2019 	efx_filter_match_flags_t match_without_vid =
2020 		match & ~EFX_FILTER_MATCH_OUTER_VID;
2021 
2022 	for (i = 0; i < filter->supported_match_num; i++) {
2023 		if (match_without_vid == filter->supported_match[i])
2024 			return B_FALSE;
2025 	}
2026 
2027 	return B_TRUE;
2028 }
2029 
2030 /*
2031  * Match flags that can be automatically added to filters.
2032  * Selecting the last minimum when searching for a supported filter ensures
2033  * that the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has a higher priority
2034  * than EFX_FILTER_MATCH_ETHER_TYPE. This is because filters with
2035  * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST are at the end of the list of
2036  * supported filters.
2037  */
2038 static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
2039 	{
2040 		.flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
2041 		.vals_count = 2,
2042 		.set_vals = sfc_flow_set_unknown_dst_flags,
2043 		.spec_check = sfc_flow_check_unknown_dst_flags,
2044 	},
2045 	{
2046 		.flag = EFX_FILTER_MATCH_ETHER_TYPE,
2047 		.vals_count = 2,
2048 		.set_vals = sfc_flow_set_ethertypes,
2049 		.spec_check = NULL,
2050 	},
2051 	{
2052 		.flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
2053 		.vals_count = 2,
2054 		.set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
2055 		.spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
2056 	},
2057 	{
2058 		.flag = EFX_FILTER_MATCH_OUTER_VID,
2059 		.vals_count = 1,
2060 		.set_vals = sfc_flow_set_outer_vid_flag,
2061 		.spec_check = sfc_flow_check_outer_vid_flag,
2062 	},
2063 };
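/*
 * For example, per the vals_count fields above, a specification that lacks
 * both EFX_FILTER_MATCH_UNKNOWN_UCAST_DST and EFX_FILTER_MATCH_ETHER_TYPE
 * relative to a supported filter is completed with 2 * 2 == 4 copies:
 * two unknown destination variants multiplied by two EtherType values
 * (provided the spec_check callbacks above accept the completion).
 */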
2064 
2065 /* Get item from array sfc_flow_copy_flags */
2066 static const struct sfc_flow_copy_flag *
2067 sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
2068 {
2069 	unsigned int i;
2070 
2071 	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2072 		if (sfc_flow_copy_flags[i].flag == flag)
2073 			return &sfc_flow_copy_flags[i];
2074 	}
2075 
2076 	return NULL;
2077 }
2078 
2079 /**
2080  * Make copies of the specifications, set match flag and values
2081  * of the field that corresponds to it.
2082  *
2083  * @param spec[in, out]
2084  *   SFC flow specification to update.
2085  * @param flag[in]
2086  *   The match flag to add.
2087  * @param error[out]
2088  *   Perform verbose error reporting if not NULL.
2089  */
2090 static int
2091 sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
2092 			     efx_filter_match_flags_t flag,
2093 			     struct rte_flow_error *error)
2094 {
2095 	unsigned int i;
2096 	unsigned int new_filters_count;
2097 	unsigned int filters_count_for_one_val;
2098 	const struct sfc_flow_copy_flag *copy_flag;
2099 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2100 	int rc;
2101 
2102 	copy_flag = sfc_flow_get_copy_flag(flag);
2103 	if (copy_flag == NULL) {
2104 		rte_flow_error_set(error, ENOTSUP,
2105 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2106 				   "Unsupported spec field for copying");
2107 		return -rte_errno;
2108 	}
2109 
2110 	new_filters_count = spec_filter->count * copy_flag->vals_count;
2111 	if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
2112 		rte_flow_error_set(error, EINVAL,
2113 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2114 			"Too many EFX specifications in the flow rule");
2115 		return -rte_errno;
2116 	}
2117 
2118 	/* Copy filters specifications */
2119 	for (i = spec_filter->count; i < new_filters_count; i++) {
2120 		spec_filter->filters[i] =
2121 			spec_filter->filters[i - spec_filter->count];
2122 	}
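	/*
	 * Filters [spec_filter->count .. new_filters_count - 1] are now
	 * verbatim copies of the original ones; the set_vals() callback
	 * below differentiates the groups by the flag-specific field values.
	 */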
2123 
2124 	filters_count_for_one_val = spec_filter->count;
2125 	spec_filter->count = new_filters_count;
2126 
2127 	rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
2128 	if (rc != 0)
2129 		return rc;
2130 
2131 	return 0;
2132 }
2133 
2134 /**
2135  * Check that the given set of match flags missing in the original filter spec
2136  * could be covered by adding spec copies which specify the corresponding
2137  * flags and packet field values to match.
2138  *
2139  * @param miss_flags[in]
2140  *   Flags missing from the specification but required by the supported filter.
2141  * @param spec[in]
2142  *   Specification to be supplemented.
2143  * @param filter[in]
2144  *   SFC filter.
2145  *
2146  * @return
2147  *   Number of specifications after copying, or 0 if the flags cannot be added.
2148  */
2149 static unsigned int
2150 sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
2151 			     efx_filter_spec_t *spec,
2152 			     struct sfc_filter *filter)
2153 {
2154 	unsigned int i;
2155 	efx_filter_match_flags_t copy_flags = 0;
2156 	efx_filter_match_flags_t flag;
2157 	efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
2158 	sfc_flow_spec_check *check;
2159 	unsigned int multiplier = 1;
2160 
2161 	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2162 		flag = sfc_flow_copy_flags[i].flag;
2163 		check = sfc_flow_copy_flags[i].spec_check;
2164 		if ((flag & miss_flags) == flag) {
2165 			if (check != NULL && (!check(match, spec, filter)))
2166 				continue;
2167 
2168 			copy_flags |= flag;
2169 			multiplier *= sfc_flow_copy_flags[i].vals_count;
2170 		}
2171 	}
2172 
2173 	if (copy_flags == miss_flags)
2174 		return multiplier;
2175 
2176 	return 0;
2177 }
2178 
2179 /**
2180  * Attempt to supplement the specification template with the minimally
2181  * supported set of match flags. To do this, the specifications are
2182  * copied and filled with the values of the fields that correspond
2183  * to the missing flags.
2184  * The necessary and sufficient filter is built from the fewest
2185  * copies that could be made to cover the minimally required set
2186  * of flags.
2187  *
2188  * @param sa[in]
2189  *   SFC adapter.
2190  * @param spec[in, out]
2191  *   SFC flow specification to update.
2192  * @param error[out]
2193  *   Perform verbose error reporting if not NULL.
2194  */
2195 static int
2196 sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
2197 			       struct sfc_flow_spec *spec,
2198 			       struct rte_flow_error *error)
2199 {
2200 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2201 	struct sfc_filter *filter = &sa->filter;
2202 	efx_filter_match_flags_t miss_flags;
2203 	efx_filter_match_flags_t min_miss_flags = 0;
2204 	efx_filter_match_flags_t match;
2205 	unsigned int min_multiplier = UINT_MAX;
2206 	unsigned int multiplier;
2207 	unsigned int i;
2208 	int rc;
2209 
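	/*
	 * Search the supported match sets that are supersets of the template
	 * flags for the one reachable with the fewest specification copies.
	 * The '<=' comparison keeps the last of equally cheap candidates,
	 * which gives EFX_FILTER_MATCH_UNKNOWN_UCAST_DST completion priority
	 * over EFX_FILTER_MATCH_ETHER_TYPE (see the comment above
	 * sfc_flow_copy_flags).
	 */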
2210 	match = spec_filter->template.efs_match_flags;
2211 	for (i = 0; i < filter->supported_match_num; i++) {
2212 		if ((match & filter->supported_match[i]) == match) {
2213 			miss_flags = filter->supported_match[i] & (~match);
2214 			multiplier = sfc_flow_check_missing_flags(miss_flags,
2215 				&spec_filter->template, filter);
2216 			if (multiplier > 0) {
2217 				if (multiplier <= min_multiplier) {
2218 					min_multiplier = multiplier;
2219 					min_miss_flags = miss_flags;
2220 				}
2221 			}
2222 		}
2223 	}
2224 
2225 	if (min_multiplier == UINT_MAX) {
2226 		rte_flow_error_set(error, ENOTSUP,
2227 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2228 				   "The flow rule pattern is unsupported");
2229 		return -rte_errno;
2230 	}
2231 
2232 	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2233 		efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;
2234 
2235 		if ((flag & min_miss_flags) == flag) {
2236 			rc = sfc_flow_spec_add_match_flag(spec, flag, error);
2237 			if (rc != 0)
2238 				return rc;
2239 		}
2240 	}
2241 
2242 	return 0;
2243 }
2244 
2245 /**
2246  * Check whether a set of match flags corresponds to a filter. The filter is
2247  * described by its match flags, optionally extended with the OUTER_VID
2248  * and INNER_VID flags.
2249  *
2250  * @param match_flags[in]
2251  *   Set of match flags.
2252  * @param flags_pattern[in]
2253  *   Pattern of filter match flags.
2254  */
2255 static boolean_t
2256 sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
2257 			    efx_filter_match_flags_t flags_pattern)
2258 {
2259 	if ((match_flags & flags_pattern) != flags_pattern)
2260 		return B_FALSE;
2261 
2262 	switch (match_flags & ~flags_pattern) {
2263 	case 0:
2264 	case EFX_FILTER_MATCH_OUTER_VID:
2265 	case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
2266 		return B_TRUE;
2267 	default:
2268 		return B_FALSE;
2269 	}
2270 }
2271 
2272 /**
2273  * Check whether the spec maps to a hardware filter which is known to be
2274  * ineffective despite being valid.
2275  *
2276  * @param filter[in]
2277  *   SFC filter with list of supported filters.
2278  * @param spec[in]
2279  *   SFC flow specification.
2280  */
2281 static boolean_t
2282 sfc_flow_is_match_flags_exception(struct sfc_filter *filter,
2283 				  struct sfc_flow_spec *spec)
2284 {
2285 	unsigned int i;
2286 	uint16_t ether_type;
2287 	uint8_t ip_proto;
2288 	efx_filter_match_flags_t match_flags;
2289 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2290 
2291 	for (i = 0; i < spec_filter->count; i++) {
2292 		match_flags = spec_filter->filters[i].efs_match_flags;
2293 
2294 		if (sfc_flow_is_match_with_vids(match_flags,
2295 						EFX_FILTER_MATCH_ETHER_TYPE) ||
2296 		    sfc_flow_is_match_with_vids(match_flags,
2297 						EFX_FILTER_MATCH_ETHER_TYPE |
2298 						EFX_FILTER_MATCH_LOC_MAC)) {
2299 			ether_type = spec_filter->filters[i].efs_ether_type;
2300 			if (filter->supports_ip_proto_or_addr_filter &&
2301 			    (ether_type == EFX_ETHER_TYPE_IPV4 ||
2302 			     ether_type == EFX_ETHER_TYPE_IPV6))
2303 				return B_TRUE;
2304 		} else if (sfc_flow_is_match_with_vids(match_flags,
2305 				EFX_FILTER_MATCH_ETHER_TYPE |
2306 				EFX_FILTER_MATCH_IP_PROTO) ||
2307 			   sfc_flow_is_match_with_vids(match_flags,
2308 				EFX_FILTER_MATCH_ETHER_TYPE |
2309 				EFX_FILTER_MATCH_IP_PROTO |
2310 				EFX_FILTER_MATCH_LOC_MAC)) {
2311 			ip_proto = spec_filter->filters[i].efs_ip_proto;
2312 			if (filter->supports_rem_or_local_port_filter &&
2313 			    (ip_proto == EFX_IPPROTO_TCP ||
2314 			     ip_proto == EFX_IPPROTO_UDP))
2315 				return B_TRUE;
2316 		}
2317 	}
2318 
2319 	return B_FALSE;
2320 }
2321 
2322 static int
2323 sfc_flow_validate_match_flags(struct sfc_adapter *sa,
2324 			      struct rte_flow *flow,
2325 			      struct rte_flow_error *error)
2326 {
2327 	struct sfc_flow_spec *spec = &flow->spec;
2328 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2329 	efx_filter_spec_t *spec_tmpl = &spec_filter->template;
2330 	efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
2331 	int rc;
2332 
2333 	/* Initialize the first filter spec with template */
2334 	spec_filter->filters[0] = *spec_tmpl;
2335 	spec_filter->count = 1;
2336 
2337 	if (!sfc_filter_is_match_supported(sa, match_flags)) {
2338 		rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
2339 		if (rc != 0)
2340 			return rc;
2341 	}
2342 
2343 	if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) {
2344 		rte_flow_error_set(error, ENOTSUP,
2345 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2346 			"The flow rule pattern is unsupported");
2347 		return -rte_errno;
2348 	}
2349 
2350 	return 0;
2351 }
2352 
2353 static int
2354 sfc_flow_parse_rte_to_filter(struct rte_eth_dev *dev,
2355 			     const struct rte_flow_item pattern[],
2356 			     const struct rte_flow_action actions[],
2357 			     struct rte_flow *flow,
2358 			     struct rte_flow_error *error)
2359 {
2360 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2361 	struct sfc_flow_spec *spec = &flow->spec;
2362 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2363 	struct sfc_flow_parse_ctx ctx;
2364 	int rc;
2365 
2366 	ctx.type = SFC_FLOW_PARSE_CTX_FILTER;
2367 	ctx.filter = &spec_filter->template;
2368 
2369 	rc = sfc_flow_parse_pattern(sa, sfc_flow_items, RTE_DIM(sfc_flow_items),
2370 				    pattern, &ctx, error);
2371 	if (rc != 0)
2372 		goto fail_bad_value;
2373 
2374 	rc = sfc_flow_parse_actions(sa, actions, flow, error);
2375 	if (rc != 0)
2376 		goto fail_bad_value;
2377 
2378 	rc = sfc_flow_validate_match_flags(sa, flow, error);
2379 	if (rc != 0)
2380 		goto fail_bad_value;
2381 
2382 	return 0;
2383 
2384 fail_bad_value:
2385 	return rc;
2386 }
2387 
2388 static int
2389 sfc_flow_parse_rte_to_mae(struct rte_eth_dev *dev,
2390 			  const struct rte_flow_item pattern[],
2391 			  const struct rte_flow_action actions[],
2392 			  struct rte_flow *flow,
2393 			  struct rte_flow_error *error)
2394 {
2395 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2396 
2397 	return sfc_mae_rule_parse(sa, pattern, actions, flow, error);
2398 }
2399 
2400 static int
2401 sfc_flow_parse(struct rte_eth_dev *dev,
2402 	       const struct rte_flow_attr *attr,
2403 	       const struct rte_flow_item pattern[],
2404 	       const struct rte_flow_action actions[],
2405 	       struct rte_flow *flow,
2406 	       struct rte_flow_error *error)
2407 {
2408 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2409 	const struct sfc_flow_ops_by_spec *ops;
2410 	int rc;
2411 
2412 	rc = sfc_flow_parse_attr(sa, attr, flow, error);
2413 	if (rc != 0)
2414 		return rc;
2415 
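	/*
	 * The backend (VNIC filter or MAE) is selected by the specification
	 * type set up while parsing the attributes above.
	 */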
2416 	ops = sfc_flow_get_ops_by_spec(flow);
2417 	if (ops == NULL || ops->parse == NULL) {
2418 		rte_flow_error_set(error, ENOTSUP,
2419 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2420 				   "No backend to handle this flow");
2421 		return -rte_errno;
2422 	}
2423 
2424 	return ops->parse(dev, pattern, actions, flow, error);
2425 }
2426 
2427 static struct rte_flow *
2428 sfc_flow_zmalloc(struct rte_flow_error *error)
2429 {
2430 	struct rte_flow *flow;
2431 
2432 	flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
2433 	if (flow == NULL) {
2434 		rte_flow_error_set(error, ENOMEM,
2435 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2436 				   "Failed to allocate memory");
2437 	}
2438 
2439 	return flow;
2440 }
2441 
2442 static void
2443 sfc_flow_free(struct sfc_adapter *sa, struct rte_flow *flow)
2444 {
2445 	const struct sfc_flow_ops_by_spec *ops;
2446 
2447 	ops = sfc_flow_get_ops_by_spec(flow);
2448 	if (ops != NULL && ops->cleanup != NULL)
2449 		ops->cleanup(sa, flow);
2450 
2451 	rte_free(flow);
2452 }
2453 
2454 static int
2455 sfc_flow_insert(struct sfc_adapter *sa, struct rte_flow *flow,
2456 		struct rte_flow_error *error)
2457 {
2458 	const struct sfc_flow_ops_by_spec *ops;
2459 	int rc;
2460 
2461 	ops = sfc_flow_get_ops_by_spec(flow);
2462 	if (ops == NULL || ops->insert == NULL) {
2463 		rte_flow_error_set(error, ENOTSUP,
2464 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2465 				   "No backend to handle this flow");
2466 		return rte_errno;
2467 	}
2468 
2469 	rc = ops->insert(sa, flow);
2470 	if (rc != 0) {
2471 		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2472 				   NULL, "Failed to insert the flow rule");
2473 	}
2474 
2475 	return rc;
2476 }
2477 
2478 static int
2479 sfc_flow_remove(struct sfc_adapter *sa, struct rte_flow *flow,
2480 		struct rte_flow_error *error)
2481 {
2482 	const struct sfc_flow_ops_by_spec *ops;
2483 	int rc;
2484 
2485 	ops = sfc_flow_get_ops_by_spec(flow);
2486 	if (ops == NULL || ops->remove == NULL) {
2487 		rte_flow_error_set(error, ENOTSUP,
2488 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2489 				   "No backend to handle this flow");
2490 		return rte_errno;
2491 	}
2492 
2493 	rc = ops->remove(sa, flow);
2494 	if (rc != 0) {
2495 		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2496 				   NULL, "Failed to remove the flow rule");
2497 	}
2498 
2499 	return rc;
2500 }
2501 
2502 static int
2503 sfc_flow_verify(struct sfc_adapter *sa, struct rte_flow *flow,
2504 		struct rte_flow_error *error)
2505 {
2506 	const struct sfc_flow_ops_by_spec *ops;
2507 	int rc = 0;
2508 
2509 	ops = sfc_flow_get_ops_by_spec(flow);
2510 	if (ops == NULL) {
2511 		rte_flow_error_set(error, ENOTSUP,
2512 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2513 				   "No backend to handle this flow");
2514 		return -rte_errno;
2515 	}
2516 
2517 	if (ops->verify != NULL) {
2518 		SFC_ASSERT(sfc_adapter_is_locked(sa));
2519 		rc = ops->verify(sa, flow);
2520 	}
2521 
2522 	if (rc != 0) {
2523 		rte_flow_error_set(error, rc,
2524 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2525 			"Failed to verify flow validity with FW");
2526 		return -rte_errno;
2527 	}
2528 
2529 	return 0;
2530 }
2531 
2532 static int
2533 sfc_flow_validate(struct rte_eth_dev *dev,
2534 		  const struct rte_flow_attr *attr,
2535 		  const struct rte_flow_item pattern[],
2536 		  const struct rte_flow_action actions[],
2537 		  struct rte_flow_error *error)
2538 {
2539 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2540 	struct rte_flow *flow;
2541 	int rc;
2542 
2543 	flow = sfc_flow_zmalloc(error);
2544 	if (flow == NULL)
2545 		return -rte_errno;
2546 
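	/*
	 * Validation uses a temporary flow object: the rule is parsed and,
	 * where the backend provides a verify callback, checked with the FW,
	 * and the object is then freed without being inserted.
	 */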
2547 	sfc_adapter_lock(sa);
2548 
2549 	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2550 	if (rc == 0)
2551 		rc = sfc_flow_verify(sa, flow, error);
2552 
2553 	sfc_flow_free(sa, flow);
2554 
2555 	sfc_adapter_unlock(sa);
2556 
2557 	return rc;
2558 }
2559 
2560 static struct rte_flow *
2561 sfc_flow_create(struct rte_eth_dev *dev,
2562 		const struct rte_flow_attr *attr,
2563 		const struct rte_flow_item pattern[],
2564 		const struct rte_flow_action actions[],
2565 		struct rte_flow_error *error)
2566 {
2567 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2568 	struct rte_flow *flow;
2569 
2570 	sfc_adapter_lock(sa);
2571 	flow = sfc_flow_create_locked(sa, false, attr, pattern, actions, error);
2572 	sfc_adapter_unlock(sa);
2573 
2574 	return flow;
2575 }
2576 
2577 struct rte_flow *
2578 sfc_flow_create_locked(struct sfc_adapter *sa, bool internal,
2579 		       const struct rte_flow_attr *attr,
2580 		       const struct rte_flow_item pattern[],
2581 		       const struct rte_flow_action actions[],
2582 		       struct rte_flow_error *error)
2583 {
2584 	struct rte_flow *flow = NULL;
2585 	int rc;
2586 
2587 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2588 
2589 	flow = sfc_flow_zmalloc(error);
2590 	if (flow == NULL)
2591 		goto fail_no_mem;
2592 
2593 	flow->internal = internal;
2594 
2595 	rc = sfc_flow_parse(sa->eth_dev, attr, pattern, actions, flow, error);
2596 	if (rc != 0)
2597 		goto fail_bad_value;
2598 
2599 	TAILQ_INSERT_TAIL(&sa->flow_list, flow, entries);
2600 
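	/*
	 * Internal flows are inserted into the hardware right away; other
	 * flows are inserted only while the adapter is started and are
	 * otherwise programmed later by sfc_flow_start().
	 */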
2601 	if (flow->internal || sa->state == SFC_ETHDEV_STARTED) {
2602 		rc = sfc_flow_insert(sa, flow, error);
2603 		if (rc != 0)
2604 			goto fail_flow_insert;
2605 	}
2606 
2607 	return flow;
2608 
2609 fail_flow_insert:
2610 	TAILQ_REMOVE(&sa->flow_list, flow, entries);
2611 
2612 fail_bad_value:
2613 	sfc_flow_free(sa, flow);
2614 
2615 fail_no_mem:
2616 	return NULL;
2617 }
2618 
2619 static int
2620 sfc_flow_destroy(struct rte_eth_dev *dev,
2621 		 struct rte_flow *flow,
2622 		 struct rte_flow_error *error)
2623 {
2624 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2625 	int rc;
2626 
2627 	sfc_adapter_lock(sa);
2628 	rc = sfc_flow_destroy_locked(sa, flow, error);
2629 	sfc_adapter_unlock(sa);
2630 
2631 	return rc;
2632 }
2633 
2634 int
2635 sfc_flow_destroy_locked(struct sfc_adapter *sa, struct rte_flow *flow,
2636 			struct rte_flow_error *error)
2637 {
2638 	struct rte_flow *flow_ptr;
2639 	int rc = EINVAL;
2640 
2641 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2642 
2643 	TAILQ_FOREACH(flow_ptr, &sa->flow_list, entries) {
2644 		if (flow_ptr == flow)
2645 			rc = 0;
2646 	}
2647 	if (rc != 0) {
2648 		rte_flow_error_set(error, rc,
2649 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2650 				   "Failed to find flow rule to destroy");
2651 		goto fail_bad_value;
2652 	}
2653 
2654 	if (flow->internal || sa->state == SFC_ETHDEV_STARTED)
2655 		rc = sfc_flow_remove(sa, flow, error);
2656 
2657 	TAILQ_REMOVE(&sa->flow_list, flow, entries);
2658 	sfc_flow_free(sa, flow);
2659 
2660 fail_bad_value:
2661 	return -rc;
2662 }
2663 
2664 static int
2665 sfc_flow_flush(struct rte_eth_dev *dev,
2666 	       struct rte_flow_error *error)
2667 {
2668 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2669 	struct rte_flow *flow;
2670 	int ret = 0;
2671 	void *tmp;
2672 
2673 	sfc_adapter_lock(sa);
2674 
2675 	RTE_TAILQ_FOREACH_SAFE(flow, &sa->flow_list, entries, tmp) {
2676 		if (flow->internal)
2677 			continue;
2678 
2679 		if (sa->state == SFC_ETHDEV_STARTED) {
2680 			int rc;
2681 
2682 			rc = sfc_flow_remove(sa, flow, error);
2683 			if (rc != 0)
2684 				ret = rc;
2685 		}
2686 
2687 		TAILQ_REMOVE(&sa->flow_list, flow, entries);
2688 		sfc_flow_free(sa, flow);
2689 	}
2690 
2691 	sfc_adapter_unlock(sa);
2692 
2693 	return -ret;
2694 }
2695 
2696 static int
2697 sfc_flow_query(struct rte_eth_dev *dev,
2698 	       struct rte_flow *flow,
2699 	       const struct rte_flow_action *action,
2700 	       void *data,
2701 	       struct rte_flow_error *error)
2702 {
2703 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2704 	const struct sfc_flow_ops_by_spec *ops;
2705 	int ret;
2706 
2707 	sfc_adapter_lock(sa);
2708 
2709 	ops = sfc_flow_get_ops_by_spec(flow);
2710 	if (ops == NULL || ops->query == NULL) {
2711 		ret = rte_flow_error_set(error, ENOTSUP,
2712 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2713 			"No backend to handle this flow");
2714 		goto fail_no_backend;
2715 	}
2716 
2717 	if (sa->state != SFC_ETHDEV_STARTED) {
2718 		ret = rte_flow_error_set(error, EINVAL,
2719 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2720 			"Can't query the flow: the adapter is not started");
2721 		goto fail_not_started;
2722 	}
2723 
2724 	ret = ops->query(dev, flow, action, data, error);
2725 	if (ret != 0)
2726 		goto fail_query;
2727 
2728 	sfc_adapter_unlock(sa);
2729 
2730 	return 0;
2731 
2732 fail_query:
2733 fail_not_started:
2734 fail_no_backend:
2735 	sfc_adapter_unlock(sa);
2736 	return ret;
2737 }
2738 
2739 static int
2740 sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
2741 		 struct rte_flow_error *error)
2742 {
2743 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2744 	int ret = 0;
2745 
2746 	sfc_adapter_lock(sa);
2747 	if (sa->state != SFC_ETHDEV_INITIALIZED) {
2748 		rte_flow_error_set(error, EBUSY,
2749 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2750 				   NULL, "please close the port first");
2751 		ret = -rte_errno;
2752 	} else {
2753 		sfc_sa2shared(sa)->isolated = (enable) ? B_TRUE : B_FALSE;
2754 	}
2755 	sfc_adapter_unlock(sa);
2756 
2757 	return ret;
2758 }
2759 
2760 static int
2761 sfc_flow_pick_transfer_proxy(struct rte_eth_dev *dev,
2762 			     uint16_t *transfer_proxy_port,
2763 			     struct rte_flow_error *error)
2764 {
2765 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2766 	int ret;
2767 
2768 	ret = sfc_mae_get_switch_domain_admin(sa->mae.switch_domain_id,
2769 					      transfer_proxy_port);
2770 	if (ret != 0) {
2771 		return rte_flow_error_set(error, ret,
2772 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2773 					  NULL, NULL);
2774 	}
2775 
2776 	return 0;
2777 }
2778 
2779 static struct rte_flow_action_handle *
2780 sfc_flow_action_handle_create(struct rte_eth_dev *dev,
2781 			      const struct rte_flow_indir_action_conf *conf,
2782 			      const struct rte_flow_action *action,
2783 			      struct rte_flow_error *error)
2784 {
2785 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2786 	struct rte_flow_action_handle *handle;
2787 	int ret;
2788 
2789 	if (!conf->transfer) {
2790 		rte_flow_error_set(error, ENOTSUP,
2791 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2792 				   "non-transfer domain does not support indirect actions");
2793 		return NULL;
2794 	}
2795 
2796 	if (conf->ingress || conf->egress) {
2797 		rte_flow_error_set(error, EINVAL,
2798 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2799 				   NULL, "cannot combine ingress/egress with transfer");
2800 		return NULL;
2801 	}
2802 
2803 	handle = rte_zmalloc("sfc_rte_flow_action_handle", sizeof(*handle), 0);
2804 	if (handle == NULL) {
2805 		rte_flow_error_set(error, ENOMEM,
2806 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2807 				   "failed to allocate memory");
2808 		return NULL;
2809 	}
2810 
2811 	sfc_adapter_lock(sa);
2812 
2813 	ret = sfc_mae_indir_action_create(sa, action, handle, error);
2814 	if (ret != 0) {
2815 		sfc_adapter_unlock(sa);
2816 		rte_free(handle);
2817 		return NULL;
2818 	}
2819 
2820 	TAILQ_INSERT_TAIL(&sa->flow_indir_actions, handle, entries);
2821 
2822 	handle->transfer = (bool)conf->transfer;
2823 
2824 	sfc_adapter_unlock(sa);
2825 
2826 	return handle;
2827 }
2828 
2829 static int
2830 sfc_flow_action_handle_destroy(struct rte_eth_dev *dev,
2831 			       struct rte_flow_action_handle *handle,
2832 			       struct rte_flow_error *error)
2833 {
2834 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2835 	struct rte_flow_action_handle *entry;
2836 	int rc = EINVAL;
2837 
2838 	sfc_adapter_lock(sa);
2839 
2840 	TAILQ_FOREACH(entry, &sa->flow_indir_actions, entries) {
2841 		if (entry != handle)
2842 			continue;
2843 
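		/*
		 * Only transfer indirect actions can be created (see
		 * sfc_flow_action_handle_create()), so a non-transfer entry
		 * here would indicate an inconsistency.
		 */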
2844 		if (entry->transfer) {
2845 			rc = sfc_mae_indir_action_destroy(sa, handle,
2846 							  error);
2847 			if (rc != 0)
2848 				goto exit;
2849 		} else {
2850 			SFC_ASSERT(B_FALSE);
2851 		}
2852 
2853 		TAILQ_REMOVE(&sa->flow_indir_actions, entry, entries);
2854 		rte_free(entry);
2855 		goto exit;
2856 	}
2857 
2858 	rc = rte_flow_error_set(error, ENOENT,
2859 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2860 				"indirect action handle not found");
2861 
2862 exit:
2863 	sfc_adapter_unlock(sa);
2864 	return rc;
2865 }
2866 
2867 static int
2868 sfc_flow_action_handle_update(struct rte_eth_dev *dev,
2869 			      struct rte_flow_action_handle *handle,
2870 			      const void *update, struct rte_flow_error *error)
2871 {
2872 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2873 	struct rte_flow_action_handle *entry;
2874 	int rc = EINVAL;
2875 
2876 	sfc_adapter_lock(sa);
2877 
2878 	TAILQ_FOREACH(entry, &sa->flow_indir_actions, entries) {
2879 		if (entry != handle)
2880 			continue;
2881 
2882 		if (entry->transfer) {
2883 			rc = sfc_mae_indir_action_update(sa, handle,
2884 							 update, error);
2885 		} else {
2886 			SFC_ASSERT(B_FALSE);
2887 		}
2888 
2889 		goto exit;
2890 	}
2891 
2892 	rc = rte_flow_error_set(error, ENOENT,
2893 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2894 				"indirect action handle not found");
2895 
2896 exit:
2897 	sfc_adapter_unlock(sa);
2898 	return rc;
2899 }
2900 
2901 static int
2902 sfc_flow_action_handle_query(struct rte_eth_dev *dev,
2903 			     const struct rte_flow_action_handle *handle,
2904 			     void *data, struct rte_flow_error *error)
2905 {
2906 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2907 	struct rte_flow_action_handle *entry;
2908 	int rc = EINVAL;
2909 
2910 	sfc_adapter_lock(sa);
2911 
2912 	TAILQ_FOREACH(entry, &sa->flow_indir_actions, entries) {
2913 		if (entry != handle)
2914 			continue;
2915 
2916 		if (entry->transfer) {
2917 			rc = sfc_mae_indir_action_query(sa, handle,
2918 							data, error);
2919 		} else {
2920 			SFC_ASSERT(B_FALSE);
2921 		}
2922 
2923 		goto exit;
2924 	}
2925 
2926 	rc = rte_flow_error_set(error, ENOENT,
2927 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2928 				"indirect action handle not found");
2929 
2930 exit:
2931 	sfc_adapter_unlock(sa);
2932 	return rc;
2933 }
2934 
2935 const struct rte_flow_ops sfc_flow_ops = {
2936 	.validate = sfc_flow_validate,
2937 	.create = sfc_flow_create,
2938 	.destroy = sfc_flow_destroy,
2939 	.flush = sfc_flow_flush,
2940 	.query = sfc_flow_query,
2941 	.isolate = sfc_flow_isolate,
2942 	.action_handle_create = sfc_flow_action_handle_create,
2943 	.action_handle_destroy = sfc_flow_action_handle_destroy,
2944 	.action_handle_update = sfc_flow_action_handle_update,
2945 	.action_handle_query = sfc_flow_action_handle_query,
2946 	.tunnel_decap_set = sfc_ft_decap_set,
2947 	.tunnel_match = sfc_ft_match,
2948 	.tunnel_action_decap_release = sfc_ft_action_decap_release,
2949 	.tunnel_item_release = sfc_ft_item_release,
2950 	.get_restore_info = sfc_ft_get_restore_info,
2951 	.pick_transfer_proxy = sfc_flow_pick_transfer_proxy,
2952 };
2953 
2954 void
2955 sfc_flow_init(struct sfc_adapter *sa)
2956 {
2957 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2958 
2959 	TAILQ_INIT(&sa->flow_indir_actions);
2960 	TAILQ_INIT(&sa->flow_list);
2961 }
2962 
2963 void
2964 sfc_flow_fini(struct sfc_adapter *sa)
2965 {
2966 	struct rte_flow *flow;
2967 	void *tmp;
2968 
2969 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2970 
2971 	RTE_TAILQ_FOREACH_SAFE(flow, &sa->flow_list, entries, tmp) {
2972 		if (flow->internal)
2973 			continue;
2974 
2975 		TAILQ_REMOVE(&sa->flow_list, flow, entries);
2976 		sfc_flow_free(sa, flow);
2977 	}
2978 }
2979 
2980 void
2981 sfc_flow_stop(struct sfc_adapter *sa)
2982 {
2983 	struct rte_flow *flow;
2984 
2985 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2986 
2987 	TAILQ_FOREACH(flow, &sa->flow_list, entries) {
2988 		if (!flow->internal)
2989 			sfc_flow_remove(sa, flow, NULL);
2990 	}
2991 
2992 	/*
2993 	 * MAE counter service is not stopped on flow rule remove to avoid
2994 	 * extra work. Make sure that it is stopped here.
2995 	 */
2996 	sfc_mae_counter_stop(sa);
2997 }
2998 
2999 int
3000 sfc_flow_start(struct sfc_adapter *sa)
3001 {
3002 	struct rte_flow *flow;
3003 	int rc = 0;
3004 
3005 	sfc_log_init(sa, "entry");
3006 
3007 	SFC_ASSERT(sfc_adapter_is_locked(sa));
3008 
3009 	sfc_ft_counters_reset(sa);
3010 
3011 	TAILQ_FOREACH(flow, &sa->flow_list, entries) {
3012 		if (flow->internal)
3013 			continue;
3014 
3015 		rc = sfc_flow_insert(sa, flow, NULL);
3016 		if (rc != 0)
3017 			goto fail_bad_flow;
3018 	}
3019 
3020 	sfc_log_init(sa, "done");
3021 
3022 fail_bad_flow:
3023 	return rc;
3024 }
3025 
3026 static void
3027 sfc_flow_cleanup(struct sfc_adapter *sa, struct rte_flow *flow)
3028 {
3029 	if (flow == NULL)
3030 		return;
3031 
3032 	sfc_flow_rss_ctx_del(sa, flow->spec.filter.rss_ctx);
3033 }
3034