xref: /dpdk/drivers/net/sfc/sfc_flow.c (revision ac8d22de2394e03ba4a77d8fd24381147aafb1d3)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright (c) 2017-2018 Solarflare Communications Inc.
4  * All rights reserved.
5  *
6  * This software was jointly developed between OKTET Labs (under contract
7  * for Solarflare) and Solarflare Communications, Inc.
8  */
9 
10 #include <rte_tailq.h>
11 #include <rte_common.h>
12 #include <rte_ethdev_driver.h>
13 #include <rte_eth_ctrl.h>
14 #include <rte_ether.h>
15 #include <rte_flow.h>
16 #include <rte_flow_driver.h>
17 
18 #include "efx.h"
19 
20 #include "sfc.h"
21 #include "sfc_rx.h"
22 #include "sfc_filter.h"
23 #include "sfc_flow.h"
24 #include "sfc_log.h"
25 
26 /*
27  * The flow API is currently implemented in such a manner that each
28  * flow rule is converted into one or more hardware filters.
29  * All elements of a flow rule (attributes, pattern items, actions)
30  * correspond to one or more fields in the efx_filter_spec_s structure
31  * that describes the hardware filter.
32  * If some required field is left unset in the flow rule, a number
33  * of filter copies is created to cover all possible values
34  * of such a field.
35  */
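/*
 * For example (illustrative, based on the helpers below): if the hardware
 * filter requires an EtherType match that the rule leaves unset, the
 * single filter template is copied into two filters, one matching
 * EFX_ETHER_TYPE_IPV4 and one matching EFX_ETHER_TYPE_IPV6
 * (see sfc_flow_set_ethertypes()).
 */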
36 
37 enum sfc_flow_item_layers {
38 	SFC_FLOW_ITEM_ANY_LAYER,
39 	SFC_FLOW_ITEM_START_LAYER,
40 	SFC_FLOW_ITEM_L2,
41 	SFC_FLOW_ITEM_L3,
42 	SFC_FLOW_ITEM_L4,
43 };
44 
45 typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
46 				  efx_filter_spec_t *spec,
47 				  struct rte_flow_error *error);
48 
49 struct sfc_flow_item {
50 	enum rte_flow_item_type type;		/* Type of item */
51 	enum sfc_flow_item_layers layer;	/* Layer of item */
52 	enum sfc_flow_item_layers prev_layer;	/* Previous layer of item */
53 	sfc_flow_item_parse *parse;		/* Parsing function */
54 };
55 
56 static sfc_flow_item_parse sfc_flow_parse_void;
57 static sfc_flow_item_parse sfc_flow_parse_eth;
58 static sfc_flow_item_parse sfc_flow_parse_vlan;
59 static sfc_flow_item_parse sfc_flow_parse_ipv4;
60 static sfc_flow_item_parse sfc_flow_parse_ipv6;
61 static sfc_flow_item_parse sfc_flow_parse_tcp;
62 static sfc_flow_item_parse sfc_flow_parse_udp;
63 static sfc_flow_item_parse sfc_flow_parse_vxlan;
64 static sfc_flow_item_parse sfc_flow_parse_geneve;
65 static sfc_flow_item_parse sfc_flow_parse_nvgre;
66 
67 typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
68 				     unsigned int filters_count_for_one_val,
69 				     struct rte_flow_error *error);
70 
71 typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
72 					efx_filter_spec_t *spec,
73 					struct sfc_filter *filter);
74 
75 struct sfc_flow_copy_flag {
76 	/* EFX filter specification match flag */
77 	efx_filter_match_flags_t flag;
78 	/* Number of values of corresponding field */
79 	unsigned int vals_count;
80 	/* Function to set values in specifications */
81 	sfc_flow_spec_set_vals *set_vals;
82 	/*
83 	 * Function to check that the specification is suitable
84 	 * for adding this match flag
85 	 */
86 	sfc_flow_spec_check *spec_check;
87 };
88 
89 static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
90 static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
91 static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
92 static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
93 static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
94 
95 static boolean_t
96 sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
97 {
98 	uint8_t sum = 0;
99 	unsigned int i;
100 
101 	for (i = 0; i < size; i++)
102 		sum |= buf[i];
103 
104 	return (sum == 0) ? B_TRUE : B_FALSE;
105 }
106 
107 /*
108  * Validate the item and prepare the spec and mask structures for parsing
109  */
110 static int
111 sfc_flow_parse_init(const struct rte_flow_item *item,
112 		    const void **spec_ptr,
113 		    const void **mask_ptr,
114 		    const void *supp_mask,
115 		    const void *def_mask,
116 		    unsigned int size,
117 		    struct rte_flow_error *error)
118 {
119 	const uint8_t *spec;
120 	const uint8_t *mask;
121 	const uint8_t *last;
122 	uint8_t match;
123 	uint8_t supp;
124 	unsigned int i;
125 
126 	if (item == NULL) {
127 		rte_flow_error_set(error, EINVAL,
128 				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
129 				   "NULL item");
130 		return -rte_errno;
131 	}
132 
133 	if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
134 		rte_flow_error_set(error, EINVAL,
135 				   RTE_FLOW_ERROR_TYPE_ITEM, item,
136 				   "Mask or last is set without spec");
137 		return -rte_errno;
138 	}
139 
140 	/*
141 	 * If "mask" is not set, the default mask is used,
142 	 * but if the default mask is NULL, "mask" must be set
143 	 */
144 	if (item->mask == NULL) {
145 		if (def_mask == NULL) {
146 			rte_flow_error_set(error, EINVAL,
147 				RTE_FLOW_ERROR_TYPE_ITEM, NULL,
148 				"Mask should be specified");
149 			return -rte_errno;
150 		}
151 
152 		mask = def_mask;
153 	} else {
154 		mask = item->mask;
155 	}
156 
157 	spec = item->spec;
158 	last = item->last;
159 
160 	if (spec == NULL)
161 		goto exit;
162 
163 	/*
164 	 * If field values in "last" are either 0 or equal to the corresponding
165 	 * values in "spec" then they are ignored
166 	 */
167 	if (last != NULL &&
168 	    !sfc_flow_is_zero(last, size) &&
169 	    memcmp(last, spec, size) != 0) {
170 		rte_flow_error_set(error, ENOTSUP,
171 				   RTE_FLOW_ERROR_TYPE_ITEM, item,
172 				   "Ranging is not supported");
173 		return -rte_errno;
174 	}
175 
176 	if (supp_mask == NULL) {
177 		rte_flow_error_set(error, EINVAL,
178 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
179 			"Supported mask for item should be specified");
180 		return -rte_errno;
181 	}
182 
183 	/* Check that the mask and spec do not ask for more match than supp_mask */
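	/*
	 * Worked example (illustrative): for a spec byte of 0x05, a mask
	 * byte of 0xff and a supp_mask byte of 0x0f, match = 0x05 | 0xff =
	 * 0xff and (0xff | 0x0f) != 0x0f, so the item is rejected for
	 * requesting more match bits than are supported.
	 */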
184 	for (i = 0; i < size; i++) {
185 		match = spec[i] | mask[i];
186 		supp = ((const uint8_t *)supp_mask)[i];
187 
188 		if ((match | supp) != supp) {
189 			rte_flow_error_set(error, ENOTSUP,
190 					   RTE_FLOW_ERROR_TYPE_ITEM, item,
191 					   "Item's field is not supported");
192 			return -rte_errno;
193 		}
194 	}
195 
196 exit:
197 	*spec_ptr = spec;
198 	*mask_ptr = mask;
199 	return 0;
200 }
201 
202 /*
203  * Protocol parsers.
204  * Masking is not supported, so masks in items should be either
205  * full or empty (zeroed) and set only for supported fields which
206  * are specified in the supp_mask.
207  */
208 
209 static int
210 sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
211 		    __rte_unused efx_filter_spec_t *efx_spec,
212 		    __rte_unused struct rte_flow_error *error)
213 {
214 	return 0;
215 }
216 
217 /**
218  * Convert Ethernet item to EFX filter specification.
219  *
220  * @param item[in]
221  *   Item specification. Outer frame specification may only comprise
222  *   source/destination addresses and Ethertype field.
223  *   Inner frame specification may contain destination address only.
224  *   The individual/group mask is supported, as well as empty and full masks.
225  *   If the mask is NULL, default mask will be used. Ranging is not supported.
226  * @param efx_spec[in, out]
227  *   EFX filter specification to update.
228  * @param[out] error
229  *   Perform verbose error reporting if not NULL.
230  */
231 static int
232 sfc_flow_parse_eth(const struct rte_flow_item *item,
233 		   efx_filter_spec_t *efx_spec,
234 		   struct rte_flow_error *error)
235 {
236 	int rc;
237 	const struct rte_flow_item_eth *spec = NULL;
238 	const struct rte_flow_item_eth *mask = NULL;
239 	const struct rte_flow_item_eth supp_mask = {
240 		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
241 		.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
242 		.type = 0xffff,
243 	};
244 	const struct rte_flow_item_eth ifrm_supp_mask = {
245 		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
246 	};
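	/* Mask covering only the individual/group bit of the destination MAC */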
247 	const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
248 		0x01, 0x00, 0x00, 0x00, 0x00, 0x00
249 	};
250 	const struct rte_flow_item_eth *supp_mask_p;
251 	const struct rte_flow_item_eth *def_mask_p;
252 	uint8_t *loc_mac = NULL;
253 	boolean_t is_ifrm = (efx_spec->efs_encap_type !=
254 		EFX_TUNNEL_PROTOCOL_NONE);
255 
256 	if (is_ifrm) {
257 		supp_mask_p = &ifrm_supp_mask;
258 		def_mask_p = &ifrm_supp_mask;
259 		loc_mac = efx_spec->efs_ifrm_loc_mac;
260 	} else {
261 		supp_mask_p = &supp_mask;
262 		def_mask_p = &rte_flow_item_eth_mask;
263 		loc_mac = efx_spec->efs_loc_mac;
264 	}
265 
266 	rc = sfc_flow_parse_init(item,
267 				 (const void **)&spec,
268 				 (const void **)&mask,
269 				 supp_mask_p, def_mask_p,
270 				 sizeof(struct rte_flow_item_eth),
271 				 error);
272 	if (rc != 0)
273 		return rc;
274 
275 	/* If "spec" is not set, could be any Ethernet */
276 	if (spec == NULL)
277 		return 0;
278 
279 	if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
280 		efx_spec->efs_match_flags |= is_ifrm ?
281 			EFX_FILTER_MATCH_IFRM_LOC_MAC :
282 			EFX_FILTER_MATCH_LOC_MAC;
283 		rte_memcpy(loc_mac, spec->dst.addr_bytes,
284 			   EFX_MAC_ADDR_LEN);
285 	} else if (memcmp(mask->dst.addr_bytes, ig_mask,
286 			  EFX_MAC_ADDR_LEN) == 0) {
287 		if (is_unicast_ether_addr(&spec->dst))
288 			efx_spec->efs_match_flags |= is_ifrm ?
289 				EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
290 				EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
291 		else
292 			efx_spec->efs_match_flags |= is_ifrm ?
293 				EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
294 				EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
295 	} else if (!is_zero_ether_addr(&mask->dst)) {
296 		goto fail_bad_mask;
297 	}
298 
299 	/*
300 	 * ifrm_supp_mask ensures that the source address and
301 	 * ethertype masks are equal to zero in inner frame,
302 	 * so these fields are filled in only for the outer frame
303 	 */
304 	if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
305 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
306 		rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
307 			   EFX_MAC_ADDR_LEN);
308 	} else if (!is_zero_ether_addr(&mask->src)) {
309 		goto fail_bad_mask;
310 	}
311 
312 	/*
313 	 * Ether type is in big-endian byte order in item and
314 	 * in little-endian in efx_spec, so byte swap is used
315 	 */
316 	if (mask->type == supp_mask.type) {
317 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
318 		efx_spec->efs_ether_type = rte_bswap16(spec->type);
319 	} else if (mask->type != 0) {
320 		goto fail_bad_mask;
321 	}
322 
323 	return 0;
324 
325 fail_bad_mask:
326 	rte_flow_error_set(error, EINVAL,
327 			   RTE_FLOW_ERROR_TYPE_ITEM, item,
328 			   "Bad mask in the ETH pattern item");
329 	return -rte_errno;
330 }
331 
332 /**
333  * Convert VLAN item to EFX filter specification.
334  *
335  * @param item[in]
336  *   Item specification. Only VID field is supported.
337  *   The mask can not be NULL. Ranging is not supported.
338  * @param efx_spec[in, out]
339  *   EFX filter specification to update.
340  * @param[out] error
341  *   Perform verbose error reporting if not NULL.
342  */
343 static int
344 sfc_flow_parse_vlan(const struct rte_flow_item *item,
345 		    efx_filter_spec_t *efx_spec,
346 		    struct rte_flow_error *error)
347 {
348 	int rc;
349 	uint16_t vid;
350 	const struct rte_flow_item_vlan *spec = NULL;
351 	const struct rte_flow_item_vlan *mask = NULL;
352 	const struct rte_flow_item_vlan supp_mask = {
353 		.tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
354 	};
355 
356 	rc = sfc_flow_parse_init(item,
357 				 (const void **)&spec,
358 				 (const void **)&mask,
359 				 &supp_mask,
360 				 NULL,
361 				 sizeof(struct rte_flow_item_vlan),
362 				 error);
363 	if (rc != 0)
364 		return rc;
365 
366 	/*
367 	 * VID is in big-endian byte order in item and
368 	 * in little-endian in efx_spec, so byte swap is used.
369 	 * If two VLAN items are included, the first matches
370 	 * the outer tag and the next matches the inner tag.
371 	 */
372 	if (mask->tci == supp_mask.tci) {
373 		vid = rte_bswap16(spec->tci);
374 
375 		if (!(efx_spec->efs_match_flags &
376 		      EFX_FILTER_MATCH_OUTER_VID)) {
377 			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
378 			efx_spec->efs_outer_vid = vid;
379 		} else if (!(efx_spec->efs_match_flags &
380 			     EFX_FILTER_MATCH_INNER_VID)) {
381 			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
382 			efx_spec->efs_inner_vid = vid;
383 		} else {
384 			rte_flow_error_set(error, EINVAL,
385 					   RTE_FLOW_ERROR_TYPE_ITEM, item,
386 					   "More than two VLAN items");
387 			return -rte_errno;
388 		}
389 	} else {
390 		rte_flow_error_set(error, EINVAL,
391 				   RTE_FLOW_ERROR_TYPE_ITEM, item,
392 				   "VLAN ID in TCI match is required");
393 		return -rte_errno;
394 	}
395 
396 	return 0;
397 }
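/*
 * Worked example (illustrative): in a pattern ETH / VLAN / VLAN / IPV4,
 * the first VLAN item sets EFX_FILTER_MATCH_OUTER_VID and the second one
 * sets EFX_FILTER_MATCH_INNER_VID, as handled above.
 */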
398 
399 /**
400  * Convert IPv4 item to EFX filter specification.
401  *
402  * @param item[in]
403  *   Item specification. Only source and destination addresses and
404  *   protocol fields are supported. If the mask is NULL, default
405  *   mask will be used. Ranging is not supported.
406  * @param efx_spec[in, out]
407  *   EFX filter specification to update.
408  * @param[out] error
409  *   Perform verbose error reporting if not NULL.
410  */
411 static int
412 sfc_flow_parse_ipv4(const struct rte_flow_item *item,
413 		    efx_filter_spec_t *efx_spec,
414 		    struct rte_flow_error *error)
415 {
416 	int rc;
417 	const struct rte_flow_item_ipv4 *spec = NULL;
418 	const struct rte_flow_item_ipv4 *mask = NULL;
419 	const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
420 	const struct rte_flow_item_ipv4 supp_mask = {
421 		.hdr = {
422 			.src_addr = 0xffffffff,
423 			.dst_addr = 0xffffffff,
424 			.next_proto_id = 0xff,
425 		}
426 	};
427 
428 	rc = sfc_flow_parse_init(item,
429 				 (const void **)&spec,
430 				 (const void **)&mask,
431 				 &supp_mask,
432 				 &rte_flow_item_ipv4_mask,
433 				 sizeof(struct rte_flow_item_ipv4),
434 				 error);
435 	if (rc != 0)
436 		return rc;
437 
438 	/*
439 	 * Filtering by IPv4 source and destination addresses requires
440 	 * the appropriate ETHER_TYPE in hardware filters
441 	 */
442 	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
443 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
444 		efx_spec->efs_ether_type = ether_type_ipv4;
445 	} else if (efx_spec->efs_ether_type != ether_type_ipv4) {
446 		rte_flow_error_set(error, EINVAL,
447 			RTE_FLOW_ERROR_TYPE_ITEM, item,
448 			"Ethertype in pattern with IPV4 item should be appropriate");
449 		return -rte_errno;
450 	}
451 
452 	if (spec == NULL)
453 		return 0;
454 
455 	/*
456 	 * IPv4 addresses are in big-endian byte order in item and in
457 	 * efx_spec
458 	 */
459 	if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
460 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
461 		efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
462 	} else if (mask->hdr.src_addr != 0) {
463 		goto fail_bad_mask;
464 	}
465 
466 	if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
467 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
468 		efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
469 	} else if (mask->hdr.dst_addr != 0) {
470 		goto fail_bad_mask;
471 	}
472 
473 	if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
474 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
475 		efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
476 	} else if (mask->hdr.next_proto_id != 0) {
477 		goto fail_bad_mask;
478 	}
479 
480 	return 0;
481 
482 fail_bad_mask:
483 	rte_flow_error_set(error, EINVAL,
484 			   RTE_FLOW_ERROR_TYPE_ITEM, item,
485 			   "Bad mask in the IPV4 pattern item");
486 	return -rte_errno;
487 }
488 
489 /**
490  * Convert IPv6 item to EFX filter specification.
491  *
492  * @param item[in]
493  *   Item specification. Only source and destination addresses and
494  *   next header fields are supported. If the mask is NULL, default
495  *   mask will be used. Ranging is not supported.
496  * @param efx_spec[in, out]
497  *   EFX filter specification to update.
498  * @param[out] error
499  *   Perform verbose error reporting if not NULL.
500  */
501 static int
502 sfc_flow_parse_ipv6(const struct rte_flow_item *item,
503 		    efx_filter_spec_t *efx_spec,
504 		    struct rte_flow_error *error)
505 {
506 	int rc;
507 	const struct rte_flow_item_ipv6 *spec = NULL;
508 	const struct rte_flow_item_ipv6 *mask = NULL;
509 	const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
510 	const struct rte_flow_item_ipv6 supp_mask = {
511 		.hdr = {
512 			.src_addr = { 0xff, 0xff, 0xff, 0xff,
513 				      0xff, 0xff, 0xff, 0xff,
514 				      0xff, 0xff, 0xff, 0xff,
515 				      0xff, 0xff, 0xff, 0xff },
516 			.dst_addr = { 0xff, 0xff, 0xff, 0xff,
517 				      0xff, 0xff, 0xff, 0xff,
518 				      0xff, 0xff, 0xff, 0xff,
519 				      0xff, 0xff, 0xff, 0xff },
520 			.proto = 0xff,
521 		}
522 	};
523 
524 	rc = sfc_flow_parse_init(item,
525 				 (const void **)&spec,
526 				 (const void **)&mask,
527 				 &supp_mask,
528 				 &rte_flow_item_ipv6_mask,
529 				 sizeof(struct rte_flow_item_ipv6),
530 				 error);
531 	if (rc != 0)
532 		return rc;
533 
534 	/*
535 	 * Filtering by IPv6 source and destination addresses requires
536 	 * the appropriate ETHER_TYPE in hardware filters
537 	 */
538 	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
539 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
540 		efx_spec->efs_ether_type = ether_type_ipv6;
541 	} else if (efx_spec->efs_ether_type != ether_type_ipv6) {
542 		rte_flow_error_set(error, EINVAL,
543 			RTE_FLOW_ERROR_TYPE_ITEM, item,
544 			"Ethertype in pattern with IPV6 item should be appropriate");
545 		return -rte_errno;
546 	}
547 
548 	if (spec == NULL)
549 		return 0;
550 
551 	/*
552 	 * IPv6 addresses are in big-endian byte order in item and in
553 	 * efx_spec
554 	 */
555 	if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
556 		   sizeof(mask->hdr.src_addr)) == 0) {
557 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
558 
559 		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
560 				 sizeof(spec->hdr.src_addr));
561 		rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
562 			   sizeof(efx_spec->efs_rem_host));
563 	} else if (!sfc_flow_is_zero(mask->hdr.src_addr,
564 				     sizeof(mask->hdr.src_addr))) {
565 		goto fail_bad_mask;
566 	}
567 
568 	if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
569 		   sizeof(mask->hdr.dst_addr)) == 0) {
570 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
571 
572 		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
573 				 sizeof(spec->hdr.dst_addr));
574 		rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
575 			   sizeof(efx_spec->efs_loc_host));
576 	} else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
577 				     sizeof(mask->hdr.dst_addr))) {
578 		goto fail_bad_mask;
579 	}
580 
581 	if (mask->hdr.proto == supp_mask.hdr.proto) {
582 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
583 		efx_spec->efs_ip_proto = spec->hdr.proto;
584 	} else if (mask->hdr.proto != 0) {
585 		goto fail_bad_mask;
586 	}
587 
588 	return 0;
589 
590 fail_bad_mask:
591 	rte_flow_error_set(error, EINVAL,
592 			   RTE_FLOW_ERROR_TYPE_ITEM, item,
593 			   "Bad mask in the IPV6 pattern item");
594 	return -rte_errno;
595 }
596 
597 /**
598  * Convert TCP item to EFX filter specification.
599  *
600  * @param item[in]
601  *   Item specification. Only source and destination ports fields
602  *   are supported. If the mask is NULL, default mask will be used.
603  *   Ranging is not supported.
604  * @param efx_spec[in, out]
605  *   EFX filter specification to update.
606  * @param[out] error
607  *   Perform verbose error reporting if not NULL.
608  */
609 static int
610 sfc_flow_parse_tcp(const struct rte_flow_item *item,
611 		   efx_filter_spec_t *efx_spec,
612 		   struct rte_flow_error *error)
613 {
614 	int rc;
615 	const struct rte_flow_item_tcp *spec = NULL;
616 	const struct rte_flow_item_tcp *mask = NULL;
617 	const struct rte_flow_item_tcp supp_mask = {
618 		.hdr = {
619 			.src_port = 0xffff,
620 			.dst_port = 0xffff,
621 		}
622 	};
623 
624 	rc = sfc_flow_parse_init(item,
625 				 (const void **)&spec,
626 				 (const void **)&mask,
627 				 &supp_mask,
628 				 &rte_flow_item_tcp_mask,
629 				 sizeof(struct rte_flow_item_tcp),
630 				 error);
631 	if (rc != 0)
632 		return rc;
633 
634 	/*
635 	 * Filtering by TCP source and destination ports requires
636 	 * the appropriate IP_PROTO in hardware filters
637 	 */
638 	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
639 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
640 		efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
641 	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
642 		rte_flow_error_set(error, EINVAL,
643 			RTE_FLOW_ERROR_TYPE_ITEM, item,
644 			"IP proto in pattern with TCP item should be appropriate");
645 		return -rte_errno;
646 	}
647 
648 	if (spec == NULL)
649 		return 0;
650 
651 	/*
652 	 * Source and destination ports are in big-endian byte order in item and
653 	 * in little-endian in efx_spec, so byte swap is used
654 	 */
655 	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
656 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
657 		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
658 	} else if (mask->hdr.src_port != 0) {
659 		goto fail_bad_mask;
660 	}
661 
662 	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
663 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
664 		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
665 	} else if (mask->hdr.dst_port != 0) {
666 		goto fail_bad_mask;
667 	}
668 
669 	return 0;
670 
671 fail_bad_mask:
672 	rte_flow_error_set(error, EINVAL,
673 			   RTE_FLOW_ERROR_TYPE_ITEM, item,
674 			   "Bad mask in the TCP pattern item");
675 	return -rte_errno;
676 }
677 
678 /**
679  * Convert UDP item to EFX filter specification.
680  *
681  * @param item[in]
682  *   Item specification. Only source and destination ports fields
683  *   are supported. If the mask is NULL, default mask will be used.
684  *   Ranging is not supported.
685  * @param efx_spec[in, out]
686  *   EFX filter specification to update.
687  * @param[out] error
688  *   Perform verbose error reporting if not NULL.
689  */
690 static int
691 sfc_flow_parse_udp(const struct rte_flow_item *item,
692 		   efx_filter_spec_t *efx_spec,
693 		   struct rte_flow_error *error)
694 {
695 	int rc;
696 	const struct rte_flow_item_udp *spec = NULL;
697 	const struct rte_flow_item_udp *mask = NULL;
698 	const struct rte_flow_item_udp supp_mask = {
699 		.hdr = {
700 			.src_port = 0xffff,
701 			.dst_port = 0xffff,
702 		}
703 	};
704 
705 	rc = sfc_flow_parse_init(item,
706 				 (const void **)&spec,
707 				 (const void **)&mask,
708 				 &supp_mask,
709 				 &rte_flow_item_udp_mask,
710 				 sizeof(struct rte_flow_item_udp),
711 				 error);
712 	if (rc != 0)
713 		return rc;
714 
715 	/*
716 	 * Filtering by UDP source and destination ports requires
717 	 * the appropriate IP_PROTO in hardware filters
718 	 */
719 	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
720 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
721 		efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
722 	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
723 		rte_flow_error_set(error, EINVAL,
724 			RTE_FLOW_ERROR_TYPE_ITEM, item,
725 			"IP proto in pattern with UDP item should be appropriate");
726 		return -rte_errno;
727 	}
728 
729 	if (spec == NULL)
730 		return 0;
731 
732 	/*
733 	 * Source and destination ports are in big-endian byte order in item and
734 	 * in little-endian in efx_spec, so byte swap is used
735 	 */
736 	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
737 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
738 		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
739 	} else if (mask->hdr.src_port != 0) {
740 		goto fail_bad_mask;
741 	}
742 
743 	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
744 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
745 		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
746 	} else if (mask->hdr.dst_port != 0) {
747 		goto fail_bad_mask;
748 	}
749 
750 	return 0;
751 
752 fail_bad_mask:
753 	rte_flow_error_set(error, EINVAL,
754 			   RTE_FLOW_ERROR_TYPE_ITEM, item,
755 			   "Bad mask in the UDP pattern item");
756 	return -rte_errno;
757 }
758 
759 /*
760  * Filters for encapsulated packets match based on the EtherType and IP
761  * protocol in the outer frame.
762  */
763 static int
764 sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
765 					efx_filter_spec_t *efx_spec,
766 					uint8_t ip_proto,
767 					struct rte_flow_error *error)
768 {
769 	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
770 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
771 		efx_spec->efs_ip_proto = ip_proto;
772 	} else if (efx_spec->efs_ip_proto != ip_proto) {
773 		switch (ip_proto) {
774 		case EFX_IPPROTO_UDP:
775 			rte_flow_error_set(error, EINVAL,
776 				RTE_FLOW_ERROR_TYPE_ITEM, item,
777 				"Outer IP header protocol must be UDP "
778 				"in VxLAN/GENEVE pattern");
779 			return -rte_errno;
780 
781 		case EFX_IPPROTO_GRE:
782 			rte_flow_error_set(error, EINVAL,
783 				RTE_FLOW_ERROR_TYPE_ITEM, item,
784 				"Outer IP header protocol must be GRE "
785 				"in NVGRE pattern");
786 			return -rte_errno;
787 
788 		default:
789 			rte_flow_error_set(error, EINVAL,
790 				RTE_FLOW_ERROR_TYPE_ITEM, item,
791 				"Only VxLAN/GENEVE/NVGRE tunneling patterns "
792 				"are supported");
793 			return -rte_errno;
794 		}
795 	}
796 
797 	if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
798 	    efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
799 	    efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
800 		rte_flow_error_set(error, EINVAL,
801 			RTE_FLOW_ERROR_TYPE_ITEM, item,
802 			"Outer frame EtherType in pattern with tunneling "
803 			"must be IPv4 or IPv6");
804 		return -rte_errno;
805 	}
806 
807 	return 0;
808 }
809 
810 static int
811 sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
812 				  const uint8_t *vni_or_vsid_val,
813 				  const uint8_t *vni_or_vsid_mask,
814 				  const struct rte_flow_item *item,
815 				  struct rte_flow_error *error)
816 {
817 	const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
818 		0xff, 0xff, 0xff
819 	};
820 
821 	if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
822 		   EFX_VNI_OR_VSID_LEN) == 0) {
823 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
824 		rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
825 			   EFX_VNI_OR_VSID_LEN);
826 	} else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
827 		rte_flow_error_set(error, EINVAL,
828 				   RTE_FLOW_ERROR_TYPE_ITEM, item,
829 				   "Unsupported VNI/VSID mask");
830 		return -rte_errno;
831 	}
832 
833 	return 0;
834 }
835 
836 /**
837  * Convert VXLAN item to EFX filter specification.
838  *
839  * @param item[in]
840  *   Item specification. Only VXLAN network identifier field is supported.
841  *   If the mask is NULL, default mask will be used.
842  *   Ranging is not supported.
843  * @param efx_spec[in, out]
844  *   EFX filter specification to update.
845  * @param[out] error
846  *   Perform verbose error reporting if not NULL.
847  */
848 static int
849 sfc_flow_parse_vxlan(const struct rte_flow_item *item,
850 		     efx_filter_spec_t *efx_spec,
851 		     struct rte_flow_error *error)
852 {
853 	int rc;
854 	const struct rte_flow_item_vxlan *spec = NULL;
855 	const struct rte_flow_item_vxlan *mask = NULL;
856 	const struct rte_flow_item_vxlan supp_mask = {
857 		.vni = { 0xff, 0xff, 0xff }
858 	};
859 
860 	rc = sfc_flow_parse_init(item,
861 				 (const void **)&spec,
862 				 (const void **)&mask,
863 				 &supp_mask,
864 				 &rte_flow_item_vxlan_mask,
865 				 sizeof(struct rte_flow_item_vxlan),
866 				 error);
867 	if (rc != 0)
868 		return rc;
869 
870 	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
871 						     EFX_IPPROTO_UDP, error);
872 	if (rc != 0)
873 		return rc;
874 
875 	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
876 	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
877 
878 	if (spec == NULL)
879 		return 0;
880 
881 	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
882 					       mask->vni, item, error);
883 
884 	return rc;
885 }
886 
887 /**
888  * Convert GENEVE item to EFX filter specification.
889  *
890  * @param item[in]
891  *   Item specification. Only Virtual Network Identifier and protocol type
892  *   fields are supported, and the protocol type can only be Ethernet (0x6558).
893  *   If the mask is NULL, default mask will be used.
894  *   Ranging is not supported.
895  * @param efx_spec[in, out]
896  *   EFX filter specification to update.
897  * @param[out] error
898  *   Perform verbose error reporting if not NULL.
899  */
900 static int
901 sfc_flow_parse_geneve(const struct rte_flow_item *item,
902 		      efx_filter_spec_t *efx_spec,
903 		      struct rte_flow_error *error)
904 {
905 	int rc;
906 	const struct rte_flow_item_geneve *spec = NULL;
907 	const struct rte_flow_item_geneve *mask = NULL;
908 	const struct rte_flow_item_geneve supp_mask = {
909 		.protocol = RTE_BE16(0xffff),
910 		.vni = { 0xff, 0xff, 0xff }
911 	};
912 
913 	rc = sfc_flow_parse_init(item,
914 				 (const void **)&spec,
915 				 (const void **)&mask,
916 				 &supp_mask,
917 				 &rte_flow_item_geneve_mask,
918 				 sizeof(struct rte_flow_item_geneve),
919 				 error);
920 	if (rc != 0)
921 		return rc;
922 
923 	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
924 						     EFX_IPPROTO_UDP, error);
925 	if (rc != 0)
926 		return rc;
927 
928 	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
929 	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
930 
931 	if (spec == NULL)
932 		return 0;
933 
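	/*
	 * ETHER_TYPE_TEB (0x6558) is Transparent Ethernet Bridging,
	 * i.e. the GENEVE payload is an Ethernet frame.
	 */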
934 	if (mask->protocol == supp_mask.protocol) {
935 		if (spec->protocol != rte_cpu_to_be_16(ETHER_TYPE_TEB)) {
936 			rte_flow_error_set(error, EINVAL,
937 				RTE_FLOW_ERROR_TYPE_ITEM, item,
938 				"GENEVE encap. protocol must be Ethernet "
939 				"(0x6558) in the GENEVE pattern item");
940 			return -rte_errno;
941 		}
942 	} else if (mask->protocol != 0) {
943 		rte_flow_error_set(error, EINVAL,
944 			RTE_FLOW_ERROR_TYPE_ITEM, item,
945 			"Unsupported mask for GENEVE encap. protocol");
946 		return -rte_errno;
947 	}
948 
949 	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
950 					       mask->vni, item, error);
951 
952 	return rc;
953 }
954 
955 /**
956  * Convert NVGRE item to EFX filter specification.
957  *
958  * @param item[in]
959  *   Item specification. Only virtual subnet ID field is supported.
960  *   If the mask is NULL, default mask will be used.
961  *   Ranging is not supported.
962  * @param efx_spec[in, out]
963  *   EFX filter specification to update.
964  * @param[out] error
965  *   Perform verbose error reporting if not NULL.
966  */
967 static int
968 sfc_flow_parse_nvgre(const struct rte_flow_item *item,
969 		     efx_filter_spec_t *efx_spec,
970 		     struct rte_flow_error *error)
971 {
972 	int rc;
973 	const struct rte_flow_item_nvgre *spec = NULL;
974 	const struct rte_flow_item_nvgre *mask = NULL;
975 	const struct rte_flow_item_nvgre supp_mask = {
976 		.tni = { 0xff, 0xff, 0xff }
977 	};
978 
979 	rc = sfc_flow_parse_init(item,
980 				 (const void **)&spec,
981 				 (const void **)&mask,
982 				 &supp_mask,
983 				 &rte_flow_item_nvgre_mask,
984 				 sizeof(struct rte_flow_item_nvgre),
985 				 error);
986 	if (rc != 0)
987 		return rc;
988 
989 	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
990 						     EFX_IPPROTO_GRE, error);
991 	if (rc != 0)
992 		return rc;
993 
994 	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
995 	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
996 
997 	if (spec == NULL)
998 		return 0;
999 
1000 	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
1001 					       mask->tni, item, error);
1002 
1003 	return rc;
1004 }
1005 
1006 static const struct sfc_flow_item sfc_flow_items[] = {
1007 	{
1008 		.type = RTE_FLOW_ITEM_TYPE_VOID,
1009 		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1010 		.layer = SFC_FLOW_ITEM_ANY_LAYER,
1011 		.parse = sfc_flow_parse_void,
1012 	},
1013 	{
1014 		.type = RTE_FLOW_ITEM_TYPE_ETH,
1015 		.prev_layer = SFC_FLOW_ITEM_START_LAYER,
1016 		.layer = SFC_FLOW_ITEM_L2,
1017 		.parse = sfc_flow_parse_eth,
1018 	},
1019 	{
1020 		.type = RTE_FLOW_ITEM_TYPE_VLAN,
1021 		.prev_layer = SFC_FLOW_ITEM_L2,
1022 		.layer = SFC_FLOW_ITEM_L2,
1023 		.parse = sfc_flow_parse_vlan,
1024 	},
1025 	{
1026 		.type = RTE_FLOW_ITEM_TYPE_IPV4,
1027 		.prev_layer = SFC_FLOW_ITEM_L2,
1028 		.layer = SFC_FLOW_ITEM_L3,
1029 		.parse = sfc_flow_parse_ipv4,
1030 	},
1031 	{
1032 		.type = RTE_FLOW_ITEM_TYPE_IPV6,
1033 		.prev_layer = SFC_FLOW_ITEM_L2,
1034 		.layer = SFC_FLOW_ITEM_L3,
1035 		.parse = sfc_flow_parse_ipv6,
1036 	},
1037 	{
1038 		.type = RTE_FLOW_ITEM_TYPE_TCP,
1039 		.prev_layer = SFC_FLOW_ITEM_L3,
1040 		.layer = SFC_FLOW_ITEM_L4,
1041 		.parse = sfc_flow_parse_tcp,
1042 	},
1043 	{
1044 		.type = RTE_FLOW_ITEM_TYPE_UDP,
1045 		.prev_layer = SFC_FLOW_ITEM_L3,
1046 		.layer = SFC_FLOW_ITEM_L4,
1047 		.parse = sfc_flow_parse_udp,
1048 	},
1049 	{
1050 		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
1051 		.prev_layer = SFC_FLOW_ITEM_L4,
1052 		.layer = SFC_FLOW_ITEM_START_LAYER,
1053 		.parse = sfc_flow_parse_vxlan,
1054 	},
1055 	{
1056 		.type = RTE_FLOW_ITEM_TYPE_GENEVE,
1057 		.prev_layer = SFC_FLOW_ITEM_L4,
1058 		.layer = SFC_FLOW_ITEM_START_LAYER,
1059 		.parse = sfc_flow_parse_geneve,
1060 	},
1061 	{
1062 		.type = RTE_FLOW_ITEM_TYPE_NVGRE,
1063 		.prev_layer = SFC_FLOW_ITEM_L3,
1064 		.layer = SFC_FLOW_ITEM_START_LAYER,
1065 		.parse = sfc_flow_parse_nvgre,
1066 	},
1067 };
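/*
 * Example of a pattern accepted by the layer ordering above (illustrative):
 * ETH / IPV4 / UDP / VXLAN / ETH / END. The VXLAN item resets the layer to
 * SFC_FLOW_ITEM_START_LAYER, so the inner frame may start from ETH again.
 */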
1068 
1069 /*
1070  * Protocol-independent flow API support
1071  */
1072 static int
1073 sfc_flow_parse_attr(const struct rte_flow_attr *attr,
1074 		    struct rte_flow *flow,
1075 		    struct rte_flow_error *error)
1076 {
1077 	if (attr == NULL) {
1078 		rte_flow_error_set(error, EINVAL,
1079 				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1080 				   "NULL attribute");
1081 		return -rte_errno;
1082 	}
1083 	if (attr->group != 0) {
1084 		rte_flow_error_set(error, ENOTSUP,
1085 				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
1086 				   "Groups are not supported");
1087 		return -rte_errno;
1088 	}
1089 	if (attr->priority != 0) {
1090 		rte_flow_error_set(error, ENOTSUP,
1091 				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
1092 				   "Priorities are not supported");
1093 		return -rte_errno;
1094 	}
1095 	if (attr->egress != 0) {
1096 		rte_flow_error_set(error, ENOTSUP,
1097 				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
1098 				   "Egress is not supported");
1099 		return -rte_errno;
1100 	}
1101 	if (attr->ingress == 0) {
1102 		rte_flow_error_set(error, ENOTSUP,
1103 				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
1104 				   "Only ingress is supported");
1105 		return -rte_errno;
1106 	}
1107 
1108 	flow->spec.template.efs_flags |= EFX_FILTER_FLAG_RX;
1109 	flow->spec.template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
1110 
1111 	return 0;
1112 }
1113 
1114 /* Get item from array sfc_flow_items */
1115 static const struct sfc_flow_item *
1116 sfc_flow_get_item(enum rte_flow_item_type type)
1117 {
1118 	unsigned int i;
1119 
1120 	for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
1121 		if (sfc_flow_items[i].type == type)
1122 			return &sfc_flow_items[i];
1123 
1124 	return NULL;
1125 }
1126 
1127 static int
1128 sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
1129 		       struct rte_flow *flow,
1130 		       struct rte_flow_error *error)
1131 {
1132 	int rc;
1133 	unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
1134 	boolean_t is_ifrm = B_FALSE;
1135 	const struct sfc_flow_item *item;
1136 
1137 	if (pattern == NULL) {
1138 		rte_flow_error_set(error, EINVAL,
1139 				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
1140 				   "NULL pattern");
1141 		return -rte_errno;
1142 	}
1143 
1144 	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
1145 		item = sfc_flow_get_item(pattern->type);
1146 		if (item == NULL) {
1147 			rte_flow_error_set(error, ENOTSUP,
1148 					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1149 					   "Unsupported pattern item");
1150 			return -rte_errno;
1151 		}
1152 
1153 		/*
1154 		 * Omitting one or several protocol layers at the beginning
1155 		 * of the pattern is supported
1156 		 */
1157 		if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
1158 		    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
1159 		    item->prev_layer != prev_layer) {
1160 			rte_flow_error_set(error, ENOTSUP,
1161 					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1162 					   "Unexpected sequence of pattern items");
1163 			return -rte_errno;
1164 		}
1165 
1166 		/*
1167 		 * Allow only VOID and ETH pattern items in the inner frame.
1168 		 * Also check that there is only one tunneling protocol.
1169 		 */
1170 		switch (item->type) {
1171 		case RTE_FLOW_ITEM_TYPE_VOID:
1172 		case RTE_FLOW_ITEM_TYPE_ETH:
1173 			break;
1174 
1175 		case RTE_FLOW_ITEM_TYPE_VXLAN:
1176 		case RTE_FLOW_ITEM_TYPE_GENEVE:
1177 		case RTE_FLOW_ITEM_TYPE_NVGRE:
1178 			if (is_ifrm) {
1179 				rte_flow_error_set(error, EINVAL,
1180 					RTE_FLOW_ERROR_TYPE_ITEM,
1181 					pattern,
1182 					"More than one tunneling protocol");
1183 				return -rte_errno;
1184 			}
1185 			is_ifrm = B_TRUE;
1186 			break;
1187 
1188 		default:
1189 			if (is_ifrm) {
1190 				rte_flow_error_set(error, EINVAL,
1191 					RTE_FLOW_ERROR_TYPE_ITEM,
1192 					pattern,
1193 					"There is an unsupported pattern item "
1194 					"in the inner frame");
1195 				return -rte_errno;
1196 			}
1197 			break;
1198 		}
1199 
1200 		rc = item->parse(pattern, &flow->spec.template, error);
1201 		if (rc != 0)
1202 			return rc;
1203 
1204 		if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
1205 			prev_layer = item->layer;
1206 	}
1207 
1208 	return 0;
1209 }
1210 
1211 static int
1212 sfc_flow_parse_queue(struct sfc_adapter *sa,
1213 		     const struct rte_flow_action_queue *queue,
1214 		     struct rte_flow *flow)
1215 {
1216 	struct sfc_rxq *rxq;
1217 
1218 	if (queue->index >= sa->rxq_count)
1219 		return -EINVAL;
1220 
1221 	rxq = sa->rxq_info[queue->index].rxq;
1222 	flow->spec.template.efs_dmaq_id = (uint16_t)rxq->hw_index;
1223 
1224 	return 0;
1225 }
1226 
1227 #if EFSYS_OPT_RX_SCALE
1228 static int
1229 sfc_flow_parse_rss(struct sfc_adapter *sa,
1230 		   const struct rte_flow_action_rss *rss,
1231 		   struct rte_flow *flow)
1232 {
1233 	unsigned int rxq_sw_index;
1234 	struct sfc_rxq *rxq;
1235 	unsigned int rxq_hw_index_min;
1236 	unsigned int rxq_hw_index_max;
1237 	const uint8_t *rss_key;
1238 	struct sfc_flow_rss *sfc_rss_conf = &flow->rss_conf;
1239 	unsigned int i;
1240 
1241 	if (rss->queue_num == 0)
1242 		return -EINVAL;
1243 
1244 	rxq_sw_index = sa->rxq_count - 1;
1245 	rxq = sa->rxq_info[rxq_sw_index].rxq;
1246 	rxq_hw_index_min = rxq->hw_index;
1247 	rxq_hw_index_max = 0;
1248 
1249 	for (i = 0; i < rss->queue_num; ++i) {
1250 		rxq_sw_index = rss->queue[i];
1251 
1252 		if (rxq_sw_index >= sa->rxq_count)
1253 			return -EINVAL;
1254 
1255 		rxq = sa->rxq_info[rxq_sw_index].rxq;
1256 
1257 		if (rxq->hw_index < rxq_hw_index_min)
1258 			rxq_hw_index_min = rxq->hw_index;
1259 
1260 		if (rxq->hw_index > rxq_hw_index_max)
1261 			rxq_hw_index_max = rxq->hw_index;
1262 	}
1263 
1264 	if ((rss->types & ~SFC_RSS_OFFLOADS) != 0)
1265 		return -EINVAL;
1266 
1267 	if (rss->key_len) {
1268 		if (rss->key_len != sizeof(sa->rss_key))
1269 			return -EINVAL;
1270 
1271 		rss_key = rss->key;
1272 	} else {
1273 		rss_key = sa->rss_key;
1274 	}
1275 
1276 	flow->rss = B_TRUE;
1277 
1278 	sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
1279 	sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
1280 	sfc_rss_conf->rss_hash_types = sfc_rte_to_efx_hash_type(rss->types);
1281 	rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(sa->rss_key));
1282 
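	/*
	 * Scale table entries are stored relative to the minimum hardware
	 * RxQ index; the base queue is supplied via efs_dmaq_id when the
	 * filters are inserted (see sfc_flow_filter_insert()).
	 */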
1283 	for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
1284 		unsigned int rxq_sw_index = rss->queue[i % rss->queue_num];
1285 		struct sfc_rxq *rxq = sa->rxq_info[rxq_sw_index].rxq;
1286 
1287 		sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
1288 	}
1289 
1290 	return 0;
1291 }
1292 #endif /* EFSYS_OPT_RX_SCALE */
1293 
1294 static int
1295 sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
1296 		    unsigned int filters_count)
1297 {
1298 	unsigned int i;
1299 	int ret = 0;
1300 
1301 	for (i = 0; i < filters_count; i++) {
1302 		int rc;
1303 
1304 		rc = efx_filter_remove(sa->nic, &spec->filters[i]);
1305 		if (ret == 0 && rc != 0) {
1306 			sfc_err(sa, "failed to remove filter specification "
1307 				"(rc = %d)", rc);
1308 			ret = rc;
1309 		}
1310 	}
1311 
1312 	return ret;
1313 }
1314 
1315 static int
1316 sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1317 {
1318 	unsigned int i;
1319 	int rc = 0;
1320 
1321 	for (i = 0; i < spec->count; i++) {
1322 		rc = efx_filter_insert(sa->nic, &spec->filters[i]);
1323 		if (rc != 0) {
1324 			sfc_flow_spec_flush(sa, spec, i);
1325 			break;
1326 		}
1327 	}
1328 
1329 	return rc;
1330 }
1331 
1332 static int
1333 sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1334 {
1335 	return sfc_flow_spec_flush(sa, spec, spec->count);
1336 }
1337 
1338 static int
1339 sfc_flow_filter_insert(struct sfc_adapter *sa,
1340 		       struct rte_flow *flow)
1341 {
1342 #if EFSYS_OPT_RX_SCALE
1343 	struct sfc_flow_rss *rss = &flow->rss_conf;
1344 	uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
1345 	unsigned int i;
1346 	int rc = 0;
1347 
1348 	if (flow->rss) {
1349 		unsigned int rss_spread = MIN(rss->rxq_hw_index_max -
1350 					      rss->rxq_hw_index_min + 1,
1351 					      EFX_MAXRSS);
1352 
1353 		rc = efx_rx_scale_context_alloc(sa->nic,
1354 						EFX_RX_SCALE_EXCLUSIVE,
1355 						rss_spread,
1356 						&efs_rss_context);
1357 		if (rc != 0)
1358 			goto fail_scale_context_alloc;
1359 
1360 		rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
1361 					   EFX_RX_HASHALG_TOEPLITZ,
1362 					   rss->rss_hash_types, B_TRUE);
1363 		if (rc != 0)
1364 			goto fail_scale_mode_set;
1365 
1366 		rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
1367 					  rss->rss_key,
1368 					  sizeof(sa->rss_key));
1369 		if (rc != 0)
1370 			goto fail_scale_key_set;
1371 
1372 		/*
1373 		 * At this point, fully elaborated filter specifications
1374 		 * have been produced from the template. To make sure that
1375 		 * RSS behaviour is consistent between them, set the same
1376 		 * RSS context value everywhere.
1377 		 */
1378 		for (i = 0; i < flow->spec.count; i++) {
1379 			efx_filter_spec_t *spec = &flow->spec.filters[i];
1380 
1381 			spec->efs_rss_context = efs_rss_context;
1382 			spec->efs_dmaq_id = rss->rxq_hw_index_min;
1383 			spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
1384 		}
1385 	}
1386 
1387 	rc = sfc_flow_spec_insert(sa, &flow->spec);
1388 	if (rc != 0)
1389 		goto fail_filter_insert;
1390 
1391 	if (flow->rss) {
1392 		/*
1393 		 * The scale table is set after filter insertion because
1394 		 * the table entries are relative to the base RxQ ID,
1395 		 * which is submitted to the HW by means of inserting
1396 		 * a filter. By the time of this request, the HW knows
1397 		 * all the information needed to verify the table
1398 		 * entries, so the operation will succeed.
1399 		 */
1400 		rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
1401 					  rss->rss_tbl, RTE_DIM(rss->rss_tbl));
1402 		if (rc != 0)
1403 			goto fail_scale_tbl_set;
1404 	}
1405 
1406 	return 0;
1407 
1408 fail_scale_tbl_set:
1409 	sfc_flow_spec_remove(sa, &flow->spec);
1410 
1411 fail_filter_insert:
1412 fail_scale_key_set:
1413 fail_scale_mode_set:
1414 	if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT)
1415 		efx_rx_scale_context_free(sa->nic, efs_rss_context);
1416 
1417 fail_scale_context_alloc:
1418 	return rc;
1419 #else /* !EFSYS_OPT_RX_SCALE */
1420 	return sfc_flow_spec_insert(sa, &flow->spec);
1421 #endif /* EFSYS_OPT_RX_SCALE */
1422 }
1423 
1424 static int
1425 sfc_flow_filter_remove(struct sfc_adapter *sa,
1426 		       struct rte_flow *flow)
1427 {
1428 	int rc = 0;
1429 
1430 	rc = sfc_flow_spec_remove(sa, &flow->spec);
1431 	if (rc != 0)
1432 		return rc;
1433 
1434 #if EFSYS_OPT_RX_SCALE
1435 	if (flow->rss) {
1436 		/*
1437 		 * All specifications for a given flow rule have the same RSS
1438 		 * context, so that RSS context value is taken from the first
1439 		 * filter specification
1440 		 */
1441 		efx_filter_spec_t *spec = &flow->spec.filters[0];
1442 
1443 		rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
1444 	}
1445 #endif /* EFSYS_OPT_RX_SCALE */
1446 
1447 	return rc;
1448 }
1449 
1450 static int
1451 sfc_flow_parse_actions(struct sfc_adapter *sa,
1452 		       const struct rte_flow_action actions[],
1453 		       struct rte_flow *flow,
1454 		       struct rte_flow_error *error)
1455 {
1456 	int rc;
1457 	boolean_t is_specified = B_FALSE;
1458 
1459 	if (actions == NULL) {
1460 		rte_flow_error_set(error, EINVAL,
1461 				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
1462 				   "NULL actions");
1463 		return -rte_errno;
1464 	}
1465 
1466 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1467 		/* This one may appear anywhere multiple times. */
1468 		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
1469 			continue;
1470 		/* Fate-deciding actions may appear exactly once. */
1471 		if (is_specified) {
1472 			rte_flow_error_set
1473 				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
1474 				 actions,
1475 				 "Cannot combine several fate-deciding actions, "
1476 				 "choose between QUEUE, RSS or DROP");
1477 			return -rte_errno;
1478 		}
1479 		switch (actions->type) {
1480 		case RTE_FLOW_ACTION_TYPE_QUEUE:
1481 			rc = sfc_flow_parse_queue(sa, actions->conf, flow);
1482 			if (rc != 0) {
1483 				rte_flow_error_set(error, EINVAL,
1484 					RTE_FLOW_ERROR_TYPE_ACTION, actions,
1485 					"Bad QUEUE action");
1486 				return -rte_errno;
1487 			}
1488 
1489 			is_specified = B_TRUE;
1490 			break;
1491 
1492 #if EFSYS_OPT_RX_SCALE
1493 		case RTE_FLOW_ACTION_TYPE_RSS:
1494 			rc = sfc_flow_parse_rss(sa, actions->conf, flow);
1495 			if (rc != 0) {
1496 				rte_flow_error_set(error, rc,
1497 					RTE_FLOW_ERROR_TYPE_ACTION, actions,
1498 					"Bad RSS action");
1499 				return -rte_errno;
1500 			}
1501 
1502 			is_specified = B_TRUE;
1503 			break;
1504 #endif /* EFSYS_OPT_RX_SCALE */
1505 
1506 		case RTE_FLOW_ACTION_TYPE_DROP:
1507 			flow->spec.template.efs_dmaq_id =
1508 				EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1509 
1510 			is_specified = B_TRUE;
1511 			break;
1512 
1513 		default:
1514 			rte_flow_error_set(error, ENOTSUP,
1515 					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
1516 					   "Action is not supported");
1517 			return -rte_errno;
1518 		}
1519 	}
1520 
1521 	/* When fate is unknown, drop traffic. */
1522 	if (!is_specified) {
1523 		flow->spec.template.efs_dmaq_id =
1524 			EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1525 	}
1526 
1527 	return 0;
1528 }
1529 
1530 /**
1531  * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
1532  * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the copied
1533  * specifications: one flag per contiguous block of copies.
1534  *
1535  * @param spec[in, out]
1536  *   SFC flow specification to update.
1537  * @param filters_count_for_one_val[in]
1538  *   How many specifications should have the same match flag; this is the
1539  *   number of specifications before copying.
1540  * @param error[out]
1541  *   Perform verbose error reporting if not NULL.
1542  */
1543 static int
1544 sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
1545 			       unsigned int filters_count_for_one_val,
1546 			       struct rte_flow_error *error)
1547 {
1548 	unsigned int i;
1549 	static const efx_filter_match_flags_t vals[] = {
1550 		EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
1551 		EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
1552 	};
1553 
1554 	if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
1555 		rte_flow_error_set(error, EINVAL,
1556 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1557 			"Number of specifications is incorrect while copying "
1558 			"by unknown destination flags");
1559 		return -rte_errno;
1560 	}
1561 
1562 	for (i = 0; i < spec->count; i++) {
1563 		/* The check above ensures that divisor can't be zero here */
1564 		spec->filters[i].efs_match_flags |=
1565 			vals[i / filters_count_for_one_val];
1566 	}
1567 
1568 	return 0;
1569 }
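/*
 * Worked example (illustrative): with spec->count == 4 and
 * filters_count_for_one_val == 2, filters 0 and 1 receive
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST and filters 2 and 3 receive
 * EFX_FILTER_MATCH_UNKNOWN_MCAST_DST.
 */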
1570 
1571 /**
1572  * Check that the following condition is met:
1573  * - the list of supported filters has a filter
1574  *   with the EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of
1575  *   EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also
1576  *   be inserted.
1577  *
1578  * @param match[in]
1579  *   The match flags of filter.
1580  * @param spec[in]
1581  *   Specification to be supplemented.
1582  * @param filter[in]
1583  *   SFC filter with list of supported filters.
1584  */
1585 static boolean_t
1586 sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
1587 				 __rte_unused efx_filter_spec_t *spec,
1588 				 struct sfc_filter *filter)
1589 {
1590 	unsigned int i;
1591 	efx_filter_match_flags_t match_mcast_dst;
1592 
1593 	match_mcast_dst =
1594 		(match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
1595 		EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
1596 	for (i = 0; i < filter->supported_match_num; i++) {
1597 		if (match_mcast_dst == filter->supported_match[i])
1598 			return B_TRUE;
1599 	}
1600 
1601 	return B_FALSE;
1602 }
1603 
1604 /**
1605  * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and the EFX_ETHER_TYPE_IPV4
1606  * or EFX_ETHER_TYPE_IPV6 value of the corresponding field in the copied
1607  * specifications: one value per contiguous block of copies.
1608  *
1609  * @param spec[in, out]
1610  *   SFC flow specification to update.
1611  * @param filters_count_for_one_val[in]
1612  *   How many specifications should have the same EtherType value; this is
1613  *   the number of specifications before copying.
1614  * @param error[out]
1615  *   Perform verbose error reporting if not NULL.
1616  */
1617 static int
1618 sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
1619 			unsigned int filters_count_for_one_val,
1620 			struct rte_flow_error *error)
1621 {
1622 	unsigned int i;
1623 	static const uint16_t vals[] = {
1624 		EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
1625 	};
1626 
1627 	if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
1628 		rte_flow_error_set(error, EINVAL,
1629 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1630 			"Number of specifications is incorrect "
1631 			"while copying by Ethertype");
1632 		return -rte_errno;
1633 	}
1634 
1635 	for (i = 0; i < spec->count; i++) {
1636 		spec->filters[i].efs_match_flags |=
1637 			EFX_FILTER_MATCH_ETHER_TYPE;
1638 
1639 		/*
1640 		 * The check above ensures that
1641 		 * filters_count_for_one_val is not 0
1642 		 */
1643 		spec->filters[i].efs_ether_type =
1644 			vals[i / filters_count_for_one_val];
1645 	}
1646 
1647 	return 0;
1648 }
1649 
1650 /**
1651  * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
1652  * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the copied
1653  * specifications: one flag per contiguous block of copies.
1654  *
1655  * @param spec[in, out]
1656  *   SFC flow specification to update.
1657  * @param filters_count_for_one_val[in]
1658  *   How many specifications should have the same match flag; this is the
1659  *   number of specifications before copying.
1660  * @param error[out]
1661  *   Perform verbose error reporting if not NULL.
1662  */
1663 static int
1664 sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
1665 				    unsigned int filters_count_for_one_val,
1666 				    struct rte_flow_error *error)
1667 {
1668 	unsigned int i;
1669 	static const efx_filter_match_flags_t vals[] = {
1670 		EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
1671 		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
1672 	};
1673 
1674 	if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
1675 		rte_flow_error_set(error, EINVAL,
1676 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1677 			"Number of specifications is incorrect while copying "
1678 			"by inner frame unknown destination flags");
1679 		return -rte_errno;
1680 	}
1681 
1682 	for (i = 0; i < spec->count; i++) {
1683 		/* The check above ensures that divisor can't be zero here */
1684 		spec->filters[i].efs_match_flags |=
1685 			vals[i / filters_count_for_one_val];
1686 	}
1687 
1688 	return 0;
1689 }
1690 
1691 /**
1692  * Check that the following conditions are met:
1693  * - the specification corresponds to a filter for encapsulated traffic
1694  * - the list of supported filters has a filter
1695  *   with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
1696  *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
1697  *   be inserted.
1698  *
1699  * @param match[in]
1700  *   The match flags of filter.
1701  * @param spec[in]
1702  *   Specification to be supplemented.
1703  * @param filter[in]
1704  *   SFC filter with list of supported filters.
1705  */
1706 static boolean_t
1707 sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
1708 				      efx_filter_spec_t *spec,
1709 				      struct sfc_filter *filter)
1710 {
1711 	unsigned int i;
1712 	efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
1713 	efx_filter_match_flags_t match_mcast_dst;
1714 
1715 	if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
1716 		return B_FALSE;
1717 
1718 	match_mcast_dst =
1719 		(match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
1720 		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
1721 	for (i = 0; i < filter->supported_match_num; i++) {
1722 		if (match_mcast_dst == filter->supported_match[i])
1723 			return B_TRUE;
1724 	}
1725 
1726 	return B_FALSE;
1727 }
1728 
1729 /*
1730  * Match flags that can be automatically added to filters.
1731  * Selecting the last minimum when searching for the copy flag ensures that the
1732  * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has a higher priority than
1733  * EFX_FILTER_MATCH_ETHER_TYPE. This is because the filter
1734  * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of supported
1735  * filters.
1736  */
1737 static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
1738 	{
1739 		.flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
1740 		.vals_count = 2,
1741 		.set_vals = sfc_flow_set_unknown_dst_flags,
1742 		.spec_check = sfc_flow_check_unknown_dst_flags,
1743 	},
1744 	{
1745 		.flag = EFX_FILTER_MATCH_ETHER_TYPE,
1746 		.vals_count = 2,
1747 		.set_vals = sfc_flow_set_ethertypes,
1748 		.spec_check = NULL,
1749 	},
1750 	{
1751 		.flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
1752 		.vals_count = 2,
1753 		.set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
1754 		.spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
1755 	},
1756 };
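/*
 * For example (illustrative): if both EFX_FILTER_MATCH_ETHER_TYPE and
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST are missing from the template, the
 * multiplier is 2 * 2 and the template expands into four specifications.
 */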
1757 
1758 /* Get item from array sfc_flow_copy_flags */
1759 static const struct sfc_flow_copy_flag *
1760 sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
1761 {
1762 	unsigned int i;
1763 
1764 	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
1765 		if (sfc_flow_copy_flags[i].flag == flag)
1766 			return &sfc_flow_copy_flags[i];
1767 	}
1768 
1769 	return NULL;
1770 }
1771 
1772 /**
1773  * Make copies of the specifications, set match flag and values
1774  * of the field that corresponds to it.
1775  *
1776  * @param spec[in, out]
1777  *   SFC flow specification to update.
1778  * @param flag[in]
1779  *   The match flag to add.
1780  * @param error[out]
1781  *   Perform verbose error reporting if not NULL.
1782  */
1783 static int
1784 sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
1785 			     efx_filter_match_flags_t flag,
1786 			     struct rte_flow_error *error)
1787 {
1788 	unsigned int i;
1789 	unsigned int new_filters_count;
1790 	unsigned int filters_count_for_one_val;
1791 	const struct sfc_flow_copy_flag *copy_flag;
1792 	int rc;
1793 
1794 	copy_flag = sfc_flow_get_copy_flag(flag);
1795 	if (copy_flag == NULL) {
1796 		rte_flow_error_set(error, ENOTSUP,
1797 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1798 				   "Unsupported spec field for copying");
1799 		return -rte_errno;
1800 	}
1801 
1802 	new_filters_count = spec->count * copy_flag->vals_count;
1803 	if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
1804 		rte_flow_error_set(error, EINVAL,
1805 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1806 			"Too many EFX specifications in the flow rule");
1807 		return -rte_errno;
1808 	}
1809 
1810 	/* Copy the filter specifications */
1811 	for (i = spec->count; i < new_filters_count; i++)
1812 		spec->filters[i] = spec->filters[i - spec->count];
1813 
1814 	filters_count_for_one_val = spec->count;
1815 	spec->count = new_filters_count;
1816 
1817 	rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
1818 	if (rc != 0)
1819 		return rc;
1820 
1821 	return 0;
1822 }
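/*
 * E.g. with spec->count == 2 and vals_count == 2 the copy loop above
 * yields filters[2] = filters[0] and filters[3] = filters[1]; set_vals()
 * then assigns one field value to filters[0..1] and the other to
 * filters[2..3], filters_count_for_one_val being 2.
 */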
1823 
1824 /**
1825  * Check that the given set of match flags missing in the original filter spec
1826  * could be covered by adding spec copies which specify the corresponding
1827  * flags and packet field values to match.
1828  *
1829  * @param miss_flags[in]
1830  *   Flags required by the supported filter but missing from the spec.
1831  * @param spec[in]
1832  *   Specification to be supplemented.
1833  * @param filter[in]
1834  *   SFC filter.
1835  *
1836  * @return
1837  *   Number of specifications after copying, or 0 if the flags cannot be added.
1838  */
1839 static unsigned int
1840 sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
1841 			     efx_filter_spec_t *spec,
1842 			     struct sfc_filter *filter)
1843 {
1844 	unsigned int i;
1845 	efx_filter_match_flags_t copy_flags = 0;
1846 	efx_filter_match_flags_t flag;
1847 	efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
1848 	sfc_flow_spec_check *check;
1849 	unsigned int multiplier = 1;
1850 
1851 	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
1852 		flag = sfc_flow_copy_flags[i].flag;
1853 		check = sfc_flow_copy_flags[i].spec_check;
1854 		if ((flag & miss_flags) == flag) {
1855 			if (check != NULL && (!check(match, spec, filter)))
1856 				continue;
1857 
1858 			copy_flags |= flag;
1859 			multiplier *= sfc_flow_copy_flags[i].vals_count;
1860 		}
1861 	}
1862 
1863 	if (copy_flags == miss_flags)
1864 		return multiplier;
1865 
1866 	return 0;
1867 }
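/*
 * E.g. if miss_flags is UNKNOWN_UCAST_DST | ETHER_TYPE and both entries
 * pass their spec_check, copy_flags becomes equal to miss_flags and the
 * result is 2 * 2 = 4. If any missing flag has no entry in
 * sfc_flow_copy_flags, or its check fails, 0 is returned.
 */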
1868 
1869 /**
1870  * Attempt to supplement the specification template with a minimally
1871  * supported set of match flags. To do this, the specifications are
1872  * copied and filled in with the values of the fields that correspond
1873  * to the missing flags.
1874  * The necessary and sufficient set of filters is built from the
1875  * smallest number of copies that covers the minimally required set
1876  * of flags.
1877  *
1878  * @param sa[in]
1879  *   SFC adapter.
1880  * @param spec[in, out]
1881  *   SFC flow specification to update.
1882  * @param error[out]
1883  *   Perform verbose error reporting if not NULL.
1884  */
1885 static int
1886 sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
1887 			       struct sfc_flow_spec *spec,
1888 			       struct rte_flow_error *error)
1889 {
1890 	struct sfc_filter *filter = &sa->filter;
1891 	efx_filter_match_flags_t miss_flags;
1892 	efx_filter_match_flags_t min_miss_flags = 0;
1893 	efx_filter_match_flags_t match;
1894 	unsigned int min_multiplier = UINT_MAX;
1895 	unsigned int multiplier;
1896 	unsigned int i;
1897 	int rc;
1898 
1899 	match = spec->template.efs_match_flags;
1900 	for (i = 0; i < filter->supported_match_num; i++) {
1901 		if ((match & filter->supported_match[i]) == match) {
1902 			miss_flags = filter->supported_match[i] & (~match);
1903 			multiplier = sfc_flow_check_missing_flags(miss_flags,
1904 				&spec->template, filter);
1905 			if (multiplier > 0) {
1906 				if (multiplier <= min_multiplier) {
1907 					min_multiplier = multiplier;
1908 					min_miss_flags = miss_flags;
1909 				}
1910 			}
1911 		}
1912 	}
1913 
1914 	if (min_multiplier == UINT_MAX) {
1915 		rte_flow_error_set(error, ENOTSUP,
1916 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1917 				   "Flow rule pattern is not supported");
1918 		return -rte_errno;
1919 	}
1920 
1921 	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
1922 		efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;
1923 
1924 		if ((flag & min_miss_flags) == flag) {
1925 			rc = sfc_flow_spec_add_match_flag(spec, flag, error);
1926 			if (rc != 0)
1927 				return rc;
1928 		}
1929 	}
1930 
1931 	return 0;
1932 }
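/*
 * E.g. if the template could be completed either by adding ETHER_TYPE
 * (2 copies) or ETHER_TYPE | UNKNOWN_UCAST_DST (4 copies), the cheaper
 * 2-copy variant wins; among variants of equal cost, the one whose
 * supported filter is listed later is preferred (see the comment above
 * sfc_flow_copy_flags).
 */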
1933 
1934 /**
1935  * Check whether a set of match flags matches the given filter pattern,
1936  * optionally extended with OUTER_VID alone or with both OUTER_VID and
1937  * INNER_VID flags.
1938  *
1939  * @param match_flags[in]
1940  *   Set of match flags.
1941  * @param flags_pattern[in]
1942  *   Pattern of filter match flags.
1943  */
1944 static boolean_t
1945 sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
1946 			    efx_filter_match_flags_t flags_pattern)
1947 {
1948 	if ((match_flags & flags_pattern) != flags_pattern)
1949 		return B_FALSE;
1950 
1951 	switch (match_flags & ~flags_pattern) {
1952 	case 0:
1953 	case EFX_FILTER_MATCH_OUTER_VID:
1954 	case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
1955 		return B_TRUE;
1956 	default:
1957 		return B_FALSE;
1958 	}
1959 }
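/*
 * E.g. (EFX_FILTER_MATCH_ prefixes omitted):
 * sfc_flow_is_match_with_vids(ETHER_TYPE | OUTER_VID, ETHER_TYPE) is
 * B_TRUE, whereas any extra non-VID bit, such as LOC_MAC, makes the
 * result B_FALSE.
 */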
1960 
1961 /**
1962  * Check whether the spec maps to a hardware filter which is known to be
1963  * ineffective despite being valid.
1964  *
1965  * @param spec[in]
1966  *   SFC flow specification.
1967  */
1968 static boolean_t
1969 sfc_flow_is_match_flags_exception(struct sfc_flow_spec *spec)
1970 {
1971 	unsigned int i;
1972 	uint16_t ether_type;
1973 	uint8_t ip_proto;
1974 	efx_filter_match_flags_t match_flags;
1975 
1976 	for (i = 0; i < spec->count; i++) {
1977 		match_flags = spec->filters[i].efs_match_flags;
1978 
1979 		if (sfc_flow_is_match_with_vids(match_flags,
1980 						EFX_FILTER_MATCH_ETHER_TYPE) ||
1981 		    sfc_flow_is_match_with_vids(match_flags,
1982 						EFX_FILTER_MATCH_ETHER_TYPE |
1983 						EFX_FILTER_MATCH_LOC_MAC)) {
1984 			ether_type = spec->filters[i].efs_ether_type;
1985 			if (ether_type == EFX_ETHER_TYPE_IPV4 ||
1986 			    ether_type == EFX_ETHER_TYPE_IPV6)
1987 				return B_TRUE;
1988 		} else if (sfc_flow_is_match_with_vids(match_flags,
1989 				EFX_FILTER_MATCH_ETHER_TYPE |
1990 				EFX_FILTER_MATCH_IP_PROTO) ||
1991 			   sfc_flow_is_match_with_vids(match_flags,
1992 				EFX_FILTER_MATCH_ETHER_TYPE |
1993 				EFX_FILTER_MATCH_IP_PROTO |
1994 				EFX_FILTER_MATCH_LOC_MAC)) {
1995 			ip_proto = spec->filters[i].efs_ip_proto;
1996 			if (ip_proto == EFX_IPPROTO_TCP ||
1997 			    ip_proto == EFX_IPPROTO_UDP)
1998 				return B_TRUE;
1999 		}
2000 	}
2001 
2002 	return B_FALSE;
2003 }
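/*
 * In other words, filters equivalent to a bare IPv4/IPv6 EtherType match
 * or to a TCP/UDP IP-protocol match (optionally with VIDs and local MAC)
 * are reported as exceptions: such rules are valid but known not to
 * behave as a user would expect on this hardware, so they are rejected
 * rather than silently accepted.
 */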
2004 
2005 static int
2006 sfc_flow_validate_match_flags(struct sfc_adapter *sa,
2007 			      struct rte_flow *flow,
2008 			      struct rte_flow_error *error)
2009 {
2010 	efx_filter_spec_t *spec_tmpl = &flow->spec.template;
2011 	efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
2012 	int rc;
2013 
2014 	/* Initialize the first filter spec with template */
2015 	flow->spec.filters[0] = *spec_tmpl;
2016 	flow->spec.count = 1;
2017 
2018 	if (!sfc_filter_is_match_supported(sa, match_flags)) {
2019 		rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
2020 		if (rc != 0)
2021 			return rc;
2022 	}
2023 
2024 	if (sfc_flow_is_match_flags_exception(&flow->spec)) {
2025 		rte_flow_error_set(error, ENOTSUP,
2026 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2027 			"The flow rule pattern is unsupported");
2028 		return -rte_errno;
2029 	}
2030 
2031 	return 0;
2032 }
2033 
2034 static int
2035 sfc_flow_parse(struct rte_eth_dev *dev,
2036 	       const struct rte_flow_attr *attr,
2037 	       const struct rte_flow_item pattern[],
2038 	       const struct rte_flow_action actions[],
2039 	       struct rte_flow *flow,
2040 	       struct rte_flow_error *error)
2041 {
2042 	struct sfc_adapter *sa = dev->data->dev_private;
2043 	int rc;
2044 
2045 	rc = sfc_flow_parse_attr(attr, flow, error);
2046 	if (rc != 0)
2047 		goto fail_bad_value;
2048 
2049 	rc = sfc_flow_parse_pattern(pattern, flow, error);
2050 	if (rc != 0)
2051 		goto fail_bad_value;
2052 
2053 	rc = sfc_flow_parse_actions(sa, actions, flow, error);
2054 	if (rc != 0)
2055 		goto fail_bad_value;
2056 
2057 	rc = sfc_flow_validate_match_flags(sa, flow, error);
2058 	if (rc != 0)
2059 		goto fail_bad_value;
2060 
2061 	return 0;
2062 
2063 fail_bad_value:
2064 	return rc;
2065 }
2066 
2067 static int
2068 sfc_flow_validate(struct rte_eth_dev *dev,
2069 		  const struct rte_flow_attr *attr,
2070 		  const struct rte_flow_item pattern[],
2071 		  const struct rte_flow_action actions[],
2072 		  struct rte_flow_error *error)
2073 {
2074 	struct rte_flow flow;
2075 
2076 	memset(&flow, 0, sizeof(flow));
2077 
2078 	return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
2079 }
2080 
2081 static struct rte_flow *
2082 sfc_flow_create(struct rte_eth_dev *dev,
2083 		const struct rte_flow_attr *attr,
2084 		const struct rte_flow_item pattern[],
2085 		const struct rte_flow_action actions[],
2086 		struct rte_flow_error *error)
2087 {
2088 	struct sfc_adapter *sa = dev->data->dev_private;
2089 	struct rte_flow *flow = NULL;
2090 	int rc;
2091 
2092 	flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
2093 	if (flow == NULL) {
2094 		rte_flow_error_set(error, ENOMEM,
2095 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2096 				   "Failed to allocate memory");
2097 		goto fail_no_mem;
2098 	}
2099 
2100 	sfc_adapter_lock(sa);
2101 
2102 	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2103 	if (rc != 0)
2104 		goto fail_bad_value;
2105 
2106 	TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);
2107 
2108 	if (sa->state == SFC_ADAPTER_STARTED) {
2109 		rc = sfc_flow_filter_insert(sa, flow);
2110 		if (rc != 0) {
2111 			rte_flow_error_set(error, rc,
2112 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2113 				"Failed to insert filter");
2114 			goto fail_filter_insert;
2115 		}
2116 	}
2117 
2118 	sfc_adapter_unlock(sa);
2119 
2120 	return flow;
2121 
2122 fail_filter_insert:
2123 	TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
2124 
2125 fail_bad_value:
2126 	rte_free(flow);
2127 	sfc_adapter_unlock(sa);
2128 
2129 fail_no_mem:
2130 	return NULL;
2131 }
2132 
2133 static int
2134 sfc_flow_remove(struct sfc_adapter *sa,
2135 		struct rte_flow *flow,
2136 		struct rte_flow_error *error)
2137 {
2138 	int rc = 0;
2139 
2140 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2141 
2142 	if (sa->state == SFC_ADAPTER_STARTED) {
2143 		rc = sfc_flow_filter_remove(sa, flow);
2144 		if (rc != 0)
2145 			rte_flow_error_set(error, rc,
2146 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2147 				"Failed to destroy flow rule");
2148 	}
2149 
2150 	TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
2151 	rte_free(flow);
2152 
2153 	return rc;
2154 }
2155 
2156 static int
2157 sfc_flow_destroy(struct rte_eth_dev *dev,
2158 		 struct rte_flow *flow,
2159 		 struct rte_flow_error *error)
2160 {
2161 	struct sfc_adapter *sa = dev->data->dev_private;
2162 	struct rte_flow *flow_ptr;
2163 	int rc = EINVAL;
2164 
2165 	sfc_adapter_lock(sa);
2166 
2167 	TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
2168 		if (flow_ptr == flow)
2169 			rc = 0;
2170 	}
2171 	if (rc != 0) {
2172 		rte_flow_error_set(error, rc,
2173 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2174 				   "Failed to find flow rule to destroy");
2175 		goto fail_bad_value;
2176 	}
2177 
2178 	rc = sfc_flow_remove(sa, flow, error);
2179 
2180 fail_bad_value:
2181 	sfc_adapter_unlock(sa);
2182 
2183 	return -rc;
2184 }
2185 
2186 static int
2187 sfc_flow_flush(struct rte_eth_dev *dev,
2188 	       struct rte_flow_error *error)
2189 {
2190 	struct sfc_adapter *sa = dev->data->dev_private;
2191 	struct rte_flow *flow;
2192 	int rc = 0;
2193 	int ret = 0;
2194 
2195 	sfc_adapter_lock(sa);
2196 
2197 	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
2198 		rc = sfc_flow_remove(sa, flow, error);
2199 		if (rc != 0)
2200 			ret = rc;
2201 	}
2202 
2203 	sfc_adapter_unlock(sa);
2204 
2205 	return -ret;
2206 }
2207 
2208 static int
2209 sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
2210 		 struct rte_flow_error *error)
2211 {
2212 	struct sfc_adapter *sa = dev->data->dev_private;
2213 	struct sfc_port *port = &sa->port;
2214 	int ret = 0;
2215 
2216 	sfc_adapter_lock(sa);
2217 	if (sa->state != SFC_ADAPTER_INITIALIZED) {
2218 		rte_flow_error_set(error, EBUSY,
2219 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2220 				   NULL, "please close the port first");
2221 		ret = -rte_errno;
2222 	} else {
2223 		port->isolated = (enable) ? B_TRUE : B_FALSE;
2224 	}
2225 	sfc_adapter_unlock(sa);
2226 
2227 	return ret;
2228 }
2229 
2230 const struct rte_flow_ops sfc_flow_ops = {
2231 	.validate = sfc_flow_validate,
2232 	.create = sfc_flow_create,
2233 	.destroy = sfc_flow_destroy,
2234 	.flush = sfc_flow_flush,
2235 	.query = NULL,
2236 	.isolate = sfc_flow_isolate,
2237 };
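/*
 * Usage sketch (application side; port_id is assumed to be a valid,
 * started sfc port and error handling is omitted). The generic rte_flow
 * API dispatches to the callbacks above:
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item_eth eth_spec = { .type = RTE_BE16(0x0800) };
 *   struct rte_flow_item_eth eth_mask = { .type = RTE_BE16(0xffff) };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *             .spec = &eth_spec, .mask = &eth_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_queue queue = { .index = 0 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_error flow_error;
 *   struct rte_flow *handle = rte_flow_create(port_id, &attr, pattern,
 *                                             actions, &flow_error);
 *
 * rte_flow_validate() with the same arguments exercises sfc_flow_validate()
 * without inserting anything.
 */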
2238 
2239 void
2240 sfc_flow_init(struct sfc_adapter *sa)
2241 {
2242 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2243 
2244 	TAILQ_INIT(&sa->filter.flow_list);
2245 }
2246 
2247 void
2248 sfc_flow_fini(struct sfc_adapter *sa)
2249 {
2250 	struct rte_flow *flow;
2251 
2252 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2253 
2254 	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
2255 		TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
2256 		rte_free(flow);
2257 	}
2258 }
2259 
2260 void
2261 sfc_flow_stop(struct sfc_adapter *sa)
2262 {
2263 	struct rte_flow *flow;
2264 
2265 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2266 
2267 	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
2268 		sfc_flow_filter_remove(sa, flow);
2269 }
2270 
2271 int
2272 sfc_flow_start(struct sfc_adapter *sa)
2273 {
2274 	struct rte_flow *flow;
2275 	int rc = 0;
2276 
2277 	sfc_log_init(sa, "entry");
2278 
2279 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2280 
2281 	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
2282 		rc = sfc_flow_filter_insert(sa, flow);
2283 		if (rc != 0)
2284 			goto fail_bad_flow;
2285 	}
2286 
2287 	sfc_log_init(sa, "done");
2288 
2289 fail_bad_flow:
2290 	return rc;
2291 }
2292