/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2017-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev_driver.h>
#include <rte_eth_ctrl.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"

/*
 * At the moment, the flow API is implemented in such a way that each
 * flow rule is converted to one or more hardware filters.
 * All elements of a flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that describes the hardware filter.
 * If some field required by the hardware is unset in the flow rule,
 * a number of filter copies are created to cover all possible values
 * of such a field.
 */

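/*
 * For example (an illustrative case, not an exhaustive description):
 * if the hardware requires a match on the destination MAC address but
 * the rule leaves it unspecified, the template is duplicated into an
 * unknown-unicast-destination copy and an unknown-multicast-destination
 * copy, so that together the copies match the same traffic as the
 * original rule (see sfc_flow_spec_filters_complete() below).
 */
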
enum sfc_flow_item_layers {
	SFC_FLOW_ITEM_ANY_LAYER,
	SFC_FLOW_ITEM_START_LAYER,
	SFC_FLOW_ITEM_L2,
	SFC_FLOW_ITEM_L3,
	SFC_FLOW_ITEM_L4,
};

typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
				  efx_filter_spec_t *spec,
				  struct rte_flow_error *error);

struct sfc_flow_item {
	enum rte_flow_item_type type;		/* Type of item */
	enum sfc_flow_item_layers layer;	/* Layer of item */
	enum sfc_flow_item_layers prev_layer;	/* Previous layer of item */
	sfc_flow_item_parse *parse;		/* Parsing function */
};

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
static sfc_flow_item_parse sfc_flow_parse_vxlan;
static sfc_flow_item_parse sfc_flow_parse_geneve;
static sfc_flow_item_parse sfc_flow_parse_nvgre;

typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
				     unsigned int filters_count_for_one_val,
				     struct rte_flow_error *error);

typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
					efx_filter_spec_t *spec,
					struct sfc_filter *filter);

struct sfc_flow_copy_flag {
	/* EFX filter specification match flag */
	efx_filter_match_flags_t flag;
	/* Number of values of corresponding field */
	unsigned int vals_count;
	/* Function to set values in specifications */
	sfc_flow_spec_set_vals *set_vals;
	/*
	 * Function to check that the specification is suitable
	 * for adding this match flag
	 */
	sfc_flow_spec_check *spec_check;
};

static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;

static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
	uint8_t sum = 0;
	unsigned int i;

	for (i = 0; i < size; i++)
		sum |= buf[i];

	return (sum == 0) ? B_TRUE : B_FALSE;
}

/*
 * Validate the item and prepare the spec and mask structures for parsing
 */
static int
sfc_flow_parse_init(const struct rte_flow_item *item,
		    const void **spec_ptr,
		    const void **mask_ptr,
		    const void *supp_mask,
		    const void *def_mask,
		    unsigned int size,
		    struct rte_flow_error *error)
{
	const uint8_t *spec;
	const uint8_t *mask;
	const uint8_t *last;
	uint8_t match;
	uint8_t supp;
	unsigned int i;

	if (item == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "NULL item");
		return -rte_errno;
	}

	if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Mask or last is set without spec");
		return -rte_errno;
	}

	/*
	 * If "mask" is not set, the default mask is used;
	 * but if the default mask is NULL, "mask" must be set
	 */
	if (item->mask == NULL) {
		if (def_mask == NULL) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				"Mask should be specified");
			return -rte_errno;
		}

		mask = def_mask;
	} else {
		mask = item->mask;
	}

	spec = item->spec;
	last = item->last;

	if (spec == NULL)
		goto exit;

	/*
	 * If field values in "last" are either 0 or equal to the corresponding
	 * values in "spec", they are ignored
	 */
	if (last != NULL &&
	    !sfc_flow_is_zero(last, size) &&
	    memcmp(last, spec, size) != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Ranging is not supported");
		return -rte_errno;
	}

	if (supp_mask == NULL) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Supported mask for item should be specified");
		return -rte_errno;
	}

	/* Check that mask and spec do not ask for more match than supp_mask */
	for (i = 0; i < size; i++) {
		match = spec[i] | mask[i];
		supp = ((const uint8_t *)supp_mask)[i];

		if ((match | supp) != supp) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Item's field is not supported");
			return -rte_errno;
		}
	}

exit:
	*spec_ptr = spec;
	*mask_ptr = mask;
	return 0;
}

/*
 * Protocol parsers.
 * Arbitrary masking is not supported: masks in items should be either
 * full or empty (zeroed), and may be set only for the supported fields
 * specified in supp_mask.
 */

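/*
 * For instance (an illustrative example), an ETH item destination MAC
 * mask of ff:ff:ff:ff:ff:ff (full) requests an exact match and a mask
 * of 00:00:00:00:00:00 (empty) ignores the field, while a partial mask
 * such as ff:ff:ff:00:00:00 is rejected with a "Bad mask" error (the
 * sole exception being the individual/group bit mask handled in
 * sfc_flow_parse_eth() below).
 */
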
static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
		    __rte_unused efx_filter_spec_t *efx_spec,
		    __rte_unused struct rte_flow_error *error)
{
	return 0;
}

/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Outer frame specification may only comprise
 *   source/destination addresses and Ethertype field.
 *   Inner frame specification may contain destination address only.
 *   There is support for individual/group mask as well as for empty and full.
 *   If the mask is NULL, default mask will be used. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
		   efx_filter_spec_t *efx_spec,
		   struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_eth *spec = NULL;
	const struct rte_flow_item_eth *mask = NULL;
	const struct rte_flow_item_eth supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.type = 0xffff,
	};
	const struct rte_flow_item_eth ifrm_supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
	};
	const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
		0x01, 0x00, 0x00, 0x00, 0x00, 0x00
	};
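	/*
	 * ig_mask covers only the individual/group bit (the least
	 * significant bit of the first octet of the destination MAC);
	 * matching on this bit alone selects the unknown unicast or
	 * unknown multicast destination match flags below.
	 */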
	const struct rte_flow_item_eth *supp_mask_p;
	const struct rte_flow_item_eth *def_mask_p;
	uint8_t *loc_mac = NULL;
	boolean_t is_ifrm = (efx_spec->efs_encap_type !=
		EFX_TUNNEL_PROTOCOL_NONE);

	if (is_ifrm) {
		supp_mask_p = &ifrm_supp_mask;
		def_mask_p = &ifrm_supp_mask;
		loc_mac = efx_spec->efs_ifrm_loc_mac;
	} else {
		supp_mask_p = &supp_mask;
		def_mask_p = &rte_flow_item_eth_mask;
		loc_mac = efx_spec->efs_loc_mac;
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 supp_mask_p, def_mask_p,
				 sizeof(struct rte_flow_item_eth),
				 error);
	if (rc != 0)
		return rc;

	/* If "spec" is not set, could be any Ethernet */
	if (spec == NULL)
		return 0;

	if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
		efx_spec->efs_match_flags |= is_ifrm ?
			EFX_FILTER_MATCH_IFRM_LOC_MAC :
			EFX_FILTER_MATCH_LOC_MAC;
		rte_memcpy(loc_mac, spec->dst.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (memcmp(mask->dst.addr_bytes, ig_mask,
			  EFX_MAC_ADDR_LEN) == 0) {
		if (is_unicast_ether_addr(&spec->dst))
			efx_spec->efs_match_flags |= is_ifrm ?
				EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
				EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
		else
			efx_spec->efs_match_flags |= is_ifrm ?
				EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
				EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
	} else if (!is_zero_ether_addr(&mask->dst)) {
		goto fail_bad_mask;
	}

	/*
	 * ifrm_supp_mask ensures that the source address and
	 * ethertype masks are equal to zero in inner frame,
	 * so these fields are filled in only for the outer frame
	 */
	if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
		rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (!is_zero_ether_addr(&mask->src)) {
		goto fail_bad_mask;
	}

	/*
	 * Ether type is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->type == supp_mask.type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->type);
	} else if (mask->type != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the ETH pattern item");
	return -rte_errno;
}

/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only VID field is supported.
 *   The mask cannot be NULL. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	uint16_t vid;
	const struct rte_flow_item_vlan *spec = NULL;
	const struct rte_flow_item_vlan *mask = NULL;
	const struct rte_flow_item_vlan supp_mask = {
		.tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 NULL,
				 sizeof(struct rte_flow_item_vlan),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * VID is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used.
	 * If two VLAN items are included, the first matches
	 * the outer tag and the next matches the inner tag.
	 */
	if (mask->tci == supp_mask.tci) {
		vid = rte_bswap16(spec->tci);

		if (!(efx_spec->efs_match_flags &
		      EFX_FILTER_MATCH_OUTER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
			efx_spec->efs_outer_vid = vid;
		} else if (!(efx_spec->efs_match_flags &
			     EFX_FILTER_MATCH_INNER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
			efx_spec->efs_inner_vid = vid;
		} else {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "More than two VLAN items");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN ID in TCI match is required");
		return -rte_errno;
	}

	return 0;
}

/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_ipv4 *spec = NULL;
	const struct rte_flow_item_ipv4 *mask = NULL;
	const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
	const struct rte_flow_item_ipv4 supp_mask = {
		.hdr = {
			.src_addr = 0xffffffff,
			.dst_addr = 0xffffffff,
			.next_proto_id = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv4_mask,
				 sizeof(struct rte_flow_item_ipv4),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv4 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv4;
	} else if (efx_spec->efs_ether_type != ether_type_ipv4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV4 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv4 addresses are in big-endian byte order in item and in
	 * efx_spec
	 */
	if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
		efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
	} else if (mask->hdr.src_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
		efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
	} else if (mask->hdr.dst_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
	} else if (mask->hdr.next_proto_id != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV4 pattern item");
	return -rte_errno;
}

/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   next header fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_ipv6 *spec = NULL;
	const struct rte_flow_item_ipv6 *mask = NULL;
	const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
	const struct rte_flow_item_ipv6 supp_mask = {
		.hdr = {
			.src_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.dst_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.proto = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv6_mask,
				 sizeof(struct rte_flow_item_ipv6),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv6 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv6;
	} else if (efx_spec->efs_ether_type != ether_type_ipv6) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV6 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv6 addresses are in big-endian byte order in item and in
	 * efx_spec
	 */
	if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
		   sizeof(mask->hdr.src_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
				 sizeof(spec->hdr.src_addr));
		rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
			   sizeof(efx_spec->efs_rem_host));
	} else if (!sfc_flow_is_zero(mask->hdr.src_addr,
				     sizeof(mask->hdr.src_addr))) {
		goto fail_bad_mask;
	}

	if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
		   sizeof(mask->hdr.dst_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
				 sizeof(spec->hdr.dst_addr));
		rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
			   sizeof(efx_spec->efs_loc_host));
	} else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
				     sizeof(mask->hdr.dst_addr))) {
		goto fail_bad_mask;
	}

	if (mask->hdr.proto == supp_mask.hdr.proto) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.proto;
	} else if (mask->hdr.proto != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV6 pattern item");
	return -rte_errno;
}

/**
 * Convert TCP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination ports fields
 *   are supported. If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_tcp(const struct rte_flow_item *item,
		   efx_filter_spec_t *efx_spec,
		   struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_tcp *spec = NULL;
	const struct rte_flow_item_tcp *mask = NULL;
	const struct rte_flow_item_tcp supp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_tcp_mask,
				 sizeof(struct rte_flow_item_tcp),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by TCP source and destination ports requires
	 * the appropriate IP_PROTO in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"IP proto in pattern with TCP item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * Source and destination ports are in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
	} else if (mask->hdr.src_port != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
	} else if (mask->hdr.dst_port != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the TCP pattern item");
	return -rte_errno;
}

/**
 * Convert UDP item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination ports fields
 *   are supported. If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_udp(const struct rte_flow_item *item,
		   efx_filter_spec_t *efx_spec,
		   struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_udp *spec = NULL;
	const struct rte_flow_item_udp *mask = NULL;
	const struct rte_flow_item_udp supp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_udp_mask,
				 sizeof(struct rte_flow_item_udp),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by UDP source and destination ports requires
	 * the appropriate IP_PROTO in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"IP proto in pattern with UDP item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * Source and destination ports are in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
	} else if (mask->hdr.src_port != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
	} else if (mask->hdr.dst_port != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the UDP pattern item");
	return -rte_errno;
}

/*
 * Filters for encapsulated packets match based on the EtherType and IP
 * protocol in the outer frame.
 */
static int
sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
					efx_filter_spec_t *efx_spec,
					uint8_t ip_proto,
					struct rte_flow_error *error)
{
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = ip_proto;
	} else if (efx_spec->efs_ip_proto != ip_proto) {
		switch (ip_proto) {
		case EFX_IPPROTO_UDP:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Outer IP header protocol must be UDP "
				"in VxLAN/GENEVE pattern");
			return -rte_errno;

		case EFX_IPPROTO_GRE:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Outer IP header protocol must be GRE "
				"in NVGRE pattern");
			return -rte_errno;

		default:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Only VxLAN/GENEVE/NVGRE tunneling patterns "
				"are supported");
			return -rte_errno;
		}
	}

	if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
	    efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
	    efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Outer frame EtherType in pattern with tunneling "
			"must be IPv4 or IPv6");
		return -rte_errno;
	}

	return 0;
}

static int
sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
				  const uint8_t *vni_or_vsid_val,
				  const uint8_t *vni_or_vsid_mask,
				  const struct rte_flow_item *item,
				  struct rte_flow_error *error)
{
	const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
		0xff, 0xff, 0xff
	};

	if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
		   EFX_VNI_OR_VSID_LEN) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
		rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
			   EFX_VNI_OR_VSID_LEN);
	} else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Unsupported VNI/VSID mask");
		return -rte_errno;
	}

	return 0;
}

/**
 * Convert VXLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only VXLAN network identifier field is supported.
 *   If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vxlan(const struct rte_flow_item *item,
		     efx_filter_spec_t *efx_spec,
		     struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_vxlan *spec = NULL;
	const struct rte_flow_item_vxlan *mask = NULL;
	const struct rte_flow_item_vxlan supp_mask = {
		.vni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_vxlan_mask,
				 sizeof(struct rte_flow_item_vxlan),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_UDP, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
					       mask->vni, item, error);

	return rc;
}

/**
 * Convert GENEVE item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only Virtual Network Identifier and protocol type
 *   fields are supported, and the protocol type may only be Ethernet
 *   (0x6558). If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_geneve(const struct rte_flow_item *item,
		      efx_filter_spec_t *efx_spec,
		      struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_geneve *spec = NULL;
	const struct rte_flow_item_geneve *mask = NULL;
	const struct rte_flow_item_geneve supp_mask = {
		.protocol = RTE_BE16(0xffff),
		.vni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_geneve_mask,
				 sizeof(struct rte_flow_item_geneve),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_UDP, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	if (mask->protocol == supp_mask.protocol) {
		if (spec->protocol != rte_cpu_to_be_16(ETHER_TYPE_TEB)) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"GENEVE encap. protocol must be Ethernet "
				"(0x6558) in the GENEVE pattern item");
			return -rte_errno;
		}
	} else if (mask->protocol != 0) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Unsupported mask for GENEVE encap. protocol");
		return -rte_errno;
	}

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
					       mask->vni, item, error);

	return rc;
}

/**
 * Convert NVGRE item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only virtual subnet ID field is supported.
 *   If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_nvgre(const struct rte_flow_item *item,
		     efx_filter_spec_t *efx_spec,
		     struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_nvgre *spec = NULL;
	const struct rte_flow_item_nvgre *mask = NULL;
	const struct rte_flow_item_nvgre supp_mask = {
		.tni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_nvgre_mask,
				 sizeof(struct rte_flow_item_nvgre),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_GRE, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
					       mask->tni, item, error);

	return rc;
}

static const struct sfc_flow_item sfc_flow_items[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_VOID,
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.parse = sfc_flow_parse_void,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.prev_layer = SFC_FLOW_ITEM_START_LAYER,
		.layer = SFC_FLOW_ITEM_L2,
		.parse = sfc_flow_parse_eth,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.parse = sfc_flow_parse_vlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.parse = sfc_flow_parse_ipv4,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.parse = sfc_flow_parse_ipv6,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.parse = sfc_flow_parse_tcp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.parse = sfc_flow_parse_udp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.parse = sfc_flow_parse_vxlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_GENEVE,
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.parse = sfc_flow_parse_geneve,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_NVGRE,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.parse = sfc_flow_parse_nvgre,
	},
};

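/*
 * An illustrative pattern (hypothetical usage, not part of the driver)
 * that satisfies the prev_layer/layer checks in sfc_flow_parse_pattern()
 * below:
 *
 *   struct rte_flow_item pattern[] = {
 *       { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *       { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *       { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *       { .type = RTE_FLOW_ITEM_TYPE_VXLAN },
 *       { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *       { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *
 * The outer ETH item is accepted at the start (the initial layer is
 * ANY_LAYER) and yields L2; IPV4 takes L2 to L3; UDP takes L3 to L4;
 * VXLAN follows L4 and resets the layer to START_LAYER, so the inner
 * ETH item (prev_layer == START_LAYER) is accepted again.
 */
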
/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(const struct rte_flow_attr *attr,
		    struct rte_flow *flow,
		    struct rte_flow_error *error)
{
	if (attr == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "NULL attribute");
		return -rte_errno;
	}
	if (attr->group != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
				   "Groups are not supported");
		return -rte_errno;
	}
	if (attr->priority != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
				   "Priorities are not supported");
		return -rte_errno;
	}
	if (attr->egress != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
				   "Egress is not supported");
		return -rte_errno;
	}
	if (attr->ingress == 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
				   "Only ingress is supported");
		return -rte_errno;
	}

	flow->spec.template.efs_flags |= EFX_FILTER_FLAG_RX;
	flow->spec.template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;

	return 0;
}

/* Get item from array sfc_flow_items */
static const struct sfc_flow_item *
sfc_flow_get_item(enum rte_flow_item_type type)
{
	unsigned int i;

	for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
		if (sfc_flow_items[i].type == type)
			return &sfc_flow_items[i];

	return NULL;
}

static int
sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
	boolean_t is_ifrm = B_FALSE;
	const struct sfc_flow_item *item;

	if (pattern == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
				   "NULL pattern");
		return -rte_errno;
	}

	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
		item = sfc_flow_get_item(pattern->type);
		if (item == NULL) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unsupported pattern item");
			return -rte_errno;
		}

		/*
		 * Omitting one or several protocol layers at the beginning
		 * of the pattern is supported
		 */
		if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    item->prev_layer != prev_layer) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unexpected sequence of pattern items");
			return -rte_errno;
		}

		/*
		 * Allow only VOID and ETH pattern items in the inner frame.
		 * Also check that there is only one tunneling protocol.
		 */
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
		case RTE_FLOW_ITEM_TYPE_ETH:
			break;

		case RTE_FLOW_ITEM_TYPE_VXLAN:
		case RTE_FLOW_ITEM_TYPE_GENEVE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			if (is_ifrm) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					pattern,
					"More than one tunneling protocol");
				return -rte_errno;
			}
			is_ifrm = B_TRUE;
			break;

		default:
			if (is_ifrm) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					pattern,
					"There is an unsupported pattern item "
					"in the inner frame");
				return -rte_errno;
			}
			break;
		}

		rc = item->parse(pattern, &flow->spec.template, error);
		if (rc != 0)
			return rc;

		if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
			prev_layer = item->layer;
	}

	return 0;
}

static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
		     const struct rte_flow_action_queue *queue,
		     struct rte_flow *flow)
{
	struct sfc_rxq *rxq;

	if (queue->index >= sa->rxq_count)
		return -EINVAL;

	rxq = sa->rxq_info[queue->index].rxq;
	flow->spec.template.efs_dmaq_id = (uint16_t)rxq->hw_index;

	return 0;
}

#if EFSYS_OPT_RX_SCALE
static int
sfc_flow_parse_rss(struct sfc_adapter *sa,
		   const struct rte_flow_action_rss *rss,
		   struct rte_flow *flow)
{
	unsigned int rxq_sw_index;
	struct sfc_rxq *rxq;
	unsigned int rxq_hw_index_min;
	unsigned int rxq_hw_index_max;
	const struct rte_eth_rss_conf *rss_conf = rss->rss_conf;
	uint64_t rss_hf;
	uint8_t *rss_key = NULL;
	struct sfc_flow_rss *sfc_rss_conf = &flow->rss_conf;
	unsigned int i;

	if (rss->num == 0)
		return -EINVAL;

	rxq_sw_index = sa->rxq_count - 1;
	rxq = sa->rxq_info[rxq_sw_index].rxq;
	rxq_hw_index_min = rxq->hw_index;
	rxq_hw_index_max = 0;

	for (i = 0; i < rss->num; ++i) {
		rxq_sw_index = rss->queue[i];

		if (rxq_sw_index >= sa->rxq_count)
			return -EINVAL;

		rxq = sa->rxq_info[rxq_sw_index].rxq;

		if (rxq->hw_index < rxq_hw_index_min)
			rxq_hw_index_min = rxq->hw_index;

		if (rxq->hw_index > rxq_hw_index_max)
			rxq_hw_index_max = rxq->hw_index;
	}

	rss_hf = (rss_conf != NULL) ? rss_conf->rss_hf : SFC_RSS_OFFLOADS;
	if ((rss_hf & ~SFC_RSS_OFFLOADS) != 0)
		return -EINVAL;

	if (rss_conf != NULL) {
		if (rss_conf->rss_key_len != sizeof(sa->rss_key))
			return -EINVAL;

		rss_key = rss_conf->rss_key;
	} else {
		rss_key = sa->rss_key;
	}

	flow->rss = B_TRUE;

	sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
	sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
	sfc_rss_conf->rss_hash_types = sfc_rte_to_efx_hash_type(rss_hf);
	rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(sa->rss_key));

	for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
		unsigned int rxq_sw_index = rss->queue[i % rss->num];
		struct sfc_rxq *rxq = sa->rxq_info[rxq_sw_index].rxq;

		sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
	}

	return 0;
}
#endif /* EFSYS_OPT_RX_SCALE */

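/*
 * A worked example for sfc_flow_parse_rss() above (hypothetical queue
 * layout): if the action lists queues whose hardware indices are 5, 6
 * and 8, then rxq_hw_index_min is 5, rxq_hw_index_max is 8, and rss_tbl
 * is filled with the offsets 0, 1, 3, 0, 1, 3, ... relative to the
 * minimum; the base queue itself is conveyed later through efs_dmaq_id
 * in sfc_flow_filter_insert() below.
 */
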
static int
sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
		    unsigned int filters_count)
{
	unsigned int i;
	int ret = 0;

	for (i = 0; i < filters_count; i++) {
		int rc;

		rc = efx_filter_remove(sa->nic, &spec->filters[i]);
		if (ret == 0 && rc != 0) {
			sfc_err(sa, "failed to remove filter specification "
				"(rc = %d)", rc);
			ret = rc;
		}
	}

	return ret;
}

static int
sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < spec->count; i++) {
		rc = efx_filter_insert(sa->nic, &spec->filters[i]);
		if (rc != 0) {
			sfc_flow_spec_flush(sa, spec, i);
			break;
		}
	}

	return rc;
}

static int
sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
	return sfc_flow_spec_flush(sa, spec, spec->count);
}

static int
sfc_flow_filter_insert(struct sfc_adapter *sa,
		       struct rte_flow *flow)
{
#if EFSYS_OPT_RX_SCALE
	struct sfc_flow_rss *rss = &flow->rss_conf;
	uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
	unsigned int i;
	int rc = 0;

	if (flow->rss) {
		unsigned int rss_spread = MIN(rss->rxq_hw_index_max -
					      rss->rxq_hw_index_min + 1,
					      EFX_MAXRSS);

		rc = efx_rx_scale_context_alloc(sa->nic,
						EFX_RX_SCALE_EXCLUSIVE,
						rss_spread,
						&efs_rss_context);
		if (rc != 0)
			goto fail_scale_context_alloc;

		rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
					   EFX_RX_HASHALG_TOEPLITZ,
					   rss->rss_hash_types, B_TRUE);
		if (rc != 0)
			goto fail_scale_mode_set;

		rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
					  rss->rss_key,
					  sizeof(sa->rss_key));
		if (rc != 0)
			goto fail_scale_key_set;

		/*
		 * At this point, fully elaborated filter specifications
		 * have been produced from the template. To make sure that
		 * RSS behaviour is consistent between them, set the same
		 * RSS context value everywhere.
		 */
		for (i = 0; i < flow->spec.count; i++) {
			efx_filter_spec_t *spec = &flow->spec.filters[i];

			spec->efs_rss_context = efs_rss_context;
			spec->efs_dmaq_id = rss->rxq_hw_index_min;
			spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
		}
	}

	rc = sfc_flow_spec_insert(sa, &flow->spec);
	if (rc != 0)
		goto fail_filter_insert;

	if (flow->rss) {
		/*
		 * Scale table is set after filter insertion because
		 * the table entries are relative to the base RxQ ID
		 * and the latter is submitted to the HW by means of
		 * inserting a filter, so by the time of the request
		 * the HW knows all the information needed to verify
		 * the table entries, and the operation will succeed
		 */
		rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
					  rss->rss_tbl, RTE_DIM(rss->rss_tbl));
		if (rc != 0)
			goto fail_scale_tbl_set;
	}

	return 0;

fail_scale_tbl_set:
	sfc_flow_spec_remove(sa, &flow->spec);

fail_filter_insert:
fail_scale_key_set:
fail_scale_mode_set:
	if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT)
		efx_rx_scale_context_free(sa->nic, efs_rss_context);

fail_scale_context_alloc:
	return rc;
#else /* !EFSYS_OPT_RX_SCALE */
	return sfc_flow_spec_insert(sa, &flow->spec);
#endif /* EFSYS_OPT_RX_SCALE */
}

static int
sfc_flow_filter_remove(struct sfc_adapter *sa,
		       struct rte_flow *flow)
{
	int rc = 0;

	rc = sfc_flow_spec_remove(sa, &flow->spec);
	if (rc != 0)
		return rc;

#if EFSYS_OPT_RX_SCALE
	if (flow->rss) {
		/*
		 * All specifications for a given flow rule have the same RSS
		 * context, so that RSS context value is taken from the first
		 * filter specification
		 */
		efx_filter_spec_t *spec = &flow->spec.filters[0];

		rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
	}
#endif /* EFSYS_OPT_RX_SCALE */

	return rc;
}

static int
sfc_flow_parse_actions(struct sfc_adapter *sa,
		       const struct rte_flow_action actions[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	boolean_t is_specified = B_FALSE;

	if (actions == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				   "NULL actions");
		return -rte_errno;
	}

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		/* This one may appear anywhere multiple times. */
		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
			continue;
		/* Fate-deciding actions may appear exactly once. */
		if (is_specified) {
			rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				 actions,
				 "Cannot combine several fate-deciding actions, "
				 "choose between QUEUE, RSS or DROP");
			return -rte_errno;
		}
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			rc = sfc_flow_parse_queue(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad QUEUE action");
				return -rte_errno;
			}

			is_specified = B_TRUE;
			break;

#if EFSYS_OPT_RX_SCALE
		case RTE_FLOW_ACTION_TYPE_RSS:
			rc = sfc_flow_parse_rss(sa, actions->conf, flow);
			if (rc != 0) {
				/* Negate rc to pass a positive errno value */
				rte_flow_error_set(error, -rc,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad RSS action");
				return -rte_errno;
			}

			is_specified = B_TRUE;
			break;
#endif /* EFSYS_OPT_RX_SCALE */

		case RTE_FLOW_ACTION_TYPE_DROP:
			flow->spec.template.efs_dmaq_id =
				EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;

			is_specified = B_TRUE;
			break;

		default:
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
					   "Action is not supported");
			return -rte_errno;
		}
	}

	/* When fate is unknown, drop traffic. */
	if (!is_specified) {
		flow->spec.template.efs_dmaq_id =
			EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
	}

	return 0;
}

/**
 * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
 * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the
 * specification copies.
 *
 * @param[in, out] spec
 *   SFC flow specification to update.
 * @param[in] filters_count_for_one_val
 *   How many specifications should share the same match flag; this is
 *   the number of specifications before copying.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
			       unsigned int filters_count_for_one_val,
			       struct rte_flow_error *error)
{
	unsigned int i;
	static const efx_filter_match_flags_t vals[] = {
		EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
		EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
	};

	if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Number of specifications is incorrect while copying "
			"by unknown destination flags");
		return -rte_errno;
	}

	for (i = 0; i < spec->count; i++) {
		/* The check above ensures that divisor can't be zero here */
		spec->filters[i].efs_match_flags |=
			vals[i / filters_count_for_one_val];
	}

	return 0;
}

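/*
 * For example (illustrative): with two original specifications already
 * duplicated to four by sfc_flow_spec_add_match_flag() below,
 * filters_count_for_one_val is 2, and the index expression
 * i / filters_count_for_one_val assigns UNKNOWN_UCAST_DST to filters
 * 0-1 and UNKNOWN_MCAST_DST to filters 2-3.
 */
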
/**
 * Check that the following condition is met:
 * - the list of supported filters has a filter
 *   with EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of
 *   EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also
 *   be inserted.
 *
 * @param[in] match
 *   The match flags of filter.
 * @param[in] spec
 *   Specification to be supplemented.
 * @param[in] filter
 *   SFC filter with list of supported filters.
 */
static boolean_t
sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
				 __rte_unused efx_filter_spec_t *spec,
				 struct sfc_filter *filter)
{
	unsigned int i;
	efx_filter_match_flags_t match_mcast_dst;

	match_mcast_dst =
		(match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
		EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
	for (i = 0; i < filter->supported_match_num; i++) {
		if (match_mcast_dst == filter->supported_match[i])
			return B_TRUE;
	}

	return B_FALSE;
}

/**
 * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and the
 * EFX_ETHER_TYPE_IPV4 and EFX_ETHER_TYPE_IPV6 values of the
 * corresponding field in the specification copies.
 *
 * @param[in, out] spec
 *   SFC flow specification to update.
 * @param[in] filters_count_for_one_val
 *   How many specifications should share the same EtherType value; this is
 *   the number of specifications before copying.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
			unsigned int filters_count_for_one_val,
			struct rte_flow_error *error)
{
	unsigned int i;
	static const uint16_t vals[] = {
		EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
	};

	if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Number of specifications is incorrect "
			"while copying by Ethertype");
		return -rte_errno;
	}

	for (i = 0; i < spec->count; i++) {
		spec->filters[i].efs_match_flags |=
			EFX_FILTER_MATCH_ETHER_TYPE;

		/*
		 * The check above ensures that
		 * filters_count_for_one_val is not 0
		 */
		spec->filters[i].efs_ether_type =
			vals[i / filters_count_for_one_val];
	}

	return 0;
}

/**
 * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
 * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the
 * specification copies.
 *
 * @param[in, out] spec
 *   SFC flow specification to update.
 * @param[in] filters_count_for_one_val
 *   How many specifications should share the same match flag; this is
 *   the number of specifications before copying.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
				    unsigned int filters_count_for_one_val,
				    struct rte_flow_error *error)
{
	unsigned int i;
	static const efx_filter_match_flags_t vals[] = {
		EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
	};

	if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Number of specifications is incorrect while copying "
			"by inner frame unknown destination flags");
		return -rte_errno;
	}

	for (i = 0; i < spec->count; i++) {
		/* The check above ensures that divisor can't be zero here */
		spec->filters[i].efs_match_flags |=
			vals[i / filters_count_for_one_val];
	}

	return 0;
}

/**
 * Check that the following conditions are met:
 * - the specification corresponds to a filter for encapsulated traffic
 * - the list of supported filters has a filter
 *   with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
 *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
 *   be inserted.
 *
 * @param[in] match
 *   The match flags of filter.
 * @param[in] spec
 *   Specification to be supplemented.
 * @param[in] filter
 *   SFC filter with list of supported filters.
 */
static boolean_t
sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
				      efx_filter_spec_t *spec,
				      struct sfc_filter *filter)
{
	unsigned int i;
	efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
	efx_filter_match_flags_t match_mcast_dst;

	if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
		return B_FALSE;

	match_mcast_dst =
		(match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
	for (i = 0; i < filter->supported_match_num; i++) {
		if (match_mcast_dst == filter->supported_match[i])
			return B_TRUE;
	}

	return B_FALSE;
}

/*
 * Match flags that can be automatically added to filters.
 * Selecting the last minimum when searching for the copy flag ensures that the
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has a higher priority than
 * EFX_FILTER_MATCH_ETHER_TYPE. This is because the filter
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of supported
 * filters.
 */
static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
	{
		.flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
		.vals_count = 2,
		.set_vals = sfc_flow_set_unknown_dst_flags,
		.spec_check = sfc_flow_check_unknown_dst_flags,
	},
	{
		.flag = EFX_FILTER_MATCH_ETHER_TYPE,
		.vals_count = 2,
		.set_vals = sfc_flow_set_ethertypes,
		.spec_check = NULL,
	},
	{
		.flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
		.vals_count = 2,
		.set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
		.spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
	},
};

/* Get item from array sfc_flow_copy_flags */
static const struct sfc_flow_copy_flag *
sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
{
	unsigned int i;

	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
		if (sfc_flow_copy_flags[i].flag == flag)
			return &sfc_flow_copy_flags[i];
	}

	return NULL;
}

1775 /**
1776  * Make copies of the specifications, set match flag and values
1777  * of the field that corresponds to it.
1778  *
1779  * @param spec[in, out]
1780  *   SFC flow specification to update.
1781  * @param flag[in]
1782  *   The match flag to add.
1783  * @param error[out]
1784  *   Perform verbose error reporting if not NULL.
1785  */
1786 static int
1787 sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
1788 			     efx_filter_match_flags_t flag,
1789 			     struct rte_flow_error *error)
1790 {
1791 	unsigned int i;
1792 	unsigned int new_filters_count;
1793 	unsigned int filters_count_for_one_val;
1794 	const struct sfc_flow_copy_flag *copy_flag;
1795 	int rc;
1796 
1797 	copy_flag = sfc_flow_get_copy_flag(flag);
1798 	if (copy_flag == NULL) {
1799 		rte_flow_error_set(error, ENOTSUP,
1800 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1801 				   "Unsupported spec field for copying");
1802 		return -rte_errno;
1803 	}
1804 
1805 	new_filters_count = spec->count * copy_flag->vals_count;
1806 	if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
1807 		rte_flow_error_set(error, EINVAL,
1808 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1809 			"Too many EFX specifications in the flow rule");
1810 		return -rte_errno;
1811 	}
1812 
1813 	/* Replicate the existing filter specifications */
1814 	for (i = spec->count; i < new_filters_count; i++)
1815 		spec->filters[i] = spec->filters[i - spec->count];
1816 
1817 	filters_count_for_one_val = spec->count;
1818 	spec->count = new_filters_count;
1819 
1820 	rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
1821 	if (rc != 0)
1822 		return rc;
1823 
1824 	return 0;
1825 }
1826 
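/*
 * Usage sketch for the helper above (hypothetical caller, error handling
 * elided): starting from a single template specification that lacks the
 * EtherType match,
 *
 *	spec->count = 1;
 *	rc = sfc_flow_spec_add_match_flag(spec, EFX_FILTER_MATCH_ETHER_TYPE,
 *					  error);
 *	// On success spec->count == 2 and filters[0]/filters[1] are copies
 *	// of the template differing only in the EtherType field.
 */
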
1827 /**
1828  * Check that the given set of match flags missing in the original filter spec
1829  * could be covered by adding spec copies which specify the corresponding
1830  * flags and packet field values to match.
1831  *
1832  * @param miss_flags[in]
1833  *   Flags required by the supported filter but missing from the spec.
1834  * @param spec[in]
1835  *   Specification to be supplemented.
1836  * @param filter[in]
1837  *   SFC filter.
1838  *
1839  * @return
1840  *   Multiplier for the specification count, or 0 if the flags cannot be added.
1841  */
1842 static unsigned int
1843 sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
1844 			     efx_filter_spec_t *spec,
1845 			     struct sfc_filter *filter)
1846 {
1847 	unsigned int i;
1848 	efx_filter_match_flags_t copy_flags = 0;
1849 	efx_filter_match_flags_t flag;
1850 	efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
1851 	sfc_flow_spec_check *check;
1852 	unsigned int multiplier = 1;
1853 
1854 	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
1855 		flag = sfc_flow_copy_flags[i].flag;
1856 		check = sfc_flow_copy_flags[i].spec_check;
1857 		if ((flag & miss_flags) == flag) {
1858 			if (check != NULL && !check(match, spec, filter))
1859 				continue;
1860 
1861 			copy_flags |= flag;
1862 			multiplier *= sfc_flow_copy_flags[i].vals_count;
1863 		}
1864 	}
1865 
1866 	if (copy_flags == miss_flags)
1867 		return multiplier;
1868 
1869 	return 0;
1870 }
1871 
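/*
 * Worked example (hypothetical flag set): if the supported filter needs
 * both EFX_FILTER_MATCH_ETHER_TYPE and EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
 * on top of the template, each flag contributes vals_count == 2, so the
 * function returns 2 * 2 == 4: completing the spec would multiply the
 * number of filter specifications by four.
 */
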
1872 /**
1873  * Attempt to supplement the specification template up to a minimally
1874  * supported set of match flags. To do this, the specifications are
1875  * copied and filled with the values of the fields that correspond
1876  * to the missing flags.
1877  * The resulting filter set is built from the fewest copies needed
1878  * to cover the minimally required set of flags, as computed by
1879  * sfc_flow_check_missing_flags().
1880  *
1881  * @param sa[in]
1882  *   SFC adapter.
1883  * @param spec[in, out]
1884  *   SFC flow specification to update.
1885  * @param error[out]
1886  *   Perform verbose error reporting if not NULL.
1887  */
1888 static int
1889 sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
1890 			       struct sfc_flow_spec *spec,
1891 			       struct rte_flow_error *error)
1892 {
1893 	struct sfc_filter *filter = &sa->filter;
1894 	efx_filter_match_flags_t miss_flags;
1895 	efx_filter_match_flags_t min_miss_flags = 0;
1896 	efx_filter_match_flags_t match;
1897 	unsigned int min_multiplier = UINT_MAX;
1898 	unsigned int multiplier;
1899 	unsigned int i;
1900 	int rc;
1901 
1902 	match = spec->template.efs_match_flags;
1903 	for (i = 0; i < filter->supported_match_num; i++) {
1904 		if ((match & filter->supported_match[i]) == match) {
1905 			miss_flags = filter->supported_match[i] & (~match);
1906 			multiplier = sfc_flow_check_missing_flags(miss_flags,
1907 				&spec->template, filter);
1908 			if (multiplier > 0) {
1909 				if (multiplier <= min_multiplier) {
1910 					min_multiplier = multiplier;
1911 					min_miss_flags = miss_flags;
1912 				}
1913 			}
1914 		}
1915 	}
1916 
1917 	if (min_multiplier == UINT_MAX) {
1918 		rte_flow_error_set(error, ENOTSUP,
1919 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1920 				   "Flow rule pattern is not supported");
1921 		return -rte_errno;
1922 	}
1923 
1924 	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
1925 		efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;
1926 
1927 		if ((flag & min_miss_flags) == flag) {
1928 			rc = sfc_flow_spec_add_match_flag(spec, flag, error);
1929 			if (rc != 0)
1930 				return rc;
1931 		}
1932 	}
1933 
1934 	return 0;
1935 }
1936 
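/*
 * Selection example (hypothetical supported_match[] contents): if the
 * template can be completed either by adding ETHER_TYPE (multiplier 2)
 * or by adding ETHER_TYPE plus UNKNOWN_UCAST_DST (multiplier 4), the
 * loop above keeps the last candidate with the minimum multiplier, so
 * the two-copy completion wins and only sfc_flow_set_ethertypes() runs.
 */
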
1937 /**
1938  * Check that a set of match flags corresponds to a filter pattern,
1939  * optionally extended with the OUTER_VID and INNER_VID match flags
1940  * (INNER_VID is only allowed together with OUTER_VID).
1941  *
1942  * @param match_flags[in]
1943  *   Set of match flags.
1944  * @param flags_pattern[in]
1945  *   Pattern of filter match flags.
1946  */
1947 static boolean_t
1948 sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
1949 			    efx_filter_match_flags_t flags_pattern)
1950 {
1951 	if ((match_flags & flags_pattern) != flags_pattern)
1952 		return B_FALSE;
1953 
1954 	switch (match_flags & ~flags_pattern) {
1955 	case 0:
1956 	case EFX_FILTER_MATCH_OUTER_VID:
1957 	case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
1958 		return B_TRUE;
1959 	default:
1960 		return B_FALSE;
1961 	}
1962 }
1963 
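/*
 * Examples (names abbreviate the EFX_FILTER_MATCH_* constants):
 *   match_flags = ETHER_TYPE | OUTER_VID, pattern = ETHER_TYPE -> B_TRUE
 *   match_flags = ETHER_TYPE | INNER_VID, pattern = ETHER_TYPE -> B_FALSE
 * (INNER_VID alone fails because it is only allowed with OUTER_VID).
 */
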
1964 /**
1965  * Check whether the spec maps to a hardware filter which is known to be
1966  * ineffective despite being valid.
1967  *
1968  * @param spec[in]
1969  *   SFC flow specification.
1970  */
1971 static boolean_t
1972 sfc_flow_is_match_flags_exception(struct sfc_flow_spec *spec)
1973 {
1974 	unsigned int i;
1975 	uint16_t ether_type;
1976 	uint8_t ip_proto;
1977 	efx_filter_match_flags_t match_flags;
1978 
1979 	for (i = 0; i < spec->count; i++) {
1980 		match_flags = spec->filters[i].efs_match_flags;
1981 
1982 		if (sfc_flow_is_match_with_vids(match_flags,
1983 						EFX_FILTER_MATCH_ETHER_TYPE) ||
1984 		    sfc_flow_is_match_with_vids(match_flags,
1985 						EFX_FILTER_MATCH_ETHER_TYPE |
1986 						EFX_FILTER_MATCH_LOC_MAC)) {
1987 			ether_type = spec->filters[i].efs_ether_type;
1988 			if (ether_type == EFX_ETHER_TYPE_IPV4 ||
1989 			    ether_type == EFX_ETHER_TYPE_IPV6)
1990 				return B_TRUE;
1991 		} else if (sfc_flow_is_match_with_vids(match_flags,
1992 				EFX_FILTER_MATCH_ETHER_TYPE |
1993 				EFX_FILTER_MATCH_IP_PROTO) ||
1994 			   sfc_flow_is_match_with_vids(match_flags,
1995 				EFX_FILTER_MATCH_ETHER_TYPE |
1996 				EFX_FILTER_MATCH_IP_PROTO |
1997 				EFX_FILTER_MATCH_LOC_MAC)) {
1998 			ip_proto = spec->filters[i].efs_ip_proto;
1999 			if (ip_proto == EFX_IPPROTO_TCP ||
2000 			    ip_proto == EFX_IPPROTO_UDP)
2001 				return B_TRUE;
2002 		}
2003 	}
2004 
2005 	return B_FALSE;
2006 }
2007 
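/*
 * Example of an exception caught above: a filter matching only
 * EFX_FILTER_MATCH_ETHER_TYPE with efs_ether_type == EFX_ETHER_TYPE_IPV4
 * is valid but known to be ineffective in hardware, so the whole flow
 * rule is rejected rather than silently failing to match traffic.
 */
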
2008 static int
2009 sfc_flow_validate_match_flags(struct sfc_adapter *sa,
2010 			      struct rte_flow *flow,
2011 			      struct rte_flow_error *error)
2012 {
2013 	efx_filter_spec_t *spec_tmpl = &flow->spec.template;
2014 	efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
2015 	int rc;
2016 
2017 	/* Initialize the first filter spec with template */
2018 	flow->spec.filters[0] = *spec_tmpl;
2019 	flow->spec.count = 1;
2020 
2021 	if (!sfc_filter_is_match_supported(sa, match_flags)) {
2022 		rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
2023 		if (rc != 0)
2024 			return rc;
2025 	}
2026 
2027 	if (sfc_flow_is_match_flags_exception(&flow->spec)) {
2028 		rte_flow_error_set(error, ENOTSUP,
2029 			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2030 			"The flow rule pattern is unsupported");
2031 		return -rte_errno;
2032 	}
2033 
2034 	return 0;
2035 }
2036 
2037 static int
2038 sfc_flow_parse(struct rte_eth_dev *dev,
2039 	       const struct rte_flow_attr *attr,
2040 	       const struct rte_flow_item pattern[],
2041 	       const struct rte_flow_action actions[],
2042 	       struct rte_flow *flow,
2043 	       struct rte_flow_error *error)
2044 {
2045 	struct sfc_adapter *sa = dev->data->dev_private;
2046 	int rc;
2047 
2048 	rc = sfc_flow_parse_attr(attr, flow, error);
2049 	if (rc != 0)
2050 		goto fail_bad_value;
2051 
2052 	rc = sfc_flow_parse_pattern(pattern, flow, error);
2053 	if (rc != 0)
2054 		goto fail_bad_value;
2055 
2056 	rc = sfc_flow_parse_actions(sa, actions, flow, error);
2057 	if (rc != 0)
2058 		goto fail_bad_value;
2059 
2060 	rc = sfc_flow_validate_match_flags(sa, flow, error);
2061 	if (rc != 0)
2062 		goto fail_bad_value;
2063 
2064 	return 0;
2065 
2066 fail_bad_value:
2067 	return rc;
2068 }
2069 
2070 static int
2071 sfc_flow_validate(struct rte_eth_dev *dev,
2072 		  const struct rte_flow_attr *attr,
2073 		  const struct rte_flow_item pattern[],
2074 		  const struct rte_flow_action actions[],
2075 		  struct rte_flow_error *error)
2076 {
2077 	struct rte_flow flow;
2078 
2079 	memset(&flow, 0, sizeof(flow));
2080 
2081 	return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
2082 }
2083 
2084 static struct rte_flow *
2085 sfc_flow_create(struct rte_eth_dev *dev,
2086 		const struct rte_flow_attr *attr,
2087 		const struct rte_flow_item pattern[],
2088 		const struct rte_flow_action actions[],
2089 		struct rte_flow_error *error)
2090 {
2091 	struct sfc_adapter *sa = dev->data->dev_private;
2092 	struct rte_flow *flow = NULL;
2093 	int rc;
2094 
2095 	flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
2096 	if (flow == NULL) {
2097 		rte_flow_error_set(error, ENOMEM,
2098 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2099 				   "Failed to allocate memory");
2100 		goto fail_no_mem;
2101 	}
2102 
2103 	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2104 	if (rc != 0)
2105 		goto fail_bad_value;
2106 
2107 	sfc_adapter_lock(sa);
2108 
2109 	TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);
2110 
2111 	if (sa->state == SFC_ADAPTER_STARTED) {
2112 		rc = sfc_flow_filter_insert(sa, flow);
2113 		if (rc != 0) {
2114 			rte_flow_error_set(error, rc,
2115 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2116 				"Failed to insert filter");
2117 			goto fail_filter_insert;
2118 		}
2119 	}
2120 
2121 	sfc_adapter_unlock(sa);
2122 
2123 	return flow;
2124 
2125 fail_filter_insert:
2126 	TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
2127 	sfc_adapter_unlock(sa);
2128 
2129 fail_bad_value:
2130 	rte_free(flow);
2131 
2132 fail_no_mem:
2133 	return NULL;
2134 }
2135 
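/*
 * Application-level sketch of how this entry point is reached through
 * the generic rte_flow API (assumed setup: port_id is a valid sfc port
 * and Rx queue 0 exists; error handling elided):
 *
 *	struct rte_flow_error err;
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &err);
 *
 * rte_flow_create() dispatches here via the sfc_flow_ops table below.
 */
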
2136 static int
2137 sfc_flow_remove(struct sfc_adapter *sa,
2138 		struct rte_flow *flow,
2139 		struct rte_flow_error *error)
2140 {
2141 	int rc = 0;
2142 
2143 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2144 
2145 	if (sa->state == SFC_ADAPTER_STARTED) {
2146 		rc = sfc_flow_filter_remove(sa, flow);
2147 		if (rc != 0)
2148 			rte_flow_error_set(error, rc,
2149 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2150 				"Failed to destroy flow rule");
2151 	}
2152 
2153 	TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
2154 	rte_free(flow);
2155 
2156 	return rc;
2157 }
2158 
2159 static int
2160 sfc_flow_destroy(struct rte_eth_dev *dev,
2161 		 struct rte_flow *flow,
2162 		 struct rte_flow_error *error)
2163 {
2164 	struct sfc_adapter *sa = dev->data->dev_private;
2165 	struct rte_flow *flow_ptr;
2166 	int rc = EINVAL;
2167 
2168 	sfc_adapter_lock(sa);
2169 
2170 	TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
2171 		if (flow_ptr == flow)
2172 			rc = 0;
2173 	}
2174 	if (rc != 0) {
2175 		rte_flow_error_set(error, rc,
2176 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2177 				   "Failed to find flow rule to destroy");
2178 		goto fail_bad_value;
2179 	}
2180 
2181 	rc = sfc_flow_remove(sa, flow, error);
2182 
2183 fail_bad_value:
2184 	sfc_adapter_unlock(sa);
2185 
2186 	return -rc;
2187 }
2188 
2189 static int
2190 sfc_flow_flush(struct rte_eth_dev *dev,
2191 	       struct rte_flow_error *error)
2192 {
2193 	struct sfc_adapter *sa = dev->data->dev_private;
2194 	struct rte_flow *flow;
2195 	int rc = 0;
2196 	int ret = 0;
2197 
2198 	sfc_adapter_lock(sa);
2199 
2200 	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
2201 		rc = sfc_flow_remove(sa, flow, error);
2202 		if (rc != 0)
2203 			ret = rc;
2204 	}
2205 
2206 	sfc_adapter_unlock(sa);
2207 
2208 	return -ret;
2209 }
2210 
2211 static int
2212 sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
2213 		 struct rte_flow_error *error)
2214 {
2215 	struct sfc_adapter *sa = dev->data->dev_private;
2216 	struct sfc_port *port = &sa->port;
2217 	int ret = 0;
2218 
2219 	sfc_adapter_lock(sa);
2220 	if (sa->state != SFC_ADAPTER_INITIALIZED) {
2221 		rte_flow_error_set(error, EBUSY,
2222 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2223 				   NULL, "please close the port first");
2224 		ret = -rte_errno;
2225 	} else {
2226 		port->isolated = (enable) ? B_TRUE : B_FALSE;
2227 	}
2228 	sfc_adapter_unlock(sa);
2229 
2230 	return ret;
2231 }
2232 
2233 const struct rte_flow_ops sfc_flow_ops = {
2234 	.validate = sfc_flow_validate,
2235 	.create = sfc_flow_create,
2236 	.destroy = sfc_flow_destroy,
2237 	.flush = sfc_flow_flush,
2238 	.query = NULL,
2239 	.isolate = sfc_flow_isolate,
2240 };
2241 
2242 void
2243 sfc_flow_init(struct sfc_adapter *sa)
2244 {
2245 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2246 
2247 	TAILQ_INIT(&sa->filter.flow_list);
2248 }
2249 
2250 void
2251 sfc_flow_fini(struct sfc_adapter *sa)
2252 {
2253 	struct rte_flow *flow;
2254 
2255 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2256 
2257 	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
2258 		TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
2259 		rte_free(flow);
2260 	}
2261 }
2262 
2263 void
2264 sfc_flow_stop(struct sfc_adapter *sa)
2265 {
2266 	struct rte_flow *flow;
2267 
2268 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2269 
2270 	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
2271 		sfc_flow_filter_remove(sa, flow);
2272 }
2273 
2274 int
2275 sfc_flow_start(struct sfc_adapter *sa)
2276 {
2277 	struct rte_flow *flow;
2278 	int rc = 0;
2279 
2280 	sfc_log_init(sa, "entry");
2281 
2282 	SFC_ASSERT(sfc_adapter_is_locked(sa));
2283 
2284 	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
2285 		rc = sfc_flow_filter_insert(sa, flow);
2286 		if (rc != 0)
2287 			goto fail_bad_flow;
2288 	}
2289 
2290 	sfc_log_init(sa, "done");
2291 
2292 fail_bad_flow:
2293 	return rc;
2294 }
2295