/*-
 * Copyright (c) 2017 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_ethdev.h>
#include <rte_eth_ctrl.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"

/*
 * At the moment, the flow API is implemented in such a manner that each
 * flow rule is converted to a hardware filter.
 * All elements of a flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that describes the hardware filter.
 */
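
/*
 * For example (an illustrative rule, not taken from a test): a pattern
 * ETH (EtherType 0x0800) / VLAN (VID 42) with a QUEUE action is converted
 * below into a specification with EFX_FILTER_MATCH_ETHER_TYPE and
 * EFX_FILTER_MATCH_OUTER_VID set in efs_match_flags, efs_ether_type 0x0800,
 * efs_outer_vid 42 and efs_dmaq_id set to the hardware Rx queue index.
 */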

enum sfc_flow_item_layers {
	SFC_FLOW_ITEM_ANY_LAYER,
	SFC_FLOW_ITEM_START_LAYER,
	SFC_FLOW_ITEM_L2,
};

typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
				  efx_filter_spec_t *spec,
				  struct rte_flow_error *error);

struct sfc_flow_item {
	enum rte_flow_item_type type;		/* Type of item */
	enum sfc_flow_item_layers layer;	/* Layer of item */
	enum sfc_flow_item_layers prev_layer;	/* Layer of the previous item */
	sfc_flow_item_parse *parse;		/* Parsing function */
};

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;

static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
	uint8_t sum = 0;
	unsigned int i;

	for (i = 0; i < size; i++)
		sum |= buf[i];

	return (sum == 0) ? B_TRUE : B_FALSE;
}

/*
 * Validate the item and prepare the "spec" and "mask" structures
 * for parsing
 */
static int
sfc_flow_parse_init(const struct rte_flow_item *item,
		    const void **spec_ptr,
		    const void **mask_ptr,
		    const void *supp_mask,
		    const void *def_mask,
		    unsigned int size,
		    struct rte_flow_error *error)
{
	const uint8_t *spec;
	const uint8_t *mask;
	const uint8_t *last;
	uint8_t match;
	uint8_t supp;
	unsigned int i;

	if (item == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "NULL item");
		return -rte_errno;
	}

	if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Mask or last is set without spec");
		return -rte_errno;
	}

	/*
	 * If "mask" is not set, the default mask is used,
	 * but if the default mask is NULL, "mask" must be set
	 */
	if (item->mask == NULL) {
		if (def_mask == NULL) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				"Mask should be specified");
			return -rte_errno;
		}

		mask = (const uint8_t *)def_mask;
	} else {
		mask = (const uint8_t *)item->mask;
	}

	spec = (const uint8_t *)item->spec;
	last = (const uint8_t *)item->last;

	if (spec == NULL)
		goto exit;

	/*
	 * If field values in "last" are either 0 or equal to the
	 * corresponding values in "spec", then they are ignored
	 */
	if (last != NULL &&
	    !sfc_flow_is_zero(last, size) &&
	    memcmp(last, spec, size) != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Ranging is not supported");
		return -rte_errno;
	}

	if (supp_mask == NULL) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Supported mask for item should be specified");
		return -rte_errno;
	}

	/* Check that spec and mask do not ask for more match than supp_mask */
	for (i = 0; i < size; i++) {
		match = spec[i] | mask[i];
		supp = ((const uint8_t *)supp_mask)[i];

		if ((match | supp) != supp) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Item's field is not supported");
			return -rte_errno;
		}
	}

exit:
	*spec_ptr = spec;
	*mask_ptr = mask;
	return 0;
}
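
/*
 * Illustration of the supp_mask check above (hypothetical one-byte field):
 * with a supp_mask byte of 0x0f, an item whose spec or mask has any of the
 * high four bits set is rejected, since (match | supp) != supp; with a
 * supp_mask byte of 0xff, any spec/mask combination for that byte passes.
 */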

/*
 * Protocol parsers.
 * Partial masking is not supported, so masks in items must be either
 * full or empty (zeroed), and may be set only for the supported fields
 * specified in the corresponding supp_mask.
 */

static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
		    __rte_unused efx_filter_spec_t *efx_spec,
		    __rte_unused struct rte_flow_error *error)
{
	return 0;
}

/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   Ethernet type fields are supported. If the mask is NULL, the
 *   default mask is used. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
		   efx_filter_spec_t *efx_spec,
		   struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_eth *spec = NULL;
	const struct rte_flow_item_eth *mask = NULL;
	const struct rte_flow_item_eth supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.type = 0xffff,
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_eth_mask,
				 sizeof(struct rte_flow_item_eth),
				 error);
	if (rc != 0)
		return rc;

	/* If "spec" is not set, the item matches any Ethernet frame */
	if (spec == NULL)
		return 0;

	if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
		rte_memcpy(efx_spec->efs_loc_mac, spec->dst.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (!is_zero_ether_addr(&mask->dst)) {
		goto fail_bad_mask;
	}

	if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
		rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (!is_zero_ether_addr(&mask->src)) {
		goto fail_bad_mask;
	}

	/*
	 * The Ethernet type is in network (big-endian) byte order in the
	 * item and in host byte order in efx_spec, so conversion is needed
	 */
	if (mask->type == supp_mask.type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_be_to_cpu_16(spec->type);
	} else if (mask->type != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the ETH pattern item");
	return -rte_errno;
}
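
/*
 * Example usage (hypothetical addresses): an ETH item matching only the
 * destination MAC, with source address and EtherType left as wildcards:
 *
 *	static const struct rte_flow_item_eth eth_spec = {
 *		.dst.addr_bytes = { 0x00, 0x0f, 0x53, 0x00, 0x00, 0x01 },
 *	};
 *	static const struct rte_flow_item_eth eth_mask = {
 *		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
 *	};
 *	const struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_ETH,
 *		.spec = &eth_spec,
 *		.mask = &eth_mask,
 *	};
 *
 * sfc_flow_parse_eth() sets only EFX_FILTER_MATCH_LOC_MAC for such an item.
 */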

/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only the VID field is supported.
 *   The mask cannot be NULL. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	uint16_t vid;
	const struct rte_flow_item_vlan *spec = NULL;
	const struct rte_flow_item_vlan *mask = NULL;
	const struct rte_flow_item_vlan supp_mask = {
		.tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 NULL,
				 sizeof(struct rte_flow_item_vlan),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * The VID is in network (big-endian) byte order in the item and
	 * in host byte order in efx_spec, so conversion is needed.
	 * If two VLAN items are present, the first matches
	 * the outer tag and the second matches the inner tag.
	 */
	if (mask->tci == supp_mask.tci) {
		vid = rte_be_to_cpu_16(spec->tci);

		if (!(efx_spec->efs_match_flags &
		      EFX_FILTER_MATCH_OUTER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
			efx_spec->efs_outer_vid = vid;
		} else if (!(efx_spec->efs_match_flags &
			     EFX_FILTER_MATCH_INNER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
			efx_spec->efs_inner_vid = vid;
		} else {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "More than two VLAN items");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN ID in TCI match is required");
		return -rte_errno;
	}

	return 0;
}
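
/*
 * For instance, a double-tagged (QinQ) pattern ETH / VLAN / VLAN is parsed
 * so that the first VLAN item sets EFX_FILTER_MATCH_OUTER_VID and the
 * second sets EFX_FILTER_MATCH_INNER_VID; a third VLAN item is rejected.
 */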

static const struct sfc_flow_item sfc_flow_items[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_VOID,
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.parse = sfc_flow_parse_void,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.prev_layer = SFC_FLOW_ITEM_START_LAYER,
		.layer = SFC_FLOW_ITEM_L2,
		.parse = sfc_flow_parse_eth,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.parse = sfc_flow_parse_vlan,
	},
};

/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(const struct rte_flow_attr *attr,
		    struct rte_flow *flow,
		    struct rte_flow_error *error)
{
	if (attr == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "NULL attribute");
		return -rte_errno;
	}
	if (attr->group != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
				   "Groups are not supported");
		return -rte_errno;
	}
	if (attr->priority != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
				   "Priorities are not supported");
		return -rte_errno;
	}
	if (attr->egress != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
				   "Egress is not supported");
		return -rte_errno;
	}
	if (attr->ingress == 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
				   "Only ingress is supported");
		return -rte_errno;
	}

	flow->spec.efs_flags |= EFX_FILTER_FLAG_RX;
	flow->spec.efs_rss_context = EFX_FILTER_SPEC_RSS_CONTEXT_DEFAULT;

	return 0;
}
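
/*
 * The only attribute combination accepted above is therefore that of a
 * default-group, default-priority ingress rule, i.e.
 * { .group = 0, .priority = 0, .ingress = 1, .egress = 0 }.
 */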

/* Get an item from the sfc_flow_items array */
static const struct sfc_flow_item *
sfc_flow_get_item(enum rte_flow_item_type type)
{
	unsigned int i;

	for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
		if (sfc_flow_items[i].type == type)
			return &sfc_flow_items[i];

	return NULL;
}

static int
sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	enum sfc_flow_item_layers prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
	const struct sfc_flow_item *item;

	if (pattern == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
				   "NULL pattern");
		return -rte_errno;
	}

	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
		item = sfc_flow_get_item(pattern->type);
		if (item == NULL) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unsupported pattern item");
			return -rte_errno;
		}

		/*
		 * Omitting one or several protocol layers at the beginning
		 * of the pattern is supported
		 */
		if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    item->prev_layer != prev_layer) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unexpected sequence of pattern items");
			return -rte_errno;
		}

		rc = item->parse(pattern, &flow->spec, error);
		if (rc != 0)
			return rc;

		if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
			prev_layer = item->layer;
	}

	return 0;
}

static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
		     const struct rte_flow_action_queue *queue,
		     struct rte_flow *flow)
{
	struct sfc_rxq *rxq;

	if (queue->index >= sa->rxq_count)
		return -EINVAL;

	rxq = sa->rxq_info[queue->index].rxq;
	flow->spec.efs_dmaq_id = (uint16_t)rxq->hw_index;

	return 0;
}

static int
sfc_flow_parse_actions(struct sfc_adapter *sa,
		       const struct rte_flow_action actions[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	boolean_t is_specified = B_FALSE;

	if (actions == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				   "NULL actions");
		return -rte_errno;
	}

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			rc = sfc_flow_parse_queue(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad QUEUE action");
				return -rte_errno;
			}

			is_specified = B_TRUE;
			break;

		default:
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
					   "Action is not supported");
			return -rte_errno;
		}
	}

	if (!is_specified) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, actions,
				   "Action is unspecified");
		return -rte_errno;
	}

	return 0;
}
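
/*
 * Illustration (hypothetical values): the minimal action list accepted
 * above is a single QUEUE action followed by the END terminator, e.g.
 *
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */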

static int
sfc_flow_parse(struct rte_eth_dev *dev,
	       const struct rte_flow_attr *attr,
	       const struct rte_flow_item pattern[],
	       const struct rte_flow_action actions[],
	       struct rte_flow *flow,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	memset(&flow->spec, 0, sizeof(flow->spec));

	rc = sfc_flow_parse_attr(attr, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_parse_pattern(pattern, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_parse_actions(sa, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	if (!sfc_filter_is_match_supported(sa, flow->spec.efs_match_flags)) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Flow rule pattern is not supported");
		return -rte_errno;
	}

	return 0;

fail_bad_value:
	return rc;
}

static int
sfc_flow_validate(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	struct rte_flow flow;

	return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
}

static struct rte_flow *
sfc_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow = NULL;
	int rc;

	flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
	if (flow == NULL) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to allocate memory");
		goto fail_no_mem;
	}

	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	sfc_adapter_lock(sa);

	/* The flow list must only be manipulated with the lock held */
	TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = efx_filter_insert(sa->nic, &flow->spec);
		if (rc != 0) {
			rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to insert filter");
			goto fail_filter_insert;
		}
	}

	sfc_adapter_unlock(sa);

	return flow;

fail_filter_insert:
	TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
	sfc_adapter_unlock(sa);

fail_bad_value:
	rte_free(flow);

fail_no_mem:
	return NULL;
}

static int
sfc_flow_remove(struct sfc_adapter *sa,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	int rc = 0;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = efx_filter_remove(sa->nic, &flow->spec);
		if (rc != 0)
			rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to destroy flow rule");
	}

	TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
	rte_free(flow);

	return rc;
}

static int
sfc_flow_destroy(struct rte_eth_dev *dev,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow_ptr;
	int rc = EINVAL;

	sfc_adapter_lock(sa);

	TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
		if (flow_ptr == flow) {
			rc = 0;
			break;
		}
	}
	if (rc != 0) {
		rte_flow_error_set(error, rc,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to find flow rule to destroy");
		goto fail_bad_value;
	}

	rc = sfc_flow_remove(sa, flow, error);

fail_bad_value:
	sfc_adapter_unlock(sa);

	return -rc;
}

static int
sfc_flow_flush(struct rte_eth_dev *dev,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow;
	int rc = 0;
	int ret = 0;

	sfc_adapter_lock(sa);

	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
		rc = sfc_flow_remove(sa, flow, error);
		if (rc != 0)
			ret = rc;
	}

	sfc_adapter_unlock(sa);

	return -ret;
}

const struct rte_flow_ops sfc_flow_ops = {
	.validate = sfc_flow_validate,
	.create = sfc_flow_create,
	.destroy = sfc_flow_destroy,
	.flush = sfc_flow_flush,
	.query = NULL,
};
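
/*
 * Note: applications reach these callbacks through the generic filter
 * control dev op; sfc_ethdev.c returns a pointer to sfc_flow_ops for
 * the RTE_ETH_FILTER_GENERIC filter type.
 */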

void
sfc_flow_init(struct sfc_adapter *sa)
{
	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_INIT(&sa->filter.flow_list);
}

void
sfc_flow_fini(struct sfc_adapter *sa)
{
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
		TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
		rte_free(flow);
	}
}

void
sfc_flow_stop(struct sfc_adapter *sa)
{
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
		efx_filter_remove(sa->nic, &flow->spec);
}

int
sfc_flow_start(struct sfc_adapter *sa)
{
	struct rte_flow *flow;
	int rc = 0;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
		rc = efx_filter_insert(sa->nic, &flow->spec);
		if (rc != 0)
			goto fail_bad_flow;
	}

	sfc_log_init(sa, "done");

fail_bad_flow:
	return rc;
}
779