/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdint.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_eth_ctrl.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"
#include "mlx5_glue.h"

/* Dev ops structure defined in mlx5.c */
extern const struct eth_dev_ops mlx5_dev_ops;
extern const struct eth_dev_ops mlx5_dev_ops_isolate;

struct rte_flow {
	TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
};

static const struct rte_flow_ops mlx5_flow_ops = {
	.isolate = mlx5_flow_isolate,
};

/* Convert FDIR request to Generic flow. */
struct mlx5_fdir {
	struct rte_flow_attr attr;
	struct rte_flow_action actions[2];
	struct rte_flow_item items[4];
	struct rte_flow_item_eth l2;
	struct rte_flow_item_eth l2_mask;
	union {
		struct rte_flow_item_ipv4 ipv4;
		struct rte_flow_item_ipv6 ipv6;
	} l3;
	union {
		struct rte_flow_item_ipv4 ipv4;
		struct rte_flow_item_ipv6 ipv6;
	} l3_mask;
	union {
		struct rte_flow_item_udp udp;
		struct rte_flow_item_tcp tcp;
	} l4;
	union {
		struct rte_flow_item_udp udp;
		struct rte_flow_item_tcp tcp;
	} l4_mask;
	struct rte_flow_action_queue queue;
};
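
/*
 * Layout sketch (explanatory only): mlx5_fdir_filter_convert() below fills
 * each pattern slot so it points at the embedded spec/mask storage above,
 * e.g. for an IPv4/UDP perfect filter. The trailing END entry is not set
 * explicitly; it comes from zero-initialization, since
 * RTE_FLOW_ITEM_TYPE_END is 0.
 *
 * @code
 * struct mlx5_fdir fdir = { .attr.ingress = 1, };
 *
 * fdir.items[0] = (struct rte_flow_item){
 *	.type = RTE_FLOW_ITEM_TYPE_ETH,
 *	.spec = &fdir.l2,
 *	.mask = &fdir.l2_mask,
 * };
 * fdir.items[1] = (struct rte_flow_item){
 *	.type = RTE_FLOW_ITEM_TYPE_IPV4,
 *	.spec = &fdir.l3,
 *	.mask = &fdir.l3_mask,
 * };
 * fdir.items[2] = (struct rte_flow_item){
 *	.type = RTE_FLOW_ITEM_TYPE_UDP,
 *	.spec = &fdir.l4,
 *	.mask = &fdir.l4_mask,
 * };
 * @endcode
 */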

/* Verbs specification header. */
struct ibv_spec_header {
	enum ibv_flow_spec_type type;
	uint16_t size;
};

/**
 * Create a flow and add it to @p list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param list
 *   Pointer to a TAILQ flow list.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A flow on success, NULL otherwise and rte_errno is set.
 */
static struct rte_flow *
mlx5_flow_list_create(struct rte_eth_dev *dev __rte_unused,
		      struct mlx5_flows *list __rte_unused,
		      const struct rte_flow_attr *attr __rte_unused,
		      const struct rte_flow_item items[] __rte_unused,
		      const struct rte_flow_action actions[] __rte_unused,
		      struct rte_flow_error *error)
{
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL,
			   "action not supported");
	return NULL;
}

/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
int
mlx5_flow_validate(struct rte_eth_dev *dev __rte_unused,
		   const struct rte_flow_attr *attr __rte_unused,
		   const struct rte_flow_item items[] __rte_unused,
		   const struct rte_flow_action actions[] __rte_unused,
		   struct rte_flow_error *error)
{
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "action not supported");
}

/**
 * Create a flow.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
struct rte_flow *
mlx5_flow_create(struct rte_eth_dev *dev __rte_unused,
		 const struct rte_flow_attr *attr __rte_unused,
		 const struct rte_flow_item items[] __rte_unused,
		 const struct rte_flow_action actions[] __rte_unused,
		 struct rte_flow_error *error)
{
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL,
			   "action not supported");
	return NULL;
}

/**
 * Destroy a flow in a list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param list
 *   Pointer to a TAILQ flow list.
 * @param[in] flow
 *   Flow to destroy.
 */
static void
mlx5_flow_list_destroy(struct rte_eth_dev *dev __rte_unused,
		       struct mlx5_flows *list __rte_unused,
		       struct rte_flow *flow __rte_unused)
{
}

/**
 * Destroy all flows.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param list
 *   Pointer to a TAILQ flow list.
 */
void
mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
{
	while (!TAILQ_EMPTY(list)) {
		struct rte_flow *flow;

		flow = TAILQ_FIRST(list);
		mlx5_flow_list_destroy(dev, list, flow);
	}
}

/**
 * Create drop queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_create_drop_queue(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

/**
 * Delete drop queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev __rte_unused)
{
}

/**
 * Remove all flows.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param list
 *   Pointer to a TAILQ flow list.
 */
void
mlx5_flow_stop(struct rte_eth_dev *dev __rte_unused,
	       struct mlx5_flows *list __rte_unused)
{
}

/**
 * Add all flows.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param list
 *   Pointer to a TAILQ flow list.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_start(struct rte_eth_dev *dev __rte_unused,
		struct mlx5_flows *list __rte_unused)
{
	return 0;
}

/**
 * Verify the flow list is empty.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   The number of flows not released.
 */
int
mlx5_flow_verify(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	struct rte_flow *flow;
	int ret = 0;

	TAILQ_FOREACH(flow, &priv->flows, next) {
		DRV_LOG(DEBUG, "port %u flow %p still referenced",
			dev->data->port_id, (void *)flow);
		++ret;
	}
	return ret;
}
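
/*
 * A minimal usage sketch (an assumed caller in the spirit of the device
 * close path in mlx5.c, not a verbatim quote of it):
 *
 * @code
 * ret = mlx5_flow_verify(dev);
 * if (ret)
 *	DRV_LOG(WARNING, "port %u some flows still remain",
 *		dev->data->port_id);
 * @endcode
 */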

/**
 * Enable a control flow configured from the control plane.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param eth_spec
 *   An Ethernet flow spec to apply.
 * @param eth_mask
 *   An Ethernet flow mask to apply.
 * @param vlan_spec
 *   A VLAN flow spec to apply.
 * @param vlan_mask
 *   A VLAN flow mask to apply.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
		    struct rte_flow_item_eth *eth_spec,
		    struct rte_flow_item_eth *eth_mask,
		    struct rte_flow_item_vlan *vlan_spec,
		    struct rte_flow_item_vlan *vlan_mask)
{
	struct priv *priv = dev->data->dev_private;
	const struct rte_flow_attr attr = {
		.ingress = 1,
	};
	struct rte_flow_item items[] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_ETH,
			.spec = eth_spec,
			.last = NULL,
			.mask = eth_mask,
		},
		{
			.type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
				RTE_FLOW_ITEM_TYPE_END,
			.spec = vlan_spec,
			.last = NULL,
			.mask = vlan_mask,
		},
		{
			.type = RTE_FLOW_ITEM_TYPE_END,
		},
	};
	uint16_t queue[priv->reta_idx_n];
	struct rte_flow_action_rss action_rss = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
		.level = 0,
		.types = priv->rss_conf.rss_hf,
		.key_len = priv->rss_conf.rss_key_len,
		.queue_num = priv->reta_idx_n,
		.key = priv->rss_conf.rss_key,
		.queue = queue,
	};
	struct rte_flow_action actions[] = {
		{
			.type = RTE_FLOW_ACTION_TYPE_RSS,
			.conf = &action_rss,
		},
		{
			.type = RTE_FLOW_ACTION_TYPE_END,
		},
	};
	struct rte_flow *flow;
	struct rte_flow_error error;
	unsigned int i;

	if (!priv->reta_idx_n) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	for (i = 0; i != priv->reta_idx_n; ++i)
		queue[i] = (*priv->reta_idx)[i];
	flow = mlx5_flow_list_create(dev, &priv->ctrl_flows, &attr, items,
				     actions, &error);
	if (!flow)
		return -rte_errno;
	return 0;
}

/**
 * Enable a control flow configured from the control plane.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param eth_spec
 *   An Ethernet flow spec to apply.
 * @param eth_mask
 *   An Ethernet flow mask to apply.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_ctrl_flow(struct rte_eth_dev *dev,
	       struct rte_flow_item_eth *eth_spec,
	       struct rte_flow_item_eth *eth_mask)
{
	return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
}
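
/*
 * A minimal sketch of how the traffic-enable path is expected to use the
 * helper above, e.g. to let broadcast frames through (illustrative, in the
 * spirit of mlx5_trigger.c rather than a verbatim quote of it):
 *
 * @code
 * struct rte_flow_item_eth bcast = {
 *	.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 * };
 *
 * if (mlx5_ctrl_flow(dev, &bcast, &bcast))
 *	DRV_LOG(ERR, "port %u cannot enable broadcast traffic",
 *		dev->data->port_id);
 * @endcode
 */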

/**
 * Destroy a flow.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
int
mlx5_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error __rte_unused)
{
	struct priv *priv = dev->data->dev_private;

	mlx5_flow_list_destroy(dev, &priv->flows, flow);
	return 0;
}

/**
 * Destroy all flows.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
int
mlx5_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error __rte_unused)
{
	struct priv *priv = dev->data->dev_private;

	mlx5_flow_list_flush(dev, &priv->flows);
	return 0;
}

/**
 * Isolated mode.
 *
 * @see rte_flow_isolate()
 * @see rte_flow_ops
 */
int
mlx5_flow_isolate(struct rte_eth_dev *dev,
		  int enable,
		  struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;

	if (dev->data->dev_started) {
		rte_flow_error_set(error, EBUSY,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "port must be stopped first");
		return -rte_errno;
	}
	priv->isolated = !!enable;
	if (enable)
		dev->dev_ops = &mlx5_dev_ops_isolate;
	else
		dev->dev_ops = &mlx5_dev_ops;
	return 0;
}
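
/*
 * Applications reach the handler above through rte_flow_isolate(); since
 * the port must be stopped first, the expected call order on the
 * application side looks like this (a hedged sketch, error handling
 * abbreviated):
 *
 * @code
 * struct rte_flow_error error;
 *
 * rte_eth_dev_stop(port_id);
 * if (rte_flow_isolate(port_id, 1, &error))
 *	printf("cannot enter isolated mode: %s\n",
 *	       error.message ? error.message : "unknown");
 * rte_eth_dev_start(port_id);
 * @endcode
 */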

/**
 * Convert a flow director filter to a generic flow.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param fdir_filter
 *   Flow director filter to add.
 * @param attributes
 *   Generic flow parameters structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
			 const struct rte_eth_fdir_filter *fdir_filter,
			 struct mlx5_fdir *attributes)
{
	struct priv *priv = dev->data->dev_private;
	const struct rte_eth_fdir_input *input = &fdir_filter->input;
	const struct rte_eth_fdir_masks *mask =
		&dev->data->dev_conf.fdir_conf.mask;

	/* Validate queue number. */
	if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
		DRV_LOG(ERR, "port %u invalid queue number %d",
			dev->data->port_id, fdir_filter->action.rx_queue);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	attributes->attr.ingress = 1;
	attributes->items[0] = (struct rte_flow_item){
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.spec = &attributes->l2,
		.mask = &attributes->l2_mask,
	};
	switch (fdir_filter->action.behavior) {
	case RTE_ETH_FDIR_ACCEPT:
		attributes->actions[0] = (struct rte_flow_action){
			.type = RTE_FLOW_ACTION_TYPE_QUEUE,
			.conf = &attributes->queue,
		};
		break;
	case RTE_ETH_FDIR_REJECT:
		attributes->actions[0] = (struct rte_flow_action){
			.type = RTE_FLOW_ACTION_TYPE_DROP,
		};
		break;
	default:
		DRV_LOG(ERR, "port %u invalid behavior %d",
			dev->data->port_id,
			fdir_filter->action.behavior);
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	attributes->queue.index = fdir_filter->action.rx_queue;
	/* Handle L3. */
	switch (fdir_filter->input.flow_type) {
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
		attributes->l3.ipv4.hdr = (struct ipv4_hdr){
			.src_addr = input->flow.ip4_flow.src_ip,
			.dst_addr = input->flow.ip4_flow.dst_ip,
			.time_to_live = input->flow.ip4_flow.ttl,
			.type_of_service = input->flow.ip4_flow.tos,
			.next_proto_id = input->flow.ip4_flow.proto,
		};
		attributes->l3_mask.ipv4.hdr = (struct ipv4_hdr){
			.src_addr = mask->ipv4_mask.src_ip,
			.dst_addr = mask->ipv4_mask.dst_ip,
			.time_to_live = mask->ipv4_mask.ttl,
			.type_of_service = mask->ipv4_mask.tos,
			.next_proto_id = mask->ipv4_mask.proto,
		};
		attributes->items[1] = (struct rte_flow_item){
			.type = RTE_FLOW_ITEM_TYPE_IPV4,
			.spec = &attributes->l3,
			.mask = &attributes->l3_mask,
		};
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
		attributes->l3.ipv6.hdr = (struct ipv6_hdr){
			.hop_limits = input->flow.ipv6_flow.hop_limits,
			.proto = input->flow.ipv6_flow.proto,
		};

		memcpy(attributes->l3.ipv6.hdr.src_addr,
		       input->flow.ipv6_flow.src_ip,
		       RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
		memcpy(attributes->l3.ipv6.hdr.dst_addr,
		       input->flow.ipv6_flow.dst_ip,
		       RTE_DIM(attributes->l3.ipv6.hdr.dst_addr));
		memcpy(attributes->l3_mask.ipv6.hdr.src_addr,
		       mask->ipv6_mask.src_ip,
		       RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
		memcpy(attributes->l3_mask.ipv6.hdr.dst_addr,
		       mask->ipv6_mask.dst_ip,
		       RTE_DIM(attributes->l3_mask.ipv6.hdr.dst_addr));
		attributes->items[1] = (struct rte_flow_item){
			.type = RTE_FLOW_ITEM_TYPE_IPV6,
			.spec = &attributes->l3,
			.mask = &attributes->l3_mask,
		};
		break;
	default:
		DRV_LOG(ERR, "port %u invalid flow type %d",
			dev->data->port_id, fdir_filter->input.flow_type);
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	/* Handle L4. */
	switch (fdir_filter->input.flow_type) {
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
		attributes->l4.udp.hdr = (struct udp_hdr){
			.src_port = input->flow.udp4_flow.src_port,
			.dst_port = input->flow.udp4_flow.dst_port,
		};
		attributes->l4_mask.udp.hdr = (struct udp_hdr){
			.src_port = mask->src_port_mask,
			.dst_port = mask->dst_port_mask,
		};
		attributes->items[2] = (struct rte_flow_item){
			.type = RTE_FLOW_ITEM_TYPE_UDP,
			.spec = &attributes->l4,
			.mask = &attributes->l4_mask,
		};
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
		attributes->l4.tcp.hdr = (struct tcp_hdr){
			.src_port = input->flow.tcp4_flow.src_port,
			.dst_port = input->flow.tcp4_flow.dst_port,
		};
		attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
			.src_port = mask->src_port_mask,
			.dst_port = mask->dst_port_mask,
		};
		attributes->items[2] = (struct rte_flow_item){
			.type = RTE_FLOW_ITEM_TYPE_TCP,
			.spec = &attributes->l4,
			.mask = &attributes->l4_mask,
		};
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
		attributes->l4.udp.hdr = (struct udp_hdr){
			.src_port = input->flow.udp6_flow.src_port,
			.dst_port = input->flow.udp6_flow.dst_port,
		};
		attributes->l4_mask.udp.hdr = (struct udp_hdr){
			.src_port = mask->src_port_mask,
			.dst_port = mask->dst_port_mask,
		};
		attributes->items[2] = (struct rte_flow_item){
			.type = RTE_FLOW_ITEM_TYPE_UDP,
			.spec = &attributes->l4,
			.mask = &attributes->l4_mask,
		};
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
		attributes->l4.tcp.hdr = (struct tcp_hdr){
			.src_port = input->flow.tcp6_flow.src_port,
			.dst_port = input->flow.tcp6_flow.dst_port,
		};
		attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
			.src_port = mask->src_port_mask,
			.dst_port = mask->dst_port_mask,
		};
		attributes->items[2] = (struct rte_flow_item){
			.type = RTE_FLOW_ITEM_TYPE_TCP,
			.spec = &attributes->l4,
			.mask = &attributes->l4_mask,
		};
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
		break;
	default:
		DRV_LOG(ERR, "port %u invalid flow type %d",
			dev->data->port_id, fdir_filter->input.flow_type);
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	return 0;
}
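
/*
 * A hedged sketch of an FDIR request the helper above accepts: an
 * IPv4/UDP perfect filter steering matching packets to Rx queue 3
 * (illustrative values only; IPv4() is the host-order address macro from
 * rte_ip.h of this era):
 *
 * @code
 * struct rte_eth_fdir_filter filter = {
 *	.input = {
 *		.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
 *		.flow.udp4_flow = {
 *			.ip.dst_ip = rte_cpu_to_be_32(IPv4(192, 168, 0, 1)),
 *			.dst_port = rte_cpu_to_be_16(4789),
 *		},
 *	},
 *	.action = {
 *		.rx_queue = 3,
 *		.behavior = RTE_ETH_FDIR_ACCEPT,
 *	},
 * };
 * @endcode
 */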

/**
 * Add a new flow director filter and store it in the list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param fdir_filter
 *   Flow director filter to add.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_fdir_filter_add(struct rte_eth_dev *dev,
		     const struct rte_eth_fdir_filter *fdir_filter)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_fdir attributes = {
		.attr.group = 0,
		.l2_mask = {
			.dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		},
	};
	struct rte_flow_error error;
	struct rte_flow *flow;
	int ret;

	ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes);
	if (ret)
		return ret;
	flow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr,
				     attributes.items, attributes.actions,
				     &error);
	if (flow) {
		DRV_LOG(DEBUG, "port %u FDIR created %p", dev->data->port_id,
			(void *)flow);
		return 0;
	}
	return -rte_errno;
}

/**
 * Delete specific filter.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param fdir_filter
 *   Filter to be deleted.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_fdir_filter_delete(struct rte_eth_dev *dev __rte_unused,
			const struct rte_eth_fdir_filter *fdir_filter
			__rte_unused)
{
	rte_errno = ENOTSUP;
	return -rte_errno;
}

/**
 * Update queue for specific filter.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param fdir_filter
 *   Filter to be updated.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_fdir_filter_update(struct rte_eth_dev *dev,
			const struct rte_eth_fdir_filter *fdir_filter)
{
	int ret;

	ret = mlx5_fdir_filter_delete(dev, fdir_filter);
	if (ret)
		return ret;
	return mlx5_fdir_filter_add(dev, fdir_filter);
}

/**
 * Flush all filters.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_fdir_filter_flush(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;

	mlx5_flow_list_flush(dev, &priv->flows);
}

/**
 * Get flow director information.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] fdir_info
 *   Resulting flow director information.
 */
static void
mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
{
	struct rte_eth_fdir_masks *mask =
		&dev->data->dev_conf.fdir_conf.mask;

	fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
	fdir_info->guarant_spc = 0;
	rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
	fdir_info->max_flexpayload = 0;
	fdir_info->flow_types_mask[0] = 0;
	fdir_info->flex_payload_unit = 0;
	fdir_info->max_flex_payload_segment_num = 0;
	fdir_info->flex_payload_limit = 0;
	memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
}

/**
 * Deal with flow director operations.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param filter_op
 *   Operation to perform.
 * @param arg
 *   Pointer to operation-specific structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
		    void *arg)
{
	enum rte_fdir_mode fdir_mode =
		dev->data->dev_conf.fdir_conf.mode;

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;
	if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
	    fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
		DRV_LOG(ERR, "port %u flow director mode %d not supported",
			dev->data->port_id, fdir_mode);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		return mlx5_fdir_filter_add(dev, arg);
	case RTE_ETH_FILTER_UPDATE:
		return mlx5_fdir_filter_update(dev, arg);
	case RTE_ETH_FILTER_DELETE:
		return mlx5_fdir_filter_delete(dev, arg);
	case RTE_ETH_FILTER_FLUSH:
		mlx5_fdir_filter_flush(dev);
		break;
	case RTE_ETH_FILTER_INFO:
		mlx5_fdir_info_get(dev, arg);
		break;
	default:
		DRV_LOG(DEBUG, "port %u unknown operation %u",
			dev->data->port_id, filter_op);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
}

/**
 * Manage filter operations.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param filter_type
 *   Filter type.
 * @param filter_op
 *   Operation to perform.
 * @param arg
 *   Pointer to operation-specific structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
		     enum rte_filter_type filter_type,
		     enum rte_filter_op filter_op,
		     void *arg)
{
	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET) {
			rte_errno = EINVAL;
			return -rte_errno;
		}
		*(const void **)arg = &mlx5_flow_ops;
		return 0;
	case RTE_ETH_FILTER_FDIR:
		return mlx5_fdir_ctrl_func(dev, filter_op, arg);
	default:
		DRV_LOG(ERR, "port %u filter type (%d) not supported",
			dev->data->port_id, filter_type);
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	return 0;
}
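
/*
 * RTE_ETH_FILTER_GENERIC is how the rte_flow layer locates mlx5_flow_ops;
 * the lookup boils down to the following (a sketch of the ethdev-side
 * caller, not code from this driver):
 *
 * @code
 * const struct rte_flow_ops *ops = NULL;
 *
 * if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
 *				RTE_ETH_FILTER_GET, &ops))
 *	ops = NULL;
 * @endcode
 */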

/**
 * Detect the number of Verbs flow priorities supported.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   Number of supported Verbs flow priorities.
 */
unsigned int
mlx5_get_max_verbs_prio(struct rte_eth_dev *dev __rte_unused)
{
	return 8;
}