xref: /dpdk/drivers/net/mlx5/mlx5_flow.c (revision 78be885295b86da2c208324f1e276e802812b60d)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdint.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_eth_ctrl.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"
#include "mlx5_glue.h"

/* Dev ops structure defined in mlx5.c */
extern const struct eth_dev_ops mlx5_dev_ops;
extern const struct eth_dev_ops mlx5_dev_ops_isolate;

struct rte_flow {
	TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
};

static const struct rte_flow_ops mlx5_flow_ops = {
	.isolate = mlx5_flow_isolate,
};

/* Convert FDIR request to Generic flow. */
struct mlx5_fdir {
	struct rte_flow_attr attr;
	struct rte_flow_action actions[2];
	struct rte_flow_item items[4];
	struct rte_flow_item_eth l2;
	struct rte_flow_item_eth l2_mask;
	union {
		struct rte_flow_item_ipv4 ipv4;
		struct rte_flow_item_ipv6 ipv6;
	} l3;
	union {
		struct rte_flow_item_ipv4 ipv4;
		struct rte_flow_item_ipv6 ipv6;
	} l3_mask;
	union {
		struct rte_flow_item_udp udp;
		struct rte_flow_item_tcp tcp;
	} l4;
	union {
		struct rte_flow_item_udp udp;
		struct rte_flow_item_tcp tcp;
	} l4_mask;
	struct rte_flow_action_queue queue;
};

/* Verbs specification header. */
struct ibv_spec_header {
	enum ibv_flow_spec_type type;
	uint16_t size;
};
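
/*
 * The header above mirrors the type/size fields every Verbs flow spec starts
 * with, so a chain of specs appended after an ibv_flow_attr can be walked
 * generically. A minimal sketch of such a walk follows; it is illustrative
 * only (compiled out) and assumes the standard Verbs layout of specs placed
 * back to back right after the attribute structure.
 */
#if 0 /* Usage sketch, not part of the driver. */
static void
example_walk_specs(const struct ibv_flow_attr *attr)
{
	const uint8_t *ptr = (const uint8_t *)(attr + 1);
	uint32_t i;

	for (i = 0; i != attr->num_of_specs; ++i) {
		const struct ibv_spec_header *hdr =
			(const struct ibv_spec_header *)(const void *)ptr;

		/* hdr->size is the full size of the current spec in bytes. */
		ptr += hdr->size;
	}
}
#endif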

/**
 * Get the maximum number of priorities available.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   Number of supported Verbs flow priorities on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_verbs_max_prio(struct rte_eth_dev *dev)
{
	struct {
		struct ibv_flow_attr attr;
		struct ibv_flow_spec_eth eth;
		struct ibv_flow_spec_action_drop drop;
	} flow_attr = {
		.attr = {
			.num_of_specs = 2,
		},
		.eth = {
			.type = IBV_FLOW_SPEC_ETH,
			.size = sizeof(struct ibv_flow_spec_eth),
		},
		.drop = {
			.size = sizeof(struct ibv_flow_spec_action_drop),
			.type = IBV_FLOW_SPEC_ACTION_DROP,
		},
	};
	struct ibv_flow *flow;
	uint32_t verb_priorities;
	struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);

	if (!drop) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	for (verb_priorities = 0; 1; verb_priorities++) {
		flow_attr.attr.priority = verb_priorities;
		flow = mlx5_glue->create_flow(drop->qp,
					      &flow_attr.attr);
		if (!flow)
			break;
		claim_zero(mlx5_glue->destroy_flow(flow));
	}
	mlx5_hrxq_drop_release(dev);
	DRV_LOG(INFO, "port %u flow maximum priority: %d",
		dev->data->port_id, verb_priorities);
	return verb_priorities;
}
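
/*
 * The probing loop above keeps creating a throw-away drop rule at increasing
 * priorities until creation fails, which yields the number of priorities the
 * Verbs layer accepts. A sketch of how a caller might consume the result and
 * cache it in priv->config.max_verbs_prio (the field used by
 * mlx5_ctrl_flow_vlan() below) follows; this is a hypothetical illustration,
 * not the actual start-up code path.
 */
#if 0 /* Usage sketch, not part of the driver. */
static int
example_detect_priorities(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	int verb_priorities = mlx5_verbs_max_prio(dev);

	if (verb_priorities < 0)
		return verb_priorities; /* rte_errno is already set. */
	priv->config.max_verbs_prio = verb_priorities;
	return 0;
}
#endif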

/**
 * Create a flow and add it to the flow list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param list
 *   Pointer to a TAILQ flow list.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A flow on success, NULL otherwise and rte_errno is set.
 */
static struct rte_flow *
mlx5_flow_list_create(struct rte_eth_dev *dev __rte_unused,
		      struct mlx5_flows *list __rte_unused,
		      const struct rte_flow_attr *attr __rte_unused,
		      const struct rte_flow_item items[] __rte_unused,
		      const struct rte_flow_action actions[] __rte_unused,
		      struct rte_flow_error *error)
{
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL,
			   "action not supported");
	return NULL;
}

/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
int
mlx5_flow_validate(struct rte_eth_dev *dev __rte_unused,
		   const struct rte_flow_attr *attr __rte_unused,
		   const struct rte_flow_item items[] __rte_unused,
		   const struct rte_flow_action actions[] __rte_unused,
		   struct rte_flow_error *error)
{
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "action not supported");
}

/**
 * Create a flow.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
struct rte_flow *
mlx5_flow_create(struct rte_eth_dev *dev __rte_unused,
		 const struct rte_flow_attr *attr __rte_unused,
		 const struct rte_flow_item items[] __rte_unused,
		 const struct rte_flow_action actions[] __rte_unused,
		 struct rte_flow_error *error)
{
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL,
			   "action not supported");
	return NULL;
}

/**
 * Destroy a flow in a list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param list
 *   Pointer to a TAILQ flow list.
 * @param[in] flow
 *   Flow to destroy.
 */
static void
mlx5_flow_list_destroy(struct rte_eth_dev *dev __rte_unused,
		       struct mlx5_flows *list __rte_unused,
		       struct rte_flow *flow __rte_unused)
{
}

/**
 * Destroy all flows.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param list
 *   Pointer to a TAILQ flow list.
 */
void
mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
{
	while (!TAILQ_EMPTY(list)) {
		struct rte_flow *flow;

		flow = TAILQ_FIRST(list);
		mlx5_flow_list_destroy(dev, list, flow);
	}
}

/**
 * Remove all flows.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param list
 *   Pointer to a TAILQ flow list.
 */
void
mlx5_flow_stop(struct rte_eth_dev *dev __rte_unused,
	       struct mlx5_flows *list __rte_unused)
{
}

/**
 * Add all flows.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param list
 *   Pointer to a TAILQ flow list.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_start(struct rte_eth_dev *dev __rte_unused,
		struct mlx5_flows *list __rte_unused)
{
	return 0;
}

/**
 * Verify the flow list is empty.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   The number of flows not released.
 */
int
mlx5_flow_verify(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	struct rte_flow *flow;
	int ret = 0;

	TAILQ_FOREACH(flow, &priv->flows, next) {
		DRV_LOG(DEBUG, "port %u flow %p still referenced",
			dev->data->port_id, (void *)flow);
		++ret;
	}
	return ret;
}
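
/*
 * A caller would typically invoke mlx5_flow_verify() while tearing a port
 * down to report flows the application never destroyed. The sketch below is
 * a hypothetical illustration of such a check (compiled out), not the actual
 * close path in mlx5.c.
 */
#if 0 /* Usage sketch, not part of the driver. */
static void
example_warn_leaked_flows(struct rte_eth_dev *dev)
{
	int remaining = mlx5_flow_verify(dev);

	if (remaining)
		DRV_LOG(WARNING, "port %u %d flow(s) still referenced",
			dev->data->port_id, remaining);
}
#endif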

/**
 * Enable a control flow configured from the control plane.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param eth_spec
 *   An Ethernet flow spec to apply.
 * @param eth_mask
 *   An Ethernet flow mask to apply.
 * @param vlan_spec
 *   A VLAN flow spec to apply.
 * @param vlan_mask
 *   A VLAN flow mask to apply.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
		    struct rte_flow_item_eth *eth_spec,
		    struct rte_flow_item_eth *eth_mask,
		    struct rte_flow_item_vlan *vlan_spec,
		    struct rte_flow_item_vlan *vlan_mask)
{
	struct priv *priv = dev->data->dev_private;
	const struct rte_flow_attr attr = {
		.ingress = 1,
		.priority = priv->config.max_verbs_prio - 1,
	};
	struct rte_flow_item items[] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_ETH,
			.spec = eth_spec,
			.last = NULL,
			.mask = eth_mask,
		},
		{
			.type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
				RTE_FLOW_ITEM_TYPE_END,
			.spec = vlan_spec,
			.last = NULL,
			.mask = vlan_mask,
		},
		{
			.type = RTE_FLOW_ITEM_TYPE_END,
		},
	};
	uint16_t queue[priv->reta_idx_n];
	struct rte_flow_action_rss action_rss = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
		.level = 0,
		.types = priv->rss_conf.rss_hf,
		.key_len = priv->rss_conf.rss_key_len,
		.queue_num = priv->reta_idx_n,
		.key = priv->rss_conf.rss_key,
		.queue = queue,
	};
	struct rte_flow_action actions[] = {
		{
			.type = RTE_FLOW_ACTION_TYPE_RSS,
			.conf = &action_rss,
		},
		{
			.type = RTE_FLOW_ACTION_TYPE_END,
		},
	};
	struct rte_flow *flow;
	struct rte_flow_error error;
	unsigned int i;

	if (!priv->reta_idx_n) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	for (i = 0; i != priv->reta_idx_n; ++i)
		queue[i] = (*priv->reta_idx)[i];
	flow = mlx5_flow_list_create(dev, &priv->ctrl_flows, &attr, items,
				     actions, &error);
	if (!flow)
		return -rte_errno;
	return 0;
}

/**
 * Enable a control flow configured from the control plane.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param eth_spec
 *   An Ethernet flow spec to apply.
 * @param eth_mask
 *   An Ethernet flow mask to apply.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_ctrl_flow(struct rte_eth_dev *dev,
	       struct rte_flow_item_eth *eth_spec,
	       struct rte_flow_item_eth *eth_mask)
{
	return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
}
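
/*
 * Control flows are installed internally when traffic is enabled, e.g. to
 * accept broadcast frames on every configured Rx queue through the RSS
 * action built in mlx5_ctrl_flow_vlan(). A minimal sketch of such a call is
 * shown below for illustration (compiled out); it mimics, but is not, the
 * code in mlx5_trigger.c.
 */
#if 0 /* Usage sketch, not part of the driver. */
static int
example_enable_broadcast(struct rte_eth_dev *dev)
{
	struct rte_flow_item_eth bcast = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};

	/* Match on the destination MAC only: spec and mask are identical. */
	return mlx5_ctrl_flow(dev, &bcast, &bcast);
}
#endif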

/**
 * Destroy a flow.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
int
mlx5_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error __rte_unused)
{
	struct priv *priv = dev->data->dev_private;

	mlx5_flow_list_destroy(dev, &priv->flows, flow);
	return 0;
}

/**
 * Destroy all flows.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
int
mlx5_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error __rte_unused)
{
	struct priv *priv = dev->data->dev_private;

	mlx5_flow_list_flush(dev, &priv->flows);
	return 0;
}

/**
 * Isolated mode.
 *
 * @see rte_flow_isolate()
 * @see rte_flow_ops
 */
int
mlx5_flow_isolate(struct rte_eth_dev *dev,
		  int enable,
		  struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;

	if (dev->data->dev_started) {
		rte_flow_error_set(error, EBUSY,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "port must be stopped first");
		return -rte_errno;
	}
	priv->isolated = !!enable;
	if (enable)
		dev->dev_ops = &mlx5_dev_ops_isolate;
	else
		dev->dev_ops = &mlx5_dev_ops;
	return 0;
}
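
/*
 * Isolated mode is requested by the application through rte_flow_isolate()
 * before the port is started; the handler above then swaps the eth_dev_ops
 * table so that only flows created via rte_flow steer traffic. A minimal
 * application-side sketch (compiled out, illustrative only) is shown below;
 * the port_id value is hypothetical.
 */
#if 0 /* Usage sketch, not part of the driver. */
static int
example_request_isolation(uint16_t port_id)
{
	struct rte_flow_error error;

	/* Must be called while the port is stopped. */
	return rte_flow_isolate(port_id, 1, &error);
}
#endif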

/**
 * Convert a flow director filter to a generic flow.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param fdir_filter
 *   Flow director filter to add.
 * @param attributes
 *   Generic flow parameters structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
			 const struct rte_eth_fdir_filter *fdir_filter,
			 struct mlx5_fdir *attributes)
{
	struct priv *priv = dev->data->dev_private;
	const struct rte_eth_fdir_input *input = &fdir_filter->input;
	const struct rte_eth_fdir_masks *mask =
		&dev->data->dev_conf.fdir_conf.mask;

	/* Validate queue number. */
	if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
		DRV_LOG(ERR, "port %u invalid queue number %d",
			dev->data->port_id, fdir_filter->action.rx_queue);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	attributes->attr.ingress = 1;
	attributes->items[0] = (struct rte_flow_item) {
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.spec = &attributes->l2,
		.mask = &attributes->l2_mask,
	};
	switch (fdir_filter->action.behavior) {
	case RTE_ETH_FDIR_ACCEPT:
		attributes->actions[0] = (struct rte_flow_action){
			.type = RTE_FLOW_ACTION_TYPE_QUEUE,
			.conf = &attributes->queue,
		};
		break;
	case RTE_ETH_FDIR_REJECT:
		attributes->actions[0] = (struct rte_flow_action){
			.type = RTE_FLOW_ACTION_TYPE_DROP,
		};
		break;
	default:
		DRV_LOG(ERR, "port %u invalid behavior %d",
			dev->data->port_id,
			fdir_filter->action.behavior);
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	attributes->queue.index = fdir_filter->action.rx_queue;
	/* Handle L3. */
	switch (fdir_filter->input.flow_type) {
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
		attributes->l3.ipv4.hdr = (struct ipv4_hdr){
			.src_addr = input->flow.ip4_flow.src_ip,
			.dst_addr = input->flow.ip4_flow.dst_ip,
			.time_to_live = input->flow.ip4_flow.ttl,
			.type_of_service = input->flow.ip4_flow.tos,
			.next_proto_id = input->flow.ip4_flow.proto,
		};
		attributes->l3_mask.ipv4.hdr = (struct ipv4_hdr){
			.src_addr = mask->ipv4_mask.src_ip,
			.dst_addr = mask->ipv4_mask.dst_ip,
			.time_to_live = mask->ipv4_mask.ttl,
			.type_of_service = mask->ipv4_mask.tos,
			.next_proto_id = mask->ipv4_mask.proto,
		};
		attributes->items[1] = (struct rte_flow_item){
			.type = RTE_FLOW_ITEM_TYPE_IPV4,
			.spec = &attributes->l3,
			.mask = &attributes->l3_mask,
		};
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
		attributes->l3.ipv6.hdr = (struct ipv6_hdr){
			.hop_limits = input->flow.ipv6_flow.hop_limits,
			.proto = input->flow.ipv6_flow.proto,
		};

		memcpy(attributes->l3.ipv6.hdr.src_addr,
		       input->flow.ipv6_flow.src_ip,
		       RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
		memcpy(attributes->l3.ipv6.hdr.dst_addr,
		       input->flow.ipv6_flow.dst_ip,
		       RTE_DIM(attributes->l3.ipv6.hdr.dst_addr));
		memcpy(attributes->l3_mask.ipv6.hdr.src_addr,
		       mask->ipv6_mask.src_ip,
		       RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
		memcpy(attributes->l3_mask.ipv6.hdr.dst_addr,
		       mask->ipv6_mask.dst_ip,
		       RTE_DIM(attributes->l3_mask.ipv6.hdr.dst_addr));
		attributes->items[1] = (struct rte_flow_item){
			.type = RTE_FLOW_ITEM_TYPE_IPV6,
			.spec = &attributes->l3,
			.mask = &attributes->l3_mask,
		};
		break;
	default:
		DRV_LOG(ERR, "port %u invalid flow type %d",
			dev->data->port_id, fdir_filter->input.flow_type);
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	/* Handle L4. */
	switch (fdir_filter->input.flow_type) {
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
		attributes->l4.udp.hdr = (struct udp_hdr){
			.src_port = input->flow.udp4_flow.src_port,
			.dst_port = input->flow.udp4_flow.dst_port,
		};
		attributes->l4_mask.udp.hdr = (struct udp_hdr){
			.src_port = mask->src_port_mask,
			.dst_port = mask->dst_port_mask,
		};
		attributes->items[2] = (struct rte_flow_item){
			.type = RTE_FLOW_ITEM_TYPE_UDP,
			.spec = &attributes->l4,
			.mask = &attributes->l4_mask,
		};
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
		attributes->l4.tcp.hdr = (struct tcp_hdr){
			.src_port = input->flow.tcp4_flow.src_port,
			.dst_port = input->flow.tcp4_flow.dst_port,
		};
		attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
			.src_port = mask->src_port_mask,
			.dst_port = mask->dst_port_mask,
		};
		attributes->items[2] = (struct rte_flow_item){
			.type = RTE_FLOW_ITEM_TYPE_TCP,
			.spec = &attributes->l4,
			.mask = &attributes->l4_mask,
		};
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
		attributes->l4.udp.hdr = (struct udp_hdr){
			.src_port = input->flow.udp6_flow.src_port,
			.dst_port = input->flow.udp6_flow.dst_port,
		};
		attributes->l4_mask.udp.hdr = (struct udp_hdr){
			.src_port = mask->src_port_mask,
			.dst_port = mask->dst_port_mask,
		};
		attributes->items[2] = (struct rte_flow_item){
			.type = RTE_FLOW_ITEM_TYPE_UDP,
			.spec = &attributes->l4,
			.mask = &attributes->l4_mask,
		};
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
		attributes->l4.tcp.hdr = (struct tcp_hdr){
			.src_port = input->flow.tcp6_flow.src_port,
			.dst_port = input->flow.tcp6_flow.dst_port,
		};
		attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
			.src_port = mask->src_port_mask,
			.dst_port = mask->dst_port_mask,
		};
		attributes->items[2] = (struct rte_flow_item){
			.type = RTE_FLOW_ITEM_TYPE_TCP,
			.spec = &attributes->l4,
			.mask = &attributes->l4_mask,
		};
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
		break;
	default:
		DRV_LOG(ERR, "port %u invalid flow type %d",
			dev->data->port_id, fdir_filter->input.flow_type);
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	return 0;
}
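
/*
 * What the conversion above produces can be pictured with a concrete filter:
 * an IPv4/UDP perfect-match request becomes an ETH/IPV4/UDP pattern plus a
 * QUEUE action. The sketch below builds such a filter; it is a hypothetical
 * illustration (compiled out), with the queue index chosen arbitrarily and
 * the ports assumed to be expected in network byte order.
 */
#if 0 /* Usage sketch, not part of the driver. */
static int
example_convert_udp4_filter(struct rte_eth_dev *dev, struct mlx5_fdir *attr)
{
	struct rte_eth_fdir_filter filter = {
		.input = {
			.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
			.flow.udp4_flow = {
				.src_port = rte_cpu_to_be_16(1234),
				.dst_port = rte_cpu_to_be_16(5678),
			},
		},
		.action = {
			.rx_queue = 3, /* Arbitrary example queue. */
			.behavior = RTE_ETH_FDIR_ACCEPT,
		},
	};

	return mlx5_fdir_filter_convert(dev, &filter, attr);
}
#endif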

/**
 * Add a new flow director filter and store it in the list.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param fdir_filter
 *   Flow director filter to add.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_fdir_filter_add(struct rte_eth_dev *dev,
		     const struct rte_eth_fdir_filter *fdir_filter)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_fdir attributes = {
		.attr.group = 0,
		.l2_mask = {
			.dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		},
	};
	struct rte_flow_error error;
	struct rte_flow *flow;
	int ret;

	ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes);
	if (ret)
		return ret;
	flow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr,
				     attributes.items, attributes.actions,
				     &error);
	if (flow) {
		DRV_LOG(DEBUG, "port %u FDIR created %p", dev->data->port_id,
			(void *)flow);
		return 0;
	}
	return -rte_errno;
}

/**
 * Delete a specific filter.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param fdir_filter
 *   Filter to be deleted.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_fdir_filter_delete(struct rte_eth_dev *dev __rte_unused,
			const struct rte_eth_fdir_filter *fdir_filter
			__rte_unused)
{
	rte_errno = ENOTSUP;
	return -rte_errno;
}

/**
 * Update queue for a specific filter.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param fdir_filter
 *   Filter to be updated.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_fdir_filter_update(struct rte_eth_dev *dev,
			const struct rte_eth_fdir_filter *fdir_filter)
{
	int ret;

	ret = mlx5_fdir_filter_delete(dev, fdir_filter);
	if (ret)
		return ret;
	return mlx5_fdir_filter_add(dev, fdir_filter);
}

/**
 * Flush all filters.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_fdir_filter_flush(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;

	mlx5_flow_list_flush(dev, &priv->flows);
}

/**
 * Get flow director information.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] fdir_info
 *   Resulting flow director information.
 */
static void
mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
{
	struct rte_eth_fdir_masks *mask =
		&dev->data->dev_conf.fdir_conf.mask;

	fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
	fdir_info->guarant_spc = 0;
	rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
	fdir_info->max_flexpayload = 0;
	fdir_info->flow_types_mask[0] = 0;
	fdir_info->flex_payload_unit = 0;
	fdir_info->max_flex_payload_segment_num = 0;
	fdir_info->flex_payload_limit = 0;
	memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
}

/**
 * Deal with flow director operations.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param filter_op
 *   Operation to perform.
 * @param arg
 *   Pointer to operation-specific structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
		    void *arg)
{
	enum rte_fdir_mode fdir_mode =
		dev->data->dev_conf.fdir_conf.mode;

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;
	if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
	    fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
		DRV_LOG(ERR, "port %u flow director mode %d not supported",
			dev->data->port_id, fdir_mode);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		return mlx5_fdir_filter_add(dev, arg);
	case RTE_ETH_FILTER_UPDATE:
		return mlx5_fdir_filter_update(dev, arg);
	case RTE_ETH_FILTER_DELETE:
		return mlx5_fdir_filter_delete(dev, arg);
	case RTE_ETH_FILTER_FLUSH:
		mlx5_fdir_filter_flush(dev);
		break;
	case RTE_ETH_FILTER_INFO:
		mlx5_fdir_info_get(dev, arg);
		break;
	default:
		DRV_LOG(DEBUG, "port %u unknown operation %u",
			dev->data->port_id, filter_op);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
}

/**
 * Manage filter operations.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param filter_type
 *   Filter type.
 * @param filter_op
 *   Operation to perform.
 * @param arg
 *   Pointer to operation-specific structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
		     enum rte_filter_type filter_type,
		     enum rte_filter_op filter_op,
		     void *arg)
{
	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET) {
			rte_errno = EINVAL;
			return -rte_errno;
		}
		*(const void **)arg = &mlx5_flow_ops;
		return 0;
	case RTE_ETH_FILTER_FDIR:
		return mlx5_fdir_ctrl_func(dev, filter_op, arg);
	default:
		DRV_LOG(ERR, "port %u filter type (%d) not supported",
			dev->data->port_id, filter_type);
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	return 0;
}
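
/*
 * The RTE_ETH_FILTER_GENERIC/RTE_ETH_FILTER_GET pair above is how the
 * rte_flow layer discovers mlx5_flow_ops. A minimal sketch of the same query
 * done directly through the legacy filter control API is shown below for
 * illustration (compiled out); the port_id value is hypothetical.
 */
#if 0 /* Usage sketch, not part of the driver. */
static const struct rte_flow_ops *
example_get_flow_ops(uint16_t port_id)
{
	const struct rte_flow_ops *ops = NULL;

	if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
				    RTE_ETH_FILTER_GET, &ops) < 0)
		return NULL;
	return ops;
}
#endif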