/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <stdint.h>
#include <unistd.h>

#include <rte_ethdev_driver.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_hypervisor.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_nl.h>
#include <mlx5_malloc.h>

#include "mlx5.h"
#include "mlx5_autoconf.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"

/**
 * DPDK callback to configure a VLAN filter.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param vlan_id
 *   VLAN ID to filter.
 * @param on
 *   Toggle filter.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	DRV_LOG(DEBUG, "port %u %s VLAN filter ID %" PRIu16,
		dev->data->port_id, (on ? "enable" : "disable"), vlan_id);
	MLX5_ASSERT(priv->vlan_filter_n <= RTE_DIM(priv->vlan_filter));
	for (i = 0; (i != priv->vlan_filter_n); ++i)
		if (priv->vlan_filter[i] == vlan_id)
			break;
	/* Check if there's room for another VLAN filter. */
	if (i == RTE_DIM(priv->vlan_filter)) {
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	if (i < priv->vlan_filter_n) {
		MLX5_ASSERT(priv->vlan_filter_n != 0);
		/* Enabling an existing VLAN filter has no effect. */
		if (on)
			goto out;
		/* Remove VLAN filter from list. */
		--priv->vlan_filter_n;
		memmove(&priv->vlan_filter[i],
			&priv->vlan_filter[i + 1],
			sizeof(priv->vlan_filter[i]) *
			(priv->vlan_filter_n - i));
		priv->vlan_filter[priv->vlan_filter_n] = 0;
	} else {
		MLX5_ASSERT(i == priv->vlan_filter_n);
		/* Disabling an unknown VLAN filter has no effect. */
		if (!on)
			goto out;
		/* Add new VLAN filter. */
		priv->vlan_filter[priv->vlan_filter_n] = vlan_id;
		++priv->vlan_filter_n;
	}
out:
	if (dev->data->dev_started)
		return mlx5_traffic_restart(dev);
	return 0;
}

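/*
 * Usage sketch (illustrative, not taken from this driver): applications
 * reach the callback above through the generic ethdev API. Port id 0 and
 * VLAN 100 are arbitrary example values, and the port is assumed to have
 * been configured with DEV_RX_OFFLOAD_VLAN_FILTER so that ethdev forwards
 * the request to the PMD:
 *
 *	int ret = rte_eth_dev_vlan_filter(0, 100, 1);
 *
 *	if (ret < 0)
 *		printf("cannot add VLAN filter: %s\n", rte_strerror(-ret));
 */
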
/**
 * Callback to set/reset VLAN stripping for a specific queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param queue
 *   RX queue index.
 * @param on
 *   Enable/disable VLAN stripping.
 */
void
mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq = (*priv->rxqs)[queue];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	struct ibv_wq_attr mod;
	uint16_t vlan_offloads =
		(on ? IBV_WQ_FLAGS_CVLAN_STRIPPING : 0) |
		0;
	int ret = 0;

	/* Validate hw support. */
	if (!priv->config.hw_vlan_strip) {
		DRV_LOG(ERR, "port %u VLAN stripping is not supported",
			dev->data->port_id);
		return;
	}
	/* Validate queue number. */
	if (queue >= priv->rxqs_n) {
		DRV_LOG(ERR, "port %u VLAN stripping, invalid queue number %d",
			dev->data->port_id, queue);
		return;
	}
	DRV_LOG(DEBUG, "port %u set VLAN offloads 0x%x for port %u queue %d",
		dev->data->port_id, vlan_offloads, rxq->port_id, queue);
	if (!rxq_ctrl->obj) {
		/* Update related bits in RX queue. */
		rxq->vlan_strip = !!on;
		return;
	}
	if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {
		mod = (struct ibv_wq_attr){
			.attr_mask = IBV_WQ_ATTR_FLAGS,
			.flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING,
			.flags = vlan_offloads,
		};
		ret = mlx5_glue->modify_wq(rxq_ctrl->obj->wq, &mod);
	} else if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
		struct mlx5_devx_modify_rq_attr rq_attr;

		memset(&rq_attr, 0, sizeof(rq_attr));
		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
		rq_attr.state = MLX5_RQC_STATE_RDY;
		rq_attr.vsd = (on ? 0 : 1);
		rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
		ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
	}
	if (ret) {
		DRV_LOG(ERR, "port %u failed to modify object %d stripping "
			"mode: %s", dev->data->port_id,
			rxq_ctrl->obj->type, strerror(rte_errno));
		return;
	}
	/* Update related bits in RX queue. */
	rxq->vlan_strip = !!on;
}

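/*
 * Usage sketch (illustrative, not taken from this driver): per-queue
 * stripping is normally driven through the generic ethdev wrapper. Port id 0
 * and Rx queue 0 are arbitrary example values:
 *
 *	int ret = rte_eth_dev_set_vlan_strip_on_queue(0, 0, 1);
 *
 *	if (ret < 0)
 *		printf("cannot toggle stripping: %s\n", rte_strerror(-ret));
 */
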
/**
 * Callback to set/reset VLAN offloads for a port.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mask
 *   VLAN offload bit mask.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	if (mask & ETH_VLAN_STRIP_MASK) {
		int hw_vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
				       DEV_RX_OFFLOAD_VLAN_STRIP);

		if (!priv->config.hw_vlan_strip) {
			DRV_LOG(ERR, "port %u VLAN stripping is not supported",
				dev->data->port_id);
			return 0;
		}
		/* Run on every RX queue and set/reset VLAN stripping. */
		for (i = 0; (i != priv->rxqs_n); i++)
			mlx5_vlan_strip_queue_set(dev, i, hw_vlan_strip);
	}
	return 0;
}

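/*
 * Usage sketch (illustrative, not taken from this driver): ethdev computes
 * which offload groups changed and passes them in "mask". An application
 * typically reads the current setting, flips a bit and applies it; port id 0
 * is an arbitrary example value:
 *
 *	int cur = rte_eth_dev_get_vlan_offload(0);
 *
 *	if (cur >= 0 &&
 *	    rte_eth_dev_set_vlan_offload(0, cur | ETH_VLAN_STRIP_OFFLOAD) < 0)
 *		printf("cannot enable VLAN stripping\n");
 */
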
/**
 * Release VLAN network device, created for VM workaround.
 *
 * @param[in] dev
 *   Ethernet device object, Netlink context provider.
 * @param[in] vlan
 *   Object representing the network device to release.
 */
void
mlx5_vlan_vmwa_release(struct rte_eth_dev *dev,
		       struct mlx5_vf_vlan *vlan)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_nl_vlan_vmwa_context *vmwa = priv->vmwa_context;
	struct mlx5_nl_vlan_dev *vlan_dev = &vmwa->vlan_dev[0];

	MLX5_ASSERT(vlan->created);
	MLX5_ASSERT(priv->vmwa_context);
	if (!vlan->created || !vmwa)
		return;
	vlan->created = 0;
	MLX5_ASSERT(vlan_dev[vlan->tag].refcnt);
	if (--vlan_dev[vlan->tag].refcnt == 0 &&
	    vlan_dev[vlan->tag].ifindex) {
		mlx5_nl_vlan_vmwa_delete(vmwa, vlan_dev[vlan->tag].ifindex);
		vlan_dev[vlan->tag].ifindex = 0;
	}
}

/**
 * Acquire VLAN interface with specified tag for VM workaround.
 *
 * @param[in] dev
 *   Ethernet device object, Netlink context provider.
 * @param[in] vlan
 *   Object representing the network device to acquire.
 */
void
mlx5_vlan_vmwa_acquire(struct rte_eth_dev *dev,
		       struct mlx5_vf_vlan *vlan)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_nl_vlan_vmwa_context *vmwa = priv->vmwa_context;
	struct mlx5_nl_vlan_dev *vlan_dev = &vmwa->vlan_dev[0];

	MLX5_ASSERT(!vlan->created);
	MLX5_ASSERT(priv->vmwa_context);
	if (vlan->created || !vmwa)
		return;
	if (vlan_dev[vlan->tag].refcnt == 0) {
		MLX5_ASSERT(!vlan_dev[vlan->tag].ifindex);
		vlan_dev[vlan->tag].ifindex =
			mlx5_nl_vlan_vmwa_create(vmwa, vmwa->vf_ifindex,
						 vlan->tag);
	}
	if (vlan_dev[vlan->tag].ifindex) {
		vlan_dev[vlan->tag].refcnt++;
		vlan->created = 1;
	}
}

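/*
 * Usage sketch (illustrative, not taken from this driver): a caller that
 * needs a VLAN network interface on the VF embeds a struct mlx5_vf_vlan
 * carrying the desired tag and brackets its use with the acquire/release
 * pair above. The tag value 100 is an arbitrary example:
 *
 *	struct mlx5_vf_vlan vf_vlan = { .tag = 100 };
 *
 *	mlx5_vlan_vmwa_acquire(dev, &vf_vlan);
 *	if (vf_vlan.created) {
 *		... use the per-tag VLAN network device ...
 *		mlx5_vlan_vmwa_release(dev, &vf_vlan);
 *	}
 */
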
/*
 * Create per ethernet device VLAN VM workaround context
 */
struct mlx5_nl_vlan_vmwa_context *
mlx5_vlan_vmwa_init(struct rte_eth_dev *dev, uint32_t ifindex)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	struct mlx5_nl_vlan_vmwa_context *vmwa;
	enum rte_hypervisor hv_type;

	/* Do not engage workaround over PF. */
	if (!config->vf)
		return NULL;
	/* Check whether there is a desired virtual environment. */
	hv_type = rte_hypervisor_get();
	switch (hv_type) {
	case RTE_HYPERVISOR_UNKNOWN:
	case RTE_HYPERVISOR_VMWARE:
		/*
		 * The "white list" of configurations
		 * to engage the workaround.
		 */
		break;
	default:
		/*
		 * The configuration is not found in the "white list".
		 * We should not engage the VLAN workaround.
		 */
		return NULL;
	}
	vmwa = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*vmwa), sizeof(uint32_t),
			   SOCKET_ID_ANY);
	if (!vmwa) {
		DRV_LOG(WARNING,
			"Cannot allocate memory"
			" for VLAN workaround context");
		return NULL;
	}
	vmwa->nl_socket = mlx5_nl_init(NETLINK_ROUTE);
	if (vmwa->nl_socket < 0) {
		DRV_LOG(WARNING,
			"Cannot create Netlink socket"
			" for VLAN workaround context");
		mlx5_free(vmwa);
		return NULL;
	}
	vmwa->vf_ifindex = ifindex;
	/* Cleanup for existing VLAN devices. */
	return vmwa;
}

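/*
 * Usage sketch (illustrative, not taken from this driver): the context is
 * expected to be created once per device, stored in priv->vmwa_context and
 * destroyed on close; the actual call sites live in the device spawn/close
 * paths and are elided here:
 *
 *	priv->vmwa_context = mlx5_vlan_vmwa_init(dev, ifindex);
 *	...
 *	if (priv->vmwa_context)
 *		mlx5_vlan_vmwa_exit(priv->vmwa_context);
 */
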
/*
 * Destroy per ethernet device VLAN VM workaround context
 */
void
mlx5_vlan_vmwa_exit(struct mlx5_nl_vlan_vmwa_context *vmwa)
{
	unsigned int i;

	/* Delete all remaining VLAN devices. */
	for (i = 0; i < RTE_DIM(vmwa->vlan_dev); i++) {
		if (vmwa->vlan_dev[i].ifindex)
			mlx5_nl_vlan_vmwa_delete(vmwa,
						 vmwa->vlan_dev[i].ifindex);
	}
	if (vmwa->nl_socket >= 0)
		close(vmwa->nl_socket);
	mlx5_free(vmwa);
}
313