/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <stdint.h>
#include <unistd.h>

/*
 * Not needed by this file; included to work around the lack of off_t
 * definition for mlx5dv.h with unpatched rdma-core versions.
 */
#include <sys/types.h>

#include <ethdev_driver.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_hypervisor.h>

#include <mlx5.h>
#include <mlx5_nl.h>
#include <mlx5_malloc.h>

/*
 * Release VLAN network device, created for VM workaround.
 *
 * @param[in] dev
 *   Ethernet device object, Netlink context provider.
 * @param[in] vlan
 *   Object representing the network device to release.
 */
void
mlx5_vlan_vmwa_release(struct rte_eth_dev *dev,
			    struct mlx5_vf_vlan *vlan)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_nl_vlan_vmwa_context *vmwa = priv->vmwa_context;
	struct mlx5_nl_vlan_dev *vlan_dev = &vmwa->vlan_dev[0];

	MLX5_ASSERT(vlan->created);
	MLX5_ASSERT(priv->vmwa_context);
	if (!vlan->created || !vmwa)
		return;
	vlan->created = 0;
	rte_spinlock_lock(&vmwa->sl);
	MLX5_ASSERT(vlan_dev[vlan->tag].refcnt);
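	/*
	 * Drop one reference on this tag; the VLAN interface is removed
	 * via Netlink only when the last user of the tag is gone.
	 */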
	if (--vlan_dev[vlan->tag].refcnt == 0 &&
	    vlan_dev[vlan->tag].ifindex) {
		mlx5_nl_vlan_vmwa_delete(vmwa, vlan_dev[vlan->tag].ifindex);
		vlan_dev[vlan->tag].ifindex = 0;
	}
	rte_spinlock_unlock(&vmwa->sl);
}

/**
 * Acquire VLAN interface with specified tag for VM workaround.
 *
 * @param[in] dev
 *   Ethernet device object, Netlink context provider.
 * @param[in] vlan
 *   Object representing the network device to acquire.
 */
void
mlx5_vlan_vmwa_acquire(struct rte_eth_dev *dev,
			    struct mlx5_vf_vlan *vlan)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_nl_vlan_vmwa_context *vmwa = priv->vmwa_context;
	struct mlx5_nl_vlan_dev *vlan_dev = &vmwa->vlan_dev[0];

	MLX5_ASSERT(!vlan->created);
	MLX5_ASSERT(priv->vmwa_context);
	if (vlan->created || !vmwa)
		return;
	rte_spinlock_lock(&vmwa->sl);
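	/*
	 * First user of this tag: try to create the VLAN sub-interface
	 * on top of the VF via Netlink; the result is checked below
	 * before a reference is taken.
	 */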
78 	if (vlan_dev[vlan->tag].refcnt == 0) {
79 		MLX5_ASSERT(!vlan_dev[vlan->tag].ifindex);
80 		vlan_dev[vlan->tag].ifindex =
81 			mlx5_nl_vlan_vmwa_create(vmwa, vmwa->vf_ifindex,
82 						 vlan->tag);
83 	}
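	/* Take a reference only if the VLAN interface actually exists. */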
	if (vlan_dev[vlan->tag].ifindex) {
		vlan_dev[vlan->tag].refcnt++;
		vlan->created = 1;
	}
	rte_spinlock_unlock(&vmwa->sl);
}
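
/*
 * Usage sketch (illustration only, not code taken from the driver): a
 * caller embeds a struct mlx5_vf_vlan, fills the tag, and brackets the
 * lifetime of the VLAN interface with the acquire/release pair above.
 * Only the "tag"/"created" fields and the two calls are taken from this
 * file; "vlan_tag" and the surrounding flow code are placeholders.
 *
 *	struct mlx5_vf_vlan vf_vlan = { .tag = vlan_tag };
 *
 *	mlx5_vlan_vmwa_acquire(dev, &vf_vlan);
 *	if (vf_vlan.created) {
 *		... program the flow that needs the VLAN interface ...
 *		mlx5_vlan_vmwa_release(dev, &vf_vlan);
 *	}
 */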

/*
 * Create per-Ethernet-device VLAN VM workaround context.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param ifindex
 *   Interface index.
 *
 * @return
 *   Pointer to mlx5_nl_vlan_vmwa_context.
 */
void *
mlx5_vlan_vmwa_init(struct rte_eth_dev *dev, uint32_t ifindex)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_nl_vlan_vmwa_context *vmwa;
	enum rte_hypervisor hv_type;

	/* Do not engage workaround over PF. */
	if (!priv->sh->dev_cap.vf)
		return NULL;
	/* Check whether there is a desired virtual environment. */
	hv_type = rte_hypervisor_get();
	switch (hv_type) {
	case RTE_HYPERVISOR_UNKNOWN:
	case RTE_HYPERVISOR_VMWARE:
		/*
		 * The "white list" of configurations
		 * to engage the workaround.
		 */
		break;
	default:
		/*
		 * The configuration is not found in the "white list".
		 * We should not engage the VLAN workaround.
		 */
		return NULL;
	}
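	/*
	 * Allocate the per-device context; MLX5_MEM_ZERO leaves the whole
	 * VLAN device table with zero ifindex and refcnt values.
	 */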
	vmwa = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*vmwa), sizeof(uint32_t),
			   SOCKET_ID_ANY);
	if (!vmwa) {
		DRV_LOG(WARNING,
			"Can not allocate memory"
			" for VLAN workaround context");
		return NULL;
	}
	rte_spinlock_init(&vmwa->sl);
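	/*
	 * Open a NETLINK_ROUTE socket; it is used by the VLAN interface
	 * create/delete helpers above.
	 */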
	vmwa->nl_socket = mlx5_nl_init(NETLINK_ROUTE, 0);
	if (vmwa->nl_socket < 0) {
		DRV_LOG(WARNING,
			"Can not create Netlink socket"
			" for VLAN workaround context");
		mlx5_free(vmwa);
		return NULL;
	}
	vmwa->vf_ifindex = ifindex;
	/* Cleanup for existing VLAN devices. */
	return vmwa;
}

/*
 * Destroy per-Ethernet-device VLAN VM workaround context.
 *
 * @param vmctx
 *   Pointer to VM workaround context.
 */
void
mlx5_vlan_vmwa_exit(void *vmctx)
{
	unsigned int i;

	struct mlx5_nl_vlan_vmwa_context *vmwa = vmctx;
	/* Delete all remaining VLAN devices. */
	for (i = 0; i < RTE_DIM(vmwa->vlan_dev); i++) {
		if (vmwa->vlan_dev[i].ifindex)
			mlx5_nl_vlan_vmwa_delete(vmwa,
						 vmwa->vlan_dev[i].ifindex);
	}
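	/* Close the Netlink socket only if it was successfully opened. */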
	if (vmwa->nl_socket >= 0)
		close(vmwa->nl_socket);
	mlx5_free(vmwa);
}
173