/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <stdint.h>
#include <unistd.h>

/*
 * Not needed by this file; included to work around the lack of off_t
 * definition for mlx5dv.h with unpatched rdma-core versions.
 */
#include <sys/types.h>

#include <rte_ethdev_driver.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_hypervisor.h>

#include <mlx5.h>
#include <mlx5_nl.h>
#include <mlx5_malloc.h>

/**
 * Release the VLAN network device created for the VM workaround.
 *
 * @param[in] dev
 *   Ethernet device object, Netlink context provider.
 * @param[in] vlan
 *   Object representing the network device to release.
 */
void
mlx5_vlan_vmwa_release(struct rte_eth_dev *dev,
		       struct mlx5_vf_vlan *vlan)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_nl_vlan_vmwa_context *vmwa = priv->vmwa_context;
	struct mlx5_nl_vlan_dev *vlan_dev = &vmwa->vlan_dev[0];

	MLX5_ASSERT(vlan->created);
	MLX5_ASSERT(priv->vmwa_context);
	if (!vlan->created || !vmwa)
		return;
	vlan->created = 0;
	MLX5_ASSERT(vlan_dev[vlan->tag].refcnt);
	if (--vlan_dev[vlan->tag].refcnt == 0 &&
	    vlan_dev[vlan->tag].ifindex) {
		mlx5_nl_vlan_vmwa_delete(vmwa, vlan_dev[vlan->tag].ifindex);
		vlan_dev[vlan->tag].ifindex = 0;
	}
}

/**
 * Acquire VLAN interface with specified tag for VM workaround.
 *
 * @param[in] dev
 *   Ethernet device object, Netlink context provider.
 * @param[in] vlan
 *   Object representing the network device to acquire.
 */
void
mlx5_vlan_vmwa_acquire(struct rte_eth_dev *dev,
		       struct mlx5_vf_vlan *vlan)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_nl_vlan_vmwa_context *vmwa = priv->vmwa_context;
	struct mlx5_nl_vlan_dev *vlan_dev = &vmwa->vlan_dev[0];

	MLX5_ASSERT(!vlan->created);
	MLX5_ASSERT(priv->vmwa_context);
	if (vlan->created || !vmwa)
		return;
	if (vlan_dev[vlan->tag].refcnt == 0) {
		MLX5_ASSERT(!vlan_dev[vlan->tag].ifindex);
		vlan_dev[vlan->tag].ifindex =
			mlx5_nl_vlan_vmwa_create(vmwa, vmwa->vf_ifindex,
						 vlan->tag);
	}
	if (vlan_dev[vlan->tag].ifindex) {
		vlan_dev[vlan->tag].refcnt++;
		vlan->created = 1;
	}
}
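
/*
 * Usage sketch (illustrative only, not part of the driver): a caller that
 * needs VLAN tag 100 pushed for a VF flow would typically bracket the flow
 * lifetime with the two helpers above, assuming `dev` is a probed mlx5 port
 * and `vf_vlan` is owned by that flow:
 *
 *	struct mlx5_vf_vlan vf_vlan = { .tag = 100 };
 *
 *	mlx5_vlan_vmwa_acquire(dev, &vf_vlan);
 *	if (vf_vlan.created) {
 *		... use the created VLAN interface for the flow ...
 *		mlx5_vlan_vmwa_release(dev, &vf_vlan);
 *	}
 */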

/**
 * Create the per-Ethernet-device VLAN VM workaround context.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param ifindex
 *   Interface index.
 *
 * @return
 *   Pointer to mlx5_nl_vlan_vmwa_context on success, NULL otherwise.
 */
void *
mlx5_vlan_vmwa_init(struct rte_eth_dev *dev, uint32_t ifindex)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	struct mlx5_nl_vlan_vmwa_context *vmwa;
	enum rte_hypervisor hv_type;

	/* Do not engage workaround over PF. */
	if (!config->vf)
		return NULL;
	/* Check whether there is a desired virtual environment. */
	hv_type = rte_hypervisor_get();
	switch (hv_type) {
	case RTE_HYPERVISOR_UNKNOWN:
	case RTE_HYPERVISOR_VMWARE:
		/*
		 * The "white list" of configurations
		 * to engage the workaround.
		 */
		break;
	default:
		/*
		 * The configuration is not found in the "white list".
		 * We should not engage the VLAN workaround.
		 */
		return NULL;
	}
	vmwa = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*vmwa), sizeof(uint32_t),
			   SOCKET_ID_ANY);
	if (!vmwa) {
		DRV_LOG(WARNING,
			"Can not allocate memory"
			" for VLAN workaround context");
		return NULL;
	}
	vmwa->nl_socket = mlx5_nl_init(NETLINK_ROUTE);
	if (vmwa->nl_socket < 0) {
		DRV_LOG(WARNING,
			"Can not create Netlink socket"
			" for VLAN workaround context");
		mlx5_free(vmwa);
		return NULL;
	}
	vmwa->vf_ifindex = ifindex;
	/* Cleanup for existing VLAN devices. */
	return vmwa;
}
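
/*
 * Call-site sketch (an assumption about the surrounding probe code, not a
 * verbatim quote of it): the returned pointer is meant to be stored in the
 * per-port private data so that the acquire/release helpers above can find
 * it, e.g.:
 *
 *	priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, vf_ifindex);
 *
 * where `vf_ifindex` stands for whatever VF interface index the probe path
 * resolved; a NULL return simply means the workaround stays disabled.
 */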

/**
 * Destroy the per-Ethernet-device VLAN VM workaround context.
 *
 * @param vmctx
 *   Pointer to the VM workaround context.
 */
void
mlx5_vlan_vmwa_exit(void *vmctx)
{
	unsigned int i;
	struct mlx5_nl_vlan_vmwa_context *vmwa = vmctx;

	/* Delete all remaining VLAN devices. */
	for (i = 0; i < RTE_DIM(vmwa->vlan_dev); i++) {
		if (vmwa->vlan_dev[i].ifindex)
			mlx5_nl_vlan_vmwa_delete(vmwa,
						 vmwa->vlan_dev[i].ifindex);
	}
	if (vmwa->nl_socket >= 0)
		close(vmwa->nl_socket);
	mlx5_free(vmwa);
}
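
/*
 * Teardown sketch (again an assumption about the close path, mirroring the
 * init call above): on port close the driver is expected to drop whatever
 * context it stored at probe time, e.g.:
 *
 *	if (priv->vmwa_context) {
 *		mlx5_vlan_vmwa_exit(priv->vmwa_context);
 *		priv->vmwa_context = NULL;
 *	}
 */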