/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <stdint.h>
#include <unistd.h>

/*
 * Not needed by this file; included to work around the lack of off_t
 * definition for mlx5dv.h with unpatched rdma-core versions.
 */
#include <sys/types.h>

#include <ethdev_driver.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_hypervisor.h>

#include <mlx5.h>
#include <mlx5_nl.h>
#include <mlx5_malloc.h>

/**
 * Release VLAN network device, created for VM workaround.
 *
 * @param[in] dev
 *   Ethernet device object, Netlink context provider.
 * @param[in] vlan
 *   Object representing the network device to release.
 */
void
mlx5_vlan_vmwa_release(struct rte_eth_dev *dev,
		       struct mlx5_vf_vlan *vlan)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_nl_vlan_vmwa_context *vmwa = priv->vmwa_context;
	struct mlx5_nl_vlan_dev *vlan_dev = &vmwa->vlan_dev[0];

	MLX5_ASSERT(vlan->created);
	MLX5_ASSERT(priv->vmwa_context);
	if (!vlan->created || !vmwa)
		return;
	vlan->created = 0;
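	/* Drop the per-tag reference under the lock; the kernel VLAN
	 * interface is removed once the last reference is gone.
	 */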
	rte_spinlock_lock(&vmwa->sl);
	MLX5_ASSERT(vlan_dev[vlan->tag].refcnt);
	if (--vlan_dev[vlan->tag].refcnt == 0 &&
	    vlan_dev[vlan->tag].ifindex) {
		mlx5_nl_vlan_vmwa_delete(vmwa, vlan_dev[vlan->tag].ifindex);
		vlan_dev[vlan->tag].ifindex = 0;
	}
	rte_spinlock_unlock(&vmwa->sl);
}

/**
 * Acquire VLAN interface with specified tag for VM workaround.
 *
 * @param[in] dev
 *   Ethernet device object, Netlink context provider.
 * @param[in] vlan
 *   Object representing the network device to acquire.
 */
void
mlx5_vlan_vmwa_acquire(struct rte_eth_dev *dev,
		       struct mlx5_vf_vlan *vlan)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_nl_vlan_vmwa_context *vmwa = priv->vmwa_context;
	struct mlx5_nl_vlan_dev *vlan_dev = &vmwa->vlan_dev[0];

	MLX5_ASSERT(!vlan->created);
	MLX5_ASSERT(priv->vmwa_context);
	if (vlan->created || !vmwa)
		return;
	rte_spinlock_lock(&vmwa->sl);
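	/* First reference to this tag: create the VLAN subinterface
	 * over the VF via Netlink.
	 */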
	if (vlan_dev[vlan->tag].refcnt == 0) {
		MLX5_ASSERT(!vlan_dev[vlan->tag].ifindex);
		vlan_dev[vlan->tag].ifindex =
			mlx5_nl_vlan_vmwa_create(vmwa, vmwa->vf_ifindex,
						 vlan->tag);
	}
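	/* Count the reference only if the interface exists; creation
	 * may have failed and left ifindex at zero.
	 */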
	if (vlan_dev[vlan->tag].ifindex) {
		vlan_dev[vlan->tag].refcnt++;
		vlan->created = 1;
	}
	rte_spinlock_unlock(&vmwa->sl);
}
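
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * the caller keeps a struct mlx5_vf_vlan initialized with the desired
 * tag, "tag" below being a placeholder value.
 *
 *	struct mlx5_vf_vlan vf_vlan = { .tag = tag };
 *
 *	mlx5_vlan_vmwa_acquire(dev, &vf_vlan);
 *	if (vf_vlan.created) {
 *		... use the VLAN interface ...
 *		mlx5_vlan_vmwa_release(dev, &vf_vlan);
 *	}
 */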

/**
 * Create the per-Ethernet-device VLAN VM workaround context.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param ifindex
 *   Interface index.
 *
 * @return
 *   Pointer to mlx5_nl_vlan_vmwa_context on success, NULL otherwise.
 */
void *
mlx5_vlan_vmwa_init(struct rte_eth_dev *dev, uint32_t ifindex)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_nl_vlan_vmwa_context *vmwa;
	enum rte_hypervisor hv_type;

	/* Do not engage the workaround over a PF. */
	if (!priv->sh->dev_cap.vf)
		return NULL;
	/* Check whether the desired virtual environment is present. */
	hv_type = rte_hypervisor_get();
	switch (hv_type) {
	case RTE_HYPERVISOR_UNKNOWN:
	case RTE_HYPERVISOR_VMWARE:
		/*
		 * The "white list" of configurations
		 * to engage the workaround.
		 */
		break;
	default:
		/*
		 * The configuration is not found in the "white list".
		 * We should not engage the VLAN workaround.
		 */
		return NULL;
	}
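	/* Allocate the context zero-initialized on any NUMA node. */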
	vmwa = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*vmwa), sizeof(uint32_t),
			   SOCKET_ID_ANY);
	if (!vmwa) {
		DRV_LOG(WARNING,
			"Cannot allocate memory"
			" for VLAN workaround context");
		return NULL;
	}
	rte_spinlock_init(&vmwa->sl);
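	/* Dedicated routing Netlink socket to manage the VLAN devices. */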
	vmwa->nl_socket = mlx5_nl_init(NETLINK_ROUTE, 0);
	if (vmwa->nl_socket < 0) {
		DRV_LOG(WARNING,
			"Cannot create Netlink socket"
			" for VLAN workaround context");
		mlx5_free(vmwa);
		return NULL;
	}
	vmwa->vf_ifindex = ifindex;
	/* Cleanup for existing VLAN devices. */
	return vmwa;
}

/**
 * Destroy the per-Ethernet-device VLAN VM workaround context.
 *
 * @param vmctx
 *   Pointer to the VLAN workaround context.
 */
void
mlx5_vlan_vmwa_exit(void *vmctx)
{
	unsigned int i;

	struct mlx5_nl_vlan_vmwa_context *vmwa = vmctx;
	/* Delete all remaining VLAN devices. */
	for (i = 0; i < RTE_DIM(vmwa->vlan_dev); i++) {
		if (vmwa->vlan_dev[i].ifindex)
			mlx5_nl_vlan_vmwa_delete(vmwa,
						 vmwa->vlan_dev[i].ifindex);
	}
	if (vmwa->nl_socket >= 0)
		close(vmwa->nl_socket);
	mlx5_free(vmwa);
}