/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>

#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_errno.h>

#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_dev.h>
#include <rte_kvargs.h>

#include "virtio.h"
#include "virtio_ethdev.h"
#include "virtio_pci.h"
#include "virtio_logs.h"

/*
 * The set of PCI devices this driver supports: the legacy (transitional)
 * and modern (virtio 1.0+) virtio-net device IDs, terminated by a
 * zeroed sentinel entry.
 */
static const struct rte_pci_id pci_id_virtio_map[] = {
	{ RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_LEGACY_DEVICEID_NET) },
	{ RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_MODERN_DEVICEID_NET) },
	{ .vendor_id = 0, /* sentinel */ },
};


/*
 * Remap the PCI device again (IO port map for legacy device and
 * memory map for modern device), so that the secondary process
 * could have the PCI initiated correctly.
 *
 * Called only from the secondary-process path of eth_virtio_pci_init().
 * Returns 0 on success, -1 on mapping failure.
 */
static int
virtio_remap_pci(struct rte_pci_device *pci_dev, struct virtio_pci_dev *dev)
{
	struct virtio_hw *hw = &dev->hw;

	if (dev->modern) {
		/*
		 * We don't have to re-parse the PCI config space, since
		 * rte_pci_map_device() makes sure the mapped address
		 * in secondary process would equal to the one mapped in
		 * the primary process: error will be returned if that
		 * requirement is not met.
		 *
		 * That said, we could simply reuse all cap pointers
		 * (such as dev_cfg, common_cfg, etc.) parsed from the
		 * primary process, which is stored in shared memory.
		 */
		if (rte_pci_map_device(pci_dev)) {
			PMD_INIT_LOG(DEBUG, "failed to map pci device!");
			return -1;
		}
	} else {
		/* Legacy device: re-open the I/O port region instead. */
		if (vtpci_legacy_ioport_map(hw) < 0)
			return -1;
	}

	return 0;
}

/*
 * Per-port init hook invoked by rte_eth_dev_pci_generic_probe().
 *
 * Primary process: performs the full PCI-level initialization via
 * vtpci_init(). Secondary process: only re-attaches to the device —
 * restores the ops table (modern vs. legacy) and remaps the BAR/ioport —
 * since the shared device state was already set up by the primary.
 * Finally runs the common ethdev-level init in both cases.
 *
 * Returns 0 on success; -1 or a negative errno-style code on failure.
 */
static int
eth_virtio_pci_init(struct rte_eth_dev *eth_dev)
{
	struct virtio_pci_dev *dev = eth_dev->data->dev_private;
	struct virtio_hw *hw = &dev->hw;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	int ret;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		hw->port_id = eth_dev->data->port_id;
		VTPCI_DEV(hw) = pci_dev;
		ret = vtpci_init(RTE_ETH_DEV_TO_PCI(eth_dev), dev);
		if (ret) {
			PMD_INIT_LOG(ERR, "Failed to init PCI device");
			return -1;
		}
	} else {
		/*
		 * Secondary process: VTPCI_DEV is a per-process pointer and
		 * must be re-set here; dev->modern lives in shared memory
		 * and was decided by the primary.
		 */
		VTPCI_DEV(hw) = pci_dev;
		if (dev->modern)
			VIRTIO_OPS(hw) = &modern_ops;
		else
			VIRTIO_OPS(hw) = &legacy_ops;

		ret = virtio_remap_pci(RTE_ETH_DEV_TO_PCI(eth_dev), dev);
		if (ret < 0) {
			PMD_INIT_LOG(ERR, "Failed to remap PCI device");
			return -1;
		}
	}

	ret = eth_virtio_dev_init(eth_dev);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "Failed to init virtio device");
		goto err_unmap;
	}

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		eth_dev->data->port_id, pci_dev->id.vendor_id,
		pci_dev->id.device_id);

	return 0;

err_unmap:
	/*
	 * Undo the mappings done above (by vtpci_init() or
	 * virtio_remap_pci()) before bubbling up the error.
	 */
	rte_pci_unmap_device(RTE_ETH_DEV_TO_PCI(eth_dev));
	if (!dev->modern)
		vtpci_legacy_ioport_unmap(hw);

	return ret;
}

/*
 * Per-port uninit hook invoked by rte_eth_dev_pci_generic_remove().
 *
 * Secondary process: only tears down the per-process mappings created in
 * eth_virtio_pci_init() and returns — device state is owned by the
 * primary. Primary process: stops and closes the device.
 */
static int
eth_virtio_pci_uninit(struct rte_eth_dev *eth_dev)
{
	int ret;
	struct virtio_pci_dev *dev;
	struct virtio_hw *hw;
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		dev = eth_dev->data->dev_private;
		hw = &dev->hw;

		if (dev->modern)
			rte_pci_unmap_device(RTE_ETH_DEV_TO_PCI(eth_dev));
		else
			vtpci_legacy_ioport_unmap(hw);
		return 0;
	}

	/* virtio_dev_close() also releases the PCI resources. */
	ret = virtio_dev_stop(eth_dev);
	virtio_dev_close(eth_dev);

	PMD_INIT_LOG(DEBUG, "dev_uninit completed");

	return ret;
}

/*
 * rte_kvargs_process() callback for the "vdpa" devarg: sets *ret_val to
 * 1 only for the exact value "1", otherwise 0. Always returns 0 so
 * kvargs iteration continues.
 */
static int vdpa_check_handler(__rte_unused const char *key,
		const char *value, void *ret_val)
{
	if (strcmp(value, "1") == 0)
		*(int *)ret_val = 1;
	else
		*(int *)ret_val = 0;

	return 0;
}

#define VIRTIO_ARG_VDPA       "vdpa"

/*
 * Parse the device arguments looking for "vdpa=1".
 *
 * @param devargs  Device arguments attached to the PCI device (may be NULL).
 * @param vdpa     Output: set to 1 when the user requested vdpa mode.
 *
 * Returns 0 on success (including when there are no devargs or the
 * kvargs string fails to parse — parse failure is deliberately
 * non-fatal and only logged), or a negative value if processing the
 * "vdpa" key itself fails.
 */
static int
virtio_pci_devargs_parse(struct rte_devargs *devargs, int *vdpa)
{
	struct rte_kvargs *kvlist;
	int ret = 0;

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL) {
		PMD_INIT_LOG(ERR, "error when parsing param");
		return 0;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_ARG_VDPA) == 1) {
		/* vdpa mode selected when there's a key-value pair:
		 * vdpa=1
		 */
		ret = rte_kvargs_process(kvlist, VIRTIO_ARG_VDPA,
				vdpa_check_handler, vdpa);
		if (ret < 0)
			PMD_INIT_LOG(ERR, "Failed to parse %s", VIRTIO_ARG_VDPA);
	}

	rte_kvargs_free(kvlist);

	return ret;
}

/*
 * PCI probe callback: skips the device (returns 1) when the user asked
 * for vdpa mode, so a vdpa driver can claim it instead; otherwise
 * allocates the ethdev and runs eth_virtio_pci_init().
 */
static int eth_virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	int vdpa = 0;
	int ret = 0;

	ret = virtio_pci_devargs_parse(pci_dev->device.devargs, &vdpa);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "devargs parsing is failed");
		return ret;
	}
	/* virtio pmd skips probe if device needs to work in vdpa mode */
	if (vdpa == 1)
		return 1;

	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct virtio_pci_dev),
		eth_virtio_pci_init);
}

/*
 * PCI remove callback. A port already released via rte_eth_dev_close()
 * makes the generic remove return -ENODEV; treat that as success.
 */
static int eth_virtio_pci_remove(struct rte_pci_device *pci_dev)
{
	int ret;

	ret = rte_eth_dev_pci_generic_remove(pci_dev, eth_virtio_pci_uninit);
	/* Port has already been released by close. */
	if (ret == -ENODEV)
		ret = 0;
	return ret;
}

static struct rte_pci_driver rte_virtio_net_pci_pmd = {
	.driver = {
		.name = "net_virtio",
	},
	.id_table = pci_id_virtio_map,
	.drv_flags = 0,
	.probe = eth_virtio_pci_probe,
	.remove = eth_virtio_pci_remove,
};

RTE_INIT(rte_virtio_net_pci_pmd_init)
{
	/* Legacy virtio uses port I/O; request I/O privilege before
	 * registering so ioport access works. */
	rte_eal_iopl_init();
	rte_pci_register(&rte_virtio_net_pci_pmd);
}

RTE_PMD_REGISTER_PCI_TABLE(net_virtio, pci_id_virtio_map);
RTE_PMD_REGISTER_KMOD_DEP(net_virtio, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_EXPORT_NAME(net_virtio, __COUNTER__);