/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2014-2021 Netronome Systems, Inc.
 * All rights reserved.
 *
 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
 */

/*
 * vim:shiftwidth=8:noexpandtab
 *
 * @file dpdk/pmd/nfp_ethdev_vf.c
 *
 * Netronome vNIC VF DPDK Poll-Mode Driver: Main entry point
 */

#include <rte_alarm.h>

#include "nfpcore/nfp_mip.h"
#include "nfpcore/nfp_rtsym.h"

#include "nfp_common.h"
#include "nfp_rxtx.h"
#include "nfp_logs.h"
#include "nfp_ctrl.h"

static void nfp_netvf_read_mac(struct nfp_net_hw *hw);
static int nfp_netvf_start(struct rte_eth_dev *dev);
static int nfp_netvf_stop(struct rte_eth_dev *dev);
static int nfp_netvf_set_link_up(struct rte_eth_dev *dev);
static int nfp_netvf_set_link_down(struct rte_eth_dev *dev);
static int nfp_netvf_close(struct rte_eth_dev *dev);
static int nfp_netvf_init(struct rte_eth_dev *eth_dev);
static int nfp_vf_pci_uninit(struct rte_eth_dev *eth_dev);
static int eth_nfp_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev);
static int eth_nfp_vf_pci_remove(struct rte_pci_device *pci_dev);

static void
nfp_netvf_read_mac(struct nfp_net_hw *hw)
{
	uint32_t tmp;

	tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR));
	memcpy(&hw->mac_addr[0], &tmp, 4);

	tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4));
	memcpy(&hw->mac_addr[4], &tmp, 2);
}

static int
nfp_netvf_start(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t new_ctrl, update = 0;
	struct nfp_net_hw *hw;
	struct rte_eth_conf *dev_conf;
	struct rte_eth_rxmode *rxmode;
	uint32_t intr_vector;
	int ret;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_LOG(DEBUG, "Start");

	/* Disabling queues just in case... */
	nfp_net_disable_queues(dev);

	/* Enabling the required queues in the device */
	nfp_net_enable_queues(dev);

	/* Check and configure queue intr-vector mapping */
	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		if (rte_intr_type_get(intr_handle) ==
				RTE_INTR_HANDLE_UIO) {
			/*
			 * Better not to share LSC with RX interrupts.
			 * Unregistering the LSC interrupt handler.
			 */
			rte_intr_callback_unregister(pci_dev->intr_handle,
				nfp_net_dev_interrupt_handler, (void *)dev);

			if (dev->data->nb_rx_queues > 1) {
				PMD_INIT_LOG(ERR, "PMD rx interrupt only "
					"supports 1 queue with UIO");
				return -EIO;
			}
		}
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;

		nfp_configure_rx_interrupt(dev, intr_handle);
		update = NFP_NET_CFG_UPDATE_MSIX;
	}

	rte_intr_enable(intr_handle);

	new_ctrl = nfp_check_offloads(dev);

	/* Writing configuration parameters in the device */
	nfp_net_params_setup(hw);

	dev_conf = &dev->data->dev_conf;
	rxmode = &dev_conf->rxmode;

	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) {
		nfp_net_rss_config_default(dev);
		update |= NFP_NET_CFG_UPDATE_RSS;
		new_ctrl |= NFP_NET_CFG_CTRL_RSS;
	}

	/* Enable device */
	new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;

	update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;

	if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;

	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return -EIO;

	/*
	 * Allocating rte mbufs for configured rx queues.
	 * This requires queues being enabled before.
	 */
	if (nfp_net_rx_freelist_setup(dev) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	hw->ctrl = new_ctrl;

	return 0;

error:
	/*
	 * An error returned by this function should mean the app exits and
	 * then the system releases all the memory allocated, even memory
	 * coming from hugepages.
	 *
	 * The device could be enabled at this point with some queues ready
	 * for getting packets. This is true if the call to
	 * nfp_net_rx_freelist_setup() succeeds for some queues but fails
	 * for subsequent queues.
	 *
	 * This should make the app exit, but it is better if we tell the
	 * device first.
	 */
	nfp_net_disable_queues(dev);

	return ret;
}

static int
nfp_netvf_stop(struct rte_eth_dev *dev)
{
	struct nfp_net_txq *this_tx_q;
	struct nfp_net_rxq *this_rx_q;
	int i;

	PMD_INIT_LOG(DEBUG, "Stop");

	nfp_net_disable_queues(dev);

	/* Clear queues */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
		nfp_net_reset_tx_queue(this_tx_q);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
		nfp_net_reset_rx_queue(this_rx_q);
	}

	return 0;
}

/* Set the link up. */
static int
nfp_netvf_set_link_up(struct rte_eth_dev *dev __rte_unused)
{
	return -ENOTSUP;
}

/* Set the link down. */
static int
nfp_netvf_set_link_down(struct rte_eth_dev *dev __rte_unused)
{
	return -ENOTSUP;
}

/* Reset and stop device. The device can not be restarted. */
static int
nfp_netvf_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct nfp_net_txq *this_tx_q;
	struct nfp_net_rxq *this_rx_q;
	int i;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	PMD_INIT_LOG(DEBUG, "Close");

	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	/*
	 * We assume that the DPDK application is stopping all the
	 * threads/queues before calling the device close function.
	 */

	nfp_net_disable_queues(dev);

	/* Clear queues */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
		nfp_net_reset_tx_queue(this_tx_q);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
		nfp_net_reset_rx_queue(this_rx_q);
	}

	rte_intr_disable(pci_dev->intr_handle);

	/* Unregister callback func from eal lib */
	rte_intr_callback_unregister(pci_dev->intr_handle,
			nfp_net_dev_interrupt_handler,
			(void *)dev);

	/* Cancel possible impending LSC work here before releasing the port */
	rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler,
			(void *)dev);

	/*
	 * The ixgbe PMD disables the pcie master on the
	 * device. The i40e does not...
	 */

	return 0;
}

/* Initialise and register VF driver with DPDK Application */
static const struct eth_dev_ops nfp_netvf_eth_dev_ops = {
	.dev_configure		= nfp_net_configure,
	.dev_start		= nfp_netvf_start,
	.dev_stop		= nfp_netvf_stop,
	.dev_set_link_up	= nfp_netvf_set_link_up,
	.dev_set_link_down	= nfp_netvf_set_link_down,
	.dev_close		= nfp_netvf_close,
	.promiscuous_enable	= nfp_net_promisc_enable,
	.promiscuous_disable	= nfp_net_promisc_disable,
	.link_update		= nfp_net_link_update,
	.stats_get		= nfp_net_stats_get,
	.stats_reset		= nfp_net_stats_reset,
	.dev_infos_get		= nfp_net_infos_get,
	.dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
	.mtu_set		= nfp_net_dev_mtu_set,
	.mac_addr_set		= nfp_set_mac_addr,
	.vlan_offload_set	= nfp_net_vlan_offload_set,
	.reta_update		= nfp_net_reta_update,
	.reta_query		= nfp_net_reta_query,
	.rss_hash_update	= nfp_net_rss_hash_update,
	.rss_hash_conf_get	= nfp_net_rss_hash_conf_get,
	.rx_queue_setup		= nfp_net_rx_queue_setup,
	.rx_queue_release	= nfp_net_rx_queue_release,
	.tx_queue_setup		= nfp_net_tx_queue_setup,
	.tx_queue_release	= nfp_net_tx_queue_release,
	.rx_queue_intr_enable	= nfp_rx_queue_intr_enable,
	.rx_queue_intr_disable	= nfp_rx_queue_intr_disable,
};

static int
nfp_netvf_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct nfp_net_hw *hw;
	struct rte_ether_addr *tmp_ether_addr;

	uint64_t tx_bar_off = 0, rx_bar_off = 0;
	uint32_t start_q;
	int stride = 4;
	int port = 0;
	int err;

	PMD_INIT_FUNC_TRACE();

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* NFP can not handle DMA addresses requiring more than 40 bits */
	if (rte_mem_check_dma_mask(40)) {
		RTE_LOG(ERR, PMD, "device %s can not be used:",
				pci_dev->device.name);
		RTE_LOG(ERR, PMD, "\trestricted dma mask to 40 bits!\n");
		return -ENODEV;
	}

	hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	eth_dev->dev_ops = &nfp_netvf_eth_dev_ops;
	eth_dev->rx_queue_count = nfp_net_rx_queue_count;
	eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
	eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	PMD_INIT_LOG(DEBUG,
"nfp_net: device (%u:%u) %u:%u:%u:%u", 322 pci_dev->id.vendor_id, pci_dev->id.device_id, 323 pci_dev->addr.domain, pci_dev->addr.bus, 324 pci_dev->addr.devid, pci_dev->addr.function); 325 326 hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr; 327 if (hw->ctrl_bar == NULL) { 328 PMD_DRV_LOG(ERR, 329 "hw->ctrl_bar is NULL. BAR0 not configured"); 330 return -ENODEV; 331 } 332 333 PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar); 334 335 hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS); 336 hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS); 337 338 /* Work out where in the BAR the queues start. */ 339 switch (pci_dev->id.device_id) { 340 case PCI_DEVICE_ID_NFP6000_VF_NIC: 341 start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ); 342 tx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ; 343 start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ); 344 rx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ; 345 break; 346 default: 347 PMD_DRV_LOG(ERR, "nfp_net: no device ID matching"); 348 err = -ENODEV; 349 goto dev_err_ctrl_map; 350 } 351 352 PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off); 353 PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off); 354 355 hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + 356 tx_bar_off; 357 hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + 358 rx_bar_off; 359 360 PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p", 361 hw->ctrl_bar, hw->tx_bar, hw->rx_bar); 362 363 nfp_net_cfg_queue_setup(hw); 364 365 /* Get some of the read-only fields from the config BAR */ 366 hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION); 367 hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP); 368 hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU); 369 hw->mtu = RTE_ETHER_MTU; 370 371 /* VLAN insertion is incompatible with LSOv2 */ 372 if (hw->cap & NFP_NET_CFG_CTRL_LSO2) 373 hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN; 374 375 if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2) 376 hw->rx_offset = NFP_NET_RX_OFFSET; 377 else 378 hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR); 379 380 PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d", 381 NFD_CFG_MAJOR_VERSION_of(hw->ver), 382 NFD_CFG_MINOR_VERSION_of(hw->ver), hw->max_mtu); 383 384 PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s%s%s%s", hw->cap, 385 hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "", 386 hw->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "", 387 hw->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "", 388 hw->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "", 389 hw->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "", 390 hw->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "", 391 hw->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "", 392 hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "", 393 hw->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "", 394 hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "", 395 hw->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "", 396 hw->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSOv2 " : "", 397 hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "", 398 hw->cap & NFP_NET_CFG_CTRL_RSS2 ? 
"RSSv2 " : ""); 399 400 hw->ctrl = 0; 401 402 hw->stride_rx = stride; 403 hw->stride_tx = stride; 404 405 PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u", 406 hw->max_rx_queues, hw->max_tx_queues); 407 408 /* Initializing spinlock for reconfigs */ 409 rte_spinlock_init(&hw->reconfig_lock); 410 411 /* Allocating memory for mac addr */ 412 eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", 413 RTE_ETHER_ADDR_LEN, 0); 414 if (eth_dev->data->mac_addrs == NULL) { 415 PMD_INIT_LOG(ERR, "Failed to space for MAC address"); 416 err = -ENOMEM; 417 goto dev_err_queues_map; 418 } 419 420 nfp_netvf_read_mac(hw); 421 422 tmp_ether_addr = (struct rte_ether_addr *)&hw->mac_addr; 423 if (!rte_is_valid_assigned_ether_addr(tmp_ether_addr)) { 424 PMD_INIT_LOG(INFO, "Using random mac address for port %d", 425 port); 426 /* Using random mac addresses for VFs */ 427 rte_eth_random_addr(&hw->mac_addr[0]); 428 nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr); 429 } 430 431 /* Copying mac address to DPDK eth_dev struct */ 432 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr, 433 ð_dev->data->mac_addrs[0]); 434 435 if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)) 436 eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR; 437 438 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 439 440 PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x " 441 "mac=%02x:%02x:%02x:%02x:%02x:%02x", 442 eth_dev->data->port_id, pci_dev->id.vendor_id, 443 pci_dev->id.device_id, 444 hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2], 445 hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]); 446 447 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 448 /* Registering LSC interrupt handler */ 449 rte_intr_callback_register(pci_dev->intr_handle, 450 nfp_net_dev_interrupt_handler, 451 (void *)eth_dev); 452 /* Telling the firmware about the LSC interrupt entry */ 453 nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); 454 /* Recording current stats counters values */ 455 nfp_net_stats_reset(eth_dev); 456 } 457 458 return 0; 459 460 dev_err_queues_map: 461 nfp_cpp_area_free(hw->hwqueues_area); 462 dev_err_ctrl_map: 463 nfp_cpp_area_free(hw->ctrl_area); 464 465 return err; 466 } 467 468 static const struct rte_pci_id pci_id_nfp_vf_net_map[] = { 469 { 470 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, 471 PCI_DEVICE_ID_NFP6000_VF_NIC) 472 }, 473 { 474 .vendor_id = 0, 475 }, 476 }; 477 478 static int nfp_vf_pci_uninit(struct rte_eth_dev *eth_dev) 479 { 480 /* VF cleanup, just free private port data */ 481 return nfp_netvf_close(eth_dev); 482 } 483 484 static int eth_nfp_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 485 struct rte_pci_device *pci_dev) 486 { 487 return rte_eth_dev_pci_generic_probe(pci_dev, 488 sizeof(struct nfp_net_adapter), nfp_netvf_init); 489 } 490 491 static int eth_nfp_vf_pci_remove(struct rte_pci_device *pci_dev) 492 { 493 return rte_eth_dev_pci_generic_remove(pci_dev, nfp_vf_pci_uninit); 494 } 495 496 static struct rte_pci_driver rte_nfp_net_vf_pmd = { 497 .id_table = pci_id_nfp_vf_net_map, 498 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 499 .probe = eth_nfp_vf_pci_probe, 500 .remove = eth_nfp_vf_pci_remove, 501 }; 502 503 RTE_PMD_REGISTER_PCI(net_nfp_vf, rte_nfp_net_vf_pmd); 504 RTE_PMD_REGISTER_PCI_TABLE(net_nfp_vf, pci_id_nfp_vf_net_map); 505 RTE_PMD_REGISTER_KMOD_DEP(net_nfp_vf, "* igb_uio | uio_pci_generic | vfio"); 506 /* 507 * Local variables: 508 * c-file-style: "Linux" 509 * indent-tabs-mode: t 510 * End: 511 */ 512