1950820f1SZaiyu Wang /* SPDX-License-Identifier: BSD-3-Clause 2950820f1SZaiyu Wang * Copyright(c) 2018-2025 Beijing WangXun Technology Co., Ltd. 3950820f1SZaiyu Wang * Copyright(c) 2010-2017 Intel Corporation 4950820f1SZaiyu Wang */ 5950820f1SZaiyu Wang 6950820f1SZaiyu Wang #include <sys/queue.h> 7950820f1SZaiyu Wang #include <stdio.h> 8950820f1SZaiyu Wang #include <errno.h> 9950820f1SZaiyu Wang #include <stdint.h> 10950820f1SZaiyu Wang #include <string.h> 11950820f1SZaiyu Wang #include <ethdev_pci.h> 12950820f1SZaiyu Wang 13950820f1SZaiyu Wang #include "ngbe_logs.h" 14950820f1SZaiyu Wang #include "base/ngbe.h" 15950820f1SZaiyu Wang #include "ngbe_ethdev.h" 16950820f1SZaiyu Wang #include "ngbe_rxtx.h" 17950820f1SZaiyu Wang #include "ngbe_regs_group.h" 18950820f1SZaiyu Wang 1954670a16SZaiyu Wang static const struct reg_info ngbevf_regs_general[] = { 2054670a16SZaiyu Wang {NGBE_VFRST, 1, 1, "NGBE_VFRST"}, 2154670a16SZaiyu Wang {NGBE_VFSTATUS, 1, 1, "NGBE_VFSTATUS"}, 2254670a16SZaiyu Wang {NGBE_VFMBCTL, 1, 1, "NGBE_VFMAILBOX"}, 2354670a16SZaiyu Wang {NGBE_VFMBX, 16, 4, "NGBE_VFMBX"}, 2454670a16SZaiyu Wang {NGBE_VFPBWRAP, 1, 1, "NGBE_VFPBWRAP"}, 2554670a16SZaiyu Wang {0, 0, 0, ""} 2654670a16SZaiyu Wang }; 2754670a16SZaiyu Wang 2854670a16SZaiyu Wang static const struct reg_info ngbevf_regs_interrupt[] = { 2954670a16SZaiyu Wang {0, 0, 0, ""} 3054670a16SZaiyu Wang }; 3154670a16SZaiyu Wang 3254670a16SZaiyu Wang static const struct reg_info ngbevf_regs_rxdma[] = { 3354670a16SZaiyu Wang {0, 0, 0, ""} 3454670a16SZaiyu Wang }; 3554670a16SZaiyu Wang 3654670a16SZaiyu Wang static const struct reg_info ngbevf_regs_tx[] = { 3754670a16SZaiyu Wang {0, 0, 0, ""} 3854670a16SZaiyu Wang }; 3954670a16SZaiyu Wang 4054670a16SZaiyu Wang /* VF registers */ 4154670a16SZaiyu Wang static const struct reg_info *ngbevf_regs[] = { 4254670a16SZaiyu Wang ngbevf_regs_general, 4354670a16SZaiyu Wang ngbevf_regs_interrupt, 4454670a16SZaiyu Wang ngbevf_regs_rxdma, 4554670a16SZaiyu Wang ngbevf_regs_tx, 
4654670a16SZaiyu Wang NULL}; 4754670a16SZaiyu Wang 48950820f1SZaiyu Wang #define NGBEVF_PMD_NAME "rte_ngbevf_pmd" /* PMD name */ 49950820f1SZaiyu Wang static int ngbevf_dev_close(struct rte_eth_dev *dev); 5062c072c0SZaiyu Wang static int ngbevf_dev_link_update(struct rte_eth_dev *dev, 5162c072c0SZaiyu Wang int wait_to_complete); 52fda42583SZaiyu Wang static void ngbevf_intr_disable(struct rte_eth_dev *dev); 53fda42583SZaiyu Wang static void ngbevf_intr_enable(struct rte_eth_dev *dev); 541d13283aSZaiyu Wang static int ngbevf_dev_stats_reset(struct rte_eth_dev *dev); 55f47dc03cSZaiyu Wang static int ngbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask); 56f47dc03cSZaiyu Wang static void ngbevf_set_vfta_all(struct rte_eth_dev *dev, bool on); 57fda42583SZaiyu Wang static void ngbevf_configure_msix(struct rte_eth_dev *dev); 587744e908SZaiyu Wang static int ngbevf_dev_promiscuous_enable(struct rte_eth_dev *dev); 597744e908SZaiyu Wang static int ngbevf_dev_promiscuous_disable(struct rte_eth_dev *dev); 602aba42f6SZaiyu Wang static void ngbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index); 61fda42583SZaiyu Wang static void ngbevf_dev_interrupt_handler(void *param); 62950820f1SZaiyu Wang 63950820f1SZaiyu Wang /* 64950820f1SZaiyu Wang * The set of PCI devices this driver supports (for VF) 65950820f1SZaiyu Wang */ 66950820f1SZaiyu Wang static const struct rte_pci_id pci_id_ngbevf_map[] = { 67950820f1SZaiyu Wang { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W_VF) }, 68950820f1SZaiyu Wang { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2_VF) }, 69950820f1SZaiyu Wang { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S_VF) }, 70950820f1SZaiyu Wang { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4_VF) }, 71950820f1SZaiyu Wang { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S_VF) }, 72950820f1SZaiyu Wang { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2_VF) }, 73950820f1SZaiyu 
Wang { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S_VF) }, 74950820f1SZaiyu Wang { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4_VF) }, 75950820f1SZaiyu Wang { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S_VF) }, 76950820f1SZaiyu Wang { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI_VF) }, 77950820f1SZaiyu Wang { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1_VF) }, 78950820f1SZaiyu Wang { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L_VF) }, 79950820f1SZaiyu Wang { .vendor_id = 0, /* sentinel */ }, 80950820f1SZaiyu Wang }; 81950820f1SZaiyu Wang 8266070ca4SZaiyu Wang static const struct rte_eth_desc_lim rx_desc_lim = { 8366070ca4SZaiyu Wang .nb_max = NGBE_RING_DESC_MAX, 8466070ca4SZaiyu Wang .nb_min = NGBE_RING_DESC_MIN, 8566070ca4SZaiyu Wang .nb_align = NGBE_RXD_ALIGN, 8666070ca4SZaiyu Wang }; 8766070ca4SZaiyu Wang 8866070ca4SZaiyu Wang static const struct rte_eth_desc_lim tx_desc_lim = { 8966070ca4SZaiyu Wang .nb_max = NGBE_RING_DESC_MAX, 9066070ca4SZaiyu Wang .nb_min = NGBE_RING_DESC_MIN, 9166070ca4SZaiyu Wang .nb_align = NGBE_TXD_ALIGN, 9266070ca4SZaiyu Wang .nb_seg_max = NGBE_TX_MAX_SEG, 9366070ca4SZaiyu Wang .nb_mtu_seg_max = NGBE_TX_MAX_SEG, 9466070ca4SZaiyu Wang }; 9566070ca4SZaiyu Wang 96950820f1SZaiyu Wang static const struct eth_dev_ops ngbevf_eth_dev_ops; 97950820f1SZaiyu Wang 981d13283aSZaiyu Wang static const struct rte_ngbe_xstats_name_off rte_ngbevf_stats_strings[] = { 991d13283aSZaiyu Wang {"rx_multicast_packets", offsetof(struct ngbevf_hw_stats, vfmprc)}, 1001d13283aSZaiyu Wang }; 1011d13283aSZaiyu Wang 1021d13283aSZaiyu Wang #define NGBEVF_NB_XSTATS (sizeof(rte_ngbevf_stats_strings) / \ 1031d13283aSZaiyu Wang sizeof(rte_ngbevf_stats_strings[0])) 1041d13283aSZaiyu Wang 105950820f1SZaiyu Wang /* 10666070ca4SZaiyu Wang * Negotiate mailbox API version with the PF. 
10766070ca4SZaiyu Wang * After reset API version is always set to the basic one (ngbe_mbox_api_10). 10866070ca4SZaiyu Wang * Then we try to negotiate starting with the most recent one. 10966070ca4SZaiyu Wang * If all negotiation attempts fail, then we will proceed with 11066070ca4SZaiyu Wang * the default one (ngbe_mbox_api_10). 11166070ca4SZaiyu Wang */ 11266070ca4SZaiyu Wang static void 11366070ca4SZaiyu Wang ngbevf_negotiate_api(struct ngbe_hw *hw) 11466070ca4SZaiyu Wang { 11566070ca4SZaiyu Wang int32_t i; 11666070ca4SZaiyu Wang 11766070ca4SZaiyu Wang /* start with highest supported, proceed down */ 11866070ca4SZaiyu Wang static const int sup_ver[] = { 11966070ca4SZaiyu Wang ngbe_mbox_api_13, 12066070ca4SZaiyu Wang ngbe_mbox_api_12, 12166070ca4SZaiyu Wang ngbe_mbox_api_11, 12266070ca4SZaiyu Wang ngbe_mbox_api_10, 12366070ca4SZaiyu Wang }; 12466070ca4SZaiyu Wang 12566070ca4SZaiyu Wang for (i = 0; i < ARRAY_SIZE(sup_ver); i++) { 12666070ca4SZaiyu Wang if (ngbevf_negotiate_api_version(hw, sup_ver[i]) == 0) 12766070ca4SZaiyu Wang break; 12866070ca4SZaiyu Wang } 12966070ca4SZaiyu Wang } 13066070ca4SZaiyu Wang 1312aba42f6SZaiyu Wang static void 1322aba42f6SZaiyu Wang generate_random_mac_addr(struct rte_ether_addr *mac_addr) 1332aba42f6SZaiyu Wang { 1342aba42f6SZaiyu Wang uint64_t random; 1352aba42f6SZaiyu Wang 1362aba42f6SZaiyu Wang /* Set Organizationally Unique Identifier (OUI) prefix. */ 1372aba42f6SZaiyu Wang mac_addr->addr_bytes[0] = 0x00; 1382aba42f6SZaiyu Wang mac_addr->addr_bytes[1] = 0x09; 1392aba42f6SZaiyu Wang mac_addr->addr_bytes[2] = 0xC0; 1402aba42f6SZaiyu Wang /* Force indication of locally assigned MAC address. */ 1412aba42f6SZaiyu Wang mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR; 1422aba42f6SZaiyu Wang /* Generate the last 3 bytes of the MAC address with a random number. 
*/ 1432aba42f6SZaiyu Wang random = rte_rand(); 1442aba42f6SZaiyu Wang memcpy(&mac_addr->addr_bytes[3], &random, 3); 1452aba42f6SZaiyu Wang } 1462aba42f6SZaiyu Wang 14766070ca4SZaiyu Wang /* 148950820f1SZaiyu Wang * Virtual Function device init 149950820f1SZaiyu Wang */ 150950820f1SZaiyu Wang static int 151950820f1SZaiyu Wang eth_ngbevf_dev_init(struct rte_eth_dev *eth_dev) 152950820f1SZaiyu Wang { 153950820f1SZaiyu Wang int err; 15466070ca4SZaiyu Wang uint32_t tc, tcs; 155950820f1SZaiyu Wang struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 156fda42583SZaiyu Wang struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 157950820f1SZaiyu Wang struct ngbe_hw *hw = ngbe_dev_hw(eth_dev); 158f47dc03cSZaiyu Wang struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev); 159f47dc03cSZaiyu Wang struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev); 1602aba42f6SZaiyu Wang struct rte_ether_addr *perm_addr = 1612aba42f6SZaiyu Wang (struct rte_ether_addr *)hw->mac.perm_addr; 162950820f1SZaiyu Wang 163950820f1SZaiyu Wang PMD_INIT_FUNC_TRACE(); 164950820f1SZaiyu Wang 165950820f1SZaiyu Wang eth_dev->dev_ops = &ngbevf_eth_dev_ops; 166711a06e8SZaiyu Wang eth_dev->rx_descriptor_status = ngbe_dev_rx_descriptor_status; 167711a06e8SZaiyu Wang eth_dev->tx_descriptor_status = ngbe_dev_tx_descriptor_status; 168711a06e8SZaiyu Wang eth_dev->rx_pkt_burst = &ngbe_recv_pkts; 169711a06e8SZaiyu Wang eth_dev->tx_pkt_burst = &ngbe_xmit_pkts; 170711a06e8SZaiyu Wang 171711a06e8SZaiyu Wang /* for secondary processes, we don't initialise any further as primary 172711a06e8SZaiyu Wang * has already done this work. 
Only check we don't need a different 173711a06e8SZaiyu Wang * RX function 174711a06e8SZaiyu Wang */ 175711a06e8SZaiyu Wang if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 176711a06e8SZaiyu Wang struct ngbe_tx_queue *txq; 177711a06e8SZaiyu Wang uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues; 178711a06e8SZaiyu Wang /* TX queue function in primary, set by last queue initialized 179711a06e8SZaiyu Wang * Tx queue may not initialized by primary process 180711a06e8SZaiyu Wang */ 181711a06e8SZaiyu Wang if (eth_dev->data->tx_queues) { 182711a06e8SZaiyu Wang txq = eth_dev->data->tx_queues[nb_tx_queues - 1]; 183711a06e8SZaiyu Wang ngbe_set_tx_function(eth_dev, txq); 184711a06e8SZaiyu Wang } else { 185711a06e8SZaiyu Wang /* Use default TX function if we get here */ 186711a06e8SZaiyu Wang PMD_INIT_LOG(NOTICE, 187711a06e8SZaiyu Wang "No TX queues configured yet. Using default TX function."); 188711a06e8SZaiyu Wang } 189711a06e8SZaiyu Wang 190711a06e8SZaiyu Wang ngbe_set_rx_function(eth_dev); 191711a06e8SZaiyu Wang 192711a06e8SZaiyu Wang return 0; 193711a06e8SZaiyu Wang } 194950820f1SZaiyu Wang 195950820f1SZaiyu Wang rte_eth_copy_pci_info(eth_dev, pci_dev); 196950820f1SZaiyu Wang 197950820f1SZaiyu Wang hw->device_id = pci_dev->id.device_id; 198950820f1SZaiyu Wang hw->vendor_id = pci_dev->id.vendor_id; 199950820f1SZaiyu Wang hw->sub_system_id = pci_dev->id.subsystem_device_id; 200950820f1SZaiyu Wang ngbe_map_device_id(hw); 201950820f1SZaiyu Wang hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; 202950820f1SZaiyu Wang 203f47dc03cSZaiyu Wang /* initialize the vfta */ 204f47dc03cSZaiyu Wang memset(shadow_vfta, 0, sizeof(*shadow_vfta)); 205f47dc03cSZaiyu Wang 206f47dc03cSZaiyu Wang /* initialize the hw strip bitmap*/ 207f47dc03cSZaiyu Wang memset(hwstrip, 0, sizeof(*hwstrip)); 208f47dc03cSZaiyu Wang 209950820f1SZaiyu Wang /* Initialize the shared code (base driver) */ 210950820f1SZaiyu Wang err = ngbe_init_shared_code(hw); 211950820f1SZaiyu Wang if (err != 0) { 
212950820f1SZaiyu Wang PMD_INIT_LOG(ERR, 213950820f1SZaiyu Wang "Shared code init failed for ngbevf: %d", err); 214950820f1SZaiyu Wang return -EIO; 215950820f1SZaiyu Wang } 216950820f1SZaiyu Wang 21766070ca4SZaiyu Wang /* init_mailbox_params */ 21866070ca4SZaiyu Wang hw->mbx.init_params(hw); 21966070ca4SZaiyu Wang 2201d13283aSZaiyu Wang /* Reset the hw statistics */ 2211d13283aSZaiyu Wang ngbevf_dev_stats_reset(eth_dev); 2221d13283aSZaiyu Wang 223fda42583SZaiyu Wang /* Disable the interrupts for VF */ 224fda42583SZaiyu Wang ngbevf_intr_disable(eth_dev); 225fda42583SZaiyu Wang 226950820f1SZaiyu Wang hw->mac.num_rar_entries = 32; /* The MAX of the underlying PF */ 22766070ca4SZaiyu Wang err = hw->mac.reset_hw(hw); 22866070ca4SZaiyu Wang 22966070ca4SZaiyu Wang /* 23066070ca4SZaiyu Wang * The VF reset operation returns the NGBE_ERR_INVALID_MAC_ADDR when 23166070ca4SZaiyu Wang * the underlying PF driver has not assigned a MAC address to the VF. 23266070ca4SZaiyu Wang * In this case, assign a random MAC address. 23366070ca4SZaiyu Wang */ 23466070ca4SZaiyu Wang if (err != 0 && err != NGBE_ERR_INVALID_MAC_ADDR) { 23566070ca4SZaiyu Wang PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", err); 23666070ca4SZaiyu Wang /* 23766070ca4SZaiyu Wang * This error code will be propagated to the app by 23866070ca4SZaiyu Wang * rte_eth_dev_reset, so use a public error code rather than 23966070ca4SZaiyu Wang * the internal-only NGBE_ERR_RESET_FAILED 24066070ca4SZaiyu Wang */ 24166070ca4SZaiyu Wang return -EAGAIN; 24266070ca4SZaiyu Wang } 24366070ca4SZaiyu Wang 24466070ca4SZaiyu Wang /* negotiate mailbox API version to use with the PF. 
*/ 24566070ca4SZaiyu Wang ngbevf_negotiate_api(hw); 24666070ca4SZaiyu Wang 24766070ca4SZaiyu Wang /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */ 24866070ca4SZaiyu Wang ngbevf_get_queues(hw, &tcs, &tc); 249950820f1SZaiyu Wang 250950820f1SZaiyu Wang /* Allocate memory for storing MAC addresses */ 251950820f1SZaiyu Wang eth_dev->data->mac_addrs = rte_zmalloc("ngbevf", RTE_ETHER_ADDR_LEN * 252950820f1SZaiyu Wang hw->mac.num_rar_entries, 0); 253950820f1SZaiyu Wang if (eth_dev->data->mac_addrs == NULL) { 254950820f1SZaiyu Wang PMD_INIT_LOG(ERR, 255950820f1SZaiyu Wang "Failed to allocate %u bytes needed to store MAC addresses", 256950820f1SZaiyu Wang RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries); 257950820f1SZaiyu Wang return -ENOMEM; 258950820f1SZaiyu Wang } 259950820f1SZaiyu Wang 2602aba42f6SZaiyu Wang /* Generate a random MAC address, if none was assigned by PF. */ 2612aba42f6SZaiyu Wang if (rte_is_zero_ether_addr(perm_addr)) { 2622aba42f6SZaiyu Wang generate_random_mac_addr(perm_addr); 2632aba42f6SZaiyu Wang err = ngbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1); 2642aba42f6SZaiyu Wang if (err) { 2652aba42f6SZaiyu Wang rte_free(eth_dev->data->mac_addrs); 2662aba42f6SZaiyu Wang eth_dev->data->mac_addrs = NULL; 2672aba42f6SZaiyu Wang return err; 2682aba42f6SZaiyu Wang } 2692aba42f6SZaiyu Wang PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF"); 2702aba42f6SZaiyu Wang PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address " 2712aba42f6SZaiyu Wang "%02x:%02x:%02x:%02x:%02x:%02x", 2722aba42f6SZaiyu Wang perm_addr->addr_bytes[0], 2732aba42f6SZaiyu Wang perm_addr->addr_bytes[1], 2742aba42f6SZaiyu Wang perm_addr->addr_bytes[2], 2752aba42f6SZaiyu Wang perm_addr->addr_bytes[3], 2762aba42f6SZaiyu Wang perm_addr->addr_bytes[4], 2772aba42f6SZaiyu Wang perm_addr->addr_bytes[5]); 2782aba42f6SZaiyu Wang } 2792aba42f6SZaiyu Wang 2802aba42f6SZaiyu Wang /* Copy the permanent MAC address */ 2812aba42f6SZaiyu Wang rte_ether_addr_copy(perm_addr, 
ð_dev->data->mac_addrs[0]); 2822aba42f6SZaiyu Wang 28366070ca4SZaiyu Wang /* reset the hardware with the new settings */ 28466070ca4SZaiyu Wang err = hw->mac.start_hw(hw); 28566070ca4SZaiyu Wang if (err) { 28666070ca4SZaiyu Wang PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", err); 28766070ca4SZaiyu Wang return -EIO; 28866070ca4SZaiyu Wang } 28966070ca4SZaiyu Wang 2907744e908SZaiyu Wang /* enter promiscuous mode */ 2917744e908SZaiyu Wang ngbevf_dev_promiscuous_enable(eth_dev); 2927744e908SZaiyu Wang 293fda42583SZaiyu Wang rte_intr_callback_register(intr_handle, 294fda42583SZaiyu Wang ngbevf_dev_interrupt_handler, eth_dev); 295fda42583SZaiyu Wang rte_intr_enable(intr_handle); 296fda42583SZaiyu Wang ngbevf_intr_enable(eth_dev); 297fda42583SZaiyu Wang 298950820f1SZaiyu Wang PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s", 299950820f1SZaiyu Wang eth_dev->data->port_id, pci_dev->id.vendor_id, 300950820f1SZaiyu Wang pci_dev->id.device_id, "ngbe_mac_sp_vf"); 301950820f1SZaiyu Wang 302950820f1SZaiyu Wang return 0; 303950820f1SZaiyu Wang } 304950820f1SZaiyu Wang 305950820f1SZaiyu Wang /* Virtual Function device uninit */ 306950820f1SZaiyu Wang static int 307950820f1SZaiyu Wang eth_ngbevf_dev_uninit(struct rte_eth_dev *eth_dev) 308950820f1SZaiyu Wang { 309950820f1SZaiyu Wang PMD_INIT_FUNC_TRACE(); 310950820f1SZaiyu Wang 311950820f1SZaiyu Wang if (rte_eal_process_type() != RTE_PROC_PRIMARY) 312950820f1SZaiyu Wang return 0; 313950820f1SZaiyu Wang 314950820f1SZaiyu Wang ngbevf_dev_close(eth_dev); 315950820f1SZaiyu Wang 316950820f1SZaiyu Wang return 0; 317950820f1SZaiyu Wang } 318950820f1SZaiyu Wang 319950820f1SZaiyu Wang static int eth_ngbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 320950820f1SZaiyu Wang struct rte_pci_device *pci_dev) 321950820f1SZaiyu Wang { 322950820f1SZaiyu Wang return rte_eth_dev_pci_generic_probe(pci_dev, 323950820f1SZaiyu Wang sizeof(struct ngbe_adapter), eth_ngbevf_dev_init); 324950820f1SZaiyu Wang } 
325950820f1SZaiyu Wang 326950820f1SZaiyu Wang static int eth_ngbevf_pci_remove(struct rte_pci_device *pci_dev) 327950820f1SZaiyu Wang { 328950820f1SZaiyu Wang return rte_eth_dev_pci_generic_remove(pci_dev, eth_ngbevf_dev_uninit); 329950820f1SZaiyu Wang } 330950820f1SZaiyu Wang 331950820f1SZaiyu Wang /* 332950820f1SZaiyu Wang * virtual function driver struct 333950820f1SZaiyu Wang */ 334950820f1SZaiyu Wang static struct rte_pci_driver rte_ngbevf_pmd = { 335950820f1SZaiyu Wang .id_table = pci_id_ngbevf_map, 336950820f1SZaiyu Wang .drv_flags = RTE_PCI_DRV_NEED_MAPPING, 337950820f1SZaiyu Wang .probe = eth_ngbevf_pci_probe, 338950820f1SZaiyu Wang .remove = eth_ngbevf_pci_remove, 339950820f1SZaiyu Wang }; 340950820f1SZaiyu Wang 3411d13283aSZaiyu Wang static int ngbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 3421d13283aSZaiyu Wang struct rte_eth_xstat_name *xstats_names, unsigned int limit) 3431d13283aSZaiyu Wang { 3441d13283aSZaiyu Wang unsigned int i; 3451d13283aSZaiyu Wang 3461d13283aSZaiyu Wang if (limit < NGBEVF_NB_XSTATS && xstats_names != NULL) 3471d13283aSZaiyu Wang return -ENOMEM; 3481d13283aSZaiyu Wang 3491d13283aSZaiyu Wang if (xstats_names != NULL) 3501d13283aSZaiyu Wang for (i = 0; i < NGBEVF_NB_XSTATS; i++) 3511d13283aSZaiyu Wang snprintf(xstats_names[i].name, 3521d13283aSZaiyu Wang sizeof(xstats_names[i].name), 3531d13283aSZaiyu Wang "%s", rte_ngbevf_stats_strings[i].name); 3541d13283aSZaiyu Wang return NGBEVF_NB_XSTATS; 3551d13283aSZaiyu Wang } 3561d13283aSZaiyu Wang 3571d13283aSZaiyu Wang static void 3581d13283aSZaiyu Wang ngbevf_update_stats(struct rte_eth_dev *dev) 3591d13283aSZaiyu Wang { 3601d13283aSZaiyu Wang struct ngbe_hw *hw = ngbe_dev_hw(dev); 3611d13283aSZaiyu Wang struct ngbevf_hw_stats *hw_stats = (struct ngbevf_hw_stats *) 3621d13283aSZaiyu Wang NGBE_DEV_STATS(dev); 3631d13283aSZaiyu Wang 3641d13283aSZaiyu Wang /* Good Rx packet, include VF loopback */ 3651d13283aSZaiyu Wang NGBE_UPDCNT32(NGBE_QPRXPKT(0), 3661d13283aSZaiyu 
Wang hw_stats->last_vfgprc, hw_stats->vfgprc); 3671d13283aSZaiyu Wang 3681d13283aSZaiyu Wang /* Good Rx octets, include VF loopback */ 3691d13283aSZaiyu Wang NGBE_UPDCNT36(NGBE_QPRXOCTL(0), 3701d13283aSZaiyu Wang hw_stats->last_vfgorc, hw_stats->vfgorc); 3711d13283aSZaiyu Wang 3721d13283aSZaiyu Wang /* Rx Multicst Packet */ 3731d13283aSZaiyu Wang NGBE_UPDCNT32(NGBE_QPRXMPKT(0), 3741d13283aSZaiyu Wang hw_stats->last_vfmprc, hw_stats->vfmprc); 3751d13283aSZaiyu Wang 3761d13283aSZaiyu Wang /* Rx Broadcast Packet */ 3771d13283aSZaiyu Wang NGBE_UPDCNT32(NGBE_QPRXBPKT(0), 3781d13283aSZaiyu Wang hw_stats->last_vfbprc, hw_stats->vfbprc); 3791d13283aSZaiyu Wang 3801d13283aSZaiyu Wang hw->rx_loaded = 0; 3811d13283aSZaiyu Wang 3821d13283aSZaiyu Wang /* Good Tx packet, include VF loopback */ 3831d13283aSZaiyu Wang NGBE_UPDCNT32(NGBE_QPTXPKT(0), 3841d13283aSZaiyu Wang hw_stats->last_vfgptc, hw_stats->vfgptc); 3851d13283aSZaiyu Wang 3861d13283aSZaiyu Wang /* Good Tx octets, include VF loopback */ 3871d13283aSZaiyu Wang NGBE_UPDCNT36(NGBE_QPTXOCTL(0), 3881d13283aSZaiyu Wang hw_stats->last_vfgotc, hw_stats->vfgotc); 3891d13283aSZaiyu Wang 3901d13283aSZaiyu Wang /* Tx Multicst Packet */ 3911d13283aSZaiyu Wang NGBE_UPDCNT32(NGBE_QPTXMPKT(0), 3921d13283aSZaiyu Wang hw_stats->last_vfmprc, hw_stats->vfmprc); 3931d13283aSZaiyu Wang 3941d13283aSZaiyu Wang /* Tx Broadcast Packet */ 3951d13283aSZaiyu Wang NGBE_UPDCNT32(NGBE_QPTXBPKT(0), 3961d13283aSZaiyu Wang hw_stats->last_vfbptc, hw_stats->vfbptc); 3971d13283aSZaiyu Wang 3981d13283aSZaiyu Wang hw->offset_loaded = 0; 3991d13283aSZaiyu Wang } 4001d13283aSZaiyu Wang 4011d13283aSZaiyu Wang static int 4021d13283aSZaiyu Wang ngbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 4031d13283aSZaiyu Wang unsigned int n) 4041d13283aSZaiyu Wang { 4051d13283aSZaiyu Wang struct ngbevf_hw_stats *hw_stats = (struct ngbevf_hw_stats *) 4061d13283aSZaiyu Wang NGBE_DEV_STATS(dev); 4071d13283aSZaiyu Wang unsigned int i; 
4081d13283aSZaiyu Wang 4091d13283aSZaiyu Wang if (n < NGBEVF_NB_XSTATS) 4101d13283aSZaiyu Wang return NGBEVF_NB_XSTATS; 4111d13283aSZaiyu Wang 4121d13283aSZaiyu Wang ngbevf_update_stats(dev); 4131d13283aSZaiyu Wang 4141d13283aSZaiyu Wang if (!xstats) 4151d13283aSZaiyu Wang return 0; 4161d13283aSZaiyu Wang 4171d13283aSZaiyu Wang /* Extended stats */ 4181d13283aSZaiyu Wang for (i = 0; i < NGBEVF_NB_XSTATS; i++) { 4191d13283aSZaiyu Wang xstats[i].id = i; 4201d13283aSZaiyu Wang xstats[i].value = *(uint64_t *)(((char *)hw_stats) + 4211d13283aSZaiyu Wang rte_ngbevf_stats_strings[i].offset); 4221d13283aSZaiyu Wang } 4231d13283aSZaiyu Wang 4241d13283aSZaiyu Wang return NGBEVF_NB_XSTATS; 4251d13283aSZaiyu Wang } 4261d13283aSZaiyu Wang 4271d13283aSZaiyu Wang static int 4281d13283aSZaiyu Wang ngbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 4291d13283aSZaiyu Wang { 4301d13283aSZaiyu Wang struct ngbevf_hw_stats *hw_stats = (struct ngbevf_hw_stats *) 4311d13283aSZaiyu Wang NGBE_DEV_STATS(dev); 4321d13283aSZaiyu Wang 4331d13283aSZaiyu Wang ngbevf_update_stats(dev); 4341d13283aSZaiyu Wang 4351d13283aSZaiyu Wang if (stats == NULL) 4361d13283aSZaiyu Wang return -EINVAL; 4371d13283aSZaiyu Wang 4381d13283aSZaiyu Wang stats->ipackets = hw_stats->vfgprc; 4391d13283aSZaiyu Wang stats->ibytes = hw_stats->vfgorc; 4401d13283aSZaiyu Wang stats->opackets = hw_stats->vfgptc; 4411d13283aSZaiyu Wang stats->obytes = hw_stats->vfgotc; 4421d13283aSZaiyu Wang return 0; 4431d13283aSZaiyu Wang } 4441d13283aSZaiyu Wang 4451d13283aSZaiyu Wang static int 4461d13283aSZaiyu Wang ngbevf_dev_stats_reset(struct rte_eth_dev *dev) 4471d13283aSZaiyu Wang { 4481d13283aSZaiyu Wang struct ngbevf_hw_stats *hw_stats = (struct ngbevf_hw_stats *) 4491d13283aSZaiyu Wang NGBE_DEV_STATS(dev); 4501d13283aSZaiyu Wang 4511d13283aSZaiyu Wang /* Sync HW register to the last stats */ 4521d13283aSZaiyu Wang ngbevf_dev_stats_get(dev, NULL); 4531d13283aSZaiyu Wang 4541d13283aSZaiyu Wang /* reset HW 
current stats*/ 4551d13283aSZaiyu Wang hw_stats->vfgprc = 0; 4561d13283aSZaiyu Wang hw_stats->vfgorc = 0; 4571d13283aSZaiyu Wang hw_stats->vfgptc = 0; 4581d13283aSZaiyu Wang hw_stats->vfgotc = 0; 4591d13283aSZaiyu Wang 4601d13283aSZaiyu Wang return 0; 4611d13283aSZaiyu Wang } 4621d13283aSZaiyu Wang 463950820f1SZaiyu Wang static int 464950820f1SZaiyu Wang ngbevf_dev_info_get(struct rte_eth_dev *dev, 465950820f1SZaiyu Wang struct rte_eth_dev_info *dev_info) 466950820f1SZaiyu Wang { 46766070ca4SZaiyu Wang struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 468950820f1SZaiyu Wang struct ngbe_hw *hw = ngbe_dev_hw(dev); 469950820f1SZaiyu Wang 470950820f1SZaiyu Wang dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; 471950820f1SZaiyu Wang dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; 47266070ca4SZaiyu Wang dev_info->min_rx_bufsize = 1024; 47366070ca4SZaiyu Wang dev_info->max_rx_pktlen = NGBE_FRAME_SIZE_MAX; 47466070ca4SZaiyu Wang dev_info->max_mac_addrs = hw->mac.num_rar_entries; 47566070ca4SZaiyu Wang dev_info->max_hash_mac_addrs = NGBE_VMDQ_NUM_UC_MAC; 47666070ca4SZaiyu Wang dev_info->max_vfs = pci_dev->max_vfs; 47766070ca4SZaiyu Wang dev_info->max_vmdq_pools = RTE_ETH_64_POOLS; 47866070ca4SZaiyu Wang dev_info->rx_queue_offload_capa = ngbe_get_rx_queue_offloads(dev); 47966070ca4SZaiyu Wang dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) | 48066070ca4SZaiyu Wang dev_info->rx_queue_offload_capa); 48166070ca4SZaiyu Wang dev_info->tx_queue_offload_capa = 0; 48266070ca4SZaiyu Wang dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev); 48366070ca4SZaiyu Wang dev_info->hash_key_size = NGBE_HKEY_MAX_INDEX * sizeof(uint32_t); 48466070ca4SZaiyu Wang dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128; 48566070ca4SZaiyu Wang dev_info->default_rxconf = (struct rte_eth_rxconf) { 48666070ca4SZaiyu Wang .rx_thresh = { 48766070ca4SZaiyu Wang .pthresh = NGBE_DEFAULT_RX_PTHRESH, 48866070ca4SZaiyu Wang .hthresh = NGBE_DEFAULT_RX_HTHRESH, 
48966070ca4SZaiyu Wang .wthresh = NGBE_DEFAULT_RX_WTHRESH, 49066070ca4SZaiyu Wang }, 49166070ca4SZaiyu Wang .rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH, 49266070ca4SZaiyu Wang .rx_drop_en = 0, 49366070ca4SZaiyu Wang .offloads = 0, 49466070ca4SZaiyu Wang }; 49566070ca4SZaiyu Wang 49666070ca4SZaiyu Wang dev_info->default_txconf = (struct rte_eth_txconf) { 49766070ca4SZaiyu Wang .tx_thresh = { 49866070ca4SZaiyu Wang .pthresh = NGBE_DEFAULT_TX_PTHRESH, 49966070ca4SZaiyu Wang .hthresh = NGBE_DEFAULT_TX_HTHRESH, 50066070ca4SZaiyu Wang .wthresh = NGBE_DEFAULT_TX_WTHRESH, 50166070ca4SZaiyu Wang }, 50266070ca4SZaiyu Wang .tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH, 50366070ca4SZaiyu Wang .offloads = 0, 50466070ca4SZaiyu Wang }; 50566070ca4SZaiyu Wang 50666070ca4SZaiyu Wang dev_info->rx_desc_lim = rx_desc_lim; 50766070ca4SZaiyu Wang dev_info->tx_desc_lim = tx_desc_lim; 508950820f1SZaiyu Wang 509950820f1SZaiyu Wang return 0; 510950820f1SZaiyu Wang } 511950820f1SZaiyu Wang 51262c072c0SZaiyu Wang static int 51362c072c0SZaiyu Wang ngbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) 51462c072c0SZaiyu Wang { 51562c072c0SZaiyu Wang return ngbe_dev_link_update_share(dev, wait_to_complete); 51662c072c0SZaiyu Wang } 51762c072c0SZaiyu Wang 518fda42583SZaiyu Wang static void 519fda42583SZaiyu Wang ngbevf_intr_disable(struct rte_eth_dev *dev) 520fda42583SZaiyu Wang { 521fda42583SZaiyu Wang struct ngbe_interrupt *intr = ngbe_dev_intr(dev); 522fda42583SZaiyu Wang struct ngbe_hw *hw = ngbe_dev_hw(dev); 523fda42583SZaiyu Wang 524fda42583SZaiyu Wang PMD_INIT_FUNC_TRACE(); 525fda42583SZaiyu Wang 526fda42583SZaiyu Wang /* Clear interrupt mask to stop from interrupts being generated */ 527fda42583SZaiyu Wang wr32(hw, NGBE_VFIMS, NGBE_VFIMS_MASK); 528fda42583SZaiyu Wang 529fda42583SZaiyu Wang ngbe_flush(hw); 530fda42583SZaiyu Wang 531fda42583SZaiyu Wang /* Clear mask value. 
*/ 532fda42583SZaiyu Wang intr->mask_misc = NGBE_VFIMS_MASK; 533fda42583SZaiyu Wang } 534fda42583SZaiyu Wang 535fda42583SZaiyu Wang static void 536fda42583SZaiyu Wang ngbevf_intr_enable(struct rte_eth_dev *dev) 537fda42583SZaiyu Wang { 538fda42583SZaiyu Wang struct ngbe_interrupt *intr = ngbe_dev_intr(dev); 539fda42583SZaiyu Wang struct ngbe_hw *hw = ngbe_dev_hw(dev); 540fda42583SZaiyu Wang 541fda42583SZaiyu Wang PMD_INIT_FUNC_TRACE(); 542fda42583SZaiyu Wang 543fda42583SZaiyu Wang /* VF enable interrupt autoclean */ 544fda42583SZaiyu Wang wr32(hw, NGBE_VFIMC, NGBE_VFIMC_MASK); 545fda42583SZaiyu Wang 546fda42583SZaiyu Wang ngbe_flush(hw); 547fda42583SZaiyu Wang 548fda42583SZaiyu Wang intr->mask_misc = 0; 549fda42583SZaiyu Wang } 550fda42583SZaiyu Wang 551950820f1SZaiyu Wang static int 552711a06e8SZaiyu Wang ngbevf_dev_configure(struct rte_eth_dev *dev) 553711a06e8SZaiyu Wang { 554711a06e8SZaiyu Wang struct rte_eth_conf *conf = &dev->data->dev_conf; 555711a06e8SZaiyu Wang struct ngbe_adapter *adapter = ngbe_dev_adapter(dev); 556711a06e8SZaiyu Wang 557711a06e8SZaiyu Wang PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d", 558711a06e8SZaiyu Wang dev->data->port_id); 559711a06e8SZaiyu Wang 560711a06e8SZaiyu Wang /* 561711a06e8SZaiyu Wang * VF has no ability to enable/disable HW CRC 562711a06e8SZaiyu Wang * Keep the persistent behavior the same as Host PF 563711a06e8SZaiyu Wang */ 564711a06e8SZaiyu Wang #ifndef RTE_LIBRTE_NGBE_PF_DISABLE_STRIP_CRC 565711a06e8SZaiyu Wang if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) { 566711a06e8SZaiyu Wang PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip"); 567711a06e8SZaiyu Wang conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC; 568711a06e8SZaiyu Wang } 569711a06e8SZaiyu Wang #else 570711a06e8SZaiyu Wang if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) { 571711a06e8SZaiyu Wang PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip"); 572711a06e8SZaiyu Wang conf->rxmode.offloads |= 
				RTE_ETH_RX_OFFLOAD_KEEP_CRC;
	}
#endif

	/*
	 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk
	 * allocation or vector Rx preconditions we will reset it.
	 */
	adapter->rx_bulk_alloc_allowed = true;

	return 0;
}

/*
 * Start the VF device: reset the VF HW, negotiate the mailbox API with
 * the PF, initialize and start Tx/Rx rings, restore VLAN filters/strip,
 * set up the queue interrupt vector mapping and re-enable interrupts.
 * Returns 0 on success or a negative/driver error code on failure.
 */
static int
ngbevf_dev_start(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t intr_vector = 0;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

	int err, mask = 0;

	PMD_INIT_FUNC_TRACE();

	err = hw->mac.reset_hw(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err);
		return err;
	}
	hw->mac.get_link_status = true;

	/* negotiate mailbox API version to use with the PF. */
	ngbevf_negotiate_api(hw);

	ngbevf_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = ngbevf_dev_rx_init(dev);

	/**
	 * In this case, reuses the MAC address assigned by VF
	 * initialization.
	 */
	if (err != 0 && err != NGBE_ERR_INVALID_MAC_ADDR) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
		ngbe_dev_clear_queues(dev);
		return err;
	}

	/* Restore the shadow VLAN filter table to HW */
	ngbevf_set_vfta_all(dev, 1);

	/* Set HW strip */
	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
		RTE_ETH_VLAN_EXTEND_MASK;
	err = ngbevf_vlan_offload_config(dev, mask);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
		ngbe_dev_clear_queues(dev);
		return err;
	}

	ngbevf_dev_rxtx_start(dev);

	/* check and configure queue intr-vector mapping */
	if (rte_intr_cap_multiple(intr_handle) &&
	    dev->data->dev_conf.intr_conf.rxq) {
		/* According to datasheet, only vector 0/1/2 can be used,
		 * now only one vector is used for Rx queue
		 */
		intr_vector = 1;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle)) {
		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
					    dev->data->nb_rx_queues)) {
			PMD_INIT_LOG(ERR,
				     "Failed to allocate %d rx_queues intr_vec",
				     dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	ngbevf_configure_msix(dev);

	/* When a VF port is bound to VFIO-PCI, only miscellaneous interrupt
	 * is mapped to VFIO vector 0 in eth_ngbevf_dev_init( ).
	 * If previous VFIO interrupt mapping setting in eth_ngbevf_dev_init( )
	 * is not cleared, it will fail when following rte_intr_enable( ) tries
	 * to map Rx queue interrupt to other VFIO vectors.
	 * So clear uio/vfio intr/eventfd first to avoid failure.
	 */
	rte_intr_disable(intr_handle);

	rte_intr_enable(intr_handle);

	/* Re-enable interrupt for VF */
	ngbevf_intr_enable(dev);

	/*
	 * Update link status right before return, because it may
	 * start link configuration process in a separate thread.
	 */
	ngbevf_dev_link_update(dev, 0);

	hw->adapter_stopped = false;

	return 0;
}

/*
 * Stop the VF device: mask interrupts, stop the MAC, clear the HW VLAN
 * filters (the shadow copy is kept for the next start), release queue
 * resources and the Rx interrupt vector mapping. Idempotent: returns 0
 * immediately if the adapter is already stopped.
 */
static int
ngbevf_dev_stop(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

	if (hw->adapter_stopped)
		return 0;

	PMD_INIT_FUNC_TRACE();

	ngbevf_intr_disable(dev);

	hw->adapter_stopped = 1;
	hw->mac.stop_hw(hw);

	/*
	 * Clear what we set, but we still keep shadow_vfta to
	 * restore after device starts
	 */
	ngbevf_set_vfta_all(dev, 0);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;

	ngbe_dev_clear_queues(dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);

	adapter->rss_reta_updated = 0;

	return 0;
}

static int
ngbevf_dev_close(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int ret;

	PMD_INIT_FUNC_TRACE();
	/* Only the primary process tears down shared resources */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	hw->mac.reset_hw(hw);

	ret = ngbevf_dev_stop(dev);

	ngbe_dev_free_queues(dev);

	/**
	 * Remove the VF MAC address to ensure
	 * that the VF traffic goes to the PF
	 * after stop, close and detach of the VF
	 **/
	ngbevf_remove_mac_addr(dev, 0);

	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	/* Disable the interrupts for VF */
	ngbevf_intr_disable(dev);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     ngbevf_dev_interrupt_handler, dev);

	return ret;
}

/*
 * Reset VF device: full uninit followed by re-init of the ethdev.
 */
static int
ngbevf_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	ret = eth_ngbevf_dev_uninit(dev);
	if (ret)
		return ret;

	ret = eth_ngbevf_dev_init(dev);

	return ret;
}

/*
 * Program every VLAN id recorded in the shadow VFTA into (on = true) or
 * out of (on = false) the HW filter table via the PF mailbox.
 * Bit j of shadow_vfta->vfta[i] corresponds to VLAN id (i << 5) + j.
 */
static void ngbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
	int i = 0, j = 0, vfta = 0, mask = 1;

	for (i = 0; i < NGBE_VFTA_SIZE; i++) {
		vfta = shadow_vfta->vfta[i];
		if (vfta) {
			mask = 1;
			for (j = 0; j < 32; j++) {
				if (vfta & mask)
					hw->mac.set_vfta(hw, (i << 5) + j, 0,
						       on, false);
				mask <<= 1;
			}
		}
	}
}

/*
 * Add/remove one VLAN id to/from the HW filter and mirror the change in
 * the shadow VFTA so it can be restored after a device reset.
 */
static int
ngbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
	uint32_t vid_idx = 0;
	uint32_t vid_bit = 0;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();

	/* vind is not used in VF driver, set to 0, check ngbe_set_vfta_vf */
	ret = hw->mac.set_vfta(hw, vlan_id, 0, !!on, false);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to set VF vlan");
		return ret;
	}
	vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
	vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));

	/* Save what we set and restore it after device reset */
	if (on)
		shadow_vfta->vfta[vid_idx] |= vid_bit;
	else
		shadow_vfta->vfta[vid_idx] &= ~vid_bit;

	return 0;
}

/*
 * Toggle HW VLAN stripping on a single Rx queue and record the state in
 * the per-queue strip bitmap.
 */
static void
ngbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	if (queue >= hw->mac.max_rx_queues)
		return;

	ctrl = rd32(hw, NGBE_RXCFG(queue));
	if (on)
		ctrl |= NGBE_RXCFG_VLAN;
	else
		ctrl &= ~NGBE_RXCFG_VLAN;
	wr32(hw, NGBE_RXCFG(queue), ctrl);

	ngbe_vlan_hw_strip_bitmap_set(dev, queue, on);
}

/*
 * Apply the per-queue VLAN strip offload configuration for all Rx
 * queues. Always returns 0.
 */
static int
ngbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
	struct ngbe_rx_queue *rxq;
	uint16_t i;
	int on = 0;

	/* VF function only support hw strip feature, others are not support */
	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			rxq = dev->data->rx_queues[i];
			on = !!(rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
			ngbevf_vlan_strip_queue_set(dev, i, on);
		}
	}

	return 0;
}

/* ethdev .vlan_offload_set callback: sync strip flags then apply them */
static int
ngbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	ngbe_config_vlan_strip_on_all_queues(dev, mask);

	ngbevf_vlan_offload_config(dev, mask);

	return 0;
}

/*
 * Unmask the (single) Rx queue interrupt vector and enable it at the
 * interrupt-handle level. queue_id is unused: all queues share a vector.
 */
static int
ngbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t vec = NGBE_MISC_VEC_ID;

	if (rte_intr_allow_others(intr_handle))
		vec = NGBE_RX_VEC_START;
	intr->mask_misc &= ~(1 << vec);
	RTE_SET_USED(queue_id);
	/* write-1-to-clear the mask bit */
	wr32(hw, NGBE_VFIMC, ~intr->mask_misc);

	rte_intr_enable(intr_handle);

	return 0;
}

/*
 * Mask the (single) Rx queue interrupt vector. queue_id is unused.
 */
static int
ngbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct
rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t vec = NGBE_MISC_VEC_ID;

	if (rte_intr_allow_others(intr_handle))
		vec = NGBE_RX_VEC_START;
	intr->mask_misc |= (1 << vec);
	RTE_SET_USED(queue_id);
	wr32(hw, NGBE_VFIMS, intr->mask_misc);

	return 0;
}

/*
 * Map a cause to an MSI-X vector in the VF IVAR registers.
 * direction == -1 selects the "other causes" (misc) IVAR; otherwise
 * direction is 0 for Rx / 1 for Tx and queue selects the ring.
 */
static void
ngbevf_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
		    uint8_t queue, uint8_t msix_vector)
{
	uint32_t tmp, idx;

	if (direction == -1) {
		/* other causes */
		msix_vector |= NGBE_VFIVAR_VLD;
		tmp = rd32(hw, NGBE_VFIVARMISC);
		tmp &= ~0xFF;
		tmp |= msix_vector;
		wr32(hw, NGBE_VFIVARMISC, tmp);
	} else {
		/* rx or tx cause */
		/* Workaround for ICR lost */
		/* each IVAR register holds 4 byte-wide entries: two queues
		 * x two directions
		 */
		idx = ((16 * (queue & 1)) + (8 * direction));
		tmp = rd32(hw, NGBE_VFIVAR(queue >> 1));
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, NGBE_VFIVAR(queue >> 1), tmp);
	}
}

/*
 * Configure the VF MSI-X mapping: route the misc cause, then map every
 * Rx queue onto the (single) Rx vector and program its ITR interval.
 * No-op for the queue part when no intr vector/eventfd mapping exists.
 */
static void
ngbevf_configure_msix(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t q_idx;
	uint32_t vector_idx = NGBE_MISC_VEC_ID;
	uint32_t base = NGBE_MISC_VEC_ID;

	/* Configure VF other cause ivar */
	ngbevf_set_ivar_map(hw, -1, 1, vector_idx);

	/* won't configure msix register if no mapping is done
	 * between intr vector and event fd.
	 */
	if (!rte_intr_dp_is_en(intr_handle))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		base = NGBE_RX_VEC_START;
		vector_idx = NGBE_RX_VEC_START;
	}

	/* Configure all RX queues of VF */
	for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
		/* Force all queue use vector 0,
		 * as NGBE_VF_MAXMSIVECOTR = 1
		 */
		ngbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
		rte_intr_vec_list_index_set(intr_handle, q_idx,
					    vector_idx);
		if (vector_idx < base + rte_intr_nb_efd_get(intr_handle)
		    - 1)
			vector_idx++;
	}

	/* As RX queue setting above show, all queues use the vector 0.
	 * Set only the ITR value of NGBE_MISC_VEC_ID.
	 */
	wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID),
	     NGBE_ITR_IVAL(NGBE_QUEUE_ITR_INTERVAL_DEFAULT)
	     | NGBE_ITR_WRDSA);
}

/*
 * Add a unicast MAC address for the VF through the PF mailbox.
 * index/pool are unused on a VF. Returns -1 when the address equals the
 * permanent MAC (re-adding it would waste scarce PF filter entries).
 */
static int
ngbevf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		    __rte_unused uint32_t index,
		    __rte_unused uint32_t pool)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	int err;

	/*
	 * On a VF, adding again the same MAC addr is not an idempotent
	 * operation. Trap this case to avoid exhausting the [very limited]
	 * set of PF resources used to store VF MAC addresses.
	 */
	if (memcmp(hw->mac.perm_addr, mac_addr,
		   sizeof(struct rte_ether_addr)) == 0)
		return -1;
	err = ngbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
	if (err != 0)
		PMD_DRV_LOG(ERR, "Unable to add MAC address "
			    "%02x:%02x:%02x:%02x:%02x:%02x - err=%d",
			    mac_addr->addr_bytes[0],
			    mac_addr->addr_bytes[1],
			    mac_addr->addr_bytes[2],
			    mac_addr->addr_bytes[3],
			    mac_addr->addr_bytes[4],
			    mac_addr->addr_bytes[5],
			    err);
	return err;
}

static void
ngbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_ether_addr *perm_addr =
			(struct rte_ether_addr *)hw->mac.perm_addr;
	struct rte_ether_addr *mac_addr;
	uint32_t i;
	int err;

	/*
	 * The NGBE_VF_SET_MACVLAN command of the ngbe-pf driver does
	 * not support the deletion of a given MAC address.
	 * Instead, it imposes to delete all MAC addresses, then to add again
	 * all MAC addresses with the exception of the one to be deleted.
	 */
	(void)ngbevf_set_uc_addr_vf(hw, 0, NULL);

	/*
	 * Add again all MAC addresses, with the exception of the deleted one
	 * and of the permanent MAC address.
	 */
	for (i = 0, mac_addr = dev->data->mac_addrs;
	     i < hw->mac.num_rar_entries; i++, mac_addr++) {
		/* Skip the deleted MAC address */
		if (i == index)
			continue;
		/* Skip NULL MAC addresses */
		if (rte_is_zero_ether_addr(mac_addr))
			continue;
		/* Skip the permanent MAC address */
		if (memcmp(perm_addr, mac_addr,
			   sizeof(struct rte_ether_addr)) == 0)
			continue;
		err = ngbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
		if (err != 0)
			PMD_DRV_LOG(ERR,
				    "Adding again MAC address "
				    "%02x:%02x:%02x:%02x:%02x:%02x failed "
				    "err=%d",
				    mac_addr->addr_bytes[0],
				    mac_addr->addr_bytes[1],
				    mac_addr->addr_bytes[2],
				    mac_addr->addr_bytes[3],
				    mac_addr->addr_bytes[4],
				    mac_addr->addr_bytes[5],
				    err);
	}
}

/*
 * Set the default (RAR 0) MAC address of the VF. Always returns 0;
 * the set_rar mailbox result is not checked here.
 */
static int
ngbevf_set_default_mac_addr(struct rte_eth_dev *dev,
			    struct rte_ether_addr *addr)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	hw->mac.set_rar(hw, 0, (void *)addr, 0, 0);

	return 0;
}

/*
 * Set the VF MTU. Rejects out-of-range values and, while the port is
 * running, any MTU that would require scattered Rx when it was not
 * enabled at start time. The max frame size is pushed to the PF via the
 * NGBE_VF_SET_LPE mailbox request.
 */
static int
ngbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct ngbe_hw *hw;
	uint32_t max_frame = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
	struct rte_eth_dev_data *dev_data = dev->data;

	hw = ngbe_dev_hw(dev);

	if (mtu < RTE_ETHER_MIN_MTU ||
	    max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)
		return -EINVAL;

	/* If device is started, refuse mtu that requires the support of
	 * scattered packets when this feature has not been enabled before.
	 */
	if (dev_data->dev_started && !dev_data->scattered_rx &&
	    (max_frame + 2 * RTE_VLAN_HLEN >
	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
		PMD_INIT_LOG(ERR, "Stop port first.");
		return -EINVAL;
	}

	/*
	 * When supported by the underlying PF driver, use the NGBE_VF_SET_MTU
	 * request of the version 2.0 of the mailbox API.
	 * For now, use the NGBE_VF_SET_LPE request of the version 1.0
	 * of the mailbox API.
	 */
	if (ngbevf_rlpml_set_vf(hw, max_frame))
		return -EINVAL;

	return 0;
}

/* Total number of registers in all ngbevf_regs groups (for get_reg) */
static int
ngbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
{
	int count = 0;
	int g_ind = 0;
	const struct reg_info *reg_group;

	while ((reg_group = ngbevf_regs[g_ind++]))
		count += ngbe_regs_group_count(reg_group);

	return count;
}

/*
 * ethdev .get_reg callback: with data == NULL report length/width only;
 * otherwise dump the full VF register set (partial dumps unsupported).
 */
static int
ngbevf_get_regs(struct rte_eth_dev *dev,
		struct rte_dev_reg_info *regs)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t *data = regs->data;
	int g_ind = 0;
	int count = 0;
	const struct reg_info *reg_group;

	if
(data == NULL) {
		regs->length = ngbevf_get_reg_length(dev);
		regs->width = sizeof(uint32_t);
		return 0;
	}

	/* Support only full register dump */
	if (regs->length == 0 ||
	    regs->length == (uint32_t)ngbevf_get_reg_length(dev)) {
		regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
				hw->device_id;
		while ((reg_group = ngbevf_regs[g_ind++]))
			count += ngbe_read_regs_group(dev, &data[count],
						      reg_group);
		return 0;
	}

	return -ENOTSUP;
}

/*
 * Enable promiscuous mode via the PF xcast-mode mailbox request.
 * Returns 0 on success, -ENOTSUP if the PF does not support it,
 * -EAGAIN on any other mailbox failure.
 */
static int
ngbevf_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	int ret;

	switch (hw->mac.update_xcast_mode(hw, NGBEVF_XCAST_MODE_PROMISC)) {
	case 0:
		ret = 0;
		break;
	case NGBE_ERR_FEATURE_NOT_SUPPORTED:
		ret = -ENOTSUP;
		break;
	default:
		ret = -EAGAIN;
		break;
	}

	return ret;
}

/*
 * Disable promiscuous mode (drop back to "none" xcast mode).
 * Same return convention as ngbevf_dev_promiscuous_enable().
 */
static int
ngbevf_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	int ret;

	switch (hw->mac.update_xcast_mode(hw, NGBEVF_XCAST_MODE_NONE)) {
	case 0:
		ret = 0;
		break;
	case NGBE_ERR_FEATURE_NOT_SUPPORTED:
		ret = -ENOTSUP;
		break;
	default:
		ret = -EAGAIN;
		break;
	}

	return ret;
}

/*
 * Enable all-multicast mode. A no-op while promiscuous mode is active,
 * since promiscuous already receives all multicast traffic.
 */
static int
ngbevf_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	int ret;

	if (dev->data->promiscuous == 1)
		return 0;

	switch (hw->mac.update_xcast_mode(hw, NGBEVF_XCAST_MODE_ALLMULTI)) {
	case 0:
		ret = 0;
		break;
	case NGBE_ERR_FEATURE_NOT_SUPPORTED:
		ret = -ENOTSUP;
		break;
	default:
		ret = -EAGAIN;
		break;
	}

	return ret;
}

/*
 * Disable all-multicast mode (fall back to filtered multicast).
 * Same return convention as the other xcast-mode callbacks.
 */
static int
ngbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	int ret;

	switch (hw->mac.update_xcast_mode(hw, NGBEVF_XCAST_MODE_MULTI)) {
	case 0:
		ret = 0;
		break;
	case NGBE_ERR_FEATURE_NOT_SUPPORTED:
		ret = -ENOTSUP;
		break;
	default:
		ret = -EAGAIN;
		break;
	}

	return ret;
}

/*
 * Process a pending PF-to-VF mailbox message. On a PF control (reset)
 * message, ack it with a dummy mailbox read and propagate an
 * RTE_ETH_EVENT_INTR_RESET event to the application.
 */
static void ngbevf_mbx_process(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	u32 in_msg = 0;

	/* peek the message first */
	in_msg = rd32(hw, NGBE_VFMBX);

	/* PF reset VF event */
	if (in_msg == NGBE_PF_CONTROL_MSG) {
		/* dummy mbx read to ack pf */
		if (ngbe_read_mbx(hw, &in_msg, 1, 0))
			return;
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
					     NULL);
	}
}

/*
 * Read and clear the VF interrupt cause register and record the pending
 * causes in intr->flags. Interrupts are disabled here and re-enabled by
 * ngbevf_dev_interrupt_action().
 */
static int
ngbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t eicr;
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	ngbevf_intr_disable(dev);

	/* read-on-clear nic registers here */
	eicr = rd32(hw, NGBE_VFICR);
	intr->flags = 0;

	/* only one misc vector supported - mailbox */
	eicr &= NGBE_VFICR_MASK;
	/* Workaround for ICR lost */
	intr->flags |= NGBE_FLAG_MAILBOX;

	/* To avoid
compiler warnings set eicr to used. */
	RTE_SET_USED(eicr);

	return 0;
}

/*
 * Handle the causes latched by ngbevf_dev_interrupt_get_status():
 * service the mailbox if flagged, then re-enable VF interrupts.
 */
static int
ngbevf_dev_interrupt_action(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	if (intr->flags & NGBE_FLAG_MAILBOX) {
		ngbevf_mbx_process(dev);
		intr->flags &= ~NGBE_FLAG_MAILBOX;
	}

	ngbevf_intr_enable(dev);

	return 0;
}

/* Top-level VF interrupt handler registered with the EAL (param = ethdev) */
static void
ngbevf_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	ngbevf_dev_interrupt_get_status(dev);
	ngbevf_dev_interrupt_action(dev);
}

/*
 * dev_ops for virtual function, bare necessities for basic vf
 * operation have been implemented
 */
static const struct eth_dev_ops ngbevf_eth_dev_ops = {
	.dev_configure        = ngbevf_dev_configure,
	.dev_start            = ngbevf_dev_start,
	.dev_stop             = ngbevf_dev_stop,
	.link_update          = ngbevf_dev_link_update,
	.stats_get            = ngbevf_dev_stats_get,
	.xstats_get           = ngbevf_dev_xstats_get,
	.stats_reset          = ngbevf_dev_stats_reset,
	.xstats_reset         = ngbevf_dev_stats_reset,
	.xstats_get_names     = ngbevf_dev_xstats_get_names,
	.dev_close            = ngbevf_dev_close,
	.dev_reset            = ngbevf_dev_reset,
	.promiscuous_enable   = ngbevf_dev_promiscuous_enable,
	.promiscuous_disable  = ngbevf_dev_promiscuous_disable,
	.allmulticast_enable  = ngbevf_dev_allmulticast_enable,
	.allmulticast_disable = ngbevf_dev_allmulticast_disable,
	.dev_infos_get        = ngbevf_dev_info_get,
	.dev_supported_ptypes_get = ngbe_dev_supported_ptypes_get,
	.mtu_set              = ngbevf_dev_set_mtu,
	.vlan_filter_set      = ngbevf_vlan_filter_set,
	.vlan_strip_queue_set = ngbevf_vlan_strip_queue_set,
	.vlan_offload_set     = ngbevf_vlan_offload_set,
	.rx_queue_setup       = ngbe_dev_rx_queue_setup,
	.rx_queue_release     = ngbe_dev_rx_queue_release,
	.tx_queue_setup       = ngbe_dev_tx_queue_setup,
	.tx_queue_release     = ngbe_dev_tx_queue_release,
	.rx_queue_intr_enable = ngbevf_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = ngbevf_dev_rx_queue_intr_disable,
	.mac_addr_add         = ngbevf_add_mac_addr,
	.mac_addr_remove      = ngbevf_remove_mac_addr,
	.set_mc_addr_list     = ngbe_dev_set_mc_addr_list,
	.rxq_info_get         = ngbe_rxq_info_get,
	.txq_info_get         = ngbe_txq_info_get,
	.mac_addr_set         = ngbevf_set_default_mac_addr,
	.get_reg              = ngbevf_get_regs,
	.tx_done_cleanup      = ngbe_dev_tx_done_cleanup,
};

RTE_PMD_REGISTER_PCI(net_ngbe_vf, rte_ngbevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ngbe_vf, pci_id_ngbevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ngbe_vf, "* igb_uio | vfio-pci");