/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <sys/queue.h>

#include <rte_alarm.h>
#include <rte_branch_prediction.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_tailq.h>
#include <rte_devargs.h>
#include <rte_kvargs.h>

#include "base/nicvf_plat.h"

#include "nicvf_ethdev.h"
#include "nicvf_rxtx.h"
#include "nicvf_svf.h"
#include "nicvf_logs.h"

int nicvf_logtype_mbox;
int nicvf_logtype_init;
int nicvf_logtype_driver;

static void nicvf_dev_stop(struct rte_eth_dev *dev);
static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup);
static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic,
			  bool cleanup);
static int nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
static int nicvf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
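/*
 * Constructor: register the mbox/init/driver dynamic log types and
 * default them to NOTICE level. If rte_log_register() fails, the
 * corresponding logtype stays negative and no level is set.
 */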
RTE_INIT(nicvf_init_log)
{
	nicvf_logtype_mbox = rte_log_register("pmd.net.thunderx.mbox");
	if (nicvf_logtype_mbox >= 0)
		rte_log_set_level(nicvf_logtype_mbox, RTE_LOG_NOTICE);

	nicvf_logtype_init = rte_log_register("pmd.net.thunderx.init");
	if (nicvf_logtype_init >= 0)
		rte_log_set_level(nicvf_logtype_init, RTE_LOG_NOTICE);

	nicvf_logtype_driver = rte_log_register("pmd.net.thunderx.driver");
	if (nicvf_logtype_driver >= 0)
		rte_log_set_level(nicvf_logtype_driver, RTE_LOG_NOTICE);
}

static void
nicvf_link_status_update(struct nicvf *nic,
			 struct rte_eth_link *link)
{
	memset(link, 0, sizeof(*link));

	link->link_status = nic->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;

	if (nic->duplex == NICVF_HALF_DUPLEX)
		link->link_duplex = ETH_LINK_HALF_DUPLEX;
	else if (nic->duplex == NICVF_FULL_DUPLEX)
		link->link_duplex = ETH_LINK_FULL_DUPLEX;
	link->link_speed = nic->speed;
	link->link_autoneg = ETH_LINK_AUTONEG;
}

static void
nicvf_interrupt(void *arg)
{
	struct rte_eth_dev *dev = arg;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	struct rte_eth_link link;

	if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) {
		if (dev->data->dev_conf.intr_conf.lsc) {
			nicvf_link_status_update(nic, &link);
			rte_eth_linkstatus_set(dev, &link);

			_rte_eth_dev_callback_process(dev,
						      RTE_ETH_EVENT_INTR_LSC,
						      NULL);
		}
	}

	rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
			  nicvf_interrupt, dev);
}

static void
nicvf_vf_interrupt(void *arg)
{
	struct nicvf *nic = arg;

	nicvf_reg_poll_interrupts(nic);

	rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
			  nicvf_vf_interrupt, nic);
}

static int
nicvf_periodic_alarm_start(void (fn)(void *), void *arg)
{
	return rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000, fn, arg);
}

static int
nicvf_periodic_alarm_stop(void (fn)(void *), void *arg)
{
	return rte_eal_alarm_cancel(fn, arg);
}
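/*
 * Link-state changes are detected by polling: nicvf_interrupt() and
 * nicvf_vf_interrupt() above check the mailbox for
 * NIC_MBOX_MSG_BGX_LINK_CHANGE and re-arm themselves through
 * rte_eal_alarm_set() every NICVF_INTR_POLL_INTERVAL_MS.
 */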
/*
 * Return 0 means link status changed, -1 means not changed
 */
static int
nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
#define CHECK_INTERVAL 100  /* 100ms */
#define MAX_CHECK_TIME 90   /* 9s (90 * 100ms) in total */
	struct rte_eth_link link;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	int i;

	PMD_INIT_FUNC_TRACE();

	if (wait_to_complete) {
		/* rte_eth_link_get() might need to wait up to 9 seconds */
		for (i = 0; i < MAX_CHECK_TIME; i++) {
			nicvf_link_status_update(nic, &link);
			if (link.link_status == ETH_LINK_UP)
				break;
			rte_delay_ms(CHECK_INTERVAL);
		}
	} else {
		nicvf_link_status_update(nic, &link);
	}

	return rte_eth_linkstatus_set(dev, &link);
}
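/*
 * Validate and apply a new MTU. The L2 overhead (NIC_HW_L2_OVERHEAD)
 * is added before checking against the NIC_HW_MIN_FRS..NIC_HW_MAX_FRS
 * hardware limits, and the resulting frame must fit the Rx buffer size
 * unless scattered Rx is enabled.
 */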
static int
nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint32_t buffsz, frame_size = mtu + NIC_HW_L2_OVERHEAD;
	size_t i;
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;

	PMD_INIT_FUNC_TRACE();

	if (frame_size > NIC_HW_MAX_FRS)
		return -EINVAL;

	if (frame_size < NIC_HW_MIN_FRS)
		return -EINVAL;

	buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;

	/*
	 * Refuse mtu that requires the support of scattered packets
	 * when this feature has not been enabled before.
	 */
	if (dev->data->dev_started && !dev->data->scattered_rx &&
		(frame_size + 2 * VLAN_TAG_SIZE > buffsz))
		return -EINVAL;

	/* check <seg size> * <max_seg> >= max_frame */
	if (dev->data->scattered_rx &&
		(frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS))
		return -EINVAL;

	if (frame_size > RTE_ETHER_MAX_LEN)
		rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;

	if (nicvf_mbox_update_hw_max_frs(nic, mtu))
		return -EINVAL;

	/* Update max_rx_pkt_len */
	rxmode->max_rx_pkt_len = mtu + RTE_ETHER_HDR_LEN;
	nic->mtu = mtu;

	for (i = 0; i < nic->sqs_count; i++)
		nic->snicvf[i]->mtu = mtu;

	return 0;
}

static int
nicvf_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	uint64_t *data = regs->data;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (data == NULL) {
		regs->length = nicvf_reg_get_count();
		regs->width = THUNDERX_REG_BYTES;
		return 0;
	}

	/* Support only full register dump */
	if ((regs->length == 0) ||
		(regs->length == (uint32_t)nicvf_reg_get_count())) {
		regs->version = nic->vendor_id << 16 | nic->device_id;
		nicvf_reg_dump(nic, data);
		return 0;
	}
	return -ENOTSUP;
}
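/*
 * Per-queue stats are gathered VF by VF: first the primary VF's queue
 * ranges, then each secondary VF (snicvf). Queue indices at or beyond
 * RTE_ETHDEV_QUEUE_STAT_CNTRS cannot be reported and are skipped.
 */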
static int
nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	uint16_t qidx;
	struct nicvf_hw_rx_qstats rx_qstats;
	struct nicvf_hw_tx_qstats tx_qstats;
	struct nicvf_hw_stats port_stats;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint16_t rx_start, rx_end;
	uint16_t tx_start, tx_end;
	size_t i;

	/* RX queue indices for the first VF */
	nicvf_rx_range(dev, nic, &rx_start, &rx_end);

	/* Reading per RX ring stats */
	for (qidx = rx_start; qidx <= rx_end; qidx++) {
		if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		nicvf_hw_get_rx_qstats(nic, &rx_qstats, qidx);
		stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
		stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
	}

	/* TX queue indices for the first VF */
	nicvf_tx_range(dev, nic, &tx_start, &tx_end);

	/* Reading per TX ring stats */
	for (qidx = tx_start; qidx <= tx_end; qidx++) {
		if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		nicvf_hw_get_tx_qstats(nic, &tx_qstats, qidx);
		stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
		stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
	}

	for (i = 0; i < nic->sqs_count; i++) {
		struct nicvf *snic = nic->snicvf[i];

		if (snic == NULL)
			break;

		/* RX queue indices for a secondary VF */
		nicvf_rx_range(dev, snic, &rx_start, &rx_end);

		/* Reading per RX ring stats */
		for (qidx = rx_start; qidx <= rx_end; qidx++) {
			if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
				break;

			nicvf_hw_get_rx_qstats(snic, &rx_qstats,
					       qidx % MAX_RCV_QUEUES_PER_QS);
			stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
			stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
		}

		/* TX queue indices for a secondary VF */
		nicvf_tx_range(dev, snic, &tx_start, &tx_end);
		/* Reading per TX ring stats */
		for (qidx = tx_start; qidx <= tx_end; qidx++) {
			if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
				break;

			nicvf_hw_get_tx_qstats(snic, &tx_qstats,
					       qidx % MAX_SND_QUEUES_PER_QS);
			stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
			stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
		}
	}

	nicvf_hw_get_stats(nic, &port_stats);
	stats->ibytes = port_stats.rx_bytes;
	stats->ipackets = port_stats.rx_ucast_frames;
	stats->ipackets += port_stats.rx_bcast_frames;
	stats->ipackets += port_stats.rx_mcast_frames;
	stats->ierrors = port_stats.rx_l2_errors;
	stats->imissed = port_stats.rx_drop_red;
	stats->imissed += port_stats.rx_drop_overrun;
	stats->imissed += port_stats.rx_drop_bcast;
	stats->imissed += port_stats.rx_drop_mcast;
	stats->imissed += port_stats.rx_drop_l3_bcast;
	stats->imissed += port_stats.rx_drop_l3_mcast;

	stats->obytes = port_stats.tx_bytes_ok;
	stats->opackets = port_stats.tx_ucast_frames_ok;
	stats->opackets += port_stats.tx_bcast_frames_ok;
	stats->opackets += port_stats.tx_mcast_frames_ok;
	stats->oerrors = port_stats.tx_drops;

	return 0;
}
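/*
 * Advertise the packet types the Rx path can recognize; tunnel ptypes
 * are appended only when the hardware reports NICVF_CAP_TUNNEL_PARSING.
 */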
static const uint32_t *
nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	size_t copied;
	static uint32_t ptypes[32];
	struct nicvf *nic = nicvf_pmd_priv(dev);
	static const uint32_t ptypes_common[] = {
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
	};
	static const uint32_t ptypes_tunnel[] = {
		RTE_PTYPE_TUNNEL_GRE,
		RTE_PTYPE_TUNNEL_GENEVE,
		RTE_PTYPE_TUNNEL_VXLAN,
		RTE_PTYPE_TUNNEL_NVGRE,
	};
	static const uint32_t ptypes_end = RTE_PTYPE_UNKNOWN;

	copied = sizeof(ptypes_common);
	memcpy(ptypes, ptypes_common, copied);
	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
		memcpy((char *)ptypes + copied, ptypes_tunnel,
		       sizeof(ptypes_tunnel));
		copied += sizeof(ptypes_tunnel);
	}

	memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end));

	/* All Ptypes are supported in all Rx functions. */
	return ptypes;
}
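/*
 * Each queue to be reset contributes a 2-bit field (0x3 << (qidx * 2))
 * to the rxqs/txqs bitmaps handed to the PF over the mailbox.
 */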
static int
nicvf_dev_stats_reset(struct rte_eth_dev *dev)
{
	int i, qidx;
	uint16_t rxqs = 0, txqs = 0;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint16_t rx_start, rx_end;
	uint16_t tx_start, tx_end;
	int ret;

	/* Reset all primary nic counters */
	nicvf_rx_range(dev, nic, &rx_start, &rx_end);
	for (qidx = rx_start; qidx <= rx_end; qidx++)
		rxqs |= (0x3 << (qidx * 2));

	nicvf_tx_range(dev, nic, &tx_start, &tx_end);
	for (qidx = tx_start; qidx <= tx_end; qidx++)
		txqs |= (0x3 << (qidx * 2));

	ret = nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, rxqs, txqs);
	if (ret != 0)
		return ret;

	/* Reset secondary nic queue counters */
	for (i = 0; i < (int)nic->sqs_count; i++) {
		struct nicvf *snic = nic->snicvf[i];
		if (snic == NULL)
			break;

		/* Use a separate index here so the VF iterator above is
		 * not clobbered by the per-queue loops.
		 */
		nicvf_rx_range(dev, snic, &rx_start, &rx_end);
		for (qidx = rx_start; qidx <= rx_end; qidx++)
			rxqs |= (0x3 << ((qidx % MAX_CMP_QUEUES_PER_QS) * 2));

		nicvf_tx_range(dev, snic, &tx_start, &tx_end);
		for (qidx = tx_start; qidx <= tx_end; qidx++)
			txqs |= (0x3 << ((qidx % MAX_SND_QUEUES_PER_QS) * 2));

		ret = nicvf_mbox_reset_stat_counters(snic, 0, 0, rxqs, txqs);
		if (ret != 0)
			return ret;
	}

	return 0;
}

/* Promiscuous mode enabled by default in LMAC to VF 1:1 map configuration */
static int
nicvf_dev_promisc_enable(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}
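/*
 * Translate between the ethdev ETH_RSS_* flag set and the NIC's RSS
 * field enables; e.g. ETH_RSS_NONFRAG_IPV4_TCP maps to
 * RSS_IP_ENA | RSS_TCP_ENA. Tunnel flags are honoured only when the
 * hardware supports tunnel parsing.
 */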
static inline uint64_t
nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss)
{
	uint64_t nic_rss = 0;

	if (ethdev_rss & ETH_RSS_IPV4)
		nic_rss |= RSS_IP_ENA;

	if (ethdev_rss & ETH_RSS_IPV6)
		nic_rss |= RSS_IP_ENA;

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP)
		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP)
		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP)
		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP)
		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);

	if (ethdev_rss & ETH_RSS_PORT)
		nic_rss |= RSS_L2_EXTENDED_HASH_ENA;

	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
		if (ethdev_rss & ETH_RSS_VXLAN)
			nic_rss |= RSS_TUN_VXLAN_ENA;

		if (ethdev_rss & ETH_RSS_GENEVE)
			nic_rss |= RSS_TUN_GENEVE_ENA;

		if (ethdev_rss & ETH_RSS_NVGRE)
			nic_rss |= RSS_TUN_NVGRE_ENA;
	}

	return nic_rss;
}

static inline uint64_t
nicvf_rss_nic_to_ethdev(struct nicvf *nic, uint64_t nic_rss)
{
	uint64_t ethdev_rss = 0;

	if (nic_rss & RSS_IP_ENA)
		ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6);

	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA))
		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP |
			       ETH_RSS_NONFRAG_IPV6_TCP);

	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA))
		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP |
			       ETH_RSS_NONFRAG_IPV6_UDP);

	if (nic_rss & RSS_L2_EXTENDED_HASH_ENA)
		ethdev_rss |= ETH_RSS_PORT;

	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
		if (nic_rss & RSS_TUN_VXLAN_ENA)
			ethdev_rss |= ETH_RSS_VXLAN;

		if (nic_rss & RSS_TUN_GENEVE_ENA)
			ethdev_rss |= ETH_RSS_GENEVE;

		if (nic_rss & RSS_TUN_NVGRE_ENA)
			ethdev_rss |= ETH_RSS_NVGRE;
	}
	return ethdev_rss;
}
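/*
 * RETA query/update operate only on a full-size table
 * (NIC_MAX_RSS_IDR_TBL_SIZE entries), walked in RTE_RETA_GROUP_SIZE
 * chunks with per-entry mask bits selecting which entries to touch.
 */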
static int
nicvf_dev_reta_query(struct rte_eth_dev *dev,
		     struct rte_eth_rss_reta_entry64 *reta_conf,
		     uint16_t reta_size)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
	int ret, i, j;

	if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
		PMD_DRV_LOG(ERR,
			    "The size of the hash lookup table configured "
			    "(%u) doesn't match the number hardware can support "
			    "(%u)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
		return -EINVAL;
	}

	ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
	if (ret)
		return ret;

	/* Copy RETA table */
	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] =
					tbl[(i * RTE_RETA_GROUP_SIZE) + j];
	}

	return 0;
}

static int
nicvf_dev_reta_update(struct rte_eth_dev *dev,
		      struct rte_eth_rss_reta_entry64 *reta_conf,
		      uint16_t reta_size)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
	int ret, i, j;

	if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
		PMD_DRV_LOG(ERR,
			    "The size of the hash lookup table configured "
			    "(%u) doesn't match the number hardware can support "
			    "(%u)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
		return -EINVAL;
	}

	ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
	if (ret)
		return ret;

	/* Copy RETA table */
	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				tbl[(i * RTE_RETA_GROUP_SIZE) + j] =
					reta_conf[i].reta[j];
	}

	return nicvf_rss_reta_update(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
}

static int
nicvf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (rss_conf->rss_key)
		nicvf_rss_get_key(nic, rss_conf->rss_key);

	rss_conf->rss_key_len = RSS_HASH_KEY_BYTE_SIZE;
	rss_conf->rss_hf = nicvf_rss_nic_to_ethdev(nic, nicvf_rss_get_cfg(nic));
	return 0;
}
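/* A new hash key is accepted only at exactly RSS_HASH_KEY_BYTE_SIZE bytes. */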
static int
nicvf_dev_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint64_t nic_rss;

	if (rss_conf->rss_key &&
		rss_conf->rss_key_len != RSS_HASH_KEY_BYTE_SIZE) {
		PMD_DRV_LOG(ERR, "Hash key size mismatch %u",
			    rss_conf->rss_key_len);
		return -EINVAL;
	}

	if (rss_conf->rss_key)
		nicvf_rss_set_key(nic, rss_conf->rss_key);

	nic_rss = nicvf_rss_ethdev_to_nic(nic, rss_conf->rss_hf);
	nicvf_rss_set_cfg(nic, nic_rss);
	return 0;
}

static int
nicvf_qset_cq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
		    struct nicvf_rxq *rxq, uint16_t qidx, uint32_t desc_cnt)
{
	const struct rte_memzone *rz;
	uint32_t ring_size = CMP_QUEUE_SZ_MAX * sizeof(union cq_entry_t);

	rz = rte_eth_dma_zone_reserve(dev, "cq_ring",
				      nicvf_netdev_qidx(nic, qidx), ring_size,
				      NICVF_CQ_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for cq hw ring");
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	rxq->phys = rz->iova;
	rxq->desc = rz->addr;
	rxq->qlen_mask = desc_cnt - 1;

	return 0;
}

static int
nicvf_qset_sq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
		    struct nicvf_txq *sq, uint16_t qidx, uint32_t desc_cnt)
{
	const struct rte_memzone *rz;
	uint32_t ring_size = SND_QUEUE_SZ_MAX * sizeof(union sq_entry_t);

	rz = rte_eth_dma_zone_reserve(dev, "sq",
				      nicvf_netdev_qidx(nic, qidx), ring_size,
				      NICVF_SQ_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for sq hw ring");
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	sq->phys = rz->iova;
	sq->desc = rz->addr;
	sq->qlen_mask = desc_cnt - 1;

	return 0;
}
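/*
 * A single receive buffer descriptor ring (RBDR) is allocated per
 * queue set and shared by all Rx queues of the VF; its status and
 * doorbell registers are taken from RBDR index 0.
 */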
static int
nicvf_qset_rbdr_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
		      uint32_t desc_cnt, uint32_t buffsz)
{
	struct nicvf_rbdr *rbdr;
	const struct rte_memzone *rz;
	uint32_t ring_size;

	assert(nic->rbdr == NULL);
	rbdr = rte_zmalloc_socket("rbdr", sizeof(struct nicvf_rbdr),
				  RTE_CACHE_LINE_SIZE, nic->node);
	if (rbdr == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr");
		return -ENOMEM;
	}

	ring_size = sizeof(struct rbdr_entry_t) * RBDR_QUEUE_SZ_MAX;
	rz = rte_eth_dma_zone_reserve(dev, "rbdr",
				      nicvf_netdev_qidx(nic, 0), ring_size,
				      NICVF_RBDR_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr desc ring");
		rte_free(rbdr);
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	rbdr->phys = rz->iova;
	rbdr->tail = 0;
	rbdr->next_tail = 0;
	rbdr->desc = rz->addr;
	rbdr->buffsz = buffsz;
	rbdr->qlen_mask = desc_cnt - 1;
	rbdr->rbdr_status =
		nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_STATUS0;
	rbdr->rbdr_door =
		nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_DOOR;

	nic->rbdr = rbdr;
	return 0;
}

static void
nicvf_rbdr_release_mbuf(struct rte_eth_dev *dev, struct nicvf *nic,
			nicvf_iova_addr_t phy)
{
	uint16_t qidx;
	void *obj;
	struct nicvf_rxq *rxq;
	uint16_t rx_start, rx_end;

	/* Get queue ranges for this VF */
	nicvf_rx_range(dev, nic, &rx_start, &rx_end);

	for (qidx = rx_start; qidx <= rx_end; qidx++) {
		rxq = dev->data->rx_queues[qidx];
		if (rxq->precharge_cnt) {
			obj = (void *)nicvf_mbuff_phy2virt(phy,
							   rxq->mbuf_phys_off);
			rte_mempool_put(rxq->pool, obj);
			rxq->precharge_cnt--;
			break;
		}
	}
}

static inline void
nicvf_rbdr_release_mbufs(struct rte_eth_dev *dev, struct nicvf *nic)
{
	uint32_t qlen_mask, head;
	struct rbdr_entry_t *entry;
	struct nicvf_rbdr *rbdr = nic->rbdr;

	qlen_mask = rbdr->qlen_mask;
	head = rbdr->head;
	while (head != rbdr->tail) {
		entry = rbdr->desc + head;
		nicvf_rbdr_release_mbuf(dev, nic, entry->full_addr);
		head++;
		head = head & qlen_mask;
	}
}
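/*
 * Tx queue teardown helpers: walk the software ring from head to tail,
 * free any mbufs still attached, then zero both the hardware
 * descriptors and the software ring.
 */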
static inline void
nicvf_tx_queue_release_mbufs(struct nicvf_txq *txq)
{
	uint32_t head;

	head = txq->head;
	while (head != txq->tail) {
		if (txq->txbuffs[head]) {
			rte_pktmbuf_free_seg(txq->txbuffs[head]);
			txq->txbuffs[head] = NULL;
		}
		head++;
		head = head & txq->qlen_mask;
	}
}

static void
nicvf_tx_queue_reset(struct nicvf_txq *txq)
{
	uint32_t txq_desc_cnt = txq->qlen_mask + 1;

	memset(txq->desc, 0, sizeof(union sq_entry_t) * txq_desc_cnt);
	memset(txq->txbuffs, 0, sizeof(struct rte_mbuf *) * txq_desc_cnt);
	txq->tail = 0;
	txq->head = 0;
	txq->xmit_bufs = 0;
}

static inline int
nicvf_vf_start_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
			uint16_t qidx)
{
	struct nicvf_txq *txq;
	int ret;

	assert(qidx < MAX_SND_QUEUES_PER_QS);

	if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
		RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
	txq->pool = NULL;
	ret = nicvf_qset_sq_config(nic, qidx, txq);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure sq VF%d %d %d",
			     nic->vf_id, qidx, ret);
		goto config_sq_error;
	}

	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STARTED;
	return ret;

config_sq_error:
	nicvf_qset_sq_reclaim(nic, qidx);
	return ret;
}

static inline int
nicvf_vf_stop_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
		       uint16_t qidx)
{
	struct nicvf_txq *txq;
	int ret;

	assert(qidx < MAX_SND_QUEUES_PER_QS);

	if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
		RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;

	ret = nicvf_qset_sq_reclaim(nic, qidx);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim sq VF%d %d %d",
			     nic->vf_id, qidx, ret);

	txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
	nicvf_tx_queue_release_mbufs(txq);
	nicvf_tx_queue_reset(txq);

	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STOPPED;
	return ret;
}
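/*
 * CPI (channel parse index) configuration is done through the PF
 * mailbox and covers only the Rx queues currently in started state.
 */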
static inline int
nicvf_configure_cpi(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint16_t qidx, qcnt;
	int ret;

	/* Count started rx queues */
	for (qidx = qcnt = 0; qidx < dev->data->nb_rx_queues; qidx++)
		if (dev->data->rx_queue_state[qidx] ==
			RTE_ETH_QUEUE_STATE_STARTED)
			qcnt++;

	nic->cpi_alg = CPI_ALG_NONE;
	ret = nicvf_mbox_config_cpi(nic, qcnt);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to configure CPI %d", ret);

	return ret;
}

static inline int
nicvf_configure_rss(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint64_t rsshf;
	int ret = -EINVAL;

	rsshf = nicvf_rss_ethdev_to_nic(nic,
			dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);
	PMD_DRV_LOG(INFO, "mode=%d rx_queues=%d loopback=%d rsshf=0x%" PRIx64,
		    dev->data->dev_conf.rxmode.mq_mode,
		    dev->data->nb_rx_queues,
		    dev->data->dev_conf.lpbk_mode, rsshf);

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
		ret = nicvf_rss_term(nic);
	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		ret = nicvf_rss_config(nic, dev->data->nb_rx_queues, rsshf);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret);

	return ret;
}

static int
nicvf_configure_rss_reta(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	unsigned int idx, qmap_size;
	uint8_t qmap[RTE_MAX_QUEUES_PER_PORT];
	uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];

	if (nic->cpi_alg != CPI_ALG_NONE)
		return -EINVAL;

	/* Prepare queue map */
	for (idx = 0, qmap_size = 0; idx < dev->data->nb_rx_queues; idx++) {
		if (dev->data->rx_queue_state[idx] ==
			RTE_ETH_QUEUE_STATE_STARTED)
			qmap[qmap_size++] = idx;
	}

	/* Update default RSS RETA */
	for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
		default_reta[idx] = qmap[idx % qmap_size];

	return nicvf_rss_reta_update(nic, default_reta,
				     NIC_MAX_RSS_IDR_TBL_SIZE);
}

static void
nicvf_dev_tx_queue_release(void *sq)
{
	struct nicvf_txq *txq;

	PMD_INIT_FUNC_TRACE();

	txq = (struct nicvf_txq *)sq;
	if (txq) {
		if (txq->txbuffs != NULL) {
			nicvf_tx_queue_release_mbufs(txq);
			rte_free(txq->txbuffs);
			txq->txbuffs = NULL;
		}
		rte_free(txq);
	}
}
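/*
 * Select the Tx burst callback: any queue configured with
 * DEV_TX_OFFLOAD_MULTI_SEGS forces the multi-segment variant for the
 * whole port.
 */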
static void
nicvf_set_tx_function(struct rte_eth_dev *dev)
{
	struct nicvf_txq *txq = NULL;
	size_t i;
	bool multiseg = false;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
			multiseg = true;
			break;
		}
	}

	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
	if (multiseg) {
		PMD_DRV_LOG(DEBUG, "Using multi-segment tx callback");
		dev->tx_pkt_burst = nicvf_xmit_pkts_multiseg;
	} else {
		PMD_DRV_LOG(DEBUG, "Using single-segment tx callback");
		dev->tx_pkt_burst = nicvf_xmit_pkts;
	}

	if (!txq)
		return;

	if (txq->pool_free == nicvf_single_pool_free_xmited_buffers)
		PMD_DRV_LOG(DEBUG, "Using single-mempool tx free method");
	else
		PMD_DRV_LOG(DEBUG, "Using multi-mempool tx free method");
}

static void
nicvf_set_rx_function(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);

	const eth_rx_burst_t rx_burst_func[2][2][2] = {
	/* [NORMAL/SCATTER] [CKSUM/NO_CKSUM] [VLAN_STRIP/NO_VLAN_STRIP] */
		[0][0][0] = nicvf_recv_pkts_no_offload,
		[0][0][1] = nicvf_recv_pkts_vlan_strip,
		[0][1][0] = nicvf_recv_pkts_cksum,
		[0][1][1] = nicvf_recv_pkts_cksum_vlan_strip,
		[1][0][0] = nicvf_recv_pkts_multiseg_no_offload,
		[1][0][1] = nicvf_recv_pkts_multiseg_vlan_strip,
		[1][1][0] = nicvf_recv_pkts_multiseg_cksum,
		[1][1][1] = nicvf_recv_pkts_multiseg_cksum_vlan_strip,
	};

	dev->rx_pkt_burst =
		rx_burst_func[dev->data->scattered_rx]
			     [nic->offload_cksum][nic->vlan_strip];
}
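/*
 * Tx queue setup: queue indices at or above MAX_SND_QUEUES_PER_QS
 * belong to a secondary VF, so the index is remapped into that VF's
 * queue set before the queue is validated and configured.
 */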
static int
nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
			 uint16_t nb_desc, unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	uint16_t tx_free_thresh;
	bool is_single_pool;
	struct nicvf_txq *txq;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint64_t offloads;

	PMD_INIT_FUNC_TRACE();

	if (qidx >= MAX_SND_QUEUES_PER_QS)
		nic = nic->snicvf[qidx / MAX_SND_QUEUES_PER_QS - 1];

	qidx = qidx % MAX_SND_QUEUES_PER_QS;

	/* Socket id check */
	if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
			    socket_id, nic->node);

	/* Tx deferred start is not supported */
	if (tx_conf->tx_deferred_start) {
		PMD_INIT_LOG(ERR, "Tx deferred start not supported");
		return -EINVAL;
	}

	/* Roundup nb_desc to available qsize and validate max number of desc */
	nb_desc = nicvf_qsize_sq_roundup(nb_desc);
	if (nb_desc == 0) {
		PMD_INIT_LOG(ERR, "Value of nb_desc beyond available sq qsize");
		return -EINVAL;
	}

	/* Validate tx_free_thresh */
	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
				    tx_conf->tx_free_thresh :
				    NICVF_DEFAULT_TX_FREE_THRESH);

	if (tx_free_thresh > (nb_desc) ||
		tx_free_thresh > NICVF_MAX_TX_FREE_THRESH) {
		PMD_INIT_LOG(ERR,
			     "tx_free_thresh must be less than the number of TX "
			     "descriptors. (tx_free_thresh=%u port=%d "
			     "queue=%d)", (unsigned int)tx_free_thresh,
			     (int)dev->data->port_id, (int)qidx);
		return -EINVAL;
	}
	/* Free memory prior to re-allocation if needed. */
	if (dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
		PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
			   nicvf_netdev_qidx(nic, qidx));
		nicvf_dev_tx_queue_release(
			dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)]);
		dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
	}

	/* Allocating tx queue data structure */
	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nicvf_txq),
				 RTE_CACHE_LINE_SIZE, nic->node);
	if (txq == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate txq=%d",
			     nicvf_netdev_qidx(nic, qidx));
		return -ENOMEM;
	}

	txq->nic = nic;
	txq->queue_id = qidx;
	txq->tx_free_thresh = tx_free_thresh;
	txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
	txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
	txq->offloads = offloads;

	is_single_pool = !!(offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);

	/* Choose optimum free threshold value for multipool case */
	if (!is_single_pool) {
		txq->tx_free_thresh = (uint16_t)
		(tx_conf->tx_free_thresh == NICVF_DEFAULT_TX_FREE_THRESH ?
10303f3c6f97SJerin Jacob NICVF_TX_FREE_MPOOL_THRESH : 10313f3c6f97SJerin Jacob tx_conf->tx_free_thresh); 10321c421f18SJerin Jacob txq->pool_free = nicvf_multi_pool_free_xmited_buffers; 10331c421f18SJerin Jacob } else { 10341c421f18SJerin Jacob txq->pool_free = nicvf_single_pool_free_xmited_buffers; 10353f3c6f97SJerin Jacob } 10363f3c6f97SJerin Jacob 10373f3c6f97SJerin Jacob /* Allocate software ring */ 10383f3c6f97SJerin Jacob txq->txbuffs = rte_zmalloc_socket("txq->txbuffs", 10393f3c6f97SJerin Jacob nb_desc * sizeof(struct rte_mbuf *), 10403f3c6f97SJerin Jacob RTE_CACHE_LINE_SIZE, nic->node); 10413f3c6f97SJerin Jacob 10423f3c6f97SJerin Jacob if (txq->txbuffs == NULL) { 10433f3c6f97SJerin Jacob nicvf_dev_tx_queue_release(txq); 10443f3c6f97SJerin Jacob return -ENOMEM; 10453f3c6f97SJerin Jacob } 10463f3c6f97SJerin Jacob 10476d3cbd56SKamil Rytarowski if (nicvf_qset_sq_alloc(dev, nic, txq, qidx, nb_desc)) { 10483f3c6f97SJerin Jacob PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx); 10493f3c6f97SJerin Jacob nicvf_dev_tx_queue_release(txq); 10503f3c6f97SJerin Jacob return -ENOMEM; 10513f3c6f97SJerin Jacob } 10523f3c6f97SJerin Jacob 10533f3c6f97SJerin Jacob nicvf_tx_queue_reset(txq); 10543f3c6f97SJerin Jacob 1055c97da2cbSMaciej Czekaj PMD_INIT_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p" 1056c97da2cbSMaciej Czekaj " phys=0x%" PRIx64 " offloads=0x%" PRIx64, 105721e3fb00SKamil Rytarowski nicvf_netdev_qidx(nic, qidx), txq, nb_desc, txq->desc, 1058c97da2cbSMaciej Czekaj txq->phys, txq->offloads); 10593f3c6f97SJerin Jacob 106021e3fb00SKamil Rytarowski dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq; 106121e3fb00SKamil Rytarowski dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] = 106221e3fb00SKamil Rytarowski RTE_ETH_QUEUE_STATE_STOPPED; 10633f3c6f97SJerin Jacob return 0; 10643f3c6f97SJerin Jacob } 10653f3c6f97SJerin Jacob 106686b4eb42SJerin Jacob static inline void 10676d3cbd56SKamil Rytarowski nicvf_rx_queue_release_mbufs(struct rte_eth_dev *dev, struct nicvf_rxq *rxq) 106886b4eb42SJerin Jacob { 106986b4eb42SJerin Jacob uint32_t rxq_cnt; 107086b4eb42SJerin Jacob uint32_t nb_pkts, released_pkts = 0; 107186b4eb42SJerin Jacob uint32_t refill_cnt = 0; 107286b4eb42SJerin Jacob struct rte_mbuf *rx_pkts[NICVF_MAX_RX_FREE_THRESH]; 107386b4eb42SJerin Jacob 107486b4eb42SJerin Jacob if (dev->rx_pkt_burst == NULL) 107586b4eb42SJerin Jacob return; 107686b4eb42SJerin Jacob 107721e3fb00SKamil Rytarowski while ((rxq_cnt = nicvf_dev_rx_queue_count(dev, 107821e3fb00SKamil Rytarowski nicvf_netdev_qidx(rxq->nic, rxq->queue_id)))) { 107986b4eb42SJerin Jacob nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts, 108086b4eb42SJerin Jacob NICVF_MAX_RX_FREE_THRESH); 108186b4eb42SJerin Jacob PMD_DRV_LOG(INFO, "nb_pkts=%d rxq_cnt=%d", nb_pkts, rxq_cnt); 108286b4eb42SJerin Jacob while (nb_pkts) { 108386b4eb42SJerin Jacob rte_pktmbuf_free_seg(rx_pkts[--nb_pkts]); 108486b4eb42SJerin Jacob released_pkts++; 108586b4eb42SJerin Jacob } 108686b4eb42SJerin Jacob } 108786b4eb42SJerin Jacob 108821e3fb00SKamil Rytarowski 108921e3fb00SKamil Rytarowski refill_cnt += nicvf_dev_rbdr_refill(dev, 109021e3fb00SKamil Rytarowski nicvf_netdev_qidx(rxq->nic, rxq->queue_id)); 109121e3fb00SKamil Rytarowski 109286b4eb42SJerin Jacob PMD_DRV_LOG(INFO, "free_cnt=%d refill_cnt=%d", 109386b4eb42SJerin Jacob released_pkts, refill_cnt); 109486b4eb42SJerin Jacob } 109586b4eb42SJerin Jacob 1096aa0d976eSJerin Jacob static void 1097aa0d976eSJerin Jacob nicvf_rx_queue_reset(struct nicvf_rxq *rxq) 1098aa0d976eSJerin Jacob { 1099aa0d976eSJerin Jacob 
static inline int
nicvf_vf_start_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
			uint16_t qidx)
{
	struct nicvf_rxq *rxq;
	int ret;

	assert(qidx < MAX_RCV_QUEUES_PER_QS);

	if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
		RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	/* Update rbdr pointer to all rxq */
	rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
	rxq->shared_rbdr = nic->rbdr;

	ret = nicvf_qset_rq_config(nic, qidx, rxq);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure rq VF%d %d %d",
			     nic->vf_id, qidx, ret);
		goto config_rq_error;
	}
	ret = nicvf_qset_cq_config(nic, qidx, rxq);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure cq VF%d %d %d",
			     nic->vf_id, qidx, ret);
		goto config_cq_error;
	}

	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STARTED;
	return 0;

config_cq_error:
	nicvf_qset_cq_reclaim(nic, qidx);
config_rq_error:
	nicvf_qset_rq_reclaim(nic, qidx);
	return ret;
}

static inline int
nicvf_vf_stop_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
		       uint16_t qidx)
{
	struct nicvf_rxq *rxq;
	int ret, other_error;

	if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
		RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;

	ret = nicvf_qset_rq_reclaim(nic, qidx);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim rq VF%d %d %d",
			     nic->vf_id, qidx, ret);

	other_error = ret;
	rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
	nicvf_rx_queue_release_mbufs(dev, rxq);
	nicvf_rx_queue_reset(rxq);

	ret = nicvf_qset_cq_reclaim(nic, qidx);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim cq VF%d %d %d",
			     nic->vf_id, qidx, ret);

	other_error |= ret;
	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STOPPED;
	return other_error;
}
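/*
 * Per-queue start/stop ethdev ops: the global queue index selects the
 * owning (primary or secondary) VF, and Rx queue state changes also
 * refresh the CPI and RSS RETA mappings.
 */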
other_error |= ret; 1172627d4ba2SKamil Rytarowski dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] = 1173627d4ba2SKamil Rytarowski RTE_ETH_QUEUE_STATE_STOPPED; 117486b4eb42SJerin Jacob return other_error; 117586b4eb42SJerin Jacob } 117686b4eb42SJerin Jacob 1177aa0d976eSJerin Jacob static void 1178aa0d976eSJerin Jacob nicvf_dev_rx_queue_release(void *rx_queue) 1179aa0d976eSJerin Jacob { 1180aa0d976eSJerin Jacob PMD_INIT_FUNC_TRACE(); 1181aa0d976eSJerin Jacob 1182394014bcSKamil Rytarowski rte_free(rx_queue); 1183aa0d976eSJerin Jacob } 1184aa0d976eSJerin Jacob 1185aa0d976eSJerin Jacob static int 118686b4eb42SJerin Jacob nicvf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx) 118786b4eb42SJerin Jacob { 118871e76186SKamil Rytarowski struct nicvf *nic = nicvf_pmd_priv(dev); 118986b4eb42SJerin Jacob int ret; 119086b4eb42SJerin Jacob 119171e76186SKamil Rytarowski if (qidx >= MAX_RCV_QUEUES_PER_QS) 119271e76186SKamil Rytarowski nic = nic->snicvf[(qidx / MAX_RCV_QUEUES_PER_QS - 1)]; 119371e76186SKamil Rytarowski 119471e76186SKamil Rytarowski qidx = qidx % MAX_RCV_QUEUES_PER_QS; 119571e76186SKamil Rytarowski 119671e76186SKamil Rytarowski ret = nicvf_vf_start_rx_queue(dev, nic, qidx); 119786b4eb42SJerin Jacob if (ret) 119886b4eb42SJerin Jacob return ret; 119986b4eb42SJerin Jacob 120086b4eb42SJerin Jacob ret = nicvf_configure_cpi(dev); 120186b4eb42SJerin Jacob if (ret) 120286b4eb42SJerin Jacob return ret; 120386b4eb42SJerin Jacob 120486b4eb42SJerin Jacob return nicvf_configure_rss_reta(dev); 120586b4eb42SJerin Jacob } 120686b4eb42SJerin Jacob 120786b4eb42SJerin Jacob static int 120886b4eb42SJerin Jacob nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx) 120986b4eb42SJerin Jacob { 121086b4eb42SJerin Jacob int ret; 1211627d4ba2SKamil Rytarowski struct nicvf *nic = nicvf_pmd_priv(dev); 121286b4eb42SJerin Jacob 1213627d4ba2SKamil Rytarowski if (qidx >= MAX_RCV_QUEUES_PER_QS) 1214627d4ba2SKamil Rytarowski nic = nic->snicvf[(qidx / MAX_RCV_QUEUES_PER_QS - 1)]; 1215627d4ba2SKamil Rytarowski 1216627d4ba2SKamil Rytarowski qidx = qidx % MAX_RCV_QUEUES_PER_QS; 1217627d4ba2SKamil Rytarowski 1218627d4ba2SKamil Rytarowski ret = nicvf_vf_stop_rx_queue(dev, nic, qidx); 121986b4eb42SJerin Jacob ret |= nicvf_configure_cpi(dev); 122086b4eb42SJerin Jacob ret |= nicvf_configure_rss_reta(dev); 122186b4eb42SJerin Jacob return ret; 122286b4eb42SJerin Jacob } 122386b4eb42SJerin Jacob 122486b4eb42SJerin Jacob static int 1225fc1f6c62SJerin Jacob nicvf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx) 1226fc1f6c62SJerin Jacob { 122771e76186SKamil Rytarowski struct nicvf *nic = nicvf_pmd_priv(dev); 122871e76186SKamil Rytarowski 122971e76186SKamil Rytarowski if (qidx >= MAX_SND_QUEUES_PER_QS) 123071e76186SKamil Rytarowski nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)]; 123171e76186SKamil Rytarowski 123271e76186SKamil Rytarowski qidx = qidx % MAX_SND_QUEUES_PER_QS; 123371e76186SKamil Rytarowski 123471e76186SKamil Rytarowski return nicvf_vf_start_tx_queue(dev, nic, qidx); 1235fc1f6c62SJerin Jacob } 1236fc1f6c62SJerin Jacob 1237fc1f6c62SJerin Jacob static int 1238fc1f6c62SJerin Jacob nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx) 1239fc1f6c62SJerin Jacob { 1240627d4ba2SKamil Rytarowski struct nicvf *nic = nicvf_pmd_priv(dev); 1241627d4ba2SKamil Rytarowski 1242627d4ba2SKamil Rytarowski if (qidx >= MAX_SND_QUEUES_PER_QS) 1243627d4ba2SKamil Rytarowski nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)]; 1244627d4ba2SKamil Rytarowski 1245627d4ba2SKamil Rytarowski qidx = qidx % 
MAX_SND_QUEUES_PER_QS; 1246627d4ba2SKamil Rytarowski 1247627d4ba2SKamil Rytarowski return nicvf_vf_stop_tx_queue(dev, nic, qidx); 1248fc1f6c62SJerin Jacob } 1249fc1f6c62SJerin Jacob 12505c7ccb26SJerin Jacob static inline void 12515c7ccb26SJerin Jacob nicvf_rxq_mbuf_setup(struct nicvf_rxq *rxq) 12525c7ccb26SJerin Jacob { 12535c7ccb26SJerin Jacob uintptr_t p; 12545c7ccb26SJerin Jacob struct rte_mbuf mb_def; 1255279d3319SRakesh Kudurumalla struct nicvf *nic = rxq->nic; 12565c7ccb26SJerin Jacob 12575c7ccb26SJerin Jacob RTE_BUILD_BUG_ON(sizeof(union mbuf_initializer) != 8); 125895b097c8SJerin Jacob RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0); 125995b097c8SJerin Jacob RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) - 126095b097c8SJerin Jacob offsetof(struct rte_mbuf, data_off) != 2); 126195b097c8SJerin Jacob RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) - 126295b097c8SJerin Jacob offsetof(struct rte_mbuf, data_off) != 4); 126395b097c8SJerin Jacob RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) - 126495b097c8SJerin Jacob offsetof(struct rte_mbuf, data_off) != 6); 12655e64c812SPavan Nikhilesh RTE_BUILD_BUG_ON(offsetof(struct nicvf_rxq, rxq_fastpath_data_end) - 12665e64c812SPavan Nikhilesh offsetof(struct nicvf_rxq, 12675e64c812SPavan Nikhilesh rxq_fastpath_data_start) > 128); 12685c7ccb26SJerin Jacob mb_def.nb_segs = 1; 1269279d3319SRakesh Kudurumalla mb_def.data_off = RTE_PKTMBUF_HEADROOM + (nic->skip_bytes); 12705c7ccb26SJerin Jacob mb_def.port = rxq->port_id; 12715c7ccb26SJerin Jacob rte_mbuf_refcnt_set(&mb_def, 1); 12725c7ccb26SJerin Jacob 12735c7ccb26SJerin Jacob /* Prevent compiler reordering: rearm_data covers previous fields */ 12745c7ccb26SJerin Jacob rte_compiler_barrier(); 12755c7ccb26SJerin Jacob p = (uintptr_t)&mb_def.rearm_data; 12765c7ccb26SJerin Jacob rxq->mbuf_initializer.value = *(uint64_t *)p; 12775c7ccb26SJerin Jacob } 1278394014bcSKamil Rytarowski 1279fc1f6c62SJerin Jacob static int 1280aa0d976eSJerin Jacob nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, 1281aa0d976eSJerin Jacob uint16_t nb_desc, unsigned int socket_id, 1282aa0d976eSJerin Jacob const struct rte_eth_rxconf *rx_conf, 1283aa0d976eSJerin Jacob struct rte_mempool *mp) 1284aa0d976eSJerin Jacob { 1285aa0d976eSJerin Jacob uint16_t rx_free_thresh; 1286aa0d976eSJerin Jacob struct nicvf_rxq *rxq; 1287aa0d976eSJerin Jacob struct nicvf *nic = nicvf_pmd_priv(dev); 1288a4996bd8SWei Dai uint64_t offloads; 1289279d3319SRakesh Kudurumalla uint32_t buffsz; 1290279d3319SRakesh Kudurumalla struct rte_pktmbuf_pool_private *mbp_priv; 1291aa0d976eSJerin Jacob 1292aa0d976eSJerin Jacob PMD_INIT_FUNC_TRACE(); 1293aa0d976eSJerin Jacob 1294279d3319SRakesh Kudurumalla /* First skip check */ 1295279d3319SRakesh Kudurumalla mbp_priv = rte_mempool_get_priv(mp); 1296279d3319SRakesh Kudurumalla buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM; 1297279d3319SRakesh Kudurumalla if (buffsz < (uint32_t)(nic->skip_bytes)) { 1298279d3319SRakesh Kudurumalla PMD_INIT_LOG(ERR, "First skip is more than configured buffer size"); 1299279d3319SRakesh Kudurumalla return -EINVAL; 1300279d3319SRakesh Kudurumalla } 1301279d3319SRakesh Kudurumalla 130221e3fb00SKamil Rytarowski if (qidx >= MAX_RCV_QUEUES_PER_QS) 130321e3fb00SKamil Rytarowski nic = nic->snicvf[qidx / MAX_RCV_QUEUES_PER_QS - 1]; 130421e3fb00SKamil Rytarowski 130521e3fb00SKamil Rytarowski qidx = qidx % MAX_RCV_QUEUES_PER_QS; 130621e3fb00SKamil Rytarowski 1307aa0d976eSJerin Jacob /* Socket id check */ 1308aa0d976eSJerin Jacob if (socket_id != 
(unsigned int)SOCKET_ID_ANY && socket_id != nic->node) 1309aa0d976eSJerin Jacob PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d", 1310aa0d976eSJerin Jacob socket_id, nic->node); 1311aa0d976eSJerin Jacob 1312394014bcSKamil Rytarowski /* Mempool memory must be contiguous, i.e. it must consist of a single memory segment */ 1313aa0d976eSJerin Jacob if (mp->nb_mem_chunks != 1) { 1314394014bcSKamil Rytarowski PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages"); 1315394014bcSKamil Rytarowski return -EINVAL; 1316394014bcSKamil Rytarowski } 1317394014bcSKamil Rytarowski 1318394014bcSKamil Rytarowski /* Mempool memory must be physically contiguous */ 13194143b122SAndrew Rybchenko if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG) { 1320394014bcSKamil Rytarowski PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous"); 1321aa0d976eSJerin Jacob return -EINVAL; 1322aa0d976eSJerin Jacob } 1323aa0d976eSJerin Jacob 1324aa0d976eSJerin Jacob /* Rx deferred start is not supported */ 1325aa0d976eSJerin Jacob if (rx_conf->rx_deferred_start) { 1326aa0d976eSJerin Jacob PMD_INIT_LOG(ERR, "Rx deferred start not supported"); 1327aa0d976eSJerin Jacob return -EINVAL; 1328aa0d976eSJerin Jacob } 1329aa0d976eSJerin Jacob 1330aa0d976eSJerin Jacob /* Roundup nb_desc to available qsize and validate max number of desc */ 1331aa0d976eSJerin Jacob nb_desc = nicvf_qsize_cq_roundup(nb_desc); 1332aa0d976eSJerin Jacob if (nb_desc == 0) { 1333aa0d976eSJerin Jacob PMD_INIT_LOG(ERR, "Value nb_desc beyond available hw cq qsize"); 1334aa0d976eSJerin Jacob return -EINVAL; 1335aa0d976eSJerin Jacob } 1336aa0d976eSJerin Jacob 1337279d3319SRakesh Kudurumalla 1338aa0d976eSJerin Jacob /* Check rx_free_thresh upper bound */ 1339aa0d976eSJerin Jacob rx_free_thresh = (uint16_t)((rx_conf->rx_free_thresh) ? 
1340aa0d976eSJerin Jacob rx_conf->rx_free_thresh : 1341aa0d976eSJerin Jacob NICVF_DEFAULT_RX_FREE_THRESH); 1342aa0d976eSJerin Jacob if (rx_free_thresh > NICVF_MAX_RX_FREE_THRESH || 1343aa0d976eSJerin Jacob rx_free_thresh >= nb_desc * .75) { 1344aa0d976eSJerin Jacob PMD_INIT_LOG(ERR, "rx_free_thresh greater than expected %d", 1345aa0d976eSJerin Jacob rx_free_thresh); 1346aa0d976eSJerin Jacob return -EINVAL; 1347aa0d976eSJerin Jacob } 1348aa0d976eSJerin Jacob 1349aa0d976eSJerin Jacob /* Free memory prior to re-allocation if needed */ 135021e3fb00SKamil Rytarowski if (dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) { 1351aa0d976eSJerin Jacob PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d", 135221e3fb00SKamil Rytarowski nicvf_netdev_qidx(nic, qidx)); 135321e3fb00SKamil Rytarowski nicvf_dev_rx_queue_release( 135421e3fb00SKamil Rytarowski dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)]); 135521e3fb00SKamil Rytarowski dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL; 1356aa0d976eSJerin Jacob } 1357aa0d976eSJerin Jacob 1358aa0d976eSJerin Jacob /* Allocate rxq memory */ 1359aa0d976eSJerin Jacob rxq = rte_zmalloc_socket("ethdev rx queue", sizeof(struct nicvf_rxq), 1360aa0d976eSJerin Jacob RTE_CACHE_LINE_SIZE, nic->node); 1361aa0d976eSJerin Jacob if (rxq == NULL) { 136221e3fb00SKamil Rytarowski PMD_INIT_LOG(ERR, "Failed to allocate rxq=%d", 136321e3fb00SKamil Rytarowski nicvf_netdev_qidx(nic, qidx)); 1364aa0d976eSJerin Jacob return -ENOMEM; 1365aa0d976eSJerin Jacob } 1366aa0d976eSJerin Jacob 1367aa0d976eSJerin Jacob rxq->nic = nic; 1368aa0d976eSJerin Jacob rxq->pool = mp; 1369aa0d976eSJerin Jacob rxq->queue_id = qidx; 1370aa0d976eSJerin Jacob rxq->port_id = dev->data->port_id; 1371aa0d976eSJerin Jacob rxq->rx_free_thresh = rx_free_thresh; 1372aa0d976eSJerin Jacob rxq->rx_drop_en = rx_conf->rx_drop_en; 1373aa0d976eSJerin Jacob rxq->cq_status = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_STATUS; 1374aa0d976eSJerin Jacob rxq->cq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_DOOR; 1375aa0d976eSJerin Jacob rxq->precharge_cnt = 0; 1376e2c519b3SJerin Jacob 1377e2c519b3SJerin Jacob if (nicvf_hw_cap(nic) & NICVF_CAP_CQE_RX2) 1378e2c519b3SJerin Jacob rxq->rbptr_offset = NICVF_CQE_RX2_RBPTR_WORD; 1379e2c519b3SJerin Jacob else 1380aa0d976eSJerin Jacob rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD; 1381aa0d976eSJerin Jacob 13825c7ccb26SJerin Jacob nicvf_rxq_mbuf_setup(rxq); 1383e2c519b3SJerin Jacob 1384aa0d976eSJerin Jacob /* Alloc completion queue */ 13856d3cbd56SKamil Rytarowski if (nicvf_qset_cq_alloc(dev, nic, rxq, rxq->queue_id, nb_desc)) { 1386aa0d976eSJerin Jacob PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id); 1387aa0d976eSJerin Jacob nicvf_dev_rx_queue_release(rxq); 1388aa0d976eSJerin Jacob return -ENOMEM; 1389aa0d976eSJerin Jacob } 1390aa0d976eSJerin Jacob 1391aa0d976eSJerin Jacob nicvf_rx_queue_reset(rxq); 1392aa0d976eSJerin Jacob 1393a4996bd8SWei Dai offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads; 1394c97da2cbSMaciej Czekaj PMD_INIT_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d)" 1395c97da2cbSMaciej Czekaj " phy=0x%" PRIx64 " offloads=0x%" PRIx64, 139621e3fb00SKamil Rytarowski nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc, 1397a4996bd8SWei Dai rte_mempool_avail_count(mp), rxq->phys, offloads); 1398aa0d976eSJerin Jacob 139921e3fb00SKamil Rytarowski dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq; 140021e3fb00SKamil Rytarowski dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] = 140121e3fb00SKamil Rytarowski 
RTE_ETH_QUEUE_STATE_STOPPED; 1402aa0d976eSJerin Jacob return 0; 1403aa0d976eSJerin Jacob } 1404aa0d976eSJerin Jacob 1405bdad90d1SIvan Ilchenko static int 1406dcd7b1e1SJerin Jacob nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 1407dcd7b1e1SJerin Jacob { 1408dcd7b1e1SJerin Jacob struct nicvf *nic = nicvf_pmd_priv(dev); 1409c0802544SFerruh Yigit struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 1410dcd7b1e1SJerin Jacob 1411dcd7b1e1SJerin Jacob PMD_INIT_FUNC_TRACE(); 1412dcd7b1e1SJerin Jacob 1413ba2d05abSJerin Jacob /* Autonegotiation may be disabled */ 1414ba2d05abSJerin Jacob dev_info->speed_capa = ETH_LINK_SPEED_FIXED; 1415ba2d05abSJerin Jacob dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M | 1416ba2d05abSJerin Jacob ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G; 1417ba2d05abSJerin Jacob if (nicvf_hw_version(nic) != PCI_SUB_DEVICE_ID_CN81XX_NICVF) 1418ba2d05abSJerin Jacob dev_info->speed_capa |= ETH_LINK_SPEED_40G; 1419ba2d05abSJerin Jacob 142035b2d13fSOlivier Matz dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU; 142135b2d13fSOlivier Matz dev_info->max_rx_pktlen = NIC_HW_MAX_MTU + RTE_ETHER_HDR_LEN; 142221e3fb00SKamil Rytarowski dev_info->max_rx_queues = 142321e3fb00SKamil Rytarowski (uint16_t)MAX_RCV_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1); 142421e3fb00SKamil Rytarowski dev_info->max_tx_queues = 142521e3fb00SKamil Rytarowski (uint16_t)MAX_SND_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1); 1426dcd7b1e1SJerin Jacob dev_info->max_mac_addrs = 1; 1427eac901ceSJan Blunck dev_info->max_vfs = pci_dev->max_vfs; 1428dcd7b1e1SJerin Jacob 1429c97da2cbSMaciej Czekaj dev_info->rx_offload_capa = NICVF_RX_OFFLOAD_CAPA; 1430c97da2cbSMaciej Czekaj dev_info->tx_offload_capa = NICVF_TX_OFFLOAD_CAPA; 1431c97da2cbSMaciej Czekaj dev_info->rx_queue_offload_capa = NICVF_RX_OFFLOAD_CAPA; 1432c97da2cbSMaciej Czekaj dev_info->tx_queue_offload_capa = NICVF_TX_OFFLOAD_CAPA; 1433dcd7b1e1SJerin Jacob 1434dcd7b1e1SJerin Jacob dev_info->reta_size = nic->rss_info.rss_size; 1435dcd7b1e1SJerin Jacob dev_info->hash_key_size = RSS_HASH_KEY_BYTE_SIZE; 1436dcd7b1e1SJerin Jacob dev_info->flow_type_rss_offloads = NICVF_RSS_OFFLOAD_PASS1; 1437dcd7b1e1SJerin Jacob if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) 1438dcd7b1e1SJerin Jacob dev_info->flow_type_rss_offloads |= NICVF_RSS_OFFLOAD_TUNNEL; 1439dcd7b1e1SJerin Jacob 1440dcd7b1e1SJerin Jacob dev_info->default_rxconf = (struct rte_eth_rxconf) { 1441dcd7b1e1SJerin Jacob .rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH, 1442dcd7b1e1SJerin Jacob .rx_drop_en = 0, 1443dcd7b1e1SJerin Jacob }; 1444dcd7b1e1SJerin Jacob 1445dcd7b1e1SJerin Jacob dev_info->default_txconf = (struct rte_eth_txconf) { 1446dcd7b1e1SJerin Jacob .tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH, 1447c97da2cbSMaciej Czekaj .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE | 1448c97da2cbSMaciej Czekaj DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | 1449c97da2cbSMaciej Czekaj DEV_TX_OFFLOAD_UDP_CKSUM | 1450c97da2cbSMaciej Czekaj DEV_TX_OFFLOAD_TCP_CKSUM, 1451dcd7b1e1SJerin Jacob }; 1452bdad90d1SIvan Ilchenko 1453bdad90d1SIvan Ilchenko return 0; 1454dcd7b1e1SJerin Jacob } 1455dcd7b1e1SJerin Jacob 1456df6e0a06SSantosh Shukla static nicvf_iova_addr_t 1457394014bcSKamil Rytarowski rbdr_rte_mempool_get(void *dev, void *opaque) 14587413feeeSJerin Jacob { 14597413feeeSJerin Jacob uint16_t qidx; 14607413feeeSJerin Jacob uintptr_t mbuf; 14617413feeeSJerin Jacob struct nicvf_rxq *rxq; 1462394014bcSKamil Rytarowski struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)dev; 146321e3fb00SKamil Rytarowski struct nicvf *nic 
= (struct nicvf *)opaque; 146421e3fb00SKamil Rytarowski uint16_t rx_start, rx_end; 14657413feeeSJerin Jacob 146621e3fb00SKamil Rytarowski /* Get queue ranges for this VF */ 146721e3fb00SKamil Rytarowski nicvf_rx_range(eth_dev, nic, &rx_start, &rx_end); 146821e3fb00SKamil Rytarowski 146921e3fb00SKamil Rytarowski for (qidx = rx_start; qidx <= rx_end; qidx++) { 1470394014bcSKamil Rytarowski rxq = eth_dev->data->rx_queues[qidx]; 14717413feeeSJerin Jacob /* Maintain equal buffer count across all pools */ 14727413feeeSJerin Jacob if (rxq->precharge_cnt >= rxq->qlen_mask) 14737413feeeSJerin Jacob continue; 14747413feeeSJerin Jacob rxq->precharge_cnt++; 14757413feeeSJerin Jacob mbuf = (uintptr_t)rte_pktmbuf_alloc(rxq->pool); 14767413feeeSJerin Jacob if (mbuf) 14777413feeeSJerin Jacob return nicvf_mbuff_virt2phy(mbuf, rxq->mbuf_phys_off); 14787413feeeSJerin Jacob } 14797413feeeSJerin Jacob return 0; 14807413feeeSJerin Jacob } 14817413feeeSJerin Jacob 14827413feeeSJerin Jacob static int 148371e76186SKamil Rytarowski nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz) 14847413feeeSJerin Jacob { 14857413feeeSJerin Jacob int ret; 148634c2e702SJerin Jacob uint16_t qidx, data_off; 14877413feeeSJerin Jacob uint32_t total_rxq_desc, nb_rbdr_desc, exp_buffs; 14887413feeeSJerin Jacob uint64_t mbuf_phys_off = 0; 14897413feeeSJerin Jacob struct nicvf_rxq *rxq; 14907413feeeSJerin Jacob struct rte_mbuf *mbuf; 149171e76186SKamil Rytarowski uint16_t rx_start, rx_end; 149271e76186SKamil Rytarowski uint16_t tx_start, tx_end; 1493d3bf2564SRakesh Kudurumalla int mask; 14947413feeeSJerin Jacob 14957413feeeSJerin Jacob PMD_INIT_FUNC_TRACE(); 14967413feeeSJerin Jacob 14977413feeeSJerin Jacob /* Userspace process exited without proper shutdown in last run */ 14987413feeeSJerin Jacob if (nicvf_qset_rbdr_active(nic, 0)) 149971e76186SKamil Rytarowski nicvf_vf_stop(dev, nic, false); 150071e76186SKamil Rytarowski 150171e76186SKamil Rytarowski /* Get queue ranges for this VF */ 150271e76186SKamil Rytarowski nicvf_rx_range(dev, nic, &rx_start, &rx_end); 15037413feeeSJerin Jacob 15047413feeeSJerin Jacob /* 15057413feeeSJerin Jacob * The Thunderx nicvf PMD supports more than one pool per port only when 15067413feeeSJerin Jacob * 1) The data payload size is the same across all the pools on a given port 15077413feeeSJerin Jacob * AND 15087413feeeSJerin Jacob * 2) All mbufs in the pools come from the same hugepage 15097413feeeSJerin Jacob * AND 15107413feeeSJerin Jacob * 3) The mbuf metadata size is the same across all the pools on a given port 15117413feeeSJerin Jacob * 15127413feeeSJerin Jacob * This is to support existing applications that use multiple pools per port. 15137413feeeSJerin Jacob * However, the purpose of using multiple pools for QoS is not addressed. 
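 *
 * As an illustrative sketch (not taken from this driver), two pools
 * created with identical data room and private area sizes, e.g.
 *   rte_pktmbuf_pool_create("p0", 8192, 256, 0, 2176, node);
 *   rte_pktmbuf_pool_create("p1", 8192, 256, 0, 2176, node);
 * satisfy constraints 1) and 3); constraint 2) additionally requires
 * both pools to be carved out of the same hugepage mapping.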
15147413feeeSJerin Jacob * 15157413feeeSJerin Jacob */ 15167413feeeSJerin Jacob 15177413feeeSJerin Jacob /* Validate mempool attributes */ 151871e76186SKamil Rytarowski for (qidx = rx_start; qidx <= rx_end; qidx++) { 15197413feeeSJerin Jacob rxq = dev->data->rx_queues[qidx]; 15207413feeeSJerin Jacob rxq->mbuf_phys_off = nicvf_mempool_phy_offset(rxq->pool); 15217413feeeSJerin Jacob mbuf = rte_pktmbuf_alloc(rxq->pool); 15227413feeeSJerin Jacob if (mbuf == NULL) { 152371e76186SKamil Rytarowski PMD_INIT_LOG(ERR, "Failed to allocate mbuf VF%d qid=%d " 152471e76186SKamil Rytarowski "pool=%s", 152571e76186SKamil Rytarowski nic->vf_id, qidx, rxq->pool->name); 15267413feeeSJerin Jacob return -ENOMEM; 15277413feeeSJerin Jacob } 152834c2e702SJerin Jacob data_off = nicvf_mbuff_meta_length(mbuf); 152934c2e702SJerin Jacob data_off += RTE_PKTMBUF_HEADROOM; 15307413feeeSJerin Jacob rte_pktmbuf_free(mbuf); 15317413feeeSJerin Jacob 153234c2e702SJerin Jacob if (data_off % RTE_CACHE_LINE_SIZE) { 153334c2e702SJerin Jacob PMD_INIT_LOG(ERR, "%s: unaligned data_off=%d delta=%d", 153434c2e702SJerin Jacob rxq->pool->name, data_off, 153534c2e702SJerin Jacob data_off % RTE_CACHE_LINE_SIZE); 153634c2e702SJerin Jacob return -EINVAL; 153734c2e702SJerin Jacob } 153834c2e702SJerin Jacob rxq->mbuf_phys_off -= data_off; 1539279d3319SRakesh Kudurumalla rxq->mbuf_phys_off -= nic->skip_bytes; 154034c2e702SJerin Jacob 15417413feeeSJerin Jacob if (mbuf_phys_off == 0) 15427413feeeSJerin Jacob mbuf_phys_off = rxq->mbuf_phys_off; 15437413feeeSJerin Jacob if (mbuf_phys_off != rxq->mbuf_phys_off) { 154471e76186SKamil Rytarowski PMD_INIT_LOG(ERR, "pool params are not the same, %s VF%d %" 154571e76186SKamil Rytarowski PRIx64, rxq->pool->name, nic->vf_id, 154671e76186SKamil Rytarowski mbuf_phys_off); 15477413feeeSJerin Jacob return -EINVAL; 15487413feeeSJerin Jacob } 15497413feeeSJerin Jacob } 15507413feeeSJerin Jacob 15517413feeeSJerin Jacob /* Check the level of buffers in the pool */ 15527413feeeSJerin Jacob total_rxq_desc = 0; 155371e76186SKamil Rytarowski for (qidx = rx_start; qidx <= rx_end; qidx++) { 15547413feeeSJerin Jacob rxq = dev->data->rx_queues[qidx]; 15557413feeeSJerin Jacob /* Count the total number of rxq descs */ 15567413feeeSJerin Jacob total_rxq_desc += rxq->qlen_mask + 1; 15577413feeeSJerin Jacob exp_buffs = RTE_MEMPOOL_CACHE_MAX_SIZE + rxq->rx_free_thresh; 15586d3cbd56SKamil Rytarowski exp_buffs *= dev->data->nb_rx_queues; 1559a0fd91ceSBruce Richardson if (rte_mempool_avail_count(rxq->pool) < exp_buffs) { 15607413feeeSJerin Jacob PMD_INIT_LOG(ERR, "Buff shortage in pool=%s (%d/%d)", 15617413feeeSJerin Jacob rxq->pool->name, 1562a0fd91ceSBruce Richardson rte_mempool_avail_count(rxq->pool), 15637413feeeSJerin Jacob exp_buffs); 15647413feeeSJerin Jacob return -ENOENT; 15657413feeeSJerin Jacob } 15667413feeeSJerin Jacob } 15677413feeeSJerin Jacob 15687413feeeSJerin Jacob /* Check RBDR desc overflow */ 15697413feeeSJerin Jacob ret = nicvf_qsize_rbdr_roundup(total_rxq_desc); 15707413feeeSJerin Jacob if (ret == 0) { 157171e76186SKamil Rytarowski PMD_INIT_LOG(ERR, "Reached RBDR desc limit, reduce nr desc " 157271e76186SKamil Rytarowski "VF%d", nic->vf_id); 15737413feeeSJerin Jacob return -ENOMEM; 15747413feeeSJerin Jacob } 15757413feeeSJerin Jacob 15767413feeeSJerin Jacob /* Enable qset */ 15777413feeeSJerin Jacob ret = nicvf_qset_config(nic); 15787413feeeSJerin Jacob if (ret) { 157971e76186SKamil Rytarowski PMD_INIT_LOG(ERR, "Failed to enable qset %d VF%d", ret, 158071e76186SKamil Rytarowski nic->vf_id); 15817413feeeSJerin Jacob return 
ret; 15827413feeeSJerin Jacob } 15837413feeeSJerin Jacob 15847413feeeSJerin Jacob /* Allocate RBDR and RBDR ring desc */ 15857413feeeSJerin Jacob nb_rbdr_desc = nicvf_qsize_rbdr_roundup(total_rxq_desc); 15866d3cbd56SKamil Rytarowski ret = nicvf_qset_rbdr_alloc(dev, nic, nb_rbdr_desc, rbdrsz); 15877413feeeSJerin Jacob if (ret) { 158871e76186SKamil Rytarowski PMD_INIT_LOG(ERR, "Failed to allocate memory for rbdr alloc " 158971e76186SKamil Rytarowski "VF%d", nic->vf_id); 15907413feeeSJerin Jacob goto qset_reclaim; 15917413feeeSJerin Jacob } 15927413feeeSJerin Jacob 15937413feeeSJerin Jacob /* Enable and configure RBDR registers */ 15947413feeeSJerin Jacob ret = nicvf_qset_rbdr_config(nic, 0); 15957413feeeSJerin Jacob if (ret) { 159671e76186SKamil Rytarowski PMD_INIT_LOG(ERR, "Failed to configure rbdr %d VF%d", ret, 159771e76186SKamil Rytarowski nic->vf_id); 15987413feeeSJerin Jacob goto qset_rbdr_free; 15997413feeeSJerin Jacob } 16007413feeeSJerin Jacob 16017413feeeSJerin Jacob /* Fill rte_mempool buffers in RBDR pool and precharge it */ 1602394014bcSKamil Rytarowski ret = nicvf_qset_rbdr_precharge(dev, nic, 0, rbdr_rte_mempool_get, 1603394014bcSKamil Rytarowski total_rxq_desc); 16047413feeeSJerin Jacob if (ret) { 160571e76186SKamil Rytarowski PMD_INIT_LOG(ERR, "Failed to fill rbdr %d VF%d", ret, 160671e76186SKamil Rytarowski nic->vf_id); 16077413feeeSJerin Jacob goto qset_rbdr_reclaim; 16087413feeeSJerin Jacob } 16097413feeeSJerin Jacob 161071e76186SKamil Rytarowski PMD_DRV_LOG(INFO, "Filled %d out of %d entries in RBDR VF%d", 161171e76186SKamil Rytarowski nic->rbdr->tail, nb_rbdr_desc, nic->vf_id); 16127413feeeSJerin Jacob 16137413feeeSJerin Jacob /* Configure VLAN Strip */ 1614d3bf2564SRakesh Kudurumalla mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | 1615d3bf2564SRakesh Kudurumalla ETH_VLAN_EXTEND_MASK; 1616d3bf2564SRakesh Kudurumalla ret = nicvf_vlan_offload_config(dev, mask); 16177413feeeSJerin Jacob 16188a946db3SJerin Jacob /* Based on the packet type (IPv4 or IPv6), the nicvf HW aligns L3 data 16198a946db3SJerin Jacob * to the 64bit memory address. 16208a946db3SJerin Jacob * The alignment creates a hole in the mbuf (between the end of headroom and 16218a946db3SJerin Jacob * packet data start). The new revision of the HW provides an option to 16228a946db3SJerin Jacob * disable the L3 alignment feature and make the mbuf layout look 16238a946db3SJerin Jacob * more like other NICs. 
For better application compatibility, the L3 16248a946db3SJerin Jacob * alignment feature is disabled on the hardware revisions that support it. 16258a946db3SJerin Jacob */ 16268a946db3SJerin Jacob nicvf_apad_config(nic, false); 16278a946db3SJerin Jacob 162871e76186SKamil Rytarowski /* Get queue ranges for this VF */ 162971e76186SKamil Rytarowski nicvf_tx_range(dev, nic, &tx_start, &tx_end); 163071e76186SKamil Rytarowski 16317413feeeSJerin Jacob /* Configure TX queues */ 163271e76186SKamil Rytarowski for (qidx = tx_start; qidx <= tx_end; qidx++) { 163371e76186SKamil Rytarowski ret = nicvf_vf_start_tx_queue(dev, nic, 163471e76186SKamil Rytarowski qidx % MAX_SND_QUEUES_PER_QS); 16357413feeeSJerin Jacob if (ret) 16367413feeeSJerin Jacob goto start_txq_error; 16377413feeeSJerin Jacob } 16387413feeeSJerin Jacob 163971e76186SKamil Rytarowski /* Configure RX queues */ 164071e76186SKamil Rytarowski for (qidx = rx_start; qidx <= rx_end; qidx++) { 164171e76186SKamil Rytarowski ret = nicvf_vf_start_rx_queue(dev, nic, 164271e76186SKamil Rytarowski qidx % MAX_RCV_QUEUES_PER_QS); 164371e76186SKamil Rytarowski if (ret) 164471e76186SKamil Rytarowski goto start_rxq_error; 164571e76186SKamil Rytarowski } 164671e76186SKamil Rytarowski 164771e76186SKamil Rytarowski if (!nic->sqs_mode) { 16487413feeeSJerin Jacob /* Configure CPI algorithm */ 16497413feeeSJerin Jacob ret = nicvf_configure_cpi(dev); 16507413feeeSJerin Jacob if (ret) 16517413feeeSJerin Jacob goto start_txq_error; 16527413feeeSJerin Jacob 165371e76186SKamil Rytarowski ret = nicvf_mbox_get_rss_size(nic); 165471e76186SKamil Rytarowski if (ret) { 165571e76186SKamil Rytarowski PMD_INIT_LOG(ERR, "Failed to get rss table size"); 165671e76186SKamil Rytarowski goto qset_rss_error; 165771e76186SKamil Rytarowski } 165871e76186SKamil Rytarowski 16597413feeeSJerin Jacob /* Configure RSS */ 16607413feeeSJerin Jacob ret = nicvf_configure_rss(dev); 16617413feeeSJerin Jacob if (ret) 16627413feeeSJerin Jacob goto qset_rss_error; 166371e76186SKamil Rytarowski } 166471e76186SKamil Rytarowski 166571e76186SKamil Rytarowski /* Done; Let PF make the BGX's RX and TX switches to ON position */ 166671e76186SKamil Rytarowski nicvf_mbox_cfg_done(nic); 166771e76186SKamil Rytarowski return 0; 166871e76186SKamil Rytarowski 166971e76186SKamil Rytarowski qset_rss_error: 167071e76186SKamil Rytarowski nicvf_rss_term(nic); 167171e76186SKamil Rytarowski start_rxq_error: 167271e76186SKamil Rytarowski for (qidx = rx_start; qidx <= rx_end; qidx++) 167371e76186SKamil Rytarowski nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS); 167471e76186SKamil Rytarowski start_txq_error: 167571e76186SKamil Rytarowski for (qidx = tx_start; qidx <= tx_end; qidx++) 167671e76186SKamil Rytarowski nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS); 167771e76186SKamil Rytarowski qset_rbdr_reclaim: 167871e76186SKamil Rytarowski nicvf_qset_rbdr_reclaim(nic, 0); 167971e76186SKamil Rytarowski nicvf_rbdr_release_mbufs(dev, nic); 168071e76186SKamil Rytarowski qset_rbdr_free: 168171e76186SKamil Rytarowski if (nic->rbdr) { 168271e76186SKamil Rytarowski rte_free(nic->rbdr); 168371e76186SKamil Rytarowski nic->rbdr = NULL; 168471e76186SKamil Rytarowski } 168571e76186SKamil Rytarowski qset_reclaim: 168671e76186SKamil Rytarowski nicvf_qset_reclaim(nic); 168771e76186SKamil Rytarowski return ret; 168871e76186SKamil Rytarowski } 168971e76186SKamil Rytarowski 169071e76186SKamil Rytarowski static int 169171e76186SKamil Rytarowski nicvf_dev_start(struct rte_eth_dev *dev) 169271e76186SKamil Rytarowski { 
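/*
 * Illustrative note (not part of the original driver): every RX mempool
 * on the port must use one common buffer size, and that size minus the
 * headroom must be a multiple of 128, since one RBDR per queue set feeds
 * all receive queues. A conforming pool could be created as
 *
 *   struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool", 8192,
 *           256, 0, 2048 + RTE_PKTMBUF_HEADROOM, rte_socket_id());
 *
 * which makes buffsz equal to 2048 in the validation loop below.
 */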
169371e76186SKamil Rytarowski uint16_t qidx; 169471e76186SKamil Rytarowski int ret; 169571e76186SKamil Rytarowski size_t i; 169671e76186SKamil Rytarowski struct nicvf *nic = nicvf_pmd_priv(dev); 169771e76186SKamil Rytarowski struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode; 169871e76186SKamil Rytarowski uint16_t mtu; 169971e76186SKamil Rytarowski uint32_t buffsz = 0, rbdrsz = 0; 170071e76186SKamil Rytarowski struct rte_pktmbuf_pool_private *mbp_priv; 170171e76186SKamil Rytarowski struct nicvf_rxq *rxq; 170271e76186SKamil Rytarowski 170371e76186SKamil Rytarowski PMD_INIT_FUNC_TRACE(); 170471e76186SKamil Rytarowski 170571e76186SKamil Rytarowski /* This function must be called for a primary device */ 170671e76186SKamil Rytarowski assert_primary(nic); 170771e76186SKamil Rytarowski 170871e76186SKamil Rytarowski /* Validate RBDR buff size */ 170971e76186SKamil Rytarowski for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) { 171071e76186SKamil Rytarowski rxq = dev->data->rx_queues[qidx]; 171171e76186SKamil Rytarowski mbp_priv = rte_mempool_get_priv(rxq->pool); 171271e76186SKamil Rytarowski buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM; 171371e76186SKamil Rytarowski if (buffsz % 128) { 171471e76186SKamil Rytarowski PMD_INIT_LOG(ERR, "rxbuf size must be a multiple of 128"); 171571e76186SKamil Rytarowski return -EINVAL; 171671e76186SKamil Rytarowski } 171771e76186SKamil Rytarowski if (rbdrsz == 0) 171871e76186SKamil Rytarowski rbdrsz = buffsz; 171971e76186SKamil Rytarowski if (rbdrsz != buffsz) { 172071e76186SKamil Rytarowski PMD_INIT_LOG(ERR, "buffsz not same, qidx=%d (%d/%d)", 172171e76186SKamil Rytarowski qidx, rbdrsz, buffsz); 172271e76186SKamil Rytarowski return -EINVAL; 172371e76186SKamil Rytarowski } 172471e76186SKamil Rytarowski } 17257413feeeSJerin Jacob 17267413feeeSJerin Jacob /* Configure loopback */ 17277413feeeSJerin Jacob ret = nicvf_loopback_config(nic, dev->data->dev_conf.lpbk_mode); 17287413feeeSJerin Jacob if (ret) { 17297413feeeSJerin Jacob PMD_INIT_LOG(ERR, "Failed to configure loopback %d", ret); 173071e76186SKamil Rytarowski return ret; 17317413feeeSJerin Jacob } 17327413feeeSJerin Jacob 17337413feeeSJerin Jacob /* Reset all statistics counters attached to this port */ 17347413feeeSJerin Jacob ret = nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, 0xFFFF, 0xFFFF); 17357413feeeSJerin Jacob if (ret) { 17367413feeeSJerin Jacob PMD_INIT_LOG(ERR, "Failed to reset stat counters %d", ret); 173771e76186SKamil Rytarowski return ret; 17387413feeeSJerin Jacob } 17397413feeeSJerin Jacob 17407413feeeSJerin Jacob /* Setup scatter mode if needed by jumbo */ 17417413feeeSJerin Jacob if (dev->data->dev_conf.rxmode.max_rx_pkt_len + 17427413feeeSJerin Jacob 2 * VLAN_TAG_SIZE > buffsz) 17437413feeeSJerin Jacob dev->data->scattered_rx = 1; 1744c97da2cbSMaciej Czekaj if ((rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) != 0) 17457413feeeSJerin Jacob dev->data->scattered_rx = 1; 17467413feeeSJerin Jacob 17477413feeeSJerin Jacob /* Setup MTU based on max_rx_pkt_len or default */ 1748c97da2cbSMaciej Czekaj mtu = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ? 
17497413feeeSJerin Jacob dev->data->dev_conf.rxmode.max_rx_pkt_len 175035b2d13fSOlivier Matz - RTE_ETHER_HDR_LEN : RTE_ETHER_MTU; 17517413feeeSJerin Jacob 17527413feeeSJerin Jacob if (nicvf_dev_set_mtu(dev, mtu)) { 17537413feeeSJerin Jacob PMD_INIT_LOG(ERR, "Failed to set default mtu size"); 17547413feeeSJerin Jacob return -EBUSY; 17557413feeeSJerin Jacob } 17567413feeeSJerin Jacob 175771e76186SKamil Rytarowski ret = nicvf_vf_start(dev, nic, rbdrsz); 175871e76186SKamil Rytarowski if (ret != 0) 175971e76186SKamil Rytarowski return ret; 176071e76186SKamil Rytarowski 176171e76186SKamil Rytarowski for (i = 0; i < nic->sqs_count; i++) { 176271e76186SKamil Rytarowski assert(nic->snicvf[i]); 176371e76186SKamil Rytarowski 176471e76186SKamil Rytarowski ret = nicvf_vf_start(dev, nic->snicvf[i], rbdrsz); 176571e76186SKamil Rytarowski if (ret != 0) 176671e76186SKamil Rytarowski return ret; 176771e76186SKamil Rytarowski } 176871e76186SKamil Rytarowski 17695e64c812SPavan Nikhilesh /* Configure callbacks based on offloads */ 17707413feeeSJerin Jacob nicvf_set_tx_function(dev); 17717413feeeSJerin Jacob nicvf_set_rx_function(dev); 17727413feeeSJerin Jacob 17737413feeeSJerin Jacob return 0; 17747413feeeSJerin Jacob } 17757413feeeSJerin Jacob 17767413feeeSJerin Jacob static void 1777627d4ba2SKamil Rytarowski nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup) 17787413feeeSJerin Jacob { 1779627d4ba2SKamil Rytarowski size_t i; 17807413feeeSJerin Jacob int ret; 17817413feeeSJerin Jacob struct nicvf *nic = nicvf_pmd_priv(dev); 17827413feeeSJerin Jacob 17837413feeeSJerin Jacob PMD_INIT_FUNC_TRACE(); 17847413feeeSJerin Jacob 1785627d4ba2SKamil Rytarowski /* Teardown secondary vf first */ 1786627d4ba2SKamil Rytarowski for (i = 0; i < nic->sqs_count; i++) { 1787627d4ba2SKamil Rytarowski if (!nic->snicvf[i]) 1788627d4ba2SKamil Rytarowski continue; 1789627d4ba2SKamil Rytarowski 1790627d4ba2SKamil Rytarowski nicvf_vf_stop(dev, nic->snicvf[i], cleanup); 1791627d4ba2SKamil Rytarowski } 1792627d4ba2SKamil Rytarowski 1793627d4ba2SKamil Rytarowski /* Stop the primary VF now */ 1794627d4ba2SKamil Rytarowski nicvf_vf_stop(dev, nic, cleanup); 17957413feeeSJerin Jacob 17967413feeeSJerin Jacob /* Disable loopback */ 17977413feeeSJerin Jacob ret = nicvf_loopback_config(nic, 0); 17987413feeeSJerin Jacob if (ret) 17997413feeeSJerin Jacob PMD_INIT_LOG(ERR, "Failed to disable loopback %d", ret); 18007413feeeSJerin Jacob 1801627d4ba2SKamil Rytarowski /* Reclaim CPI configuration */ 1802627d4ba2SKamil Rytarowski ret = nicvf_mbox_config_cpi(nic, 0); 1803627d4ba2SKamil Rytarowski if (ret) 1804627d4ba2SKamil Rytarowski PMD_INIT_LOG(ERR, "Failed to reclaim CPI config %d", ret); 1805627d4ba2SKamil Rytarowski } 1806627d4ba2SKamil Rytarowski 1807627d4ba2SKamil Rytarowski static void 1808627d4ba2SKamil Rytarowski nicvf_dev_stop(struct rte_eth_dev *dev) 1809627d4ba2SKamil Rytarowski { 1810627d4ba2SKamil Rytarowski PMD_INIT_FUNC_TRACE(); 1811627d4ba2SKamil Rytarowski 1812627d4ba2SKamil Rytarowski nicvf_dev_stop_cleanup(dev, false); 1813627d4ba2SKamil Rytarowski } 1814627d4ba2SKamil Rytarowski 1815627d4ba2SKamil Rytarowski static void 1816627d4ba2SKamil Rytarowski nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic, bool cleanup) 1817627d4ba2SKamil Rytarowski { 1818627d4ba2SKamil Rytarowski int ret; 1819627d4ba2SKamil Rytarowski uint16_t qidx; 1820627d4ba2SKamil Rytarowski uint16_t tx_start, tx_end; 1821627d4ba2SKamil Rytarowski uint16_t rx_start, rx_end; 1822627d4ba2SKamil Rytarowski 1823627d4ba2SKamil Rytarowski 
PMD_INIT_FUNC_TRACE(); 1824627d4ba2SKamil Rytarowski 1825627d4ba2SKamil Rytarowski if (cleanup) { 1826627d4ba2SKamil Rytarowski /* Let PF make the BGX's RX and TX switches to OFF position */ 1827627d4ba2SKamil Rytarowski nicvf_mbox_shutdown(nic); 1828627d4ba2SKamil Rytarowski } 1829627d4ba2SKamil Rytarowski 18307413feeeSJerin Jacob /* Disable VLAN Strip */ 18317413feeeSJerin Jacob nicvf_vlan_hw_strip(nic, 0); 18327413feeeSJerin Jacob 1833627d4ba2SKamil Rytarowski /* Get queue ranges for this VF */ 1834627d4ba2SKamil Rytarowski nicvf_tx_range(dev, nic, &tx_start, &tx_end); 1835627d4ba2SKamil Rytarowski 1836627d4ba2SKamil Rytarowski for (qidx = tx_start; qidx <= tx_end; qidx++) 1837627d4ba2SKamil Rytarowski nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS); 1838627d4ba2SKamil Rytarowski 1839627d4ba2SKamil Rytarowski /* Get queue ranges for this VF */ 1840627d4ba2SKamil Rytarowski nicvf_rx_range(dev, nic, &rx_start, &rx_end); 18417413feeeSJerin Jacob 18427413feeeSJerin Jacob /* Reclaim rq */ 1843627d4ba2SKamil Rytarowski for (qidx = rx_start; qidx <= rx_end; qidx++) 1844627d4ba2SKamil Rytarowski nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS); 18457413feeeSJerin Jacob 18467413feeeSJerin Jacob /* Reclaim RBDR */ 18477413feeeSJerin Jacob ret = nicvf_qset_rbdr_reclaim(nic, 0); 18487413feeeSJerin Jacob if (ret) 18497413feeeSJerin Jacob PMD_INIT_LOG(ERR, "Failed to reclaim RBDR %d", ret); 18507413feeeSJerin Jacob 18517413feeeSJerin Jacob /* Move all charged buffers in RBDR back to pool */ 18527413feeeSJerin Jacob if (nic->rbdr != NULL) 18536d3cbd56SKamil Rytarowski nicvf_rbdr_release_mbufs(dev, nic); 18547413feeeSJerin Jacob 18557413feeeSJerin Jacob /* Disable qset */ 1856627d4ba2SKamil Rytarowski ret = nicvf_qset_reclaim(nic); 18577413feeeSJerin Jacob if (ret) 18587413feeeSJerin Jacob PMD_INIT_LOG(ERR, "Failed to disable qset %d", ret); 18597413feeeSJerin Jacob 18607413feeeSJerin Jacob /* Disable all interrupts */ 18617413feeeSJerin Jacob nicvf_disable_all_interrupts(nic); 18627413feeeSJerin Jacob 18637413feeeSJerin Jacob /* Free RBDR SW structure */ 18647413feeeSJerin Jacob if (nic->rbdr) { 18657413feeeSJerin Jacob rte_free(nic->rbdr); 18667413feeeSJerin Jacob nic->rbdr = NULL; 18677413feeeSJerin Jacob } 18687413feeeSJerin Jacob } 18697413feeeSJerin Jacob 18707413feeeSJerin Jacob static void 18717413feeeSJerin Jacob nicvf_dev_close(struct rte_eth_dev *dev) 18727413feeeSJerin Jacob { 1873627d4ba2SKamil Rytarowski size_t i; 1874627d4ba2SKamil Rytarowski struct nicvf *nic = nicvf_pmd_priv(dev); 1875627d4ba2SKamil Rytarowski 18767413feeeSJerin Jacob PMD_INIT_FUNC_TRACE(); 18777413feeeSJerin Jacob 1878627d4ba2SKamil Rytarowski nicvf_dev_stop_cleanup(dev, true); 1879f141adcaSKamil Rytarowski nicvf_periodic_alarm_stop(nicvf_interrupt, dev); 1880627d4ba2SKamil Rytarowski 1881627d4ba2SKamil Rytarowski for (i = 0; i < nic->sqs_count; i++) { 1882627d4ba2SKamil Rytarowski if (!nic->snicvf[i]) 1883627d4ba2SKamil Rytarowski continue; 1884627d4ba2SKamil Rytarowski 1885627d4ba2SKamil Rytarowski nicvf_periodic_alarm_stop(nicvf_vf_interrupt, nic->snicvf[i]); 1886627d4ba2SKamil Rytarowski } 18877413feeeSJerin Jacob } 18887413feeeSJerin Jacob 1889bc79615aSJerin Jacob static int 1890b7004ab2SKamil Rytarowski nicvf_request_sqs(struct nicvf *nic) 1891b7004ab2SKamil Rytarowski { 1892b7004ab2SKamil Rytarowski size_t i; 1893b7004ab2SKamil Rytarowski 1894b7004ab2SKamil Rytarowski assert_primary(nic); 1895b7004ab2SKamil Rytarowski assert(nic->sqs_count > 0); 1896b7004ab2SKamil Rytarowski 
assert(nic->sqs_count <= MAX_SQS_PER_VF); 1897b7004ab2SKamil Rytarowski 1898b7004ab2SKamil Rytarowski /* Set no of Rx/Tx queues in each of the SQsets */ 1899b7004ab2SKamil Rytarowski for (i = 0; i < nic->sqs_count; i++) { 1900b7004ab2SKamil Rytarowski if (nicvf_svf_empty()) 1901b7004ab2SKamil Rytarowski rte_panic("Cannot assign sufficient number of " 1902b7004ab2SKamil Rytarowski "secondary queues to primary VF%" PRIu8 "\n", 1903b7004ab2SKamil Rytarowski nic->vf_id); 1904b7004ab2SKamil Rytarowski 1905b7004ab2SKamil Rytarowski nic->snicvf[i] = nicvf_svf_pop(); 1906b7004ab2SKamil Rytarowski nic->snicvf[i]->sqs_id = i; 1907b7004ab2SKamil Rytarowski } 1908b7004ab2SKamil Rytarowski 1909b7004ab2SKamil Rytarowski return nicvf_mbox_request_sqs(nic); 1910b7004ab2SKamil Rytarowski } 1911b7004ab2SKamil Rytarowski 1912b7004ab2SKamil Rytarowski static int 1913bc79615aSJerin Jacob nicvf_dev_configure(struct rte_eth_dev *dev) 1914bc79615aSJerin Jacob { 1915b7004ab2SKamil Rytarowski struct rte_eth_dev_data *data = dev->data; 1916b7004ab2SKamil Rytarowski struct rte_eth_conf *conf = &data->dev_conf; 1917bc79615aSJerin Jacob struct rte_eth_rxmode *rxmode = &conf->rxmode; 1918bc79615aSJerin Jacob struct rte_eth_txmode *txmode = &conf->txmode; 1919bc79615aSJerin Jacob struct nicvf *nic = nicvf_pmd_priv(dev); 1920b7004ab2SKamil Rytarowski uint8_t cqcount; 1921bc79615aSJerin Jacob 1922bc79615aSJerin Jacob PMD_INIT_FUNC_TRACE(); 1923bc79615aSJerin Jacob 192473fb89ddSAndrew Rybchenko if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG) 19258b945a7fSPavan Nikhilesh rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH; 19268b945a7fSPavan Nikhilesh 1927bc79615aSJerin Jacob if (!rte_eal_has_hugepages()) { 1928bc79615aSJerin Jacob PMD_INIT_LOG(INFO, "Huge page is not configured"); 1929bc79615aSJerin Jacob return -EINVAL; 1930bc79615aSJerin Jacob } 1931bc79615aSJerin Jacob 1932bc79615aSJerin Jacob if (txmode->mq_mode) { 1933bc79615aSJerin Jacob PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported"); 1934bc79615aSJerin Jacob return -EINVAL; 1935bc79615aSJerin Jacob } 1936bc79615aSJerin Jacob 1937bc79615aSJerin Jacob if (rxmode->mq_mode != ETH_MQ_RX_NONE && 1938bc79615aSJerin Jacob rxmode->mq_mode != ETH_MQ_RX_RSS) { 1939bc79615aSJerin Jacob PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode); 1940bc79615aSJerin Jacob return -EINVAL; 1941bc79615aSJerin Jacob } 1942bc79615aSJerin Jacob 1943bc79615aSJerin Jacob if (rxmode->split_hdr_size) { 1944bc79615aSJerin Jacob PMD_INIT_LOG(INFO, "Rxmode does not support split header"); 1945bc79615aSJerin Jacob return -EINVAL; 1946bc79615aSJerin Jacob } 1947bc79615aSJerin Jacob 1948bc79615aSJerin Jacob if (conf->link_speeds & ETH_LINK_SPEED_FIXED) { 1949bc79615aSJerin Jacob PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported"); 1950bc79615aSJerin Jacob return -EINVAL; 1951bc79615aSJerin Jacob } 1952bc79615aSJerin Jacob 1953bc79615aSJerin Jacob if (conf->dcb_capability_en) { 1954bc79615aSJerin Jacob PMD_INIT_LOG(INFO, "DCB enable not supported"); 1955bc79615aSJerin Jacob return -EINVAL; 1956bc79615aSJerin Jacob } 1957bc79615aSJerin Jacob 1958bc79615aSJerin Jacob if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) { 1959bc79615aSJerin Jacob PMD_INIT_LOG(INFO, "Flow director not supported"); 1960bc79615aSJerin Jacob return -EINVAL; 1961bc79615aSJerin Jacob } 1962bc79615aSJerin Jacob 1963b7004ab2SKamil Rytarowski assert_primary(nic); 1964b7004ab2SKamil Rytarowski NICVF_STATIC_ASSERT(MAX_RCV_QUEUES_PER_QS == MAX_SND_QUEUES_PER_QS); 1965b7004ab2SKamil Rytarowski cqcount = 
RTE_MAX(data->nb_tx_queues, data->nb_rx_queues); 1966b7004ab2SKamil Rytarowski if (cqcount > MAX_RCV_QUEUES_PER_QS) { 1967b7004ab2SKamil Rytarowski nic->sqs_count = RTE_ALIGN_CEIL(cqcount, MAX_RCV_QUEUES_PER_QS); 1968b7004ab2SKamil Rytarowski nic->sqs_count = (nic->sqs_count / MAX_RCV_QUEUES_PER_QS) - 1; 1969b7004ab2SKamil Rytarowski } else { 1970b7004ab2SKamil Rytarowski nic->sqs_count = 0; 1971b7004ab2SKamil Rytarowski } 1972b7004ab2SKamil Rytarowski 1973b7004ab2SKamil Rytarowski assert(nic->sqs_count <= MAX_SQS_PER_VF); 1974b7004ab2SKamil Rytarowski 1975b7004ab2SKamil Rytarowski if (nic->sqs_count > 0) { 1976b7004ab2SKamil Rytarowski if (nicvf_request_sqs(nic)) { 1977b7004ab2SKamil Rytarowski rte_panic("Cannot assign sufficient number of " 1978b7004ab2SKamil Rytarowski "secondary queues to PORT%d VF%" PRIu8 "\n", 1979b7004ab2SKamil Rytarowski dev->data->port_id, nic->vf_id); 1980b7004ab2SKamil Rytarowski } 1981b7004ab2SKamil Rytarowski } 1982b7004ab2SKamil Rytarowski 19835e64c812SPavan Nikhilesh if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) 19845e64c812SPavan Nikhilesh nic->offload_cksum = 1; 19855e64c812SPavan Nikhilesh 1986bc79615aSJerin Jacob PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64, 1987bc79615aSJerin Jacob dev->data->port_id, nicvf_hw_cap(nic)); 1988bc79615aSJerin Jacob 1989bc79615aSJerin Jacob return 0; 1990bc79615aSJerin Jacob } 1991bc79615aSJerin Jacob 1992b8d96c71SHarman Kalra static int 1993b8d96c71SHarman Kalra nicvf_dev_set_link_up(struct rte_eth_dev *dev) 1994b8d96c71SHarman Kalra { 1995b8d96c71SHarman Kalra struct nicvf *nic = nicvf_pmd_priv(dev); 1996b8d96c71SHarman Kalra int rc, i; 1997b8d96c71SHarman Kalra 1998b8d96c71SHarman Kalra rc = nicvf_mbox_set_link_up_down(nic, true); 1999b8d96c71SHarman Kalra if (rc) 2000b8d96c71SHarman Kalra goto done; 2001b8d96c71SHarman Kalra 2002b8d96c71SHarman Kalra /* Start tx queues */ 2003b8d96c71SHarman Kalra for (i = 0; i < dev->data->nb_tx_queues; i++) 2004b8d96c71SHarman Kalra nicvf_dev_tx_queue_start(dev, i); 2005b8d96c71SHarman Kalra 2006b8d96c71SHarman Kalra done: 2007b8d96c71SHarman Kalra return rc; 2008b8d96c71SHarman Kalra } 2009b8d96c71SHarman Kalra 2010b8d96c71SHarman Kalra static int 2011b8d96c71SHarman Kalra nicvf_dev_set_link_down(struct rte_eth_dev *dev) 2012b8d96c71SHarman Kalra { 2013b8d96c71SHarman Kalra struct nicvf *nic = nicvf_pmd_priv(dev); 2014b8d96c71SHarman Kalra int i; 2015b8d96c71SHarman Kalra 2016b8d96c71SHarman Kalra /* Stop tx queues */ 2017b8d96c71SHarman Kalra for (i = 0; i < dev->data->nb_tx_queues; i++) 2018b8d96c71SHarman Kalra nicvf_dev_tx_queue_stop(dev, i); 2019b8d96c71SHarman Kalra 2020b8d96c71SHarman Kalra return nicvf_mbox_set_link_up_down(nic, false); 2021b8d96c71SHarman Kalra } 2022b8d96c71SHarman Kalra 2023e4387966SJerin Jacob /* Initialize and register driver with DPDK Application */ 2024e4387966SJerin Jacob static const struct eth_dev_ops nicvf_eth_dev_ops = { 2025bc79615aSJerin Jacob .dev_configure = nicvf_dev_configure, 20267413feeeSJerin Jacob .dev_start = nicvf_dev_start, 20277413feeeSJerin Jacob .dev_stop = nicvf_dev_stop, 20288fc70464SJerin Jacob .link_update = nicvf_dev_link_update, 20297413feeeSJerin Jacob .dev_close = nicvf_dev_close, 2030684fa771SJerin Jacob .stats_get = nicvf_dev_stats_get, 2031684fa771SJerin Jacob .stats_reset = nicvf_dev_stats_reset, 20326eae36eaSJerin Jacob .promiscuous_enable = nicvf_dev_promisc_enable, 2033dcd7b1e1SJerin Jacob .dev_infos_get = nicvf_dev_info_get, 20341c80e4fdSJerin Jacob .dev_supported_ptypes_get = 
nicvf_dev_supported_ptypes_get, 203565d9804eSJerin Jacob .mtu_set = nicvf_dev_set_mtu, 2036d3bf2564SRakesh Kudurumalla .vlan_offload_set = nicvf_vlan_offload_set, 203743362c6aSJerin Jacob .reta_update = nicvf_dev_reta_update, 203843362c6aSJerin Jacob .reta_query = nicvf_dev_reta_query, 203943362c6aSJerin Jacob .rss_hash_update = nicvf_dev_rss_hash_update, 204043362c6aSJerin Jacob .rss_hash_conf_get = nicvf_dev_rss_hash_conf_get, 204186b4eb42SJerin Jacob .rx_queue_start = nicvf_dev_rx_queue_start, 204286b4eb42SJerin Jacob .rx_queue_stop = nicvf_dev_rx_queue_stop, 2043fc1f6c62SJerin Jacob .tx_queue_start = nicvf_dev_tx_queue_start, 2044fc1f6c62SJerin Jacob .tx_queue_stop = nicvf_dev_tx_queue_stop, 2045aa0d976eSJerin Jacob .rx_queue_setup = nicvf_dev_rx_queue_setup, 2046aa0d976eSJerin Jacob .rx_queue_release = nicvf_dev_rx_queue_release, 2047da14e00cSJerin Jacob .rx_queue_count = nicvf_dev_rx_queue_count, 20483f3c6f97SJerin Jacob .tx_queue_setup = nicvf_dev_tx_queue_setup, 20493f3c6f97SJerin Jacob .tx_queue_release = nicvf_dev_tx_queue_release, 2050b8d96c71SHarman Kalra .dev_set_link_up = nicvf_dev_set_link_up, 2051b8d96c71SHarman Kalra .dev_set_link_down = nicvf_dev_set_link_down, 2052606ee746SJerin Jacob .get_reg = nicvf_dev_get_regs, 2053e4387966SJerin Jacob }; 2054e4387966SJerin Jacob 2055d3bf2564SRakesh Kudurumalla static int 2056d3bf2564SRakesh Kudurumalla nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask) 2057d3bf2564SRakesh Kudurumalla { 2058d3bf2564SRakesh Kudurumalla struct rte_eth_rxmode *rxmode; 2059d3bf2564SRakesh Kudurumalla struct nicvf *nic = nicvf_pmd_priv(dev); 2060d3bf2564SRakesh Kudurumalla rxmode = &dev->data->dev_conf.rxmode; 2061d3bf2564SRakesh Kudurumalla if (mask & ETH_VLAN_STRIP_MASK) { 2062d3bf2564SRakesh Kudurumalla if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 2063d3bf2564SRakesh Kudurumalla nicvf_vlan_hw_strip(nic, true); 2064d3bf2564SRakesh Kudurumalla else 2065d3bf2564SRakesh Kudurumalla nicvf_vlan_hw_strip(nic, false); 2066d3bf2564SRakesh Kudurumalla } 2067d3bf2564SRakesh Kudurumalla 2068d3bf2564SRakesh Kudurumalla return 0; 2069d3bf2564SRakesh Kudurumalla } 2070d3bf2564SRakesh Kudurumalla 2071d3bf2564SRakesh Kudurumalla static int 2072d3bf2564SRakesh Kudurumalla nicvf_vlan_offload_set(struct rte_eth_dev *dev, int mask) 2073d3bf2564SRakesh Kudurumalla { 2074d3bf2564SRakesh Kudurumalla nicvf_vlan_offload_config(dev, mask); 2075d3bf2564SRakesh Kudurumalla 2076d3bf2564SRakesh Kudurumalla return 0; 2077d3bf2564SRakesh Kudurumalla } 2078d3bf2564SRakesh Kudurumalla 2079279d3319SRakesh Kudurumalla static inline int 2080279d3319SRakesh Kudurumalla nicvf_set_first_skip(struct rte_eth_dev *dev) 2081279d3319SRakesh Kudurumalla { 2082279d3319SRakesh Kudurumalla int bytes_to_skip = 0; 2083279d3319SRakesh Kudurumalla int ret = 0; 2084279d3319SRakesh Kudurumalla unsigned int i; 2085279d3319SRakesh Kudurumalla struct rte_kvargs *kvlist; 2086279d3319SRakesh Kudurumalla static const char *const skip[] = { 2087279d3319SRakesh Kudurumalla SKIP_DATA_BYTES, 2088279d3319SRakesh Kudurumalla NULL}; 2089279d3319SRakesh Kudurumalla struct nicvf *nic = nicvf_pmd_priv(dev); 2090279d3319SRakesh Kudurumalla 2091279d3319SRakesh Kudurumalla if (!dev->device->devargs) { 2092279d3319SRakesh Kudurumalla nicvf_first_skip_config(nic, 0); 2093279d3319SRakesh Kudurumalla return ret; 2094279d3319SRakesh Kudurumalla } 2095279d3319SRakesh Kudurumalla 2096279d3319SRakesh Kudurumalla kvlist = rte_kvargs_parse(dev->device->devargs->args, skip); 2097279d3319SRakesh Kudurumalla if (!kvlist) 
2098279d3319SRakesh Kudurumalla return -EINVAL; 2099279d3319SRakesh Kudurumalla 2100279d3319SRakesh Kudurumalla if (kvlist->count == 0) 2101279d3319SRakesh Kudurumalla goto exit; 2102279d3319SRakesh Kudurumalla 2103279d3319SRakesh Kudurumalla for (i = 0; i != kvlist->count; ++i) { 2104279d3319SRakesh Kudurumalla const struct rte_kvargs_pair *pair = &kvlist->pairs[i]; 2105279d3319SRakesh Kudurumalla 2106279d3319SRakesh Kudurumalla if (!strcmp(pair->key, SKIP_DATA_BYTES)) 2107279d3319SRakesh Kudurumalla bytes_to_skip = atoi(pair->value); 2108279d3319SRakesh Kudurumalla } 2109279d3319SRakesh Kudurumalla 2110279d3319SRakesh Kudurumalla /* 128 bytes amounts to one cache line */ 2111279d3319SRakesh Kudurumalla if (bytes_to_skip >= 0 && bytes_to_skip < 128) { 2112279d3319SRakesh Kudurumalla if (!(bytes_to_skip % 8)) { 2113279d3319SRakesh Kudurumalla nicvf_first_skip_config(nic, (bytes_to_skip / 8)); 2114279d3319SRakesh Kudurumalla nic->skip_bytes = bytes_to_skip; 2115279d3319SRakesh Kudurumalla goto kvlist_free; 2116279d3319SRakesh Kudurumalla } else { 2117279d3319SRakesh Kudurumalla PMD_INIT_LOG(ERR, "skip_data_bytes should be a multiple of 8"); 2118279d3319SRakesh Kudurumalla ret = -EINVAL; 2119279d3319SRakesh Kudurumalla goto exit; 2120279d3319SRakesh Kudurumalla } 2121279d3319SRakesh Kudurumalla } else { 2122279d3319SRakesh Kudurumalla PMD_INIT_LOG(ERR, "skip_data_bytes should be less than 128"); 2123279d3319SRakesh Kudurumalla ret = -EINVAL; 2124279d3319SRakesh Kudurumalla goto exit; 2125279d3319SRakesh Kudurumalla } 2126279d3319SRakesh Kudurumalla exit: 2127279d3319SRakesh Kudurumalla nicvf_first_skip_config(nic, 0); 2128279d3319SRakesh Kudurumalla kvlist_free: 2129279d3319SRakesh Kudurumalla rte_kvargs_free(kvlist); 2130279d3319SRakesh Kudurumalla return ret; 2131279d3319SRakesh Kudurumalla } 2132e4387966SJerin Jacob static int 2133230dce64SAmit Gupta nicvf_eth_dev_uninit(struct rte_eth_dev *dev) 2134230dce64SAmit Gupta { 2135230dce64SAmit Gupta PMD_INIT_FUNC_TRACE(); 2136230dce64SAmit Gupta 2137230dce64SAmit Gupta if (rte_eal_process_type() == RTE_PROC_PRIMARY) 2138230dce64SAmit Gupta nicvf_dev_close(dev); 2139230dce64SAmit Gupta 2140230dce64SAmit Gupta return 0; 2141230dce64SAmit Gupta } 2142230dce64SAmit Gupta static int 2143e4387966SJerin Jacob nicvf_eth_dev_init(struct rte_eth_dev *eth_dev) 2144e4387966SJerin Jacob { 2145e4387966SJerin Jacob int ret; 2146e4387966SJerin Jacob struct rte_pci_device *pci_dev; 2147e4387966SJerin Jacob struct nicvf *nic = nicvf_pmd_priv(eth_dev); 2148e4387966SJerin Jacob 2149e4387966SJerin Jacob PMD_INIT_FUNC_TRACE(); 2150e4387966SJerin Jacob 2151e4387966SJerin Jacob eth_dev->dev_ops = &nicvf_eth_dev_ops; 2152e4387966SJerin Jacob 21537413feeeSJerin Jacob /* For secondary processes, the primary has done all the work */ 21547413feeeSJerin Jacob if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 215521e3fb00SKamil Rytarowski if (nic) { 21567413feeeSJerin Jacob /* Setup callbacks for secondary process */ 21577413feeeSJerin Jacob nicvf_set_tx_function(eth_dev); 21587413feeeSJerin Jacob nicvf_set_rx_function(eth_dev); 21597413feeeSJerin Jacob return 0; 216021e3fb00SKamil Rytarowski } else { 216121e3fb00SKamil Rytarowski /* If nic == NULL then this is a secondary VF, 216221e3fb00SKamil Rytarowski * so the ethdev needs to be released by the caller */ 216321e3fb00SKamil Rytarowski return ENOTSUP; 216421e3fb00SKamil Rytarowski } 21657413feeeSJerin Jacob } 21667413feeeSJerin Jacob 2167c0802544SFerruh Yigit pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 2168e4387966SJerin Jacob 
rte_eth_copy_pci_info(eth_dev, pci_dev); 2169e4387966SJerin Jacob 2170e4387966SJerin Jacob nic->device_id = pci_dev->id.device_id; 2171e4387966SJerin Jacob nic->vendor_id = pci_dev->id.vendor_id; 2172e4387966SJerin Jacob nic->subsystem_device_id = pci_dev->id.subsystem_device_id; 2173e4387966SJerin Jacob nic->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id; 2174e4387966SJerin Jacob 2175e4387966SJerin Jacob PMD_INIT_LOG(DEBUG, "nicvf: device (%x:%x) %u:%u:%u:%u", 2176e4387966SJerin Jacob pci_dev->id.vendor_id, pci_dev->id.device_id, 2177e4387966SJerin Jacob pci_dev->addr.domain, pci_dev->addr.bus, 2178e4387966SJerin Jacob pci_dev->addr.devid, pci_dev->addr.function); 2179e4387966SJerin Jacob 2180e4387966SJerin Jacob nic->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr; 2181e4387966SJerin Jacob if (!nic->reg_base) { 2182e4387966SJerin Jacob PMD_INIT_LOG(ERR, "Failed to map BAR0"); 2183e4387966SJerin Jacob ret = -ENODEV; 2184e4387966SJerin Jacob goto fail; 2185e4387966SJerin Jacob } 2186e4387966SJerin Jacob 2187e4387966SJerin Jacob nicvf_disable_all_interrupts(nic); 2188e4387966SJerin Jacob 2189f141adcaSKamil Rytarowski ret = nicvf_periodic_alarm_start(nicvf_interrupt, eth_dev); 2190e4387966SJerin Jacob if (ret) { 2191e4387966SJerin Jacob PMD_INIT_LOG(ERR, "Failed to start periodic alarm"); 2192e4387966SJerin Jacob goto fail; 2193e4387966SJerin Jacob } 2194e4387966SJerin Jacob 2195e4387966SJerin Jacob ret = nicvf_mbox_check_pf_ready(nic); 2196e4387966SJerin Jacob if (ret) { 2197e4387966SJerin Jacob PMD_INIT_LOG(ERR, "Failed to get ready message from PF"); 2198e4387966SJerin Jacob goto alarm_fail; 2199e4387966SJerin Jacob } else { 2200e4387966SJerin Jacob PMD_INIT_LOG(INFO, 2201e4387966SJerin Jacob "node=%d vf=%d mode=%s sqs=%s loopback_supported=%s", 2202e4387966SJerin Jacob nic->node, nic->vf_id, 2203e4387966SJerin Jacob nic->tns_mode == NIC_TNS_MODE ? "tns" : "tns-bypass", 2204e4387966SJerin Jacob nic->sqs_mode ? "true" : "false", 2205e4387966SJerin Jacob nic->loopback_supported ? 
"true" : "false" 2206e4387966SJerin Jacob ); 2207e4387966SJerin Jacob } 2208e4387966SJerin Jacob 220921e3fb00SKamil Rytarowski ret = nicvf_base_init(nic); 221021e3fb00SKamil Rytarowski if (ret) { 221121e3fb00SKamil Rytarowski PMD_INIT_LOG(ERR, "Failed to execute nicvf_base_init"); 221221e3fb00SKamil Rytarowski goto malloc_fail; 221321e3fb00SKamil Rytarowski } 221421e3fb00SKamil Rytarowski 2215e4387966SJerin Jacob if (nic->sqs_mode) { 221621e3fb00SKamil Rytarowski /* Push nic to stack of secondary vfs */ 221721e3fb00SKamil Rytarowski nicvf_svf_push(nic); 221821e3fb00SKamil Rytarowski 221921e3fb00SKamil Rytarowski /* Steal nic pointer from the device for further reuse */ 222021e3fb00SKamil Rytarowski eth_dev->data->dev_private = NULL; 222121e3fb00SKamil Rytarowski 222221e3fb00SKamil Rytarowski nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev); 222321e3fb00SKamil Rytarowski ret = nicvf_periodic_alarm_start(nicvf_vf_interrupt, nic); 222421e3fb00SKamil Rytarowski if (ret) { 222521e3fb00SKamil Rytarowski PMD_INIT_LOG(ERR, "Failed to start period alarm"); 222621e3fb00SKamil Rytarowski goto fail; 222721e3fb00SKamil Rytarowski } 222821e3fb00SKamil Rytarowski 222998a7ea33SJerin Jacob /* Detach port by returning positive error number */ 223021e3fb00SKamil Rytarowski return ENOTSUP; 2231e4387966SJerin Jacob } 2232e4387966SJerin Jacob 223335b2d13fSOlivier Matz eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", 223435b2d13fSOlivier Matz RTE_ETHER_ADDR_LEN, 0); 2235e4387966SJerin Jacob if (eth_dev->data->mac_addrs == NULL) { 2236e4387966SJerin Jacob PMD_INIT_LOG(ERR, "Failed to allocate memory for mac addr"); 2237e4387966SJerin Jacob ret = -ENOMEM; 2238e4387966SJerin Jacob goto alarm_fail; 2239e4387966SJerin Jacob } 2240538da7a1SOlivier Matz if (rte_is_zero_ether_addr((struct rte_ether_addr *)nic->mac_addr)) 2241538da7a1SOlivier Matz rte_eth_random_addr(&nic->mac_addr[0]); 2242e4387966SJerin Jacob 2243538da7a1SOlivier Matz rte_ether_addr_copy((struct rte_ether_addr *)nic->mac_addr, 2244e4387966SJerin Jacob ð_dev->data->mac_addrs[0]); 2245e4387966SJerin Jacob 2246e4387966SJerin Jacob ret = nicvf_mbox_set_mac_addr(nic, nic->mac_addr); 2247e4387966SJerin Jacob if (ret) { 2248e4387966SJerin Jacob PMD_INIT_LOG(ERR, "Failed to set mac addr"); 2249e4387966SJerin Jacob goto malloc_fail; 2250e4387966SJerin Jacob } 2251e4387966SJerin Jacob 2252279d3319SRakesh Kudurumalla ret = nicvf_set_first_skip(eth_dev); 2253279d3319SRakesh Kudurumalla if (ret) { 2254279d3319SRakesh Kudurumalla PMD_INIT_LOG(ERR, "Failed to configure first skip"); 2255279d3319SRakesh Kudurumalla goto malloc_fail; 2256279d3319SRakesh Kudurumalla } 2257e4387966SJerin Jacob PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x", 2258e4387966SJerin Jacob eth_dev->data->port_id, nic->vendor_id, nic->device_id, 2259e4387966SJerin Jacob nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2], 2260e4387966SJerin Jacob nic->mac_addr[3], nic->mac_addr[4], nic->mac_addr[5]); 2261e4387966SJerin Jacob 2262e4387966SJerin Jacob return 0; 2263e4387966SJerin Jacob 2264e4387966SJerin Jacob malloc_fail: 2265e4387966SJerin Jacob rte_free(eth_dev->data->mac_addrs); 2266e7f2fa88SDavid Marchand eth_dev->data->mac_addrs = NULL; 2267e4387966SJerin Jacob alarm_fail: 2268f141adcaSKamil Rytarowski nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev); 2269e4387966SJerin Jacob fail: 2270e4387966SJerin Jacob return ret; 2271e4387966SJerin Jacob } 2272e4387966SJerin Jacob 2273e4387966SJerin Jacob static const struct rte_pci_id pci_id_nicvf_map[] = { 
2274e4387966SJerin Jacob { 2275e4387966SJerin Jacob .class_id = RTE_CLASS_ANY_ID, 2276e4387966SJerin Jacob .vendor_id = PCI_VENDOR_ID_CAVIUM, 2277398a1be1SJerin Jacob .device_id = PCI_DEVICE_ID_THUNDERX_CN88XX_PASS1_NICVF, 2278e4387966SJerin Jacob .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM, 2279398a1be1SJerin Jacob .subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS1_NICVF, 2280e4387966SJerin Jacob }, 2281e4387966SJerin Jacob { 2282e4387966SJerin Jacob .class_id = RTE_CLASS_ANY_ID, 2283e4387966SJerin Jacob .vendor_id = PCI_VENDOR_ID_CAVIUM, 2284398a1be1SJerin Jacob .device_id = PCI_DEVICE_ID_THUNDERX_NICVF, 2285e4387966SJerin Jacob .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM, 2286398a1be1SJerin Jacob .subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF, 2287e4387966SJerin Jacob }, 2288e4387966SJerin Jacob { 2289b72a7768SJerin Jacob .class_id = RTE_CLASS_ANY_ID, 2290b72a7768SJerin Jacob .vendor_id = PCI_VENDOR_ID_CAVIUM, 2291b72a7768SJerin Jacob .device_id = PCI_DEVICE_ID_THUNDERX_NICVF, 2292b72a7768SJerin Jacob .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM, 2293b72a7768SJerin Jacob .subsystem_device_id = PCI_SUB_DEVICE_ID_CN81XX_NICVF, 2294b72a7768SJerin Jacob }, 2295b72a7768SJerin Jacob { 2296174dd78eSJerin Jacob .class_id = RTE_CLASS_ANY_ID, 2297174dd78eSJerin Jacob .vendor_id = PCI_VENDOR_ID_CAVIUM, 2298174dd78eSJerin Jacob .device_id = PCI_DEVICE_ID_THUNDERX_NICVF, 2299174dd78eSJerin Jacob .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM, 2300174dd78eSJerin Jacob .subsystem_device_id = PCI_SUB_DEVICE_ID_CN83XX_NICVF, 2301174dd78eSJerin Jacob }, 2302174dd78eSJerin Jacob { 2303e4387966SJerin Jacob .vendor_id = 0, 2304e4387966SJerin Jacob }, 2305e4387966SJerin Jacob }; 2306e4387966SJerin Jacob 2307fdf91e0fSJan Blunck static int nicvf_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 2308fdf91e0fSJan Blunck struct rte_pci_device *pci_dev) 2309fdf91e0fSJan Blunck { 2310fdf91e0fSJan Blunck return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct nicvf), 2311fdf91e0fSJan Blunck nicvf_eth_dev_init); 2312fdf91e0fSJan Blunck } 2313fdf91e0fSJan Blunck 2314fdf91e0fSJan Blunck static int nicvf_eth_pci_remove(struct rte_pci_device *pci_dev) 2315fdf91e0fSJan Blunck { 2316230dce64SAmit Gupta return rte_eth_dev_pci_generic_remove(pci_dev, nicvf_eth_dev_uninit); 2317fdf91e0fSJan Blunck } 2318fdf91e0fSJan Blunck 2319fdf91e0fSJan Blunck static struct rte_pci_driver rte_nicvf_pmd = { 2320e4387966SJerin Jacob .id_table = pci_id_nicvf_map, 23216110b1c6SJerin Jacob .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_KEEP_MAPPED_RES | 23226110b1c6SJerin Jacob RTE_PCI_DRV_INTR_LSC, 2323fdf91e0fSJan Blunck .probe = nicvf_eth_pci_probe, 2324fdf91e0fSJan Blunck .remove = nicvf_eth_pci_remove, 2325e4387966SJerin Jacob }; 2326e4387966SJerin Jacob 2327fdf91e0fSJan Blunck RTE_PMD_REGISTER_PCI(net_thunderx, rte_nicvf_pmd); 232801f19227SShreyansh Jain RTE_PMD_REGISTER_PCI_TABLE(net_thunderx, pci_id_nicvf_map); 232906e81dc9SDavid Marchand RTE_PMD_REGISTER_KMOD_DEP(net_thunderx, "* igb_uio | uio_pci_generic | vfio-pci"); 2330279d3319SRakesh Kudurumalla RTE_PMD_REGISTER_PARAM_STRING(net_thunderx, SKIP_DATA_BYTES "=<int>"); 2331
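/*
 * Usage sketch for the devarg registered above (an illustrative command
 * line, assuming the testpmd application and an example PCI address; the
 * device whitelist option name varies across DPDK releases):
 *
 *   testpmd -w 0002:01:00.1,skip_data_bytes=8 -- -i
 *
 * nicvf_set_first_skip() accepts only multiples of 8 smaller than 128,
 * so e.g. skip_data_bytes=12 would be rejected with -EINVAL.
 */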