/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <sys/queue.h>

#include <rte_alarm.h>
#include <rte_branch_prediction.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <dev_driver.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_pci.h>
#include <bus_pci_driver.h>
#include <rte_tailq.h>
#include <rte_devargs.h>
#include <rte_kvargs.h>

#include "base/nicvf_plat.h"

#include "nicvf_ethdev.h"
#include "nicvf_rxtx.h"
#include "nicvf_svf.h"
#include "nicvf_logs.h"

static int nicvf_dev_stop(struct rte_eth_dev *dev);
static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup);
static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic,
			  bool cleanup);
static int nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
static int nicvf_vlan_offload_set(struct rte_eth_dev *dev, int mask);

RTE_LOG_REGISTER_SUFFIX(nicvf_logtype_mbox, mbox, NOTICE);
RTE_LOG_REGISTER_SUFFIX(nicvf_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(nicvf_logtype_driver, driver, NOTICE);

#define NICVF_QLM_MODE_SGMII	7
#define NICVF_QLM_MODE_XFI	12

#define BCAST_ACCEPT		0x01
#define CAM_ACCEPT		(1 << 3)
#define BGX_MCAST_MODE(x)	((x) << 1)

enum nicvf_link_speed {
	NICVF_LINK_SPEED_SGMII,
	NICVF_LINK_SPEED_XAUI,
	NICVF_LINK_SPEED_RXAUI,
	NICVF_LINK_SPEED_10G_R,
	NICVF_LINK_SPEED_40G_R,
	NICVF_LINK_SPEED_RESERVE1,
	NICVF_LINK_SPEED_QSGMII,
	NICVF_LINK_SPEED_RESERVE2,
	NICVF_LINK_SPEED_UNKNOWN = 255
};

static inline uint32_t
nicvf_parse_link_speeds(uint32_t link_speeds)
{
	uint32_t link_speed = NICVF_LINK_SPEED_UNKNOWN;

	if (link_speeds & RTE_ETH_LINK_SPEED_40G)
		link_speed = NICVF_LINK_SPEED_40G_R;

	if (link_speeds & RTE_ETH_LINK_SPEED_10G) {
		link_speed = NICVF_LINK_SPEED_XAUI;
		link_speed |= NICVF_LINK_SPEED_RXAUI;
		link_speed |= NICVF_LINK_SPEED_10G_R;
	}

	if (link_speeds & RTE_ETH_LINK_SPEED_5G)
		link_speed = NICVF_LINK_SPEED_QSGMII;

	if (link_speeds & RTE_ETH_LINK_SPEED_1G)
		link_speed = NICVF_LINK_SPEED_SGMII;

	return link_speed;
}

static inline uint8_t
nicvf_parse_eth_link_duplex(uint32_t link_speeds)
{
	if ((link_speeds & RTE_ETH_LINK_SPEED_10M_HD) ||
	    (link_speeds & RTE_ETH_LINK_SPEED_100M_HD))
		return RTE_ETH_LINK_HALF_DUPLEX;
	else
		return RTE_ETH_LINK_FULL_DUPLEX;
}

static int
nicvf_apply_link_speed(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	struct rte_eth_conf *conf = &dev->data->dev_conf;
	struct change_link_mode cfg;

	if (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
		/* TODO: Handle this case */
		return 0;

	cfg.speed = nicvf_parse_link_speeds(conf->link_speeds);
	cfg.autoneg = (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) ? 1 : 0;
	cfg.duplex = nicvf_parse_eth_link_duplex(conf->link_speeds);
	cfg.qlm_mode = ((conf->link_speeds & RTE_ETH_LINK_SPEED_1G) ?
			NICVF_QLM_MODE_SGMII :
			(conf->link_speeds & RTE_ETH_LINK_SPEED_10G) ?
			NICVF_QLM_MODE_XFI : 0);

	if (cfg.speed != NICVF_LINK_SPEED_UNKNOWN &&
	    (cfg.speed != nic->speed || cfg.duplex != nic->duplex)) {
		nic->speed = cfg.speed;
		nic->duplex = cfg.duplex;
		return nicvf_mbox_change_mode(nic, &cfg);
	} else {
		return 0;
	}
}

static void
nicvf_link_status_update(struct nicvf *nic,
			 struct rte_eth_link *link)
{
	memset(link, 0, sizeof(*link));

	link->link_status = nic->link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;

	if (nic->duplex == NICVF_HALF_DUPLEX)
		link->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
	else if (nic->duplex == NICVF_FULL_DUPLEX)
		link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	link->link_speed = nic->speed;
	link->link_autoneg = RTE_ETH_LINK_AUTONEG;
}

/* Poll for link status change by sending NIC_MBOX_MSG_BGX_LINK_CHANGE msg
 * periodically to PF.
 */
static void
nicvf_interrupt(void *arg)
{
	struct rte_eth_dev *dev = arg;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	struct rte_eth_link link;

	rte_eth_linkstatus_get(dev, &link);

	nicvf_mbox_link_change(nic);
	if (nic->link_up != link.link_status) {
		if (dev->data->dev_conf.intr_conf.lsc) {
			nicvf_link_status_update(nic, &link);
			rte_eth_linkstatus_set(dev, &link);

			rte_eth_dev_callback_process(dev,
						     RTE_ETH_EVENT_INTR_LSC,
						     NULL);
		}
	}

	rte_eal_alarm_set(NICVF_INTR_LINK_POLL_INTERVAL_MS * 1000,
			  nicvf_interrupt, dev);
}

static void
nicvf_vf_interrupt(void *arg)
{
	struct nicvf *nic = arg;

	nicvf_reg_poll_interrupts(nic);

	rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
			  nicvf_vf_interrupt, nic);
}

static int
nicvf_periodic_alarm_start(void (fn)(void *), void *arg)
{
	return rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000, fn, arg);
}
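
/*
 * Both nicvf_interrupt() and nicvf_vf_interrupt() re-arm themselves from
 * within the callback, so the stop helper below must be given the same
 * (fn, arg) pair that was used to start the alarm; rte_eal_alarm_cancel()
 * only cancels alarms matching both the callback and its argument.
 */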

static int
nicvf_periodic_alarm_stop(void (fn)(void *), void *arg)
{
	return rte_eal_alarm_cancel(fn, arg);
}

/*
 * Return 0 means link status changed, -1 means not changed
 */
static int
nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
#define CHECK_INTERVAL 100	/* 100ms */
#define MAX_CHECK_TIME 90	/* 9s (90 * 100ms) in total */
	struct rte_eth_link link;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	int i;

	PMD_INIT_FUNC_TRACE();

	if (wait_to_complete) {
		/* rte_eth_link_get() might need to wait up to 9 seconds */
		for (i = 0; i < MAX_CHECK_TIME; i++) {
			nicvf_link_status_update(nic, &link);
			if (link.link_status == RTE_ETH_LINK_UP)
				break;
			rte_delay_ms(CHECK_INTERVAL);
		}
	} else {
		nicvf_link_status_update(nic, &link);
	}

	return rte_eth_linkstatus_set(dev, &link);
}

static int
nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint32_t buffsz, frame_size = mtu + NIC_HW_L2_OVERHEAD;
	size_t i;

	PMD_INIT_FUNC_TRACE();

	buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;

	/*
	 * Refuse mtu that requires the support of scattered packets
	 * when this feature has not been enabled before.
	 */
	if (dev->data->dev_started && !dev->data->scattered_rx &&
	    (frame_size + 2 * VLAN_TAG_SIZE > buffsz))
		return -EINVAL;

	/* check <seg size> * <max_seg> >= max_frame */
	if (dev->data->scattered_rx &&
	    (frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS))
		return -EINVAL;

	if (nicvf_mbox_update_hw_max_frs(nic, mtu))
		return -EINVAL;

	nic->mtu = mtu;

	for (i = 0; i < nic->sqs_count; i++)
		nic->snicvf[i]->mtu = mtu;

	return 0;
}

static int
nicvf_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	uint64_t *data = regs->data;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (data == NULL) {
		regs->length = nicvf_reg_get_count();
		regs->width = THUNDERX_REG_BYTES;
		return 0;
	}

	/* Support only full register dump */
	if ((regs->length == 0) ||
	    (regs->length == (uint32_t)nicvf_reg_get_count())) {
		regs->version = nic->vendor_id << 16 | nic->device_id;
		nicvf_reg_dump(nic, data);
		return 0;
	}
	return -ENOTSUP;
}

static int
nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	uint16_t qidx;
	struct nicvf_hw_rx_qstats rx_qstats;
	struct nicvf_hw_tx_qstats tx_qstats;
	struct nicvf_hw_stats port_stats;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint16_t rx_start, rx_end;
	uint16_t tx_start, tx_end;
	size_t i;

	/* RX queue indices for the first VF */
	nicvf_rx_range(dev, nic, &rx_start, &rx_end);

	/* Reading per RX ring stats */
	for (qidx = rx_start; qidx <= rx_end; qidx++) {
		if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		nicvf_hw_get_rx_qstats(nic, &rx_qstats, qidx);
		stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
		stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
	}

	/* TX queue indices for the first VF */
	nicvf_tx_range(dev, nic, &tx_start, &tx_end);

	/* Reading per TX ring stats */
	for (qidx = tx_start; qidx <= tx_end; qidx++) {
		if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		nicvf_hw_get_tx_qstats(nic, &tx_qstats, qidx);
		stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
		stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
	}

	for (i = 0; i < nic->sqs_count; i++) {
		struct nicvf *snic = nic->snicvf[i];

		if (snic == NULL)
			break;

		/* RX queue indices for a secondary VF */
		nicvf_rx_range(dev, snic, &rx_start, &rx_end);

		/* Reading per RX ring stats */
		for (qidx = rx_start; qidx <= rx_end; qidx++) {
			if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
				break;

			nicvf_hw_get_rx_qstats(snic, &rx_qstats,
					       qidx % MAX_RCV_QUEUES_PER_QS);
			stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
			stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
		}

		/* TX queue indices for a secondary VF */
		nicvf_tx_range(dev, snic, &tx_start, &tx_end);
		/* Reading per TX ring stats */
		for (qidx = tx_start; qidx <= tx_end; qidx++) {
			if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
				break;

			nicvf_hw_get_tx_qstats(snic, &tx_qstats,
					       qidx % MAX_SND_QUEUES_PER_QS);
			stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
			stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
		}
	}

	nicvf_hw_get_stats(nic, &port_stats);
	stats->ibytes = port_stats.rx_bytes;
	stats->ipackets = port_stats.rx_ucast_frames;
	stats->ipackets += port_stats.rx_bcast_frames;
	stats->ipackets += port_stats.rx_mcast_frames;
	stats->ierrors = port_stats.rx_l2_errors;
	stats->imissed = port_stats.rx_drop_red;
	stats->imissed += port_stats.rx_drop_overrun;
	stats->imissed += port_stats.rx_drop_bcast;
	stats->imissed += port_stats.rx_drop_mcast;
	stats->imissed += port_stats.rx_drop_l3_bcast;
	stats->imissed += port_stats.rx_drop_l3_mcast;

	stats->obytes = port_stats.tx_bytes_ok;
	stats->opackets = port_stats.tx_ucast_frames_ok;
	stats->opackets += port_stats.tx_bcast_frames_ok;
	stats->opackets += port_stats.tx_mcast_frames_ok;
	stats->oerrors = port_stats.tx_drops;

	return 0;
}
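
/*
 * Note: per-queue counters above are exposed only for the first
 * RTE_ETHDEV_QUEUE_STAT_CNTRS queues; traffic on the remaining queues is
 * still accounted for in the port-level totals read via nicvf_hw_get_stats().
 */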

static const uint32_t *
nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
{
	size_t copied;
	static uint32_t ptypes[32];
	struct nicvf *nic = nicvf_pmd_priv(dev);
	static const uint32_t ptypes_common[] = {
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
	};
	static const uint32_t ptypes_tunnel[] = {
		RTE_PTYPE_TUNNEL_GRE,
		RTE_PTYPE_TUNNEL_GENEVE,
		RTE_PTYPE_TUNNEL_VXLAN,
		RTE_PTYPE_TUNNEL_NVGRE,
	};

	copied = sizeof(ptypes_common);
	memcpy(ptypes, ptypes_common, copied);
	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
		memcpy((char *)ptypes + copied, ptypes_tunnel,
		       sizeof(ptypes_tunnel));
		copied += sizeof(ptypes_tunnel);
	}

	/* All Ptypes are supported in all Rx functions. */
	*no_of_elements = copied / sizeof(ptypes[0]);
	return ptypes;
}

static int
nicvf_dev_stats_reset(struct rte_eth_dev *dev)
{
	int i, j;
	uint16_t rxqs = 0, txqs = 0;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint16_t rx_start, rx_end;
	uint16_t tx_start, tx_end;
	int ret;

	/* Reset all primary nic counters */
	nicvf_rx_range(dev, nic, &rx_start, &rx_end);
	for (i = rx_start; i <= rx_end; i++)
		rxqs |= (0x3 << (i * 2));

	nicvf_tx_range(dev, nic, &tx_start, &tx_end);
	for (i = tx_start; i <= tx_end; i++)
		txqs |= (0x3 << (i * 2));

	ret = nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, rxqs, txqs);
	if (ret != 0)
		return ret;

	/* Reset secondary nic queue counters. Walk the queue ranges with a
	 * separate index 'j' so the VF iterator 'i' is not clobbered.
	 */
	for (i = 0; i < nic->sqs_count; i++) {
		struct nicvf *snic = nic->snicvf[i];
		if (snic == NULL)
			break;

		nicvf_rx_range(dev, snic, &rx_start, &rx_end);
		for (j = rx_start; j <= rx_end; j++)
			rxqs |= (0x3 << ((j % MAX_CMP_QUEUES_PER_QS) * 2));

		nicvf_tx_range(dev, snic, &tx_start, &tx_end);
		for (j = tx_start; j <= tx_end; j++)
			txqs |= (0x3 << ((j % MAX_SND_QUEUES_PER_QS) * 2));

		ret = nicvf_mbox_reset_stat_counters(snic, 0, 0, rxqs, txqs);
		if (ret != 0)
			return ret;
	}

	return 0;
}

/* Promiscuous mode enabled by default in LMAC to VF 1:1 map configuration */
static int
nicvf_dev_promisc_enable(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static inline uint64_t
nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss)
{
	uint64_t nic_rss = 0;

	if (ethdev_rss & RTE_ETH_RSS_IPV4)
		nic_rss |= RSS_IP_ENA;

	if (ethdev_rss & RTE_ETH_RSS_IPV6)
		nic_rss |= RSS_IP_ENA;

	if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);

	if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);

	if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);

	if (ethdev_rss & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);

	if (ethdev_rss & RTE_ETH_RSS_PORT)
		nic_rss |= RSS_L2_EXTENDED_HASH_ENA;

	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
		if (ethdev_rss & RTE_ETH_RSS_VXLAN)
			nic_rss |= RSS_TUN_VXLAN_ENA;

		if (ethdev_rss & RTE_ETH_RSS_GENEVE)
			nic_rss |= RSS_TUN_GENEVE_ENA;

		if (ethdev_rss & RTE_ETH_RSS_NVGRE)
			nic_rss |= RSS_TUN_NVGRE_ENA;
	}

	return nic_rss;
}

static inline uint64_t
nicvf_rss_nic_to_ethdev(struct nicvf *nic, uint64_t nic_rss)
{
	uint64_t ethdev_rss = 0;

	if (nic_rss & RSS_IP_ENA)
		ethdev_rss |= (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6);

	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA))
		ethdev_rss |= (RTE_ETH_RSS_NONFRAG_IPV4_TCP |
			       RTE_ETH_RSS_NONFRAG_IPV6_TCP);

	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA))
		ethdev_rss |= (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
			       RTE_ETH_RSS_NONFRAG_IPV6_UDP);
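
	/*
	 * Keep this reverse mapping in sync with nicvf_rss_ethdev_to_nic()
	 * above: RSS_L2_EXTENDED_HASH_ENA is reported back as
	 * RTE_ETH_RSS_PORT, and the tunnel bits only when the hardware
	 * advertises NICVF_CAP_TUNNEL_PARSING.
	 */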

	if (nic_rss & RSS_L2_EXTENDED_HASH_ENA)
		ethdev_rss |= RTE_ETH_RSS_PORT;

	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
		if (nic_rss & RSS_TUN_VXLAN_ENA)
			ethdev_rss |= RTE_ETH_RSS_VXLAN;

		if (nic_rss & RSS_TUN_GENEVE_ENA)
			ethdev_rss |= RTE_ETH_RSS_GENEVE;

		if (nic_rss & RSS_TUN_NVGRE_ENA)
			ethdev_rss |= RTE_ETH_RSS_NVGRE;
	}
	return ethdev_rss;
}

static int
nicvf_dev_reta_query(struct rte_eth_dev *dev,
		     struct rte_eth_rss_reta_entry64 *reta_conf,
		     uint16_t reta_size)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
	int ret, i, j;

	if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
		PMD_DRV_LOG(ERR,
			    "The size of the hash lookup table configured "
			    "(%u) doesn't match the number the hardware can "
			    "support (%u)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
		return -EINVAL;
	}

	ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
	if (ret)
		return ret;

	/* Copy RETA table; tbl is indexed across all RETA groups */
	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_ETH_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] =
					tbl[i * RTE_ETH_RETA_GROUP_SIZE + j];
	}

	return 0;
}

static int
nicvf_dev_reta_update(struct rte_eth_dev *dev,
		      struct rte_eth_rss_reta_entry64 *reta_conf,
		      uint16_t reta_size)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
	int ret, i, j;

	if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
		PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
			    "(%u) doesn't match the number the hardware can "
			    "support (%u)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
		return -EINVAL;
	}

	ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
	if (ret)
		return ret;

	/* Copy RETA table; tbl is indexed across all RETA groups */
	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_ETH_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				tbl[i * RTE_ETH_RETA_GROUP_SIZE + j] =
					reta_conf[i].reta[j];
	}

	return nicvf_rss_reta_update(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
}

static int
nicvf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (rss_conf->rss_key)
		nicvf_rss_get_key(nic, rss_conf->rss_key);

	rss_conf->rss_key_len = RSS_HASH_KEY_BYTE_SIZE;
	rss_conf->rss_hf = nicvf_rss_nic_to_ethdev(nic, nicvf_rss_get_cfg(nic));
	return 0;
}

static int
nicvf_dev_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint64_t nic_rss;

	if (rss_conf->rss_key &&
	    rss_conf->rss_key_len != RSS_HASH_KEY_BYTE_SIZE) {
		PMD_DRV_LOG(ERR, "Hash key size mismatch %u",
			    rss_conf->rss_key_len);
		return -EINVAL;
	}

	if (rss_conf->rss_key)
		nicvf_rss_set_key(nic, rss_conf->rss_key);

	nic_rss = nicvf_rss_ethdev_to_nic(nic, rss_conf->rss_hf);
	nicvf_rss_set_cfg(nic, nic_rss);
	return 0;
}

static int
nicvf_qset_cq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
		    struct nicvf_rxq *rxq, uint16_t qidx, uint32_t desc_cnt)
{
	const struct rte_memzone *rz;
	uint32_t ring_size = CMP_QUEUE_SZ_MAX * sizeof(union cq_entry_t);

	rz = rte_eth_dma_zone_reserve(dev, "cq_ring",
				      nicvf_netdev_qidx(nic, qidx), ring_size,
				      NICVF_CQ_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for cq hw ring");
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	rxq->phys = rz->iova;
	rxq->desc = rz->addr;
	rxq->qlen_mask = desc_cnt - 1;

	return 0;
}

static int
nicvf_qset_sq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
		    struct nicvf_txq *sq, uint16_t qidx, uint32_t desc_cnt)
{
	const struct rte_memzone *rz;
	uint32_t ring_size = SND_QUEUE_SZ_MAX * sizeof(union sq_entry_t);
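
	/*
	 * Like the CQ ring above, the SQ ring is reserved at its maximum size
	 * (SND_QUEUE_SZ_MAX entries). rte_eth_dma_zone_reserve() looks zones
	 * up by name, so reserving the maximum up front presumably allows the
	 * queue to be re-created later with a larger desc_cnt without
	 * reallocating the zone.
	 */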

	rz = rte_eth_dma_zone_reserve(dev, "sq",
				      nicvf_netdev_qidx(nic, qidx), ring_size,
				      NICVF_SQ_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for sq hw ring");
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	sq->phys = rz->iova;
	sq->desc = rz->addr;
	sq->qlen_mask = desc_cnt - 1;

	return 0;
}

static int
nicvf_qset_rbdr_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
		      uint32_t desc_cnt, uint32_t buffsz)
{
	struct nicvf_rbdr *rbdr;
	const struct rte_memzone *rz;
	uint32_t ring_size;

	assert(nic->rbdr == NULL);
	rbdr = rte_zmalloc_socket("rbdr", sizeof(struct nicvf_rbdr),
				  RTE_CACHE_LINE_SIZE, nic->node);
	if (rbdr == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr");
		return -ENOMEM;
	}

	ring_size = sizeof(struct rbdr_entry_t) * RBDR_QUEUE_SZ_MAX;
	rz = rte_eth_dma_zone_reserve(dev, "rbdr",
				      nicvf_netdev_qidx(nic, 0), ring_size,
				      NICVF_RBDR_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr desc ring");
		rte_free(rbdr);
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	rbdr->phys = rz->iova;
	rbdr->tail = 0;
	rbdr->next_tail = 0;
	rbdr->desc = rz->addr;
	rbdr->buffsz = buffsz;
	rbdr->qlen_mask = desc_cnt - 1;
	rbdr->rbdr_status =
		nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_STATUS0;
	rbdr->rbdr_door =
		nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_DOOR;

	nic->rbdr = rbdr;
	return 0;
}

static void
nicvf_rbdr_release_mbuf(struct rte_eth_dev *dev, struct nicvf *nic,
			nicvf_iova_addr_t phy)
{
	uint16_t qidx;
	void *obj;
	struct nicvf_rxq *rxq;
	uint16_t rx_start, rx_end;

	/* Get queue ranges for this VF */
	nicvf_rx_range(dev, nic, &rx_start, &rx_end);

	for (qidx = rx_start; qidx <= rx_end; qidx++) {
		rxq = dev->data->rx_queues[qidx];
		if (rxq->precharge_cnt) {
			obj = (void *)nicvf_mbuff_phy2virt(phy,
							   rxq->mbuf_phys_off);
			rte_mempool_put(rxq->pool, obj);
			rxq->precharge_cnt--;
			break;
		}
	}
}

static inline void
nicvf_rbdr_release_mbufs(struct rte_eth_dev *dev, struct nicvf *nic)
{
	uint32_t qlen_mask, head;
	struct rbdr_entry_t *entry;
	struct nicvf_rbdr *rbdr = nic->rbdr;

	qlen_mask = rbdr->qlen_mask;
	head = rbdr->head;
	while (head != rbdr->tail) {
		entry = rbdr->desc + head;
		nicvf_rbdr_release_mbuf(dev, nic, entry->full_addr);
		head++;
		head = head & qlen_mask;
	}
}

static inline void
nicvf_tx_queue_release_mbufs(struct nicvf_txq *txq)
{
	uint32_t head;

	head = txq->head;
	while (head != txq->tail) {
		if (txq->txbuffs[head]) {
			rte_pktmbuf_free_seg(txq->txbuffs[head]);
			txq->txbuffs[head] = NULL;
		}
		head++;
		head = head & txq->qlen_mask;
	}
}

static void
nicvf_tx_queue_reset(struct nicvf_txq *txq)
{
	uint32_t txq_desc_cnt = txq->qlen_mask + 1;

	memset(txq->desc, 0, sizeof(union sq_entry_t) * txq_desc_cnt);
	memset(txq->txbuffs, 0, sizeof(struct rte_mbuf *) * txq_desc_cnt);
	txq->tail = 0;
	txq->head = 0;
	txq->xmit_bufs = 0;
}

static inline int
nicvf_vf_start_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
			uint16_t qidx)
{
	struct nicvf_txq *txq;
	int ret;

	assert(qidx < MAX_SND_QUEUES_PER_QS);

	if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
	    RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
	txq->pool = NULL;
	ret = nicvf_qset_sq_config(nic, qidx, txq);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure sq VF%d %d %d",
			     nic->vf_id, qidx, ret);
		goto config_sq_error;
	}

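	/* The hardware SQ is configured at this point; reflect that in the
	 * ethdev queue state below.
	 */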
	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STARTED;
	return ret;

config_sq_error:
	nicvf_qset_sq_reclaim(nic, qidx);
	return ret;
}

static inline int
nicvf_vf_stop_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
		       uint16_t qidx)
{
	struct nicvf_txq *txq;
	int ret;

	assert(qidx < MAX_SND_QUEUES_PER_QS);

	if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
	    RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;

	ret = nicvf_qset_sq_reclaim(nic, qidx);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim sq VF%d %d %d",
			     nic->vf_id, qidx, ret);

	txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
	nicvf_tx_queue_release_mbufs(txq);
	nicvf_tx_queue_reset(txq);

	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STOPPED;
	return ret;
}

static inline int
nicvf_configure_cpi(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint16_t qidx, qcnt;
	int ret;

	/* Count started rx queues */
	for (qidx = qcnt = 0; qidx < dev->data->nb_rx_queues; qidx++)
		if (dev->data->rx_queue_state[qidx] ==
		    RTE_ETH_QUEUE_STATE_STARTED)
			qcnt++;

	nic->cpi_alg = CPI_ALG_NONE;
	ret = nicvf_mbox_config_cpi(nic, qcnt);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to configure CPI %d", ret);

	return ret;
}

static inline int
nicvf_configure_rss(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint64_t rsshf;
	int ret = -EINVAL;

	rsshf = nicvf_rss_ethdev_to_nic(nic,
			dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);
	PMD_DRV_LOG(INFO, "mode=%d rx_queues=%d loopback=%d rsshf=0x%" PRIx64,
		    dev->data->dev_conf.rxmode.mq_mode,
		    dev->data->nb_rx_queues,
		    dev->data->dev_conf.lpbk_mode, rsshf);

	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_NONE)
		ret = nicvf_rss_term(nic);
	else if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
		ret = nicvf_rss_config(nic, dev->data->nb_rx_queues, rsshf);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret);

	return ret;
}

static int
nicvf_configure_rss_reta(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	unsigned int idx, qmap_size;
	uint8_t qmap[RTE_MAX_QUEUES_PER_PORT];
	uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];

	if (nic->cpi_alg != CPI_ALG_NONE)
		return -EINVAL;

	/* Prepare queue map */
	for (idx = 0, qmap_size = 0; idx < dev->data->nb_rx_queues; idx++) {
		if (dev->data->rx_queue_state[idx] ==
		    RTE_ETH_QUEUE_STATE_STARTED)
			qmap[qmap_size++] = idx;
	}

	/* Update default RSS RETA */
	for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
		default_reta[idx] = qmap[idx % qmap_size];

	return nicvf_rss_reta_update(nic, default_reta,
				     NIC_MAX_RSS_IDR_TBL_SIZE);
}

static void
nicvf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct nicvf_txq *txq = dev->data->tx_queues[qid];

	PMD_INIT_FUNC_TRACE();

	if (txq) {
		if (txq->txbuffs != NULL) {
			nicvf_tx_queue_release_mbufs(txq);
			rte_free(txq->txbuffs);
			txq->txbuffs = NULL;
		}
		rte_free(txq);
		dev->data->tx_queues[qid] = NULL;
	}
}

static void
nicvf_set_tx_function(struct rte_eth_dev *dev)
{
	struct nicvf_txq *txq = NULL;
	size_t i;
	bool multiseg = false;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) {
			multiseg = true;
			break;
		}
	}

	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
	if (multiseg) {
		PMD_DRV_LOG(DEBUG, "Using multi-segment tx callback");
		dev->tx_pkt_burst = nicvf_xmit_pkts_multiseg;
	} else {
		PMD_DRV_LOG(DEBUG, "Using single-segment tx callback");
		dev->tx_pkt_burst = nicvf_xmit_pkts;
	}

	if (!txq)
		return;

	if (txq->pool_free == nicvf_single_pool_free_xmited_buffers)
		PMD_DRV_LOG(DEBUG, "Using single-mempool tx free method");
	else
		PMD_DRV_LOG(DEBUG, "Using multi-mempool tx free method");
}

static void
nicvf_set_rx_function(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);

	const eth_rx_burst_t rx_burst_func[2][2][2] = {
	/* Indexed as [scattered_rx][offload_cksum][vlan_strip], 1 = enabled */
		[0][0][0] = nicvf_recv_pkts_no_offload,
		[0][0][1] = nicvf_recv_pkts_vlan_strip,
		[0][1][0] = nicvf_recv_pkts_cksum,
		[0][1][1] = nicvf_recv_pkts_cksum_vlan_strip,
		[1][0][0] = nicvf_recv_pkts_multiseg_no_offload,
		[1][0][1] = nicvf_recv_pkts_multiseg_vlan_strip,
		[1][1][0] = nicvf_recv_pkts_multiseg_cksum,
		[1][1][1] = nicvf_recv_pkts_multiseg_cksum_vlan_strip,
	};

	dev->rx_pkt_burst =
		rx_burst_func[dev->data->scattered_rx]
			     [nic->offload_cksum][nic->vlan_strip];
}

static int
nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
			 uint16_t nb_desc, unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	uint16_t tx_free_thresh;
	bool is_single_pool;
	struct nicvf_txq *txq;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint64_t offloads;

	PMD_INIT_FUNC_TRACE();

	if (qidx >= MAX_SND_QUEUES_PER_QS)
		nic = nic->snicvf[qidx / MAX_SND_QUEUES_PER_QS - 1];

	qidx = qidx % MAX_SND_QUEUES_PER_QS;

	/* Socket id check */
	if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
			    nic->node, socket_id);

	/* Tx deferred start is not supported */
	if (tx_conf->tx_deferred_start) {
		PMD_INIT_LOG(ERR, "Tx deferred start not supported");
		return -EINVAL;
	}

	/* Roundup nb_desc to available qsize and validate max number of desc */
	nb_desc = nicvf_qsize_sq_roundup(nb_desc);
	if (nb_desc == 0) {
		PMD_INIT_LOG(ERR, "Value of nb_desc beyond available sq qsize");
		return -EINVAL;
	}

	/* Validate tx_free_thresh */
	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
				    tx_conf->tx_free_thresh :
				    NICVF_DEFAULT_TX_FREE_THRESH);

	if (tx_free_thresh > nb_desc ||
	    tx_free_thresh > NICVF_MAX_TX_FREE_THRESH) {
		PMD_INIT_LOG(ERR,
			     "tx_free_thresh must be less than the number of TX "
			     "descriptors. (tx_free_thresh=%u port=%d "
			     "queue=%d)", (unsigned int)tx_free_thresh,
			     (int)dev->data->port_id, (int)qidx);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
		PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
			   nicvf_netdev_qidx(nic, qidx));
		nicvf_dev_tx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));
		dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
	}

	/* Allocating tx queue data structure */
	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nicvf_txq),
				 RTE_CACHE_LINE_SIZE, nic->node);
	if (txq == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate txq=%d",
			     nicvf_netdev_qidx(nic, qidx));
		return -ENOMEM;
	}

	txq->nic = nic;
	txq->queue_id = qidx;
	txq->tx_free_thresh = tx_free_thresh;
	txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
	txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
	txq->offloads = offloads;

	is_single_pool = !!(offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE);

	/* Choose optimum free threshold value for multipool case */
	if (!is_single_pool) {
		txq->tx_free_thresh = (uint16_t)
			(tx_conf->tx_free_thresh == NICVF_DEFAULT_TX_FREE_THRESH ?
			 NICVF_TX_FREE_MPOOL_THRESH :
			 tx_conf->tx_free_thresh);
		txq->pool_free = nicvf_multi_pool_free_xmited_buffers;
	} else {
		txq->pool_free = nicvf_single_pool_free_xmited_buffers;
	}

	dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq;

	/* Allocate software ring */
	txq->txbuffs = rte_zmalloc_socket("txq->txbuffs",
					  nb_desc * sizeof(struct rte_mbuf *),
					  RTE_CACHE_LINE_SIZE, nic->node);

	if (txq->txbuffs == NULL) {
		nicvf_dev_tx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));
		return -ENOMEM;
	}

	if (nicvf_qset_sq_alloc(dev, nic, txq, qidx, nb_desc)) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx);
		nicvf_dev_tx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));
		return -ENOMEM;
	}

	nicvf_tx_queue_reset(txq);

	PMD_INIT_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p"
		     " phys=0x%" PRIx64 " offloads=0x%" PRIx64,
		     nicvf_netdev_qidx(nic, qidx), txq, nb_desc, txq->desc,
		     txq->phys, txq->offloads);

	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

static inline void
nicvf_rx_queue_release_mbufs(struct rte_eth_dev *dev, struct nicvf_rxq *rxq)
{
	uint32_t rxq_cnt;
	uint32_t nb_pkts, released_pkts = 0;
	uint32_t refill_cnt = 0;
	struct rte_mbuf *rx_pkts[NICVF_MAX_RX_FREE_THRESH];

	if (dev->rx_pkt_burst == NULL)
		return;

	while ((rxq_cnt = nicvf_dev_rx_queue_count(rxq))) {
		nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts,
					    NICVF_MAX_RX_FREE_THRESH);
		PMD_DRV_LOG(INFO, "nb_pkts=%d rxq_cnt=%d", nb_pkts, rxq_cnt);
		while (nb_pkts) {
			rte_pktmbuf_free_seg(rx_pkts[--nb_pkts]);
			released_pkts++;
		}
	}

	refill_cnt += nicvf_dev_rbdr_refill(dev,
			nicvf_netdev_qidx(rxq->nic, rxq->queue_id));

	PMD_DRV_LOG(INFO, "free_cnt=%d refill_cnt=%d",
		    released_pkts, refill_cnt);
}

static void
nicvf_rx_queue_reset(struct nicvf_rxq *rxq)
{
rxq->head = 0; 1156aa0d976eSJerin Jacob rxq->available_space = 0; 1157aa0d976eSJerin Jacob rxq->recv_buffers = 0; 1158aa0d976eSJerin Jacob } 1159aa0d976eSJerin Jacob 116086b4eb42SJerin Jacob static inline int 116171e76186SKamil Rytarowski nicvf_vf_start_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic, 116271e76186SKamil Rytarowski uint16_t qidx) 116386b4eb42SJerin Jacob { 116486b4eb42SJerin Jacob struct nicvf_rxq *rxq; 116586b4eb42SJerin Jacob int ret; 116686b4eb42SJerin Jacob 116771e76186SKamil Rytarowski assert(qidx < MAX_RCV_QUEUES_PER_QS); 116871e76186SKamil Rytarowski 116971e76186SKamil Rytarowski if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] == 117071e76186SKamil Rytarowski RTE_ETH_QUEUE_STATE_STARTED) 117186b4eb42SJerin Jacob return 0; 117286b4eb42SJerin Jacob 117386b4eb42SJerin Jacob /* Update rbdr pointer to all rxq */ 117471e76186SKamil Rytarowski rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)]; 117586b4eb42SJerin Jacob rxq->shared_rbdr = nic->rbdr; 117686b4eb42SJerin Jacob 117786b4eb42SJerin Jacob ret = nicvf_qset_rq_config(nic, qidx, rxq); 117886b4eb42SJerin Jacob if (ret) { 117971e76186SKamil Rytarowski PMD_INIT_LOG(ERR, "Failed to configure rq VF%d %d %d", 118071e76186SKamil Rytarowski nic->vf_id, qidx, ret); 118186b4eb42SJerin Jacob goto config_rq_error; 118286b4eb42SJerin Jacob } 118386b4eb42SJerin Jacob ret = nicvf_qset_cq_config(nic, qidx, rxq); 118486b4eb42SJerin Jacob if (ret) { 118571e76186SKamil Rytarowski PMD_INIT_LOG(ERR, "Failed to configure cq VF%d %d %d", 118671e76186SKamil Rytarowski nic->vf_id, qidx, ret); 118786b4eb42SJerin Jacob goto config_cq_error; 118886b4eb42SJerin Jacob } 118986b4eb42SJerin Jacob 119071e76186SKamil Rytarowski dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] = 119171e76186SKamil Rytarowski RTE_ETH_QUEUE_STATE_STARTED; 119286b4eb42SJerin Jacob return 0; 119386b4eb42SJerin Jacob 119486b4eb42SJerin Jacob config_cq_error: 119586b4eb42SJerin Jacob nicvf_qset_cq_reclaim(nic, qidx); 119686b4eb42SJerin Jacob config_rq_error: 119786b4eb42SJerin Jacob nicvf_qset_rq_reclaim(nic, qidx); 119886b4eb42SJerin Jacob return ret; 119986b4eb42SJerin Jacob } 120086b4eb42SJerin Jacob 120186b4eb42SJerin Jacob static inline int 1202627d4ba2SKamil Rytarowski nicvf_vf_stop_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic, 1203627d4ba2SKamil Rytarowski uint16_t qidx) 120486b4eb42SJerin Jacob { 120586b4eb42SJerin Jacob struct nicvf_rxq *rxq; 120686b4eb42SJerin Jacob int ret, other_error; 120786b4eb42SJerin Jacob 1208627d4ba2SKamil Rytarowski if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] == 1209627d4ba2SKamil Rytarowski RTE_ETH_QUEUE_STATE_STOPPED) 121086b4eb42SJerin Jacob return 0; 121186b4eb42SJerin Jacob 121286b4eb42SJerin Jacob ret = nicvf_qset_rq_reclaim(nic, qidx); 121386b4eb42SJerin Jacob if (ret) 1214627d4ba2SKamil Rytarowski PMD_INIT_LOG(ERR, "Failed to reclaim rq VF%d %d %d", 1215627d4ba2SKamil Rytarowski nic->vf_id, qidx, ret); 121686b4eb42SJerin Jacob 121786b4eb42SJerin Jacob other_error = ret; 1218627d4ba2SKamil Rytarowski rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)]; 12196d3cbd56SKamil Rytarowski nicvf_rx_queue_release_mbufs(dev, rxq); 122086b4eb42SJerin Jacob nicvf_rx_queue_reset(rxq); 122186b4eb42SJerin Jacob 122286b4eb42SJerin Jacob ret = nicvf_qset_cq_reclaim(nic, qidx); 122386b4eb42SJerin Jacob if (ret) 1224627d4ba2SKamil Rytarowski PMD_INIT_LOG(ERR, "Failed to reclaim cq VF%d %d %d", 1225627d4ba2SKamil Rytarowski nic->vf_id, qidx, ret); 122686b4eb42SJerin Jacob 122786b4eb42SJerin Jacob 
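/* Fold the CQ reclaim status into the accumulated error so a failure
 * in either reclaim step is reported to the caller.
 */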
other_error |= ret; 1228627d4ba2SKamil Rytarowski dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] = 1229627d4ba2SKamil Rytarowski RTE_ETH_QUEUE_STATE_STOPPED; 123086b4eb42SJerin Jacob return other_error; 123186b4eb42SJerin Jacob } 123286b4eb42SJerin Jacob 1233aa0d976eSJerin Jacob static void 12347483341aSXueming Li nicvf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid) 1235aa0d976eSJerin Jacob { 1236aa0d976eSJerin Jacob PMD_INIT_FUNC_TRACE(); 1237aa0d976eSJerin Jacob 12387483341aSXueming Li rte_free(dev->data->rx_queues[qid]); 1239aa0d976eSJerin Jacob } 1240aa0d976eSJerin Jacob 1241aa0d976eSJerin Jacob static int 124286b4eb42SJerin Jacob nicvf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx) 124386b4eb42SJerin Jacob { 124471e76186SKamil Rytarowski struct nicvf *nic = nicvf_pmd_priv(dev); 124586b4eb42SJerin Jacob int ret; 124686b4eb42SJerin Jacob 124771e76186SKamil Rytarowski if (qidx >= MAX_RCV_QUEUES_PER_QS) 124871e76186SKamil Rytarowski nic = nic->snicvf[(qidx / MAX_RCV_QUEUES_PER_QS - 1)]; 124971e76186SKamil Rytarowski 125071e76186SKamil Rytarowski qidx = qidx % MAX_RCV_QUEUES_PER_QS; 125171e76186SKamil Rytarowski 125271e76186SKamil Rytarowski ret = nicvf_vf_start_rx_queue(dev, nic, qidx); 125386b4eb42SJerin Jacob if (ret) 125486b4eb42SJerin Jacob return ret; 125586b4eb42SJerin Jacob 125686b4eb42SJerin Jacob ret = nicvf_configure_cpi(dev); 125786b4eb42SJerin Jacob if (ret) 125886b4eb42SJerin Jacob return ret; 125986b4eb42SJerin Jacob 126086b4eb42SJerin Jacob return nicvf_configure_rss_reta(dev); 126186b4eb42SJerin Jacob } 126286b4eb42SJerin Jacob 126386b4eb42SJerin Jacob static int 126486b4eb42SJerin Jacob nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx) 126586b4eb42SJerin Jacob { 126686b4eb42SJerin Jacob int ret; 1267627d4ba2SKamil Rytarowski struct nicvf *nic = nicvf_pmd_priv(dev); 126886b4eb42SJerin Jacob 1269627d4ba2SKamil Rytarowski if (qidx >= MAX_SND_QUEUES_PER_QS) 1270627d4ba2SKamil Rytarowski nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)]; 1271627d4ba2SKamil Rytarowski 1272627d4ba2SKamil Rytarowski qidx = qidx % MAX_RCV_QUEUES_PER_QS; 1273627d4ba2SKamil Rytarowski 1274627d4ba2SKamil Rytarowski ret = nicvf_vf_stop_rx_queue(dev, nic, qidx); 127586b4eb42SJerin Jacob ret |= nicvf_configure_cpi(dev); 127686b4eb42SJerin Jacob ret |= nicvf_configure_rss_reta(dev); 127786b4eb42SJerin Jacob return ret; 127886b4eb42SJerin Jacob } 127986b4eb42SJerin Jacob 128086b4eb42SJerin Jacob static int 1281fc1f6c62SJerin Jacob nicvf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx) 1282fc1f6c62SJerin Jacob { 128371e76186SKamil Rytarowski struct nicvf *nic = nicvf_pmd_priv(dev); 128471e76186SKamil Rytarowski 128571e76186SKamil Rytarowski if (qidx >= MAX_SND_QUEUES_PER_QS) 128671e76186SKamil Rytarowski nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)]; 128771e76186SKamil Rytarowski 128871e76186SKamil Rytarowski qidx = qidx % MAX_SND_QUEUES_PER_QS; 128971e76186SKamil Rytarowski 129071e76186SKamil Rytarowski return nicvf_vf_start_tx_queue(dev, nic, qidx); 1291fc1f6c62SJerin Jacob } 1292fc1f6c62SJerin Jacob 1293fc1f6c62SJerin Jacob static int 1294fc1f6c62SJerin Jacob nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx) 1295fc1f6c62SJerin Jacob { 1296627d4ba2SKamil Rytarowski struct nicvf *nic = nicvf_pmd_priv(dev); 1297627d4ba2SKamil Rytarowski 1298627d4ba2SKamil Rytarowski if (qidx >= MAX_SND_QUEUES_PER_QS) 1299627d4ba2SKamil Rytarowski nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)]; 1300627d4ba2SKamil Rytarowski 
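/* Translate the ethdev-level queue index into an index local to the
 * qset of the VF selected above.
 */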
1301627d4ba2SKamil Rytarowski qidx = qidx % MAX_SND_QUEUES_PER_QS; 1302627d4ba2SKamil Rytarowski 1303627d4ba2SKamil Rytarowski return nicvf_vf_stop_tx_queue(dev, nic, qidx); 1304fc1f6c62SJerin Jacob } 1305fc1f6c62SJerin Jacob 13065c7ccb26SJerin Jacob static inline void 13075c7ccb26SJerin Jacob nicvf_rxq_mbuf_setup(struct nicvf_rxq *rxq) 13085c7ccb26SJerin Jacob { 13095c7ccb26SJerin Jacob uintptr_t p; 13105c7ccb26SJerin Jacob struct rte_mbuf mb_def; 1311279d3319SRakesh Kudurumalla struct nicvf *nic = rxq->nic; 13125c7ccb26SJerin Jacob 13135c7ccb26SJerin Jacob RTE_BUILD_BUG_ON(sizeof(union mbuf_initializer) != 8); 131495b097c8SJerin Jacob RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0); 131595b097c8SJerin Jacob RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) - 131695b097c8SJerin Jacob offsetof(struct rte_mbuf, data_off) != 2); 131795b097c8SJerin Jacob RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) - 131895b097c8SJerin Jacob offsetof(struct rte_mbuf, data_off) != 4); 131995b097c8SJerin Jacob RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) - 132095b097c8SJerin Jacob offsetof(struct rte_mbuf, data_off) != 6); 13215e64c812SPavan Nikhilesh RTE_BUILD_BUG_ON(offsetof(struct nicvf_rxq, rxq_fastpath_data_end) - 13225e64c812SPavan Nikhilesh offsetof(struct nicvf_rxq, 13235e64c812SPavan Nikhilesh rxq_fastpath_data_start) > 128); 13245c7ccb26SJerin Jacob mb_def.nb_segs = 1; 1325279d3319SRakesh Kudurumalla mb_def.data_off = RTE_PKTMBUF_HEADROOM + (nic->skip_bytes); 13265c7ccb26SJerin Jacob mb_def.port = rxq->port_id; 13275c7ccb26SJerin Jacob rte_mbuf_refcnt_set(&mb_def, 1); 13285c7ccb26SJerin Jacob 13295c7ccb26SJerin Jacob /* Prevent compiler reordering: rearm_data covers previous fields */ 13305c7ccb26SJerin Jacob rte_compiler_barrier(); 13315c7ccb26SJerin Jacob p = (uintptr_t)&mb_def.rearm_data; 13325c7ccb26SJerin Jacob rxq->mbuf_initializer.value = *(uint64_t *)p; 13335c7ccb26SJerin Jacob } 1334394014bcSKamil Rytarowski 1335fc1f6c62SJerin Jacob static int 1336aa0d976eSJerin Jacob nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, 1337aa0d976eSJerin Jacob uint16_t nb_desc, unsigned int socket_id, 1338aa0d976eSJerin Jacob const struct rte_eth_rxconf *rx_conf, 1339aa0d976eSJerin Jacob struct rte_mempool *mp) 1340aa0d976eSJerin Jacob { 1341aa0d976eSJerin Jacob uint16_t rx_free_thresh; 1342aa0d976eSJerin Jacob struct nicvf_rxq *rxq; 1343aa0d976eSJerin Jacob struct nicvf *nic = nicvf_pmd_priv(dev); 1344a4996bd8SWei Dai uint64_t offloads; 1345279d3319SRakesh Kudurumalla uint32_t buffsz; 1346279d3319SRakesh Kudurumalla struct rte_pktmbuf_pool_private *mbp_priv; 1347aa0d976eSJerin Jacob 1348aa0d976eSJerin Jacob PMD_INIT_FUNC_TRACE(); 1349aa0d976eSJerin Jacob 1350279d3319SRakesh Kudurumalla /* First skip check */ 1351279d3319SRakesh Kudurumalla mbp_priv = rte_mempool_get_priv(mp); 1352279d3319SRakesh Kudurumalla buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM; 1353279d3319SRakesh Kudurumalla if (buffsz < (uint32_t)(nic->skip_bytes)) { 1354279d3319SRakesh Kudurumalla PMD_INIT_LOG(ERR, "First skip is more than configured buffer size"); 1355279d3319SRakesh Kudurumalla return -EINVAL; 1356279d3319SRakesh Kudurumalla } 1357279d3319SRakesh Kudurumalla 135821e3fb00SKamil Rytarowski if (qidx >= MAX_RCV_QUEUES_PER_QS) 135921e3fb00SKamil Rytarowski nic = nic->snicvf[qidx / MAX_RCV_QUEUES_PER_QS - 1]; 136021e3fb00SKamil Rytarowski 136121e3fb00SKamil Rytarowski qidx = qidx % MAX_RCV_QUEUES_PER_QS; 136221e3fb00SKamil Rytarowski 1363aa0d976eSJerin Jacob /* Socket id check 
*/ 1364aa0d976eSJerin Jacob if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node) 1365aa0d976eSJerin Jacob PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d", 1366aa0d976eSJerin Jacob socket_id, nic->node); 1367aa0d976eSJerin Jacob 1368394014bcSKamil Rytarowski /* Mempool memory must be contiguous, so must be one memory segment*/ 1369aa0d976eSJerin Jacob if (mp->nb_mem_chunks != 1) { 1370394014bcSKamil Rytarowski PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages"); 1371394014bcSKamil Rytarowski return -EINVAL; 1372394014bcSKamil Rytarowski } 1373394014bcSKamil Rytarowski 1374394014bcSKamil Rytarowski /* Mempool memory must be physically contiguous */ 1375c47d7b90SAndrew Rybchenko if (mp->flags & RTE_MEMPOOL_F_NO_IOVA_CONTIG) { 1376394014bcSKamil Rytarowski PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous"); 1377aa0d976eSJerin Jacob return -EINVAL; 1378aa0d976eSJerin Jacob } 1379aa0d976eSJerin Jacob 1380aa0d976eSJerin Jacob /* Rx deferred start is not supported */ 1381aa0d976eSJerin Jacob if (rx_conf->rx_deferred_start) { 1382aa0d976eSJerin Jacob PMD_INIT_LOG(ERR, "Rx deferred start not supported"); 1383aa0d976eSJerin Jacob return -EINVAL; 1384aa0d976eSJerin Jacob } 1385aa0d976eSJerin Jacob 1386aa0d976eSJerin Jacob /* Roundup nb_desc to available qsize and validate max number of desc */ 1387aa0d976eSJerin Jacob nb_desc = nicvf_qsize_cq_roundup(nb_desc); 1388aa0d976eSJerin Jacob if (nb_desc == 0) { 1389aa0d976eSJerin Jacob PMD_INIT_LOG(ERR, "Value nb_desc beyond available hw cq qsize"); 1390aa0d976eSJerin Jacob return -EINVAL; 1391aa0d976eSJerin Jacob } 1392aa0d976eSJerin Jacob 1393279d3319SRakesh Kudurumalla 1394aa0d976eSJerin Jacob /* Check rx_free_thresh upper bound */ 1395aa0d976eSJerin Jacob rx_free_thresh = (uint16_t)((rx_conf->rx_free_thresh) ? 
1396aa0d976eSJerin Jacob rx_conf->rx_free_thresh : 1397aa0d976eSJerin Jacob NICVF_DEFAULT_RX_FREE_THRESH); 1398aa0d976eSJerin Jacob if (rx_free_thresh > NICVF_MAX_RX_FREE_THRESH || 1399aa0d976eSJerin Jacob rx_free_thresh >= nb_desc * .75) { 1400aa0d976eSJerin Jacob PMD_INIT_LOG(ERR, "rx_free_thresh greater than expected %d", 1401aa0d976eSJerin Jacob rx_free_thresh); 1402aa0d976eSJerin Jacob return -EINVAL; 1403aa0d976eSJerin Jacob } 1404aa0d976eSJerin Jacob 1405aa0d976eSJerin Jacob /* Free memory prior to re-allocation if needed */ 140621e3fb00SKamil Rytarowski if (dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) { 1407aa0d976eSJerin Jacob PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d", 140821e3fb00SKamil Rytarowski nicvf_netdev_qidx(nic, qidx)); 14097483341aSXueming Li nicvf_dev_rx_queue_release(dev, nicvf_netdev_qidx(nic, qidx)); 141021e3fb00SKamil Rytarowski dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL; 1411aa0d976eSJerin Jacob } 1412aa0d976eSJerin Jacob 1413aa0d976eSJerin Jacob /* Allocate rxq memory */ 1414aa0d976eSJerin Jacob rxq = rte_zmalloc_socket("ethdev rx queue", sizeof(struct nicvf_rxq), 1415aa0d976eSJerin Jacob RTE_CACHE_LINE_SIZE, nic->node); 1416aa0d976eSJerin Jacob if (rxq == NULL) { 141721e3fb00SKamil Rytarowski PMD_INIT_LOG(ERR, "Failed to allocate rxq=%d", 141821e3fb00SKamil Rytarowski nicvf_netdev_qidx(nic, qidx)); 1419aa0d976eSJerin Jacob return -ENOMEM; 1420aa0d976eSJerin Jacob } 1421aa0d976eSJerin Jacob 1422aa0d976eSJerin Jacob rxq->nic = nic; 1423aa0d976eSJerin Jacob rxq->pool = mp; 1424aa0d976eSJerin Jacob rxq->queue_id = qidx; 1425aa0d976eSJerin Jacob rxq->port_id = dev->data->port_id; 1426aa0d976eSJerin Jacob rxq->rx_free_thresh = rx_free_thresh; 1427aa0d976eSJerin Jacob rxq->rx_drop_en = rx_conf->rx_drop_en; 1428aa0d976eSJerin Jacob rxq->cq_status = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_STATUS; 1429aa0d976eSJerin Jacob rxq->cq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_DOOR; 1430aa0d976eSJerin Jacob rxq->precharge_cnt = 0; 1431e2c519b3SJerin Jacob 1432e2c519b3SJerin Jacob if (nicvf_hw_cap(nic) & NICVF_CAP_CQE_RX2) 1433e2c519b3SJerin Jacob rxq->rbptr_offset = NICVF_CQE_RX2_RBPTR_WORD; 1434e2c519b3SJerin Jacob else 1435aa0d976eSJerin Jacob rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD; 1436aa0d976eSJerin Jacob 14377483341aSXueming Li dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq; 14387483341aSXueming Li 14395c7ccb26SJerin Jacob nicvf_rxq_mbuf_setup(rxq); 1440e2c519b3SJerin Jacob 1441aa0d976eSJerin Jacob /* Alloc completion queue */ 14426d3cbd56SKamil Rytarowski if (nicvf_qset_cq_alloc(dev, nic, rxq, rxq->queue_id, nb_desc)) { 1443aa0d976eSJerin Jacob PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id); 14447483341aSXueming Li nicvf_dev_rx_queue_release(dev, nicvf_netdev_qidx(nic, qidx)); 1445aa0d976eSJerin Jacob return -ENOMEM; 1446aa0d976eSJerin Jacob } 1447aa0d976eSJerin Jacob 1448aa0d976eSJerin Jacob nicvf_rx_queue_reset(rxq); 1449aa0d976eSJerin Jacob 1450a4996bd8SWei Dai offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads; 1451c97da2cbSMaciej Czekaj PMD_INIT_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d)" 1452c97da2cbSMaciej Czekaj " phy=0x%" PRIx64 " offloads=0x%" PRIx64, 145321e3fb00SKamil Rytarowski nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc, 1454a4996bd8SWei Dai rte_mempool_avail_count(mp), rxq->phys, offloads); 1455aa0d976eSJerin Jacob 145621e3fb00SKamil Rytarowski dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] = 145721e3fb00SKamil Rytarowski 
RTE_ETH_QUEUE_STATE_STOPPED; 1458aa0d976eSJerin Jacob return 0; 1459aa0d976eSJerin Jacob } 1460aa0d976eSJerin Jacob 1461bdad90d1SIvan Ilchenko static int 1462dcd7b1e1SJerin Jacob nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 1463dcd7b1e1SJerin Jacob { 1464dcd7b1e1SJerin Jacob struct nicvf *nic = nicvf_pmd_priv(dev); 1465c0802544SFerruh Yigit struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 1466dcd7b1e1SJerin Jacob 1467dcd7b1e1SJerin Jacob PMD_INIT_FUNC_TRACE(); 1468dcd7b1e1SJerin Jacob 1469ba2d05abSJerin Jacob /* Autonegotiation may be disabled */ 1470295968d1SFerruh Yigit dev_info->speed_capa = RTE_ETH_LINK_SPEED_FIXED; 1471295968d1SFerruh Yigit dev_info->speed_capa |= RTE_ETH_LINK_SPEED_10M | RTE_ETH_LINK_SPEED_100M | 1472295968d1SFerruh Yigit RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G; 1473ba2d05abSJerin Jacob if (nicvf_hw_version(nic) != PCI_SUB_DEVICE_ID_CN81XX_NICVF) 1474295968d1SFerruh Yigit dev_info->speed_capa |= RTE_ETH_LINK_SPEED_40G; 1475ba2d05abSJerin Jacob 147635b2d13fSOlivier Matz dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU; 147735b2d13fSOlivier Matz dev_info->max_rx_pktlen = NIC_HW_MAX_MTU + RTE_ETHER_HDR_LEN; 147821e3fb00SKamil Rytarowski dev_info->max_rx_queues = 147921e3fb00SKamil Rytarowski (uint16_t)MAX_RCV_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1); 148021e3fb00SKamil Rytarowski dev_info->max_tx_queues = 148121e3fb00SKamil Rytarowski (uint16_t)MAX_SND_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1); 1482dcd7b1e1SJerin Jacob dev_info->max_mac_addrs = 1; 1483eac901ceSJan Blunck dev_info->max_vfs = pci_dev->max_vfs; 1484dcd7b1e1SJerin Jacob 1485ed665c3eSHanumanth Pothula dev_info->max_mtu = dev_info->max_rx_pktlen - 1486ed665c3eSHanumanth Pothula (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN); 1487ed665c3eSHanumanth Pothula dev_info->min_mtu = dev_info->min_rx_bufsize - NIC_HW_L2_OVERHEAD; 1488ed665c3eSHanumanth Pothula 1489c97da2cbSMaciej Czekaj dev_info->rx_offload_capa = NICVF_RX_OFFLOAD_CAPA; 1490c97da2cbSMaciej Czekaj dev_info->tx_offload_capa = NICVF_TX_OFFLOAD_CAPA; 1491c97da2cbSMaciej Czekaj dev_info->rx_queue_offload_capa = NICVF_RX_OFFLOAD_CAPA; 1492c97da2cbSMaciej Czekaj dev_info->tx_queue_offload_capa = NICVF_TX_OFFLOAD_CAPA; 1493dcd7b1e1SJerin Jacob 1494dcd7b1e1SJerin Jacob dev_info->reta_size = nic->rss_info.rss_size; 1495dcd7b1e1SJerin Jacob dev_info->hash_key_size = RSS_HASH_KEY_BYTE_SIZE; 1496dcd7b1e1SJerin Jacob dev_info->flow_type_rss_offloads = NICVF_RSS_OFFLOAD_PASS1; 1497dcd7b1e1SJerin Jacob if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) 1498dcd7b1e1SJerin Jacob dev_info->flow_type_rss_offloads |= NICVF_RSS_OFFLOAD_TUNNEL; 1499dcd7b1e1SJerin Jacob 1500dcd7b1e1SJerin Jacob dev_info->default_rxconf = (struct rte_eth_rxconf) { 1501dcd7b1e1SJerin Jacob .rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH, 1502dcd7b1e1SJerin Jacob .rx_drop_en = 0, 1503dcd7b1e1SJerin Jacob }; 1504dcd7b1e1SJerin Jacob 1505dcd7b1e1SJerin Jacob dev_info->default_txconf = (struct rte_eth_txconf) { 1506dcd7b1e1SJerin Jacob .tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH, 1507295968d1SFerruh Yigit .offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | 1508295968d1SFerruh Yigit RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | 1509295968d1SFerruh Yigit RTE_ETH_TX_OFFLOAD_UDP_CKSUM | 1510295968d1SFerruh Yigit RTE_ETH_TX_OFFLOAD_TCP_CKSUM, 1511dcd7b1e1SJerin Jacob }; 1512bdad90d1SIvan Ilchenko 1513bdad90d1SIvan Ilchenko return 0; 1514dcd7b1e1SJerin Jacob } 1515dcd7b1e1SJerin Jacob 1516df6e0a06SSantosh Shukla static nicvf_iova_addr_t 1517394014bcSKamil Rytarowski 
rbdr_rte_mempool_get(void *dev, void *opaque) 15187413feeeSJerin Jacob { 15197413feeeSJerin Jacob uint16_t qidx; 15207413feeeSJerin Jacob uintptr_t mbuf; 15217413feeeSJerin Jacob struct nicvf_rxq *rxq; 1522394014bcSKamil Rytarowski struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)dev; 152321e3fb00SKamil Rytarowski struct nicvf *nic = (struct nicvf *)opaque; 152421e3fb00SKamil Rytarowski uint16_t rx_start, rx_end; 15257413feeeSJerin Jacob 152621e3fb00SKamil Rytarowski /* Get queue ranges for this VF */ 152721e3fb00SKamil Rytarowski nicvf_rx_range(eth_dev, nic, &rx_start, &rx_end); 152821e3fb00SKamil Rytarowski 152921e3fb00SKamil Rytarowski for (qidx = rx_start; qidx <= rx_end; qidx++) { 1530394014bcSKamil Rytarowski rxq = eth_dev->data->rx_queues[qidx]; 15317413feeeSJerin Jacob /* Maintain equal buffer count across all pools */ 15327413feeeSJerin Jacob if (rxq->precharge_cnt >= rxq->qlen_mask) 15337413feeeSJerin Jacob continue; 15347413feeeSJerin Jacob rxq->precharge_cnt++; 15357413feeeSJerin Jacob mbuf = (uintptr_t)rte_pktmbuf_alloc(rxq->pool); 15367413feeeSJerin Jacob if (mbuf) 15377413feeeSJerin Jacob return nicvf_mbuff_virt2phy(mbuf, rxq->mbuf_phys_off); 15387413feeeSJerin Jacob } 15397413feeeSJerin Jacob return 0; 15407413feeeSJerin Jacob } 15417413feeeSJerin Jacob 15427413feeeSJerin Jacob static int 154371e76186SKamil Rytarowski nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz) 15447413feeeSJerin Jacob { 15457413feeeSJerin Jacob int ret; 154634c2e702SJerin Jacob uint16_t qidx, data_off; 15477413feeeSJerin Jacob uint32_t total_rxq_desc, nb_rbdr_desc, exp_buffs; 15487413feeeSJerin Jacob uint64_t mbuf_phys_off = 0; 15497413feeeSJerin Jacob struct nicvf_rxq *rxq; 15507413feeeSJerin Jacob struct rte_mbuf *mbuf; 155171e76186SKamil Rytarowski uint16_t rx_start, rx_end; 155271e76186SKamil Rytarowski uint16_t tx_start, tx_end; 1553d3bf2564SRakesh Kudurumalla int mask; 15547413feeeSJerin Jacob 15557413feeeSJerin Jacob PMD_INIT_FUNC_TRACE(); 15567413feeeSJerin Jacob 15577413feeeSJerin Jacob /* Userspace process exited without proper shutdown in last run */ 15587413feeeSJerin Jacob if (nicvf_qset_rbdr_active(nic, 0)) 155971e76186SKamil Rytarowski nicvf_vf_stop(dev, nic, false); 156071e76186SKamil Rytarowski 156171e76186SKamil Rytarowski /* Get queue ranges for this VF */ 156271e76186SKamil Rytarowski nicvf_rx_range(dev, nic, &rx_start, &rx_end); 15637413feeeSJerin Jacob 15647413feeeSJerin Jacob /* 15657413feeeSJerin Jacob * Thunderx nicvf PMD can support more than one pool per port only when 15667413feeeSJerin Jacob * 1) Data payload size is same across all the pools in given port 15677413feeeSJerin Jacob * AND 15687413feeeSJerin Jacob * 2) All mbuffs in the pools are from the same hugepage 15697413feeeSJerin Jacob * AND 15707413feeeSJerin Jacob * 3) Mbuff metadata size is same across all the pools in given port 15717413feeeSJerin Jacob * 15727413feeeSJerin Jacob * This is to support existing application that uses multiple pool/port. 15737413feeeSJerin Jacob * But, the purpose of using multipool for QoS will not be addressed. 
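* For example (illustrative only, not from this driver): two pools created
* with rte_pktmbuf_pool_create(name, n, cache, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
* node) that differ only in element count 'n' satisfy conditions 1) and 3),
* and also 2) when both happen to be carved out of the same hugepage.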
15747413feeeSJerin Jacob * 15757413feeeSJerin Jacob */ 15767413feeeSJerin Jacob 15777413feeeSJerin Jacob /* Validate mempool attributes */ 157871e76186SKamil Rytarowski for (qidx = rx_start; qidx <= rx_end; qidx++) { 15797413feeeSJerin Jacob rxq = dev->data->rx_queues[qidx]; 15807413feeeSJerin Jacob rxq->mbuf_phys_off = nicvf_mempool_phy_offset(rxq->pool); 15817413feeeSJerin Jacob mbuf = rte_pktmbuf_alloc(rxq->pool); 15827413feeeSJerin Jacob if (mbuf == NULL) { 158371e76186SKamil Rytarowski PMD_INIT_LOG(ERR, "Failed allocate mbuf VF%d qid=%d " 158471e76186SKamil Rytarowski "pool=%s", 158571e76186SKamil Rytarowski nic->vf_id, qidx, rxq->pool->name); 15867413feeeSJerin Jacob return -ENOMEM; 15877413feeeSJerin Jacob } 158834c2e702SJerin Jacob data_off = nicvf_mbuff_meta_length(mbuf); 158934c2e702SJerin Jacob data_off += RTE_PKTMBUF_HEADROOM; 15907413feeeSJerin Jacob rte_pktmbuf_free(mbuf); 15917413feeeSJerin Jacob 159234c2e702SJerin Jacob if (data_off % RTE_CACHE_LINE_SIZE) { 159334c2e702SJerin Jacob PMD_INIT_LOG(ERR, "%s: unaligned data_off=%d delta=%d", 159434c2e702SJerin Jacob rxq->pool->name, data_off, 159534c2e702SJerin Jacob data_off % RTE_CACHE_LINE_SIZE); 159634c2e702SJerin Jacob return -EINVAL; 159734c2e702SJerin Jacob } 159834c2e702SJerin Jacob rxq->mbuf_phys_off -= data_off; 1599279d3319SRakesh Kudurumalla rxq->mbuf_phys_off -= nic->skip_bytes; 160034c2e702SJerin Jacob 16017413feeeSJerin Jacob if (mbuf_phys_off == 0) 16027413feeeSJerin Jacob mbuf_phys_off = rxq->mbuf_phys_off; 16037413feeeSJerin Jacob if (mbuf_phys_off != rxq->mbuf_phys_off) { 160471e76186SKamil Rytarowski PMD_INIT_LOG(ERR, "pool params not same,%s VF%d %" 160571e76186SKamil Rytarowski PRIx64, rxq->pool->name, nic->vf_id, 160671e76186SKamil Rytarowski mbuf_phys_off); 16077413feeeSJerin Jacob return -EINVAL; 16087413feeeSJerin Jacob } 16097413feeeSJerin Jacob } 16107413feeeSJerin Jacob 16117413feeeSJerin Jacob /* Check the level of buffers in the pool */ 16127413feeeSJerin Jacob total_rxq_desc = 0; 161371e76186SKamil Rytarowski for (qidx = rx_start; qidx <= rx_end; qidx++) { 16147413feeeSJerin Jacob rxq = dev->data->rx_queues[qidx]; 16157413feeeSJerin Jacob /* Count total numbers of rxq descs */ 16167413feeeSJerin Jacob total_rxq_desc += rxq->qlen_mask + 1; 16177413feeeSJerin Jacob exp_buffs = RTE_MEMPOOL_CACHE_MAX_SIZE + rxq->rx_free_thresh; 16186d3cbd56SKamil Rytarowski exp_buffs *= dev->data->nb_rx_queues; 1619a0fd91ceSBruce Richardson if (rte_mempool_avail_count(rxq->pool) < exp_buffs) { 16207413feeeSJerin Jacob PMD_INIT_LOG(ERR, "Buff shortage in pool=%s (%d/%d)", 16217413feeeSJerin Jacob rxq->pool->name, 1622a0fd91ceSBruce Richardson rte_mempool_avail_count(rxq->pool), 16237413feeeSJerin Jacob exp_buffs); 16247413feeeSJerin Jacob return -ENOENT; 16257413feeeSJerin Jacob } 16267413feeeSJerin Jacob } 16277413feeeSJerin Jacob 16287413feeeSJerin Jacob /* Check RBDR desc overflow */ 16297413feeeSJerin Jacob ret = nicvf_qsize_rbdr_roundup(total_rxq_desc); 16307413feeeSJerin Jacob if (ret == 0) { 163171e76186SKamil Rytarowski PMD_INIT_LOG(ERR, "Reached RBDR desc limit, reduce nr desc " 163271e76186SKamil Rytarowski "VF%d", nic->vf_id); 16337413feeeSJerin Jacob return -ENOMEM; 16347413feeeSJerin Jacob } 16357413feeeSJerin Jacob 16367413feeeSJerin Jacob /* Enable qset */ 16377413feeeSJerin Jacob ret = nicvf_qset_config(nic); 16387413feeeSJerin Jacob if (ret) { 163971e76186SKamil Rytarowski PMD_INIT_LOG(ERR, "Failed to enable qset %d VF%d", ret, 164071e76186SKamil Rytarowski nic->vf_id); 16417413feeeSJerin Jacob return 
ret;
16427413feeeSJerin Jacob }
16437413feeeSJerin Jacob
16447413feeeSJerin Jacob /* Allocate RBDR and RBDR ring desc */
16457413feeeSJerin Jacob nb_rbdr_desc = nicvf_qsize_rbdr_roundup(total_rxq_desc);
16466d3cbd56SKamil Rytarowski ret = nicvf_qset_rbdr_alloc(dev, nic, nb_rbdr_desc, rbdrsz);
16477413feeeSJerin Jacob if (ret) {
164871e76186SKamil Rytarowski PMD_INIT_LOG(ERR, "Failed to allocate memory for rbdr alloc "
164971e76186SKamil Rytarowski "VF%d", nic->vf_id);
16507413feeeSJerin Jacob goto qset_reclaim;
16517413feeeSJerin Jacob }
16527413feeeSJerin Jacob
16537413feeeSJerin Jacob /* Enable and configure RBDR registers */
16547413feeeSJerin Jacob ret = nicvf_qset_rbdr_config(nic, 0);
16557413feeeSJerin Jacob if (ret) {
165671e76186SKamil Rytarowski PMD_INIT_LOG(ERR, "Failed to configure rbdr %d VF%d", ret,
165771e76186SKamil Rytarowski nic->vf_id);
16587413feeeSJerin Jacob goto qset_rbdr_free;
16597413feeeSJerin Jacob }
16607413feeeSJerin Jacob
16617413feeeSJerin Jacob /* Fill rte_mempool buffers in RBDR pool and precharge it */
1662394014bcSKamil Rytarowski ret = nicvf_qset_rbdr_precharge(dev, nic, 0, rbdr_rte_mempool_get,
1663394014bcSKamil Rytarowski total_rxq_desc);
16647413feeeSJerin Jacob if (ret) {
166571e76186SKamil Rytarowski PMD_INIT_LOG(ERR, "Failed to fill rbdr %d VF%d", ret,
166671e76186SKamil Rytarowski nic->vf_id);
16677413feeeSJerin Jacob goto qset_rbdr_reclaim;
16687413feeeSJerin Jacob }
16697413feeeSJerin Jacob
167071e76186SKamil Rytarowski PMD_DRV_LOG(INFO, "Filled %d out of %d entries in RBDR VF%d",
167171e76186SKamil Rytarowski nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
16727413feeeSJerin Jacob
16737413feeeSJerin Jacob /* Configure VLAN Strip */
1674295968d1SFerruh Yigit mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
1675295968d1SFerruh Yigit RTE_ETH_VLAN_EXTEND_MASK;
1676d3bf2564SRakesh Kudurumalla ret = nicvf_vlan_offload_config(dev, mask);
16777413feeeSJerin Jacob
16788a946db3SJerin Jacob /* Based on the packet type (IPv4 or IPv6), the nicvf HW aligns L3 data
16798a946db3SJerin Jacob * to a 64-bit memory address.
16808a946db3SJerin Jacob * The alignment creates a hole in the mbuf (between the end of headroom
16818a946db3SJerin Jacob * and packet data start). The new revision of the HW provides an option
16828a946db3SJerin Jacob * to disable the L3 alignment feature and make the mbuf layout look
16838a946db3SJerin Jacob * more like other NICs. For better application compatibility, the driver
16848a946db3SJerin Jacob * disables the L3 alignment feature on the hardware revisions that support it.
16858a946db3SJerin Jacob */
16868a946db3SJerin Jacob nicvf_apad_config(nic, false);
16878a946db3SJerin Jacob
168871e76186SKamil Rytarowski /* Get queue ranges for this VF */
168971e76186SKamil Rytarowski nicvf_tx_range(dev, nic, &tx_start, &tx_end);
169071e76186SKamil Rytarowski
16917413feeeSJerin Jacob /* Configure TX queues */
169271e76186SKamil Rytarowski for (qidx = tx_start; qidx <= tx_end; qidx++) {
169371e76186SKamil Rytarowski ret = nicvf_vf_start_tx_queue(dev, nic,
169471e76186SKamil Rytarowski qidx % MAX_SND_QUEUES_PER_QS);
16957413feeeSJerin Jacob if (ret)
16967413feeeSJerin Jacob goto start_txq_error;
16977413feeeSJerin Jacob }
16987413feeeSJerin Jacob
169971e76186SKamil Rytarowski /* Configure RX queues */
170071e76186SKamil Rytarowski for (qidx = rx_start; qidx <= rx_end; qidx++) {
170171e76186SKamil Rytarowski ret = nicvf_vf_start_rx_queue(dev, nic,
170271e76186SKamil Rytarowski qidx % MAX_RCV_QUEUES_PER_QS);
170371e76186SKamil Rytarowski if (ret)
170471e76186SKamil Rytarowski goto start_rxq_error;
170571e76186SKamil Rytarowski }
170671e76186SKamil Rytarowski
170771e76186SKamil Rytarowski if (!nic->sqs_mode) {
17087413feeeSJerin Jacob /* Configure CPI algorithm */
17097413feeeSJerin Jacob ret = nicvf_configure_cpi(dev);
17107413feeeSJerin Jacob if (ret)
17117413feeeSJerin Jacob goto start_txq_error;
17127413feeeSJerin Jacob
171371e76186SKamil Rytarowski ret = nicvf_mbox_get_rss_size(nic);
171471e76186SKamil Rytarowski if (ret) {
171571e76186SKamil Rytarowski PMD_INIT_LOG(ERR, "Failed to get rss table size");
171671e76186SKamil Rytarowski goto qset_rss_error;
171771e76186SKamil Rytarowski }
171871e76186SKamil Rytarowski
17197413feeeSJerin Jacob /* Configure RSS */
17207413feeeSJerin Jacob ret = nicvf_configure_rss(dev);
17217413feeeSJerin Jacob if (ret)
17227413feeeSJerin Jacob goto qset_rss_error;
172371e76186SKamil Rytarowski }
172471e76186SKamil Rytarowski
172571e76186SKamil Rytarowski /* Done; Let PF make the BGX's RX and TX switches to ON position */
172671e76186SKamil Rytarowski nicvf_mbox_cfg_done(nic);
172771e76186SKamil Rytarowski return 0;
172871e76186SKamil Rytarowski
172971e76186SKamil Rytarowski qset_rss_error:
173071e76186SKamil Rytarowski nicvf_rss_term(nic);
173171e76186SKamil Rytarowski start_rxq_error:
173271e76186SKamil Rytarowski for (qidx = rx_start; qidx <= rx_end; qidx++)
173371e76186SKamil Rytarowski nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);
173471e76186SKamil Rytarowski start_txq_error:
173571e76186SKamil Rytarowski for (qidx = tx_start; qidx <= tx_end; qidx++)
173671e76186SKamil Rytarowski nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);
173771e76186SKamil Rytarowski qset_rbdr_reclaim:
173871e76186SKamil Rytarowski nicvf_qset_rbdr_reclaim(nic, 0);
173971e76186SKamil Rytarowski nicvf_rbdr_release_mbufs(dev, nic);
174071e76186SKamil Rytarowski qset_rbdr_free:
174171e76186SKamil Rytarowski if (nic->rbdr) {
174271e76186SKamil Rytarowski rte_free(nic->rbdr);
174371e76186SKamil Rytarowski nic->rbdr = NULL;
174471e76186SKamil Rytarowski }
174571e76186SKamil Rytarowski qset_reclaim:
174671e76186SKamil Rytarowski nicvf_qset_reclaim(nic);
174771e76186SKamil Rytarowski return ret;
174871e76186SKamil Rytarowski }
174971e76186SKamil Rytarowski
175071e76186SKamil Rytarowski static int
175171e76186SKamil Rytarowski nicvf_dev_start(struct rte_eth_dev *dev)
175271e76186SKamil Rytarowski {
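/* Bring-up order below: validate that all Rx mempools share one buffer
 * size (a single RBDR serves every RQ), configure loopback, reset HW
 * counters, set MTU/scatter mode, apply the requested link speed, then
 * start the primary VF's qset followed by one qset per secondary VF.
 */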
175371e76186SKamil Rytarowski uint16_t qidx;
175471e76186SKamil Rytarowski int ret;
175571e76186SKamil Rytarowski size_t i;
175671e76186SKamil Rytarowski struct nicvf *nic = nicvf_pmd_priv(dev);
175771e76186SKamil Rytarowski struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
175871e76186SKamil Rytarowski uint16_t mtu;
175971e76186SKamil Rytarowski uint32_t buffsz = 0, rbdrsz = 0;
176071e76186SKamil Rytarowski struct rte_pktmbuf_pool_private *mbp_priv;
176171e76186SKamil Rytarowski struct nicvf_rxq *rxq;
176271e76186SKamil Rytarowski
176371e76186SKamil Rytarowski PMD_INIT_FUNC_TRACE();
176471e76186SKamil Rytarowski
176571e76186SKamil Rytarowski /* This function must be called for a primary device */
176671e76186SKamil Rytarowski assert_primary(nic);
176771e76186SKamil Rytarowski
176871e76186SKamil Rytarowski /* Validate RBDR buff size */
176971e76186SKamil Rytarowski for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
177071e76186SKamil Rytarowski rxq = dev->data->rx_queues[qidx];
177171e76186SKamil Rytarowski mbp_priv = rte_mempool_get_priv(rxq->pool);
177271e76186SKamil Rytarowski buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
177371e76186SKamil Rytarowski if (buffsz % 128) {
177471e76186SKamil Rytarowski PMD_INIT_LOG(ERR, "rxbuf size must be a multiple of 128");
177571e76186SKamil Rytarowski return -EINVAL;
177671e76186SKamil Rytarowski }
177771e76186SKamil Rytarowski if (rbdrsz == 0)
177871e76186SKamil Rytarowski rbdrsz = buffsz;
177971e76186SKamil Rytarowski if (rbdrsz != buffsz) {
178071e76186SKamil Rytarowski PMD_INIT_LOG(ERR, "buffsz not same, qidx=%d (%d/%d)",
178171e76186SKamil Rytarowski qidx, rbdrsz, buffsz);
178271e76186SKamil Rytarowski return -EINVAL;
178371e76186SKamil Rytarowski }
178471e76186SKamil Rytarowski }
17857413feeeSJerin Jacob
17867413feeeSJerin Jacob /* Configure loopback */
17877413feeeSJerin Jacob ret = nicvf_loopback_config(nic, dev->data->dev_conf.lpbk_mode);
17887413feeeSJerin Jacob if (ret) {
17897413feeeSJerin Jacob PMD_INIT_LOG(ERR, "Failed to configure loopback %d", ret);
179071e76186SKamil Rytarowski return ret;
17917413feeeSJerin Jacob }
17927413feeeSJerin Jacob
17937413feeeSJerin Jacob /* Reset all statistics counters attached to this port */
17947413feeeSJerin Jacob ret = nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, 0xFFFF, 0xFFFF);
17957413feeeSJerin Jacob if (ret) {
17967413feeeSJerin Jacob PMD_INIT_LOG(ERR, "Failed to reset stat counters %d", ret);
179771e76186SKamil Rytarowski return ret;
17987413feeeSJerin Jacob }
17997413feeeSJerin Jacob
18007413feeeSJerin Jacob /* Set up scatter mode if needed for jumbo frames */
18011bb4a528SFerruh Yigit if (dev->data->mtu + (uint32_t)NIC_HW_L2_OVERHEAD + 2 * VLAN_TAG_SIZE > buffsz)
18027413feeeSJerin Jacob dev->data->scattered_rx = 1;
1803295968d1SFerruh Yigit if ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) != 0)
18047413feeeSJerin Jacob dev->data->scattered_rx = 1;
18057413feeeSJerin Jacob
18061bb4a528SFerruh Yigit /* Setup MTU */
18071bb4a528SFerruh Yigit mtu = dev->data->mtu;
18087413feeeSJerin Jacob
18097413feeeSJerin Jacob if (nicvf_dev_set_mtu(dev, mtu)) {
18107413feeeSJerin Jacob PMD_INIT_LOG(ERR, "Failed to set default mtu size");
18117413feeeSJerin Jacob return -EBUSY;
18127413feeeSJerin Jacob }
18137413feeeSJerin Jacob
18148f79c43fSHarman Kalra /* Apply new link configuration if changed */
18158f79c43fSHarman Kalra ret = nicvf_apply_link_speed(dev);
18168f79c43fSHarman Kalra if (ret) {
1817*f665790aSDavid Marchand PMD_INIT_LOG(ERR, "Failed to set link
configuration"); 18188f79c43fSHarman Kalra return ret; 18198f79c43fSHarman Kalra } 18208f79c43fSHarman Kalra 182171e76186SKamil Rytarowski ret = nicvf_vf_start(dev, nic, rbdrsz); 182271e76186SKamil Rytarowski if (ret != 0) 182371e76186SKamil Rytarowski return ret; 182471e76186SKamil Rytarowski 182571e76186SKamil Rytarowski for (i = 0; i < nic->sqs_count; i++) { 182671e76186SKamil Rytarowski assert(nic->snicvf[i]); 182771e76186SKamil Rytarowski 182871e76186SKamil Rytarowski ret = nicvf_vf_start(dev, nic->snicvf[i], rbdrsz); 182971e76186SKamil Rytarowski if (ret != 0) 183071e76186SKamil Rytarowski return ret; 183171e76186SKamil Rytarowski } 183271e76186SKamil Rytarowski 18335e64c812SPavan Nikhilesh /* Configure callbacks based on offloads */ 18347413feeeSJerin Jacob nicvf_set_tx_function(dev); 18357413feeeSJerin Jacob nicvf_set_rx_function(dev); 18367413feeeSJerin Jacob 18377413feeeSJerin Jacob return 0; 18387413feeeSJerin Jacob } 18397413feeeSJerin Jacob 18407413feeeSJerin Jacob static void 1841627d4ba2SKamil Rytarowski nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup) 18427413feeeSJerin Jacob { 1843627d4ba2SKamil Rytarowski size_t i; 18447413feeeSJerin Jacob int ret; 18457413feeeSJerin Jacob struct nicvf *nic = nicvf_pmd_priv(dev); 18467413feeeSJerin Jacob 18477413feeeSJerin Jacob PMD_INIT_FUNC_TRACE(); 1848b8f5d2aeSThomas Monjalon dev->data->dev_started = 0; 18497413feeeSJerin Jacob 1850627d4ba2SKamil Rytarowski /* Teardown secondary vf first */ 1851627d4ba2SKamil Rytarowski for (i = 0; i < nic->sqs_count; i++) { 1852627d4ba2SKamil Rytarowski if (!nic->snicvf[i]) 1853627d4ba2SKamil Rytarowski continue; 1854627d4ba2SKamil Rytarowski 1855627d4ba2SKamil Rytarowski nicvf_vf_stop(dev, nic->snicvf[i], cleanup); 1856627d4ba2SKamil Rytarowski } 1857627d4ba2SKamil Rytarowski 1858627d4ba2SKamil Rytarowski /* Stop the primary VF now */ 1859627d4ba2SKamil Rytarowski nicvf_vf_stop(dev, nic, cleanup); 18607413feeeSJerin Jacob 18617413feeeSJerin Jacob /* Disable loopback */ 18627413feeeSJerin Jacob ret = nicvf_loopback_config(nic, 0); 18637413feeeSJerin Jacob if (ret) 18647413feeeSJerin Jacob PMD_INIT_LOG(ERR, "Failed to disable loopback %d", ret); 18657413feeeSJerin Jacob 1866627d4ba2SKamil Rytarowski /* Reclaim CPI configuration */ 1867627d4ba2SKamil Rytarowski ret = nicvf_mbox_config_cpi(nic, 0); 1868627d4ba2SKamil Rytarowski if (ret) 1869627d4ba2SKamil Rytarowski PMD_INIT_LOG(ERR, "Failed to reclaim CPI config %d", ret); 1870627d4ba2SKamil Rytarowski } 1871627d4ba2SKamil Rytarowski 187262024eb8SIvan Ilchenko static int 1873627d4ba2SKamil Rytarowski nicvf_dev_stop(struct rte_eth_dev *dev) 1874627d4ba2SKamil Rytarowski { 1875627d4ba2SKamil Rytarowski PMD_INIT_FUNC_TRACE(); 1876627d4ba2SKamil Rytarowski 1877627d4ba2SKamil Rytarowski nicvf_dev_stop_cleanup(dev, false); 187862024eb8SIvan Ilchenko 187962024eb8SIvan Ilchenko return 0; 1880627d4ba2SKamil Rytarowski } 1881627d4ba2SKamil Rytarowski 1882627d4ba2SKamil Rytarowski static void 1883627d4ba2SKamil Rytarowski nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic, bool cleanup) 1884627d4ba2SKamil Rytarowski { 1885627d4ba2SKamil Rytarowski int ret; 1886627d4ba2SKamil Rytarowski uint16_t qidx; 1887627d4ba2SKamil Rytarowski uint16_t tx_start, tx_end; 1888627d4ba2SKamil Rytarowski uint16_t rx_start, rx_end; 1889627d4ba2SKamil Rytarowski 1890627d4ba2SKamil Rytarowski PMD_INIT_FUNC_TRACE(); 1891627d4ba2SKamil Rytarowski 1892627d4ba2SKamil Rytarowski if (cleanup) { 1893627d4ba2SKamil Rytarowski /* Let PF make the BGX's RX and TX switches to 
OFF position */ 1894627d4ba2SKamil Rytarowski nicvf_mbox_shutdown(nic); 1895627d4ba2SKamil Rytarowski } 1896627d4ba2SKamil Rytarowski 18977413feeeSJerin Jacob /* Disable VLAN Strip */ 18987413feeeSJerin Jacob nicvf_vlan_hw_strip(nic, 0); 18997413feeeSJerin Jacob 1900627d4ba2SKamil Rytarowski /* Get queue ranges for this VF */ 1901627d4ba2SKamil Rytarowski nicvf_tx_range(dev, nic, &tx_start, &tx_end); 1902627d4ba2SKamil Rytarowski 1903627d4ba2SKamil Rytarowski for (qidx = tx_start; qidx <= tx_end; qidx++) 1904627d4ba2SKamil Rytarowski nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS); 1905627d4ba2SKamil Rytarowski 1906627d4ba2SKamil Rytarowski /* Get queue ranges for this VF */ 1907627d4ba2SKamil Rytarowski nicvf_rx_range(dev, nic, &rx_start, &rx_end); 19087413feeeSJerin Jacob 19097413feeeSJerin Jacob /* Reclaim rq */ 1910627d4ba2SKamil Rytarowski for (qidx = rx_start; qidx <= rx_end; qidx++) 1911627d4ba2SKamil Rytarowski nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS); 19127413feeeSJerin Jacob 19137413feeeSJerin Jacob /* Reclaim RBDR */ 19147413feeeSJerin Jacob ret = nicvf_qset_rbdr_reclaim(nic, 0); 19157413feeeSJerin Jacob if (ret) 19167413feeeSJerin Jacob PMD_INIT_LOG(ERR, "Failed to reclaim RBDR %d", ret); 19177413feeeSJerin Jacob 19187413feeeSJerin Jacob /* Move all charged buffers in RBDR back to pool */ 19197413feeeSJerin Jacob if (nic->rbdr != NULL) 19206d3cbd56SKamil Rytarowski nicvf_rbdr_release_mbufs(dev, nic); 19217413feeeSJerin Jacob 19227413feeeSJerin Jacob /* Disable qset */ 1923627d4ba2SKamil Rytarowski ret = nicvf_qset_reclaim(nic); 19247413feeeSJerin Jacob if (ret) 19257413feeeSJerin Jacob PMD_INIT_LOG(ERR, "Failed to disable qset %d", ret); 19267413feeeSJerin Jacob 19277413feeeSJerin Jacob /* Disable all interrupts */ 19287413feeeSJerin Jacob nicvf_disable_all_interrupts(nic); 19297413feeeSJerin Jacob 19307413feeeSJerin Jacob /* Free RBDR SW structure */ 19317413feeeSJerin Jacob if (nic->rbdr) { 19327413feeeSJerin Jacob rte_free(nic->rbdr); 19337413feeeSJerin Jacob nic->rbdr = NULL; 19347413feeeSJerin Jacob } 19357413feeeSJerin Jacob } 19367413feeeSJerin Jacob 1937b142387bSThomas Monjalon static int 19387413feeeSJerin Jacob nicvf_dev_close(struct rte_eth_dev *dev) 19397413feeeSJerin Jacob { 1940627d4ba2SKamil Rytarowski struct nicvf *nic = nicvf_pmd_priv(dev); 1941627d4ba2SKamil Rytarowski 19427413feeeSJerin Jacob PMD_INIT_FUNC_TRACE(); 194330410493SThomas Monjalon if (rte_eal_process_type() != RTE_PROC_PRIMARY) 194430410493SThomas Monjalon return 0; 19457413feeeSJerin Jacob 1946627d4ba2SKamil Rytarowski nicvf_dev_stop_cleanup(dev, true); 1947f141adcaSKamil Rytarowski nicvf_periodic_alarm_stop(nicvf_interrupt, dev); 19481f7b83b8SHanumanth Pothula nicvf_periodic_alarm_stop(nicvf_vf_interrupt, nic); 1949b142387bSThomas Monjalon 1950d61138d4SHarman Kalra rte_intr_instance_free(nic->intr_handle); 1951d61138d4SHarman Kalra 1952b142387bSThomas Monjalon return 0; 19537413feeeSJerin Jacob } 19547413feeeSJerin Jacob 1955bc79615aSJerin Jacob static int 1956b7004ab2SKamil Rytarowski nicvf_request_sqs(struct nicvf *nic) 1957b7004ab2SKamil Rytarowski { 1958b7004ab2SKamil Rytarowski size_t i; 1959b7004ab2SKamil Rytarowski 1960b7004ab2SKamil Rytarowski assert_primary(nic); 1961b7004ab2SKamil Rytarowski assert(nic->sqs_count > 0); 1962b7004ab2SKamil Rytarowski assert(nic->sqs_count <= MAX_SQS_PER_VF); 1963b7004ab2SKamil Rytarowski 1964b7004ab2SKamil Rytarowski /* Set no of Rx/Tx queues in each of the SQsets */ 1965b7004ab2SKamil Rytarowski for (i = 0; i < 
nic->sqs_count; i++) { 1966b7004ab2SKamil Rytarowski if (nicvf_svf_empty()) 1967b7004ab2SKamil Rytarowski rte_panic("Cannot assign sufficient number of " 1968b7004ab2SKamil Rytarowski "secondary queues to primary VF%" PRIu8 "\n", 1969b7004ab2SKamil Rytarowski nic->vf_id); 1970b7004ab2SKamil Rytarowski 1971b7004ab2SKamil Rytarowski nic->snicvf[i] = nicvf_svf_pop(); 1972b7004ab2SKamil Rytarowski nic->snicvf[i]->sqs_id = i; 1973b7004ab2SKamil Rytarowski } 1974b7004ab2SKamil Rytarowski 1975b7004ab2SKamil Rytarowski return nicvf_mbox_request_sqs(nic); 1976b7004ab2SKamil Rytarowski } 1977b7004ab2SKamil Rytarowski 1978b7004ab2SKamil Rytarowski static int 1979bc79615aSJerin Jacob nicvf_dev_configure(struct rte_eth_dev *dev) 1980bc79615aSJerin Jacob { 1981b7004ab2SKamil Rytarowski struct rte_eth_dev_data *data = dev->data; 1982b7004ab2SKamil Rytarowski struct rte_eth_conf *conf = &data->dev_conf; 1983bc79615aSJerin Jacob struct rte_eth_rxmode *rxmode = &conf->rxmode; 1984bc79615aSJerin Jacob struct rte_eth_txmode *txmode = &conf->txmode; 1985bc79615aSJerin Jacob struct nicvf *nic = nicvf_pmd_priv(dev); 1986b7004ab2SKamil Rytarowski uint8_t cqcount; 1987bc79615aSJerin Jacob 1988bc79615aSJerin Jacob PMD_INIT_FUNC_TRACE(); 1989bc79615aSJerin Jacob 1990295968d1SFerruh Yigit if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) 1991295968d1SFerruh Yigit rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; 19928b945a7fSPavan Nikhilesh 1993bc79615aSJerin Jacob if (!rte_eal_has_hugepages()) { 1994bc79615aSJerin Jacob PMD_INIT_LOG(INFO, "Huge page is not configured"); 1995bc79615aSJerin Jacob return -EINVAL; 1996bc79615aSJerin Jacob } 1997bc79615aSJerin Jacob 1998bc79615aSJerin Jacob if (txmode->mq_mode) { 1999bc79615aSJerin Jacob PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported"); 2000bc79615aSJerin Jacob return -EINVAL; 2001bc79615aSJerin Jacob } 2002bc79615aSJerin Jacob 2003295968d1SFerruh Yigit if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE && 2004295968d1SFerruh Yigit rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) { 2005bc79615aSJerin Jacob PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode); 2006bc79615aSJerin Jacob return -EINVAL; 2007bc79615aSJerin Jacob } 2008bc79615aSJerin Jacob 2009bc79615aSJerin Jacob if (conf->dcb_capability_en) { 2010bc79615aSJerin Jacob PMD_INIT_LOG(INFO, "DCB enable not supported"); 2011bc79615aSJerin Jacob return -EINVAL; 2012bc79615aSJerin Jacob } 2013bc79615aSJerin Jacob 2014b7004ab2SKamil Rytarowski assert_primary(nic); 2015b7004ab2SKamil Rytarowski NICVF_STATIC_ASSERT(MAX_RCV_QUEUES_PER_QS == MAX_SND_QUEUES_PER_QS); 2016b7004ab2SKamil Rytarowski cqcount = RTE_MAX(data->nb_tx_queues, data->nb_rx_queues); 2017b7004ab2SKamil Rytarowski if (cqcount > MAX_RCV_QUEUES_PER_QS) { 2018b7004ab2SKamil Rytarowski nic->sqs_count = RTE_ALIGN_CEIL(cqcount, MAX_RCV_QUEUES_PER_QS); 2019b7004ab2SKamil Rytarowski nic->sqs_count = (nic->sqs_count / MAX_RCV_QUEUES_PER_QS) - 1; 2020b7004ab2SKamil Rytarowski } else { 2021b7004ab2SKamil Rytarowski nic->sqs_count = 0; 2022b7004ab2SKamil Rytarowski } 2023b7004ab2SKamil Rytarowski 2024b7004ab2SKamil Rytarowski assert(nic->sqs_count <= MAX_SQS_PER_VF); 2025b7004ab2SKamil Rytarowski 2026b7004ab2SKamil Rytarowski if (nic->sqs_count > 0) { 2027b7004ab2SKamil Rytarowski if (nicvf_request_sqs(nic)) { 2028b7004ab2SKamil Rytarowski rte_panic("Cannot assign sufficient number of " 2029b7004ab2SKamil Rytarowski "secondary queues to PORT%d VF%" PRIu8 "\n", 2030b7004ab2SKamil Rytarowski dev->data->port_id, nic->vf_id); 2031b7004ab2SKamil Rytarowski } 
2032b7004ab2SKamil Rytarowski } 2033b7004ab2SKamil Rytarowski 2034295968d1SFerruh Yigit if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM) 20355e64c812SPavan Nikhilesh nic->offload_cksum = 1; 20365e64c812SPavan Nikhilesh 2037bc79615aSJerin Jacob PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64, 2038bc79615aSJerin Jacob dev->data->port_id, nicvf_hw_cap(nic)); 2039bc79615aSJerin Jacob 2040bc79615aSJerin Jacob return 0; 2041bc79615aSJerin Jacob } 2042bc79615aSJerin Jacob 2043b8d96c71SHarman Kalra static int 2044b8d96c71SHarman Kalra nicvf_dev_set_link_up(struct rte_eth_dev *dev) 2045b8d96c71SHarman Kalra { 2046b8d96c71SHarman Kalra struct nicvf *nic = nicvf_pmd_priv(dev); 2047b8d96c71SHarman Kalra int rc, i; 2048b8d96c71SHarman Kalra 2049b8d96c71SHarman Kalra rc = nicvf_mbox_set_link_up_down(nic, true); 2050b8d96c71SHarman Kalra if (rc) 2051b8d96c71SHarman Kalra goto done; 2052b8d96c71SHarman Kalra 2053b8d96c71SHarman Kalra /* Start tx queues */ 2054b8d96c71SHarman Kalra for (i = 0; i < dev->data->nb_tx_queues; i++) 2055b8d96c71SHarman Kalra nicvf_dev_tx_queue_start(dev, i); 2056b8d96c71SHarman Kalra 2057b8d96c71SHarman Kalra done: 2058b8d96c71SHarman Kalra return rc; 2059b8d96c71SHarman Kalra } 2060b8d96c71SHarman Kalra 2061b8d96c71SHarman Kalra static int 2062b8d96c71SHarman Kalra nicvf_dev_set_link_down(struct rte_eth_dev *dev) 2063b8d96c71SHarman Kalra { 2064b8d96c71SHarman Kalra struct nicvf *nic = nicvf_pmd_priv(dev); 2065b8d96c71SHarman Kalra int i; 2066b8d96c71SHarman Kalra 2067b8d96c71SHarman Kalra /* Stop tx queues */ 2068b8d96c71SHarman Kalra for (i = 0; i < dev->data->nb_tx_queues; i++) 2069b8d96c71SHarman Kalra nicvf_dev_tx_queue_stop(dev, i); 2070b8d96c71SHarman Kalra 2071b8d96c71SHarman Kalra return nicvf_mbox_set_link_up_down(nic, false); 2072b8d96c71SHarman Kalra } 2073b8d96c71SHarman Kalra 2074e4387966SJerin Jacob /* Initialize and register driver with DPDK Application */ 2075e4387966SJerin Jacob static const struct eth_dev_ops nicvf_eth_dev_ops = { 2076bc79615aSJerin Jacob .dev_configure = nicvf_dev_configure, 20777413feeeSJerin Jacob .dev_start = nicvf_dev_start, 20787413feeeSJerin Jacob .dev_stop = nicvf_dev_stop, 20798fc70464SJerin Jacob .link_update = nicvf_dev_link_update, 20807413feeeSJerin Jacob .dev_close = nicvf_dev_close, 2081684fa771SJerin Jacob .stats_get = nicvf_dev_stats_get, 2082684fa771SJerin Jacob .stats_reset = nicvf_dev_stats_reset, 20836eae36eaSJerin Jacob .promiscuous_enable = nicvf_dev_promisc_enable, 2084dcd7b1e1SJerin Jacob .dev_infos_get = nicvf_dev_info_get, 20851c80e4fdSJerin Jacob .dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get, 208665d9804eSJerin Jacob .mtu_set = nicvf_dev_set_mtu, 2087d3bf2564SRakesh Kudurumalla .vlan_offload_set = nicvf_vlan_offload_set, 208843362c6aSJerin Jacob .reta_update = nicvf_dev_reta_update, 208943362c6aSJerin Jacob .reta_query = nicvf_dev_reta_query, 209043362c6aSJerin Jacob .rss_hash_update = nicvf_dev_rss_hash_update, 209143362c6aSJerin Jacob .rss_hash_conf_get = nicvf_dev_rss_hash_conf_get, 209286b4eb42SJerin Jacob .rx_queue_start = nicvf_dev_rx_queue_start, 209386b4eb42SJerin Jacob .rx_queue_stop = nicvf_dev_rx_queue_stop, 2094fc1f6c62SJerin Jacob .tx_queue_start = nicvf_dev_tx_queue_start, 2095fc1f6c62SJerin Jacob .tx_queue_stop = nicvf_dev_tx_queue_stop, 2096aa0d976eSJerin Jacob .rx_queue_setup = nicvf_dev_rx_queue_setup, 2097aa0d976eSJerin Jacob .rx_queue_release = nicvf_dev_rx_queue_release, 20983f3c6f97SJerin Jacob .tx_queue_setup = nicvf_dev_tx_queue_setup, 20993f3c6f97SJerin Jacob 
.tx_queue_release = nicvf_dev_tx_queue_release, 2100b8d96c71SHarman Kalra .dev_set_link_up = nicvf_dev_set_link_up, 2101b8d96c71SHarman Kalra .dev_set_link_down = nicvf_dev_set_link_down, 2102606ee746SJerin Jacob .get_reg = nicvf_dev_get_regs, 2103e4387966SJerin Jacob }; 2104e4387966SJerin Jacob 2105d3bf2564SRakesh Kudurumalla static int 2106d3bf2564SRakesh Kudurumalla nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask) 2107d3bf2564SRakesh Kudurumalla { 2108d3bf2564SRakesh Kudurumalla struct rte_eth_rxmode *rxmode; 2109d3bf2564SRakesh Kudurumalla struct nicvf *nic = nicvf_pmd_priv(dev); 2110d3bf2564SRakesh Kudurumalla rxmode = &dev->data->dev_conf.rxmode; 2111295968d1SFerruh Yigit if (mask & RTE_ETH_VLAN_STRIP_MASK) { 2112295968d1SFerruh Yigit if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 2113d3bf2564SRakesh Kudurumalla nicvf_vlan_hw_strip(nic, true); 2114d3bf2564SRakesh Kudurumalla else 2115d3bf2564SRakesh Kudurumalla nicvf_vlan_hw_strip(nic, false); 2116d3bf2564SRakesh Kudurumalla } 2117d3bf2564SRakesh Kudurumalla 2118d3bf2564SRakesh Kudurumalla return 0; 2119d3bf2564SRakesh Kudurumalla } 2120d3bf2564SRakesh Kudurumalla 2121d3bf2564SRakesh Kudurumalla static int 2122d3bf2564SRakesh Kudurumalla nicvf_vlan_offload_set(struct rte_eth_dev *dev, int mask) 2123d3bf2564SRakesh Kudurumalla { 2124d3bf2564SRakesh Kudurumalla nicvf_vlan_offload_config(dev, mask); 2125d3bf2564SRakesh Kudurumalla 2126d3bf2564SRakesh Kudurumalla return 0; 2127d3bf2564SRakesh Kudurumalla } 2128d3bf2564SRakesh Kudurumalla 2129279d3319SRakesh Kudurumalla static inline int 2130279d3319SRakesh Kudurumalla nicvf_set_first_skip(struct rte_eth_dev *dev) 2131279d3319SRakesh Kudurumalla { 2132279d3319SRakesh Kudurumalla int bytes_to_skip = 0; 2133279d3319SRakesh Kudurumalla int ret = 0; 2134279d3319SRakesh Kudurumalla unsigned int i; 2135279d3319SRakesh Kudurumalla struct rte_kvargs *kvlist; 2136279d3319SRakesh Kudurumalla static const char *const skip[] = { 2137279d3319SRakesh Kudurumalla SKIP_DATA_BYTES, 2138279d3319SRakesh Kudurumalla NULL}; 2139279d3319SRakesh Kudurumalla struct nicvf *nic = nicvf_pmd_priv(dev); 2140279d3319SRakesh Kudurumalla 2141279d3319SRakesh Kudurumalla if (!dev->device->devargs) { 2142279d3319SRakesh Kudurumalla nicvf_first_skip_config(nic, 0); 2143279d3319SRakesh Kudurumalla return ret; 2144279d3319SRakesh Kudurumalla } 2145279d3319SRakesh Kudurumalla 2146279d3319SRakesh Kudurumalla kvlist = rte_kvargs_parse(dev->device->devargs->args, skip); 2147279d3319SRakesh Kudurumalla if (!kvlist) 2148279d3319SRakesh Kudurumalla return -EINVAL; 2149279d3319SRakesh Kudurumalla 2150279d3319SRakesh Kudurumalla if (kvlist->count == 0) 2151279d3319SRakesh Kudurumalla goto exit; 2152279d3319SRakesh Kudurumalla 2153279d3319SRakesh Kudurumalla for (i = 0; i != kvlist->count; ++i) { 2154279d3319SRakesh Kudurumalla const struct rte_kvargs_pair *pair = &kvlist->pairs[i]; 2155279d3319SRakesh Kudurumalla 2156279d3319SRakesh Kudurumalla if (!strcmp(pair->key, SKIP_DATA_BYTES)) 2157279d3319SRakesh Kudurumalla bytes_to_skip = atoi(pair->value); 2158279d3319SRakesh Kudurumalla } 2159279d3319SRakesh Kudurumalla 2160279d3319SRakesh Kudurumalla /*128 bytes amounts to one cache line*/ 2161279d3319SRakesh Kudurumalla if (bytes_to_skip >= 0 && bytes_to_skip < 128) { 2162279d3319SRakesh Kudurumalla if (!(bytes_to_skip % 8)) { 2163279d3319SRakesh Kudurumalla nicvf_first_skip_config(nic, (bytes_to_skip / 8)); 2164279d3319SRakesh Kudurumalla nic->skip_bytes = bytes_to_skip; 2165279d3319SRakesh Kudurumalla goto 
kvlist_free;
2166279d3319SRakesh Kudurumalla } else {
2167279d3319SRakesh Kudurumalla PMD_INIT_LOG(ERR, "skip_data_bytes should be a multiple of 8");
2168279d3319SRakesh Kudurumalla ret = -EINVAL;
2169279d3319SRakesh Kudurumalla goto exit;
2170279d3319SRakesh Kudurumalla }
2171279d3319SRakesh Kudurumalla } else {
2172279d3319SRakesh Kudurumalla PMD_INIT_LOG(ERR, "skip_data_bytes should be less than 128");
2173279d3319SRakesh Kudurumalla ret = -EINVAL;
2174279d3319SRakesh Kudurumalla goto exit;
2175279d3319SRakesh Kudurumalla }
2176279d3319SRakesh Kudurumalla exit:
2177279d3319SRakesh Kudurumalla nicvf_first_skip_config(nic, 0);
2178279d3319SRakesh Kudurumalla kvlist_free:
2179279d3319SRakesh Kudurumalla rte_kvargs_free(kvlist);
2180279d3319SRakesh Kudurumalla return ret;
2181279d3319SRakesh Kudurumalla }
2182e4387966SJerin Jacob static int
2183230dce64SAmit Gupta nicvf_eth_dev_uninit(struct rte_eth_dev *dev)
2184230dce64SAmit Gupta {
2185230dce64SAmit Gupta PMD_INIT_FUNC_TRACE();
2186230dce64SAmit Gupta nicvf_dev_close(dev);
2187230dce64SAmit Gupta return 0;
2188230dce64SAmit Gupta }
218944a86354SHanumanth Pothula
219044a86354SHanumanth Pothula static inline uint64_t ether_addr_to_u64(uint8_t *addr)
219144a86354SHanumanth Pothula {
219244a86354SHanumanth Pothula uint64_t u = 0;
219344a86354SHanumanth Pothula int i;
219444a86354SHanumanth Pothula
219544a86354SHanumanth Pothula for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
219644a86354SHanumanth Pothula u = u << 8 | addr[i];
219744a86354SHanumanth Pothula
219844a86354SHanumanth Pothula return u;
219944a86354SHanumanth Pothula }
220044a86354SHanumanth Pothula
2201230dce64SAmit Gupta static int
2202e4387966SJerin Jacob nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
2203e4387966SJerin Jacob {
220444a86354SHanumanth Pothula uint8_t dmac_ctrl_reg = 0;
2205e4387966SJerin Jacob int ret;
2206e4387966SJerin Jacob struct rte_pci_device *pci_dev;
2207e4387966SJerin Jacob struct nicvf *nic = nicvf_pmd_priv(eth_dev);
2208e4387966SJerin Jacob
2209e4387966SJerin Jacob PMD_INIT_FUNC_TRACE();
2210e4387966SJerin Jacob
2211e4387966SJerin Jacob eth_dev->dev_ops = &nicvf_eth_dev_ops;
2212cbfc6111SFerruh Yigit eth_dev->rx_queue_count = nicvf_dev_rx_queue_count;
2213e4387966SJerin Jacob
22147413feeeSJerin Jacob /* For secondary processes, the primary has done all the work */
22157413feeeSJerin Jacob if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
221621e3fb00SKamil Rytarowski if (nic) {
22177413feeeSJerin Jacob /* Setup callbacks for secondary process */
22187413feeeSJerin Jacob nicvf_set_tx_function(eth_dev);
22197413feeeSJerin Jacob nicvf_set_rx_function(eth_dev);
22207413feeeSJerin Jacob return 0;
222121e3fb00SKamil Rytarowski } else {
222221e3fb00SKamil Rytarowski /* If nic == NULL then this is a secondary queue set VF,
222321e3fb00SKamil Rytarowski * so the ethdev needs to be released by the caller */
222421e3fb00SKamil Rytarowski return ENOTSUP;
222521e3fb00SKamil Rytarowski }
22267413feeeSJerin Jacob }
22277413feeeSJerin Jacob
2228c0802544SFerruh Yigit pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2229e4387966SJerin Jacob rte_eth_copy_pci_info(eth_dev, pci_dev);
2230f30e69b4SFerruh Yigit eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
2231e4387966SJerin Jacob
2232e4387966SJerin Jacob nic->device_id = pci_dev->id.device_id;
2233e4387966SJerin Jacob nic->vendor_id = pci_dev->id.vendor_id;
2234e4387966SJerin Jacob nic->subsystem_device_id = pci_dev->id.subsystem_device_id;
2235e4387966SJerin Jacob nic->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
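/* The subsystem IDs cached above drive later HW-revision checks,
 * e.g. nicvf_dev_info_get() not advertising 40G on CN81XX.
 */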

static int
nicvf_eth_dev_uninit(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();
	nicvf_dev_close(dev);
	return 0;
}

static inline uint64_t ether_addr_to_u64(uint8_t *addr)
{
	uint64_t u = 0;
	int i;

	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
		u = u << 8 | addr[i];

	return u;
}
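
/*
 * For example, the MAC address 01:02:03:04:05:06 folds to 0x010203040506,
 * with the first octet landing in the most significant non-zero byte; this
 * is the layout used below when the MAC is handed to the PF mailbox.
 */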
"true" : "false" 2284e4387966SJerin Jacob ); 2285e4387966SJerin Jacob } 2286e4387966SJerin Jacob 2287a32d2f5cSHanumanth Pothula /* To make sure RX DMAC register is set to default value (0x3) */ 2288a32d2f5cSHanumanth Pothula nicvf_mbox_reset_xcast(nic); 2289a32d2f5cSHanumanth Pothula 229021e3fb00SKamil Rytarowski ret = nicvf_base_init(nic); 229121e3fb00SKamil Rytarowski if (ret) { 229221e3fb00SKamil Rytarowski PMD_INIT_LOG(ERR, "Failed to execute nicvf_base_init"); 229321e3fb00SKamil Rytarowski goto malloc_fail; 229421e3fb00SKamil Rytarowski } 229521e3fb00SKamil Rytarowski 2296e4387966SJerin Jacob if (nic->sqs_mode) { 229721e3fb00SKamil Rytarowski /* Push nic to stack of secondary vfs */ 229821e3fb00SKamil Rytarowski nicvf_svf_push(nic); 229921e3fb00SKamil Rytarowski 230021e3fb00SKamil Rytarowski /* Steal nic pointer from the device for further reuse */ 230121e3fb00SKamil Rytarowski eth_dev->data->dev_private = NULL; 230221e3fb00SKamil Rytarowski 230321e3fb00SKamil Rytarowski nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev); 230421e3fb00SKamil Rytarowski 230598a7ea33SJerin Jacob /* Detach port by returning positive error number */ 230621e3fb00SKamil Rytarowski return ENOTSUP; 2307e4387966SJerin Jacob } 2308e4387966SJerin Jacob 230935b2d13fSOlivier Matz eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", 231035b2d13fSOlivier Matz RTE_ETHER_ADDR_LEN, 0); 2311e4387966SJerin Jacob if (eth_dev->data->mac_addrs == NULL) { 2312e4387966SJerin Jacob PMD_INIT_LOG(ERR, "Failed to allocate memory for mac addr"); 2313e4387966SJerin Jacob ret = -ENOMEM; 2314e4387966SJerin Jacob goto alarm_fail; 2315e4387966SJerin Jacob } 2316538da7a1SOlivier Matz if (rte_is_zero_ether_addr((struct rte_ether_addr *)nic->mac_addr)) 2317538da7a1SOlivier Matz rte_eth_random_addr(&nic->mac_addr[0]); 2318e4387966SJerin Jacob 2319538da7a1SOlivier Matz rte_ether_addr_copy((struct rte_ether_addr *)nic->mac_addr, 2320e4387966SJerin Jacob ð_dev->data->mac_addrs[0]); 2321e4387966SJerin Jacob 2322e4387966SJerin Jacob ret = nicvf_mbox_set_mac_addr(nic, nic->mac_addr); 2323e4387966SJerin Jacob if (ret) { 2324e4387966SJerin Jacob PMD_INIT_LOG(ERR, "Failed to set mac addr"); 2325e4387966SJerin Jacob goto malloc_fail; 2326e4387966SJerin Jacob } 2327e4387966SJerin Jacob 232844a86354SHanumanth Pothula /* set DMAC CTRL reg to allow MAC */ 232944a86354SHanumanth Pothula dmac_ctrl_reg = BCAST_ACCEPT | BGX_MCAST_MODE(2) | CAM_ACCEPT; 233044a86354SHanumanth Pothula ret = nicvf_mbox_set_xcast(nic, dmac_ctrl_reg, 233144a86354SHanumanth Pothula ether_addr_to_u64(nic->mac_addr)); 233244a86354SHanumanth Pothula if (ret) { 233344a86354SHanumanth Pothula PMD_INIT_LOG(ERR, "Failed to set mac addr"); 233444a86354SHanumanth Pothula goto malloc_fail; 233544a86354SHanumanth Pothula } 233644a86354SHanumanth Pothula 2337279d3319SRakesh Kudurumalla ret = nicvf_set_first_skip(eth_dev); 2338279d3319SRakesh Kudurumalla if (ret) { 2339279d3319SRakesh Kudurumalla PMD_INIT_LOG(ERR, "Failed to configure first skip"); 2340279d3319SRakesh Kudurumalla goto malloc_fail; 2341279d3319SRakesh Kudurumalla } 2342c2c4f87bSAman Deep Singh PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=" RTE_ETHER_ADDR_PRT_FMT, 2343e4387966SJerin Jacob eth_dev->data->port_id, nic->vendor_id, nic->device_id, 2344e4387966SJerin Jacob nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2], 2345e4387966SJerin Jacob nic->mac_addr[3], nic->mac_addr[4], nic->mac_addr[5]); 2346e4387966SJerin Jacob 2347e4387966SJerin Jacob return 0; 2348e4387966SJerin Jacob 2349e4387966SJerin Jacob malloc_fail: 

	ret = nicvf_set_first_skip(eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure first skip");
		goto malloc_fail;
	}
	PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=" RTE_ETHER_ADDR_PRT_FMT,
		eth_dev->data->port_id, nic->vendor_id, nic->device_id,
		nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2],
		nic->mac_addr[3], nic->mac_addr[4], nic->mac_addr[5]);

	return 0;

malloc_fail:
	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;
alarm_fail:
	nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
fail:
	return ret;
}

static const struct rte_pci_id pci_id_nicvf_map[] = {
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_CN88XX_PASS1_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS1_NICVF,
	},
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF,
	},
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN81XX_NICVF,
	},
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN83XX_NICVF,
	},
	{
		.vendor_id = 0,
	},
};

static int nicvf_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct nicvf),
		nicvf_eth_dev_init);
}

static int nicvf_eth_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, nicvf_eth_dev_uninit);
}

static struct rte_pci_driver rte_nicvf_pmd = {
	.id_table = pci_id_nicvf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_KEEP_MAPPED_RES |
			RTE_PCI_DRV_INTR_LSC,
	.probe = nicvf_eth_pci_probe,
	.remove = nicvf_eth_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_thunderx, rte_nicvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_thunderx, pci_id_nicvf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_thunderx, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_thunderx, SKIP_DATA_BYTES "=<int>");
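
/*
 * The parameter string registered above is embedded in the PMD's ELF
 * metadata; assuming a standard DPDK build, it can be inspected with the
 * usertools/dpdk-pmdinfo.py helper. The binary path below is shown for
 * illustration only:
 *
 *	dpdk-pmdinfo.py ./build/app/dpdk-testpmd
 */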