1e4387966SJerin Jacob /* 2e4387966SJerin Jacob * BSD LICENSE 3e4387966SJerin Jacob * 4e4387966SJerin Jacob * Copyright (C) Cavium networks Ltd. 2016. 5e4387966SJerin Jacob * 6e4387966SJerin Jacob * Redistribution and use in source and binary forms, with or without 7e4387966SJerin Jacob * modification, are permitted provided that the following conditions 8e4387966SJerin Jacob * are met: 9e4387966SJerin Jacob * 10e4387966SJerin Jacob * * Redistributions of source code must retain the above copyright 11e4387966SJerin Jacob * notice, this list of conditions and the following disclaimer. 12e4387966SJerin Jacob * * Redistributions in binary form must reproduce the above copyright 13e4387966SJerin Jacob * notice, this list of conditions and the following disclaimer in 14e4387966SJerin Jacob * the documentation and/or other materials provided with the 15e4387966SJerin Jacob * distribution. 16e4387966SJerin Jacob * * Neither the name of Cavium networks nor the names of its 17e4387966SJerin Jacob * contributors may be used to endorse or promote products derived 18e4387966SJerin Jacob * from this software without specific prior written permission. 19e4387966SJerin Jacob * 20e4387966SJerin Jacob * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21e4387966SJerin Jacob * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22e4387966SJerin Jacob * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23e4387966SJerin Jacob * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 24e4387966SJerin Jacob * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25e4387966SJerin Jacob * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26e4387966SJerin Jacob * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27e4387966SJerin Jacob * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28e4387966SJerin Jacob * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29e4387966SJerin Jacob * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30e4387966SJerin Jacob * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31e4387966SJerin Jacob */ 32e4387966SJerin Jacob 33e4387966SJerin Jacob #include <assert.h> 34e4387966SJerin Jacob #include <stdio.h> 35e4387966SJerin Jacob #include <stdbool.h> 36e4387966SJerin Jacob #include <errno.h> 37e4387966SJerin Jacob #include <stdint.h> 38e4387966SJerin Jacob #include <string.h> 39e4387966SJerin Jacob #include <unistd.h> 40e4387966SJerin Jacob #include <stdarg.h> 41e4387966SJerin Jacob #include <inttypes.h> 42e4387966SJerin Jacob #include <netinet/in.h> 43e4387966SJerin Jacob #include <sys/queue.h> 44e4387966SJerin Jacob #include <sys/timerfd.h> 45e4387966SJerin Jacob 46e4387966SJerin Jacob #include <rte_alarm.h> 47e4387966SJerin Jacob #include <rte_atomic.h> 48e4387966SJerin Jacob #include <rte_branch_prediction.h> 49e4387966SJerin Jacob #include <rte_byteorder.h> 50e4387966SJerin Jacob #include <rte_common.h> 51e4387966SJerin Jacob #include <rte_cycles.h> 52e4387966SJerin Jacob #include <rte_debug.h> 53e4387966SJerin Jacob #include <rte_dev.h> 54e4387966SJerin Jacob #include <rte_eal.h> 55e4387966SJerin Jacob #include <rte_ether.h> 56e4387966SJerin Jacob #include <rte_ethdev.h> 57e4387966SJerin Jacob #include <rte_interrupts.h> 58e4387966SJerin Jacob #include <rte_log.h> 59e4387966SJerin Jacob #include <rte_memory.h> 60e4387966SJerin Jacob #include 
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_pci.h>
#include <rte_tailq.h>

#include "base/nicvf_plat.h"

#include "nicvf_ethdev.h"
#include "nicvf_rxtx.h"
#include "nicvf_logs.h"

/* Forward declaration; definition appears later in this file. */
static void nicvf_dev_stop(struct rte_eth_dev *dev);

/*
 * Atomically publish a new link state into dev->data->dev_link.
 *
 * The whole rte_eth_link struct is swapped with one 64-bit
 * compare-and-set (assumes sizeof(struct rte_eth_link) == 8 --
 * true for this DPDK version; confirm if the struct changes).
 *
 * Returns 0 on success, -1 if the CAS lost a race with another writer.
 */
static inline int
nicvf_atomic_write_link_status(struct rte_eth_dev *dev,
			       struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
		*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

/*
 * Translate the driver's cached HW link state (nic->link_up, nic->duplex,
 * nic->speed) into the generic rte_eth_link representation.
 */
static inline void
nicvf_set_eth_link_status(struct nicvf *nic, struct rte_eth_link *link)
{
	link->link_status = nic->link_up;
	link->link_duplex = ETH_LINK_AUTONEG;
	if (nic->duplex == NICVF_HALF_DUPLEX)
		link->link_duplex = ETH_LINK_HALF_DUPLEX;
	else if (nic->duplex == NICVF_FULL_DUPLEX)
		link->link_duplex = ETH_LINK_FULL_DUPLEX;
	link->link_speed = nic->speed;
	link->link_autoneg = ETH_LINK_SPEED_AUTONEG;
}

/*
 * Periodic alarm callback: poll the mailbox/interrupt registers and, when
 * a BGX link-change message arrives, refresh the cached link status (only
 * if the application enabled LSC) and fire the LSC callbacks.
 *
 * The callback re-arms itself unconditionally, so polling continues every
 * NICVF_INTR_POLL_INTERVAL_MS until nicvf_periodic_alarm_stop() cancels it.
 */
static void
nicvf_interrupt(void *arg)
{
	struct nicvf *nic = arg;

	if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) {
		if (nic->eth_dev->data->dev_conf.intr_conf.lsc)
			nicvf_set_eth_link_status(nic,
					&nic->eth_dev->data->dev_link);
		_rte_eth_dev_callback_process(nic->eth_dev,
					      RTE_ETH_EVENT_INTR_LSC);
	}

	rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
				nicvf_interrupt, nic);
}

/* Arm the first interrupt-poll alarm (interval given in microseconds). */
static int
nicvf_periodic_alarm_start(struct nicvf *nic)
{
	return rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
					nicvf_interrupt, nic);
}

/* Cancel the self-re-arming interrupt-poll alarm. */
static int
nicvf_periodic_alarm_stop(struct nicvf *nic)
{
	return rte_eal_alarm_cancel(nicvf_interrupt, nic);
}

/*
 * Return 0 means link status changed, -1 means not changed
 */
static int
nicvf_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	struct rte_eth_link link;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	/* Build the current link view from the driver cache, then publish
	 * it atomically; the CAS result doubles as the changed/unchanged
	 * return code required by the ethdev API.
	 */
	memset(&link, 0, sizeof(link));
	nicvf_set_eth_link_status(nic, &link);
	return nicvf_atomic_write_link_status(dev, &link);
}

/*
 * Validate and apply a new MTU.
 *
 * Rejects frame sizes outside [NIC_HW_MIN_FRS, NIC_HW_MAX_FRS], and
 * MTUs that would not fit a single RX buffer (non-scattered mode) or
 * NIC_HW_MAX_SEGS buffers (scattered mode).  On success the jumbo flag,
 * HW max frame size (via mailbox), max_rx_pkt_len and nic->mtu are all
 * updated.  Returns 0 on success, -EINVAL on any rejection.
 */
static int
nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint32_t buffsz, frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	PMD_INIT_FUNC_TRACE();

	if (frame_size > NIC_HW_MAX_FRS)
		return -EINVAL;

	if (frame_size < NIC_HW_MIN_FRS)
		return -EINVAL;

	/* Usable payload per RX buffer, excluding the mbuf headroom. */
	buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;

	/*
	 * Refuse mtu that requires the support of scattered packets
	 * when this feature has not been enabled before.
	 */
	if (!dev->data->scattered_rx &&
		(frame_size + 2 * VLAN_TAG_SIZE > buffsz))
		return -EINVAL;

	/* check <seg size> * <max_seg>  >= max_frame */
	if (dev->data->scattered_rx &&
		(frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS))
		return -EINVAL;

	if (frame_size > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		dev->data->dev_conf.rxmode.jumbo_frame = 0;

	/* Program the new max frame size into the HW via mailbox. */
	if (nicvf_mbox_update_hw_max_frs(nic, frame_size))
		return -EINVAL;

	/* Update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)frame_size;
	nic->mtu = mtu;
	return 0;
}

/*
 * ethdev get_reg callback.
 *
 * With data == NULL, only reports the register count and width.
 * Otherwise dumps the full register set; partial dumps (a nonzero
 * length that is not the full count) return -ENOTSUP.
 */
static int
nicvf_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	uint64_t *data = regs->data;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (data == NULL) {
		regs->length = nicvf_reg_get_count();
		regs->width = THUNDERX_REG_BYTES;
		return 0;
	}

	/* Support only full register dump */
	if ((regs->length == 0) ||
		(regs->length == (uint32_t)nicvf_reg_get_count())) {
		regs->version = nic->vendor_id << 16 | nic->device_id;
		nicvf_reg_dump(nic, data);
		return 0;
	}
	return -ENOTSUP;
}

/*
 * ethdev stats_get callback: fill per-queue counters (capped at
 * RTE_ETHDEV_QUEUE_STAT_CNTRS entries) and aggregate port counters
 * from the HW statistics registers.
 */
static void
nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	uint16_t qidx;
	struct nicvf_hw_rx_qstats rx_qstats;
	struct nicvf_hw_tx_qstats tx_qstats;
	struct nicvf_hw_stats port_stats;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	/* Reading per RX ring stats */
	for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
		if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		nicvf_hw_get_rx_qstats(nic, &rx_qstats, qidx);
		stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
		stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
	}

	/* Reading per TX ring stats */
	for (qidx = 0; qidx < dev->data->nb_tx_queues; qidx++) {
		if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		nicvf_hw_get_tx_qstats(nic, &tx_qstats, qidx);
		stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
		stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
	}

	nicvf_hw_get_stats(nic, &port_stats);
	stats->ibytes = port_stats.rx_bytes;
	/* ipackets aggregates unicast, broadcast and multicast frames. */
	stats->ipackets = port_stats.rx_ucast_frames;
	stats->ipackets += port_stats.rx_bcast_frames;
	stats->ipackets += port_stats.rx_mcast_frames;
	stats->ierrors = port_stats.rx_l2_errors;
	/* imissed aggregates every RX drop class the HW distinguishes. */
	stats->imissed = port_stats.rx_drop_red;
	stats->imissed += port_stats.rx_drop_overrun;
	stats->imissed += port_stats.rx_drop_bcast;
	stats->imissed += port_stats.rx_drop_mcast;
	stats->imissed += port_stats.rx_drop_l3_bcast;
	stats->imissed += port_stats.rx_drop_l3_mcast;

	stats->obytes = port_stats.tx_bytes_ok;
	stats->opackets = port_stats.tx_ucast_frames_ok;
	stats->opackets += port_stats.tx_bcast_frames_ok;
	stats->opackets += port_stats.tx_mcast_frames_ok;
	stats->oerrors = port_stats.tx_drops;
}

static const uint32_t *
nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev) 2641c80e4fdSJerin Jacob { 2651c80e4fdSJerin Jacob size_t copied; 2661c80e4fdSJerin Jacob static uint32_t ptypes[32]; 2671c80e4fdSJerin Jacob struct nicvf *nic = nicvf_pmd_priv(dev); 2681c80e4fdSJerin Jacob static const uint32_t ptypes_pass1[] = { 2691c80e4fdSJerin Jacob RTE_PTYPE_L3_IPV4, 2701c80e4fdSJerin Jacob RTE_PTYPE_L3_IPV4_EXT, 2711c80e4fdSJerin Jacob RTE_PTYPE_L3_IPV6, 2721c80e4fdSJerin Jacob RTE_PTYPE_L3_IPV6_EXT, 2731c80e4fdSJerin Jacob RTE_PTYPE_L4_TCP, 2741c80e4fdSJerin Jacob RTE_PTYPE_L4_UDP, 2751c80e4fdSJerin Jacob RTE_PTYPE_L4_FRAG, 2761c80e4fdSJerin Jacob }; 2771c80e4fdSJerin Jacob static const uint32_t ptypes_pass2[] = { 2781c80e4fdSJerin Jacob RTE_PTYPE_TUNNEL_GRE, 2791c80e4fdSJerin Jacob RTE_PTYPE_TUNNEL_GENEVE, 2801c80e4fdSJerin Jacob RTE_PTYPE_TUNNEL_VXLAN, 2811c80e4fdSJerin Jacob RTE_PTYPE_TUNNEL_NVGRE, 2821c80e4fdSJerin Jacob }; 2831c80e4fdSJerin Jacob static const uint32_t ptypes_end = RTE_PTYPE_UNKNOWN; 2841c80e4fdSJerin Jacob 2851c80e4fdSJerin Jacob copied = sizeof(ptypes_pass1); 2861c80e4fdSJerin Jacob memcpy(ptypes, ptypes_pass1, copied); 2871c80e4fdSJerin Jacob if (nicvf_hw_version(nic) == NICVF_PASS2) { 2881c80e4fdSJerin Jacob memcpy((char *)ptypes + copied, ptypes_pass2, 2891c80e4fdSJerin Jacob sizeof(ptypes_pass2)); 2901c80e4fdSJerin Jacob copied += sizeof(ptypes_pass2); 2911c80e4fdSJerin Jacob } 2921c80e4fdSJerin Jacob 2931c80e4fdSJerin Jacob memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end)); 2941c80e4fdSJerin Jacob if (dev->rx_pkt_burst == nicvf_recv_pkts || 2951c80e4fdSJerin Jacob dev->rx_pkt_burst == nicvf_recv_pkts_multiseg) 2961c80e4fdSJerin Jacob return ptypes; 2971c80e4fdSJerin Jacob 2981c80e4fdSJerin Jacob return NULL; 2991c80e4fdSJerin Jacob } 3001c80e4fdSJerin Jacob 301684fa771SJerin Jacob static void 302684fa771SJerin Jacob nicvf_dev_stats_reset(struct rte_eth_dev *dev) 303684fa771SJerin Jacob { 304684fa771SJerin Jacob int i; 305684fa771SJerin Jacob 
uint16_t rxqs = 0, txqs = 0; 306684fa771SJerin Jacob struct nicvf *nic = nicvf_pmd_priv(dev); 307684fa771SJerin Jacob 308684fa771SJerin Jacob for (i = 0; i < dev->data->nb_rx_queues; i++) 309684fa771SJerin Jacob rxqs |= (0x3 << (i * 2)); 310684fa771SJerin Jacob for (i = 0; i < dev->data->nb_tx_queues; i++) 311684fa771SJerin Jacob txqs |= (0x3 << (i * 2)); 312684fa771SJerin Jacob 313684fa771SJerin Jacob nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, rxqs, txqs); 314684fa771SJerin Jacob } 315684fa771SJerin Jacob 3166eae36eaSJerin Jacob /* Promiscuous mode enabled by default in LMAC to VF 1:1 map configuration */ 3176eae36eaSJerin Jacob static void 3186eae36eaSJerin Jacob nicvf_dev_promisc_enable(struct rte_eth_dev *dev __rte_unused) 3196eae36eaSJerin Jacob { 3206eae36eaSJerin Jacob } 3216eae36eaSJerin Jacob 32243362c6aSJerin Jacob static inline uint64_t 32343362c6aSJerin Jacob nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss) 32443362c6aSJerin Jacob { 32543362c6aSJerin Jacob uint64_t nic_rss = 0; 32643362c6aSJerin Jacob 32743362c6aSJerin Jacob if (ethdev_rss & ETH_RSS_IPV4) 32843362c6aSJerin Jacob nic_rss |= RSS_IP_ENA; 32943362c6aSJerin Jacob 33043362c6aSJerin Jacob if (ethdev_rss & ETH_RSS_IPV6) 33143362c6aSJerin Jacob nic_rss |= RSS_IP_ENA; 33243362c6aSJerin Jacob 33343362c6aSJerin Jacob if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP) 33443362c6aSJerin Jacob nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA); 33543362c6aSJerin Jacob 33643362c6aSJerin Jacob if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP) 33743362c6aSJerin Jacob nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA); 33843362c6aSJerin Jacob 33943362c6aSJerin Jacob if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP) 34043362c6aSJerin Jacob nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA); 34143362c6aSJerin Jacob 34243362c6aSJerin Jacob if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP) 34343362c6aSJerin Jacob nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA); 34443362c6aSJerin Jacob 34543362c6aSJerin Jacob if (ethdev_rss & ETH_RSS_PORT) 34643362c6aSJerin Jacob 
nic_rss |= RSS_L2_EXTENDED_HASH_ENA; 34743362c6aSJerin Jacob 34843362c6aSJerin Jacob if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) { 34943362c6aSJerin Jacob if (ethdev_rss & ETH_RSS_VXLAN) 35043362c6aSJerin Jacob nic_rss |= RSS_TUN_VXLAN_ENA; 35143362c6aSJerin Jacob 35243362c6aSJerin Jacob if (ethdev_rss & ETH_RSS_GENEVE) 35343362c6aSJerin Jacob nic_rss |= RSS_TUN_GENEVE_ENA; 35443362c6aSJerin Jacob 35543362c6aSJerin Jacob if (ethdev_rss & ETH_RSS_NVGRE) 35643362c6aSJerin Jacob nic_rss |= RSS_TUN_NVGRE_ENA; 35743362c6aSJerin Jacob } 35843362c6aSJerin Jacob 35943362c6aSJerin Jacob return nic_rss; 36043362c6aSJerin Jacob } 36143362c6aSJerin Jacob 36243362c6aSJerin Jacob static inline uint64_t 36343362c6aSJerin Jacob nicvf_rss_nic_to_ethdev(struct nicvf *nic, uint64_t nic_rss) 36443362c6aSJerin Jacob { 36543362c6aSJerin Jacob uint64_t ethdev_rss = 0; 36643362c6aSJerin Jacob 36743362c6aSJerin Jacob if (nic_rss & RSS_IP_ENA) 36843362c6aSJerin Jacob ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6); 36943362c6aSJerin Jacob 37043362c6aSJerin Jacob if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA)) 37143362c6aSJerin Jacob ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP | 37243362c6aSJerin Jacob ETH_RSS_NONFRAG_IPV6_TCP); 37343362c6aSJerin Jacob 37443362c6aSJerin Jacob if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA)) 37543362c6aSJerin Jacob ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP | 37643362c6aSJerin Jacob ETH_RSS_NONFRAG_IPV6_UDP); 37743362c6aSJerin Jacob 37843362c6aSJerin Jacob if (nic_rss & RSS_L2_EXTENDED_HASH_ENA) 37943362c6aSJerin Jacob ethdev_rss |= ETH_RSS_PORT; 38043362c6aSJerin Jacob 38143362c6aSJerin Jacob if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) { 38243362c6aSJerin Jacob if (nic_rss & RSS_TUN_VXLAN_ENA) 38343362c6aSJerin Jacob ethdev_rss |= ETH_RSS_VXLAN; 38443362c6aSJerin Jacob 38543362c6aSJerin Jacob if (nic_rss & RSS_TUN_GENEVE_ENA) 38643362c6aSJerin Jacob ethdev_rss |= ETH_RSS_GENEVE; 38743362c6aSJerin Jacob 38843362c6aSJerin Jacob if (nic_rss & 
RSS_TUN_NVGRE_ENA) 38943362c6aSJerin Jacob ethdev_rss |= ETH_RSS_NVGRE; 39043362c6aSJerin Jacob } 39143362c6aSJerin Jacob return ethdev_rss; 39243362c6aSJerin Jacob } 39343362c6aSJerin Jacob 39443362c6aSJerin Jacob static int 39543362c6aSJerin Jacob nicvf_dev_reta_query(struct rte_eth_dev *dev, 39643362c6aSJerin Jacob struct rte_eth_rss_reta_entry64 *reta_conf, 39743362c6aSJerin Jacob uint16_t reta_size) 39843362c6aSJerin Jacob { 39943362c6aSJerin Jacob struct nicvf *nic = nicvf_pmd_priv(dev); 40043362c6aSJerin Jacob uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE]; 40143362c6aSJerin Jacob int ret, i, j; 40243362c6aSJerin Jacob 40343362c6aSJerin Jacob if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) { 40443362c6aSJerin Jacob RTE_LOG(ERR, PMD, "The size of hash lookup table configured " 40543362c6aSJerin Jacob "(%d) doesn't match the number hardware can supported " 40643362c6aSJerin Jacob "(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE); 40743362c6aSJerin Jacob return -EINVAL; 40843362c6aSJerin Jacob } 40943362c6aSJerin Jacob 41043362c6aSJerin Jacob ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE); 41143362c6aSJerin Jacob if (ret) 41243362c6aSJerin Jacob return ret; 41343362c6aSJerin Jacob 41443362c6aSJerin Jacob /* Copy RETA table */ 41543362c6aSJerin Jacob for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) { 41643362c6aSJerin Jacob for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) 41743362c6aSJerin Jacob if ((reta_conf[i].mask >> j) & 0x01) 41843362c6aSJerin Jacob reta_conf[i].reta[j] = tbl[j]; 41943362c6aSJerin Jacob } 42043362c6aSJerin Jacob 42143362c6aSJerin Jacob return 0; 42243362c6aSJerin Jacob } 42343362c6aSJerin Jacob 42443362c6aSJerin Jacob static int 42543362c6aSJerin Jacob nicvf_dev_reta_update(struct rte_eth_dev *dev, 42643362c6aSJerin Jacob struct rte_eth_rss_reta_entry64 *reta_conf, 42743362c6aSJerin Jacob uint16_t reta_size) 42843362c6aSJerin Jacob { 42943362c6aSJerin Jacob struct nicvf *nic = nicvf_pmd_priv(dev); 43043362c6aSJerin Jacob 
uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE]; 43143362c6aSJerin Jacob int ret, i, j; 43243362c6aSJerin Jacob 43343362c6aSJerin Jacob if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) { 43443362c6aSJerin Jacob RTE_LOG(ERR, PMD, "The size of hash lookup table configured " 43543362c6aSJerin Jacob "(%d) doesn't match the number hardware can supported " 43643362c6aSJerin Jacob "(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE); 43743362c6aSJerin Jacob return -EINVAL; 43843362c6aSJerin Jacob } 43943362c6aSJerin Jacob 44043362c6aSJerin Jacob ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE); 44143362c6aSJerin Jacob if (ret) 44243362c6aSJerin Jacob return ret; 44343362c6aSJerin Jacob 44443362c6aSJerin Jacob /* Copy RETA table */ 44543362c6aSJerin Jacob for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) { 44643362c6aSJerin Jacob for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) 44743362c6aSJerin Jacob if ((reta_conf[i].mask >> j) & 0x01) 44843362c6aSJerin Jacob tbl[j] = reta_conf[i].reta[j]; 44943362c6aSJerin Jacob } 45043362c6aSJerin Jacob 45143362c6aSJerin Jacob return nicvf_rss_reta_update(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE); 45243362c6aSJerin Jacob } 45343362c6aSJerin Jacob 45443362c6aSJerin Jacob static int 45543362c6aSJerin Jacob nicvf_dev_rss_hash_conf_get(struct rte_eth_dev *dev, 45643362c6aSJerin Jacob struct rte_eth_rss_conf *rss_conf) 45743362c6aSJerin Jacob { 45843362c6aSJerin Jacob struct nicvf *nic = nicvf_pmd_priv(dev); 45943362c6aSJerin Jacob 46043362c6aSJerin Jacob if (rss_conf->rss_key) 46143362c6aSJerin Jacob nicvf_rss_get_key(nic, rss_conf->rss_key); 46243362c6aSJerin Jacob 46343362c6aSJerin Jacob rss_conf->rss_key_len = RSS_HASH_KEY_BYTE_SIZE; 46443362c6aSJerin Jacob rss_conf->rss_hf = nicvf_rss_nic_to_ethdev(nic, nicvf_rss_get_cfg(nic)); 46543362c6aSJerin Jacob return 0; 46643362c6aSJerin Jacob } 46743362c6aSJerin Jacob 46843362c6aSJerin Jacob static int 46943362c6aSJerin Jacob nicvf_dev_rss_hash_update(struct rte_eth_dev *dev, 47043362c6aSJerin 
Jacob struct rte_eth_rss_conf *rss_conf) 47143362c6aSJerin Jacob { 47243362c6aSJerin Jacob struct nicvf *nic = nicvf_pmd_priv(dev); 47343362c6aSJerin Jacob uint64_t nic_rss; 47443362c6aSJerin Jacob 47543362c6aSJerin Jacob if (rss_conf->rss_key && 47643362c6aSJerin Jacob rss_conf->rss_key_len != RSS_HASH_KEY_BYTE_SIZE) { 47743362c6aSJerin Jacob RTE_LOG(ERR, PMD, "Hash key size mismatch %d", 47843362c6aSJerin Jacob rss_conf->rss_key_len); 47943362c6aSJerin Jacob return -EINVAL; 48043362c6aSJerin Jacob } 48143362c6aSJerin Jacob 48243362c6aSJerin Jacob if (rss_conf->rss_key) 48343362c6aSJerin Jacob nicvf_rss_set_key(nic, rss_conf->rss_key); 48443362c6aSJerin Jacob 48543362c6aSJerin Jacob nic_rss = nicvf_rss_ethdev_to_nic(nic, rss_conf->rss_hf); 48643362c6aSJerin Jacob nicvf_rss_set_cfg(nic, nic_rss); 48743362c6aSJerin Jacob return 0; 48843362c6aSJerin Jacob } 48943362c6aSJerin Jacob 490aa0d976eSJerin Jacob static int 491aa0d976eSJerin Jacob nicvf_qset_cq_alloc(struct nicvf *nic, struct nicvf_rxq *rxq, uint16_t qidx, 492aa0d976eSJerin Jacob uint32_t desc_cnt) 493aa0d976eSJerin Jacob { 494aa0d976eSJerin Jacob const struct rte_memzone *rz; 495d1d861efSKamil Rytarowski uint32_t ring_size = CMP_QUEUE_SZ_MAX * sizeof(union cq_entry_t); 496aa0d976eSJerin Jacob 497aa0d976eSJerin Jacob rz = rte_eth_dma_zone_reserve(nic->eth_dev, "cq_ring", qidx, ring_size, 498aa0d976eSJerin Jacob NICVF_CQ_BASE_ALIGN_BYTES, nic->node); 499aa0d976eSJerin Jacob if (rz == NULL) { 500aa0d976eSJerin Jacob PMD_INIT_LOG(ERR, "Failed to allocate mem for cq hw ring"); 501aa0d976eSJerin Jacob return -ENOMEM; 502aa0d976eSJerin Jacob } 503aa0d976eSJerin Jacob 504aa0d976eSJerin Jacob memset(rz->addr, 0, ring_size); 505aa0d976eSJerin Jacob 506aa0d976eSJerin Jacob rxq->phys = rz->phys_addr; 507aa0d976eSJerin Jacob rxq->desc = rz->addr; 508aa0d976eSJerin Jacob rxq->qlen_mask = desc_cnt - 1; 509aa0d976eSJerin Jacob 510aa0d976eSJerin Jacob return 0; 511aa0d976eSJerin Jacob } 512aa0d976eSJerin Jacob 
5133f3c6f97SJerin Jacob static int 5143f3c6f97SJerin Jacob nicvf_qset_sq_alloc(struct nicvf *nic, struct nicvf_txq *sq, uint16_t qidx, 5153f3c6f97SJerin Jacob uint32_t desc_cnt) 5163f3c6f97SJerin Jacob { 5173f3c6f97SJerin Jacob const struct rte_memzone *rz; 518d1d861efSKamil Rytarowski uint32_t ring_size = SND_QUEUE_SZ_MAX * sizeof(union sq_entry_t); 5193f3c6f97SJerin Jacob 5203f3c6f97SJerin Jacob rz = rte_eth_dma_zone_reserve(nic->eth_dev, "sq", qidx, ring_size, 5213f3c6f97SJerin Jacob NICVF_SQ_BASE_ALIGN_BYTES, nic->node); 5223f3c6f97SJerin Jacob if (rz == NULL) { 5233f3c6f97SJerin Jacob PMD_INIT_LOG(ERR, "Failed allocate mem for sq hw ring"); 5243f3c6f97SJerin Jacob return -ENOMEM; 5253f3c6f97SJerin Jacob } 5263f3c6f97SJerin Jacob 5273f3c6f97SJerin Jacob memset(rz->addr, 0, ring_size); 5283f3c6f97SJerin Jacob 5293f3c6f97SJerin Jacob sq->phys = rz->phys_addr; 5303f3c6f97SJerin Jacob sq->desc = rz->addr; 5313f3c6f97SJerin Jacob sq->qlen_mask = desc_cnt - 1; 5323f3c6f97SJerin Jacob 5333f3c6f97SJerin Jacob return 0; 5343f3c6f97SJerin Jacob } 5353f3c6f97SJerin Jacob 5367413feeeSJerin Jacob static int 5377413feeeSJerin Jacob nicvf_qset_rbdr_alloc(struct nicvf *nic, uint32_t desc_cnt, uint32_t buffsz) 5387413feeeSJerin Jacob { 5397413feeeSJerin Jacob struct nicvf_rbdr *rbdr; 5407413feeeSJerin Jacob const struct rte_memzone *rz; 5417413feeeSJerin Jacob uint32_t ring_size; 5427413feeeSJerin Jacob 5437413feeeSJerin Jacob assert(nic->rbdr == NULL); 5447413feeeSJerin Jacob rbdr = rte_zmalloc_socket("rbdr", sizeof(struct nicvf_rbdr), 5457413feeeSJerin Jacob RTE_CACHE_LINE_SIZE, nic->node); 5467413feeeSJerin Jacob if (rbdr == NULL) { 5477413feeeSJerin Jacob PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr"); 5487413feeeSJerin Jacob return -ENOMEM; 5497413feeeSJerin Jacob } 5507413feeeSJerin Jacob 551d1d861efSKamil Rytarowski ring_size = sizeof(struct rbdr_entry_t) * RBDR_QUEUE_SZ_MAX; 5527413feeeSJerin Jacob rz = rte_eth_dma_zone_reserve(nic->eth_dev, "rbdr", 0, ring_size, 
5537413feeeSJerin Jacob NICVF_RBDR_BASE_ALIGN_BYTES, nic->node); 5547413feeeSJerin Jacob if (rz == NULL) { 5557413feeeSJerin Jacob PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr desc ring"); 5567413feeeSJerin Jacob return -ENOMEM; 5577413feeeSJerin Jacob } 5587413feeeSJerin Jacob 5597413feeeSJerin Jacob memset(rz->addr, 0, ring_size); 5607413feeeSJerin Jacob 5617413feeeSJerin Jacob rbdr->phys = rz->phys_addr; 5627413feeeSJerin Jacob rbdr->tail = 0; 5637413feeeSJerin Jacob rbdr->next_tail = 0; 5647413feeeSJerin Jacob rbdr->desc = rz->addr; 5657413feeeSJerin Jacob rbdr->buffsz = buffsz; 5667413feeeSJerin Jacob rbdr->qlen_mask = desc_cnt - 1; 5677413feeeSJerin Jacob rbdr->rbdr_status = 5687413feeeSJerin Jacob nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_STATUS0; 5697413feeeSJerin Jacob rbdr->rbdr_door = 5707413feeeSJerin Jacob nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_DOOR; 5717413feeeSJerin Jacob 5727413feeeSJerin Jacob nic->rbdr = rbdr; 5737413feeeSJerin Jacob return 0; 5747413feeeSJerin Jacob } 5757413feeeSJerin Jacob 5767413feeeSJerin Jacob static void 5777413feeeSJerin Jacob nicvf_rbdr_release_mbuf(struct nicvf *nic, nicvf_phys_addr_t phy) 5787413feeeSJerin Jacob { 5797413feeeSJerin Jacob uint16_t qidx; 5807413feeeSJerin Jacob void *obj; 5817413feeeSJerin Jacob struct nicvf_rxq *rxq; 5827413feeeSJerin Jacob 5837413feeeSJerin Jacob for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) { 5847413feeeSJerin Jacob rxq = nic->eth_dev->data->rx_queues[qidx]; 5857413feeeSJerin Jacob if (rxq->precharge_cnt) { 5867413feeeSJerin Jacob obj = (void *)nicvf_mbuff_phy2virt(phy, 5877413feeeSJerin Jacob rxq->mbuf_phys_off); 5887413feeeSJerin Jacob rte_mempool_put(rxq->pool, obj); 5897413feeeSJerin Jacob rxq->precharge_cnt--; 5907413feeeSJerin Jacob break; 5917413feeeSJerin Jacob } 5927413feeeSJerin Jacob } 5937413feeeSJerin Jacob } 5947413feeeSJerin Jacob 5957413feeeSJerin Jacob static inline void 5967413feeeSJerin Jacob nicvf_rbdr_release_mbufs(struct nicvf *nic) 
5977413feeeSJerin Jacob { 5987413feeeSJerin Jacob uint32_t qlen_mask, head; 5997413feeeSJerin Jacob struct rbdr_entry_t *entry; 6007413feeeSJerin Jacob struct nicvf_rbdr *rbdr = nic->rbdr; 6017413feeeSJerin Jacob 6027413feeeSJerin Jacob qlen_mask = rbdr->qlen_mask; 6037413feeeSJerin Jacob head = rbdr->head; 6047413feeeSJerin Jacob while (head != rbdr->tail) { 6057413feeeSJerin Jacob entry = rbdr->desc + head; 6067413feeeSJerin Jacob nicvf_rbdr_release_mbuf(nic, entry->full_addr); 6077413feeeSJerin Jacob head++; 6087413feeeSJerin Jacob head = head & qlen_mask; 6097413feeeSJerin Jacob } 6107413feeeSJerin Jacob } 6117413feeeSJerin Jacob 6123f3c6f97SJerin Jacob static inline void 6133f3c6f97SJerin Jacob nicvf_tx_queue_release_mbufs(struct nicvf_txq *txq) 6143f3c6f97SJerin Jacob { 6153f3c6f97SJerin Jacob uint32_t head; 6163f3c6f97SJerin Jacob 6173f3c6f97SJerin Jacob head = txq->head; 6183f3c6f97SJerin Jacob while (head != txq->tail) { 6193f3c6f97SJerin Jacob if (txq->txbuffs[head]) { 6203f3c6f97SJerin Jacob rte_pktmbuf_free_seg(txq->txbuffs[head]); 6213f3c6f97SJerin Jacob txq->txbuffs[head] = NULL; 6223f3c6f97SJerin Jacob } 6233f3c6f97SJerin Jacob head++; 6243f3c6f97SJerin Jacob head = head & txq->qlen_mask; 6253f3c6f97SJerin Jacob } 6263f3c6f97SJerin Jacob } 6273f3c6f97SJerin Jacob 6283f3c6f97SJerin Jacob static void 6293f3c6f97SJerin Jacob nicvf_tx_queue_reset(struct nicvf_txq *txq) 6303f3c6f97SJerin Jacob { 6313f3c6f97SJerin Jacob uint32_t txq_desc_cnt = txq->qlen_mask + 1; 6323f3c6f97SJerin Jacob 6333f3c6f97SJerin Jacob memset(txq->desc, 0, sizeof(union sq_entry_t) * txq_desc_cnt); 6343f3c6f97SJerin Jacob memset(txq->txbuffs, 0, sizeof(struct rte_mbuf *) * txq_desc_cnt); 6353f3c6f97SJerin Jacob txq->tail = 0; 6363f3c6f97SJerin Jacob txq->head = 0; 6373f3c6f97SJerin Jacob txq->xmit_bufs = 0; 6383f3c6f97SJerin Jacob } 6393f3c6f97SJerin Jacob 640fc1f6c62SJerin Jacob static inline int 641fc1f6c62SJerin Jacob nicvf_start_tx_queue(struct rte_eth_dev *dev, uint16_t qidx) 
642fc1f6c62SJerin Jacob { 643fc1f6c62SJerin Jacob struct nicvf_txq *txq; 644fc1f6c62SJerin Jacob int ret; 645fc1f6c62SJerin Jacob 646fc1f6c62SJerin Jacob if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) 647fc1f6c62SJerin Jacob return 0; 648fc1f6c62SJerin Jacob 649fc1f6c62SJerin Jacob txq = dev->data->tx_queues[qidx]; 650fc1f6c62SJerin Jacob txq->pool = NULL; 651fc1f6c62SJerin Jacob ret = nicvf_qset_sq_config(nicvf_pmd_priv(dev), qidx, txq); 652fc1f6c62SJerin Jacob if (ret) { 653fc1f6c62SJerin Jacob PMD_INIT_LOG(ERR, "Failed to configure sq %d %d", qidx, ret); 654fc1f6c62SJerin Jacob goto config_sq_error; 655fc1f6c62SJerin Jacob } 656fc1f6c62SJerin Jacob 657fc1f6c62SJerin Jacob dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED; 658fc1f6c62SJerin Jacob return ret; 659fc1f6c62SJerin Jacob 660fc1f6c62SJerin Jacob config_sq_error: 661fc1f6c62SJerin Jacob nicvf_qset_sq_reclaim(nicvf_pmd_priv(dev), qidx); 662fc1f6c62SJerin Jacob return ret; 663fc1f6c62SJerin Jacob } 664fc1f6c62SJerin Jacob 665fc1f6c62SJerin Jacob static inline int 666fc1f6c62SJerin Jacob nicvf_stop_tx_queue(struct rte_eth_dev *dev, uint16_t qidx) 667fc1f6c62SJerin Jacob { 668fc1f6c62SJerin Jacob struct nicvf_txq *txq; 669fc1f6c62SJerin Jacob int ret; 670fc1f6c62SJerin Jacob 671fc1f6c62SJerin Jacob if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) 672fc1f6c62SJerin Jacob return 0; 673fc1f6c62SJerin Jacob 674fc1f6c62SJerin Jacob ret = nicvf_qset_sq_reclaim(nicvf_pmd_priv(dev), qidx); 675fc1f6c62SJerin Jacob if (ret) 676fc1f6c62SJerin Jacob PMD_INIT_LOG(ERR, "Failed to reclaim sq %d %d", qidx, ret); 677fc1f6c62SJerin Jacob 678fc1f6c62SJerin Jacob txq = dev->data->tx_queues[qidx]; 679fc1f6c62SJerin Jacob nicvf_tx_queue_release_mbufs(txq); 680fc1f6c62SJerin Jacob nicvf_tx_queue_reset(txq); 681fc1f6c62SJerin Jacob 682fc1f6c62SJerin Jacob dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED; 683fc1f6c62SJerin Jacob return ret; 684fc1f6c62SJerin Jacob } 
68586b4eb42SJerin Jacob 68686b4eb42SJerin Jacob static inline int 68786b4eb42SJerin Jacob nicvf_configure_cpi(struct rte_eth_dev *dev) 68886b4eb42SJerin Jacob { 68986b4eb42SJerin Jacob struct nicvf *nic = nicvf_pmd_priv(dev); 69086b4eb42SJerin Jacob uint16_t qidx, qcnt; 69186b4eb42SJerin Jacob int ret; 69286b4eb42SJerin Jacob 69386b4eb42SJerin Jacob /* Count started rx queues */ 69486b4eb42SJerin Jacob for (qidx = qcnt = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) 69586b4eb42SJerin Jacob if (dev->data->rx_queue_state[qidx] == 69686b4eb42SJerin Jacob RTE_ETH_QUEUE_STATE_STARTED) 69786b4eb42SJerin Jacob qcnt++; 69886b4eb42SJerin Jacob 69986b4eb42SJerin Jacob nic->cpi_alg = CPI_ALG_NONE; 70086b4eb42SJerin Jacob ret = nicvf_mbox_config_cpi(nic, qcnt); 70186b4eb42SJerin Jacob if (ret) 70286b4eb42SJerin Jacob PMD_INIT_LOG(ERR, "Failed to configure CPI %d", ret); 70386b4eb42SJerin Jacob 70486b4eb42SJerin Jacob return ret; 70586b4eb42SJerin Jacob } 70686b4eb42SJerin Jacob 7077413feeeSJerin Jacob static inline int 7087413feeeSJerin Jacob nicvf_configure_rss(struct rte_eth_dev *dev) 7097413feeeSJerin Jacob { 7107413feeeSJerin Jacob struct nicvf *nic = nicvf_pmd_priv(dev); 7117413feeeSJerin Jacob uint64_t rsshf; 7127413feeeSJerin Jacob int ret = -EINVAL; 7137413feeeSJerin Jacob 7147413feeeSJerin Jacob rsshf = nicvf_rss_ethdev_to_nic(nic, 7157413feeeSJerin Jacob dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf); 7167413feeeSJerin Jacob PMD_DRV_LOG(INFO, "mode=%d rx_queues=%d loopback=%d rsshf=0x%" PRIx64, 7177413feeeSJerin Jacob dev->data->dev_conf.rxmode.mq_mode, 7187413feeeSJerin Jacob nic->eth_dev->data->nb_rx_queues, 7197413feeeSJerin Jacob nic->eth_dev->data->dev_conf.lpbk_mode, rsshf); 7207413feeeSJerin Jacob 7217413feeeSJerin Jacob if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE) 7227413feeeSJerin Jacob ret = nicvf_rss_term(nic); 7237413feeeSJerin Jacob else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) 7247413feeeSJerin Jacob ret = 
nicvf_rss_config(nic, 7257413feeeSJerin Jacob nic->eth_dev->data->nb_rx_queues, rsshf); 7267413feeeSJerin Jacob if (ret) 7277413feeeSJerin Jacob PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret); 7287413feeeSJerin Jacob 7297413feeeSJerin Jacob return ret; 7307413feeeSJerin Jacob } 7317413feeeSJerin Jacob 73286b4eb42SJerin Jacob static int 73386b4eb42SJerin Jacob nicvf_configure_rss_reta(struct rte_eth_dev *dev) 73486b4eb42SJerin Jacob { 73586b4eb42SJerin Jacob struct nicvf *nic = nicvf_pmd_priv(dev); 73686b4eb42SJerin Jacob unsigned int idx, qmap_size; 73786b4eb42SJerin Jacob uint8_t qmap[RTE_MAX_QUEUES_PER_PORT]; 73886b4eb42SJerin Jacob uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE]; 73986b4eb42SJerin Jacob 74086b4eb42SJerin Jacob if (nic->cpi_alg != CPI_ALG_NONE) 74186b4eb42SJerin Jacob return -EINVAL; 74286b4eb42SJerin Jacob 74386b4eb42SJerin Jacob /* Prepare queue map */ 74486b4eb42SJerin Jacob for (idx = 0, qmap_size = 0; idx < dev->data->nb_rx_queues; idx++) { 74586b4eb42SJerin Jacob if (dev->data->rx_queue_state[idx] == 74686b4eb42SJerin Jacob RTE_ETH_QUEUE_STATE_STARTED) 74786b4eb42SJerin Jacob qmap[qmap_size++] = idx; 74886b4eb42SJerin Jacob } 74986b4eb42SJerin Jacob 75086b4eb42SJerin Jacob /* Update default RSS RETA */ 75186b4eb42SJerin Jacob for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++) 75286b4eb42SJerin Jacob default_reta[idx] = qmap[idx % qmap_size]; 75386b4eb42SJerin Jacob 75486b4eb42SJerin Jacob return nicvf_rss_reta_update(nic, default_reta, 75586b4eb42SJerin Jacob NIC_MAX_RSS_IDR_TBL_SIZE); 75686b4eb42SJerin Jacob } 75786b4eb42SJerin Jacob 7583f3c6f97SJerin Jacob static void 7593f3c6f97SJerin Jacob nicvf_dev_tx_queue_release(void *sq) 7603f3c6f97SJerin Jacob { 7613f3c6f97SJerin Jacob struct nicvf_txq *txq; 7623f3c6f97SJerin Jacob 7633f3c6f97SJerin Jacob PMD_INIT_FUNC_TRACE(); 7643f3c6f97SJerin Jacob 7653f3c6f97SJerin Jacob txq = (struct nicvf_txq *)sq; 7663f3c6f97SJerin Jacob if (txq) { 7673f3c6f97SJerin Jacob if (txq->txbuffs != NULL) { 
7683f3c6f97SJerin Jacob nicvf_tx_queue_release_mbufs(txq); 7693f3c6f97SJerin Jacob rte_free(txq->txbuffs); 7703f3c6f97SJerin Jacob txq->txbuffs = NULL; 7713f3c6f97SJerin Jacob } 7723f3c6f97SJerin Jacob rte_free(txq); 7733f3c6f97SJerin Jacob } 7743f3c6f97SJerin Jacob } 7753f3c6f97SJerin Jacob 7767413feeeSJerin Jacob static void 7777413feeeSJerin Jacob nicvf_set_tx_function(struct rte_eth_dev *dev) 7787413feeeSJerin Jacob { 7797413feeeSJerin Jacob struct nicvf_txq *txq; 7807413feeeSJerin Jacob size_t i; 7817413feeeSJerin Jacob bool multiseg = false; 7827413feeeSJerin Jacob 7837413feeeSJerin Jacob for (i = 0; i < dev->data->nb_tx_queues; i++) { 7847413feeeSJerin Jacob txq = dev->data->tx_queues[i]; 7857413feeeSJerin Jacob if ((txq->txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS) == 0) { 7867413feeeSJerin Jacob multiseg = true; 7877413feeeSJerin Jacob break; 7887413feeeSJerin Jacob } 7897413feeeSJerin Jacob } 7907413feeeSJerin Jacob 7917413feeeSJerin Jacob /* Use a simple Tx queue (no offloads, no multi segs) if possible */ 7927413feeeSJerin Jacob if (multiseg) { 7937413feeeSJerin Jacob PMD_DRV_LOG(DEBUG, "Using multi-segment tx callback"); 7947413feeeSJerin Jacob dev->tx_pkt_burst = nicvf_xmit_pkts_multiseg; 7957413feeeSJerin Jacob } else { 7967413feeeSJerin Jacob PMD_DRV_LOG(DEBUG, "Using single-segment tx callback"); 7977413feeeSJerin Jacob dev->tx_pkt_burst = nicvf_xmit_pkts; 7987413feeeSJerin Jacob } 7997413feeeSJerin Jacob 8007413feeeSJerin Jacob if (txq->pool_free == nicvf_single_pool_free_xmited_buffers) 8017413feeeSJerin Jacob PMD_DRV_LOG(DEBUG, "Using single-mempool tx free method"); 8027413feeeSJerin Jacob else 8037413feeeSJerin Jacob PMD_DRV_LOG(DEBUG, "Using multi-mempool tx free method"); 8047413feeeSJerin Jacob } 8057413feeeSJerin Jacob 8067413feeeSJerin Jacob static void 8077413feeeSJerin Jacob nicvf_set_rx_function(struct rte_eth_dev *dev) 8087413feeeSJerin Jacob { 8097413feeeSJerin Jacob if (dev->data->scattered_rx) { 8107413feeeSJerin Jacob PMD_DRV_LOG(DEBUG, 
"Using multi-segment rx callback"); 8117413feeeSJerin Jacob dev->rx_pkt_burst = nicvf_recv_pkts_multiseg; 8127413feeeSJerin Jacob } else { 8137413feeeSJerin Jacob PMD_DRV_LOG(DEBUG, "Using single-segment rx callback"); 8147413feeeSJerin Jacob dev->rx_pkt_burst = nicvf_recv_pkts; 8157413feeeSJerin Jacob } 8167413feeeSJerin Jacob } 8177413feeeSJerin Jacob 8183f3c6f97SJerin Jacob static int 8193f3c6f97SJerin Jacob nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, 8203f3c6f97SJerin Jacob uint16_t nb_desc, unsigned int socket_id, 8213f3c6f97SJerin Jacob const struct rte_eth_txconf *tx_conf) 8223f3c6f97SJerin Jacob { 8233f3c6f97SJerin Jacob uint16_t tx_free_thresh; 8243f3c6f97SJerin Jacob uint8_t is_single_pool; 8253f3c6f97SJerin Jacob struct nicvf_txq *txq; 8263f3c6f97SJerin Jacob struct nicvf *nic = nicvf_pmd_priv(dev); 8273f3c6f97SJerin Jacob 8283f3c6f97SJerin Jacob PMD_INIT_FUNC_TRACE(); 8293f3c6f97SJerin Jacob 8303f3c6f97SJerin Jacob /* Socket id check */ 8313f3c6f97SJerin Jacob if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node) 8323f3c6f97SJerin Jacob PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d", 8333f3c6f97SJerin Jacob socket_id, nic->node); 8343f3c6f97SJerin Jacob 8353f3c6f97SJerin Jacob /* Tx deferred start is not supported */ 8363f3c6f97SJerin Jacob if (tx_conf->tx_deferred_start) { 8373f3c6f97SJerin Jacob PMD_INIT_LOG(ERR, "Tx deferred start not supported"); 8383f3c6f97SJerin Jacob return -EINVAL; 8393f3c6f97SJerin Jacob } 8403f3c6f97SJerin Jacob 8413f3c6f97SJerin Jacob /* Roundup nb_desc to available qsize and validate max number of desc */ 8423f3c6f97SJerin Jacob nb_desc = nicvf_qsize_sq_roundup(nb_desc); 8433f3c6f97SJerin Jacob if (nb_desc == 0) { 8443f3c6f97SJerin Jacob PMD_INIT_LOG(ERR, "Value of nb_desc beyond available sq qsize"); 8453f3c6f97SJerin Jacob return -EINVAL; 8463f3c6f97SJerin Jacob } 8473f3c6f97SJerin Jacob 8483f3c6f97SJerin Jacob /* Validate tx_free_thresh */ 8493f3c6f97SJerin Jacob 
tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ? 8503f3c6f97SJerin Jacob tx_conf->tx_free_thresh : 8513f3c6f97SJerin Jacob NICVF_DEFAULT_TX_FREE_THRESH); 8523f3c6f97SJerin Jacob 8533f3c6f97SJerin Jacob if (tx_free_thresh > (nb_desc) || 8543f3c6f97SJerin Jacob tx_free_thresh > NICVF_MAX_TX_FREE_THRESH) { 8553f3c6f97SJerin Jacob PMD_INIT_LOG(ERR, 8563f3c6f97SJerin Jacob "tx_free_thresh must be less than the number of TX " 8573f3c6f97SJerin Jacob "descriptors. (tx_free_thresh=%u port=%d " 8583f3c6f97SJerin Jacob "queue=%d)", (unsigned int)tx_free_thresh, 8593f3c6f97SJerin Jacob (int)dev->data->port_id, (int)qidx); 8603f3c6f97SJerin Jacob return -EINVAL; 8613f3c6f97SJerin Jacob } 8623f3c6f97SJerin Jacob 8633f3c6f97SJerin Jacob /* Free memory prior to re-allocation if needed. */ 8643f3c6f97SJerin Jacob if (dev->data->tx_queues[qidx] != NULL) { 8653f3c6f97SJerin Jacob PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d", 8663f3c6f97SJerin Jacob qidx); 8673f3c6f97SJerin Jacob nicvf_dev_tx_queue_release(dev->data->tx_queues[qidx]); 8683f3c6f97SJerin Jacob dev->data->tx_queues[qidx] = NULL; 8693f3c6f97SJerin Jacob } 8703f3c6f97SJerin Jacob 8713f3c6f97SJerin Jacob /* Allocating tx queue data structure */ 8723f3c6f97SJerin Jacob txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nicvf_txq), 8733f3c6f97SJerin Jacob RTE_CACHE_LINE_SIZE, nic->node); 8743f3c6f97SJerin Jacob if (txq == NULL) { 8753f3c6f97SJerin Jacob PMD_INIT_LOG(ERR, "Failed to allocate txq=%d", qidx); 8763f3c6f97SJerin Jacob return -ENOMEM; 8773f3c6f97SJerin Jacob } 8783f3c6f97SJerin Jacob 8793f3c6f97SJerin Jacob txq->nic = nic; 8803f3c6f97SJerin Jacob txq->queue_id = qidx; 8813f3c6f97SJerin Jacob txq->tx_free_thresh = tx_free_thresh; 8823f3c6f97SJerin Jacob txq->txq_flags = tx_conf->txq_flags; 8833f3c6f97SJerin Jacob txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD; 8843f3c6f97SJerin Jacob txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR; 8853f3c6f97SJerin 
Jacob is_single_pool = (txq->txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT && 8863f3c6f97SJerin Jacob txq->txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP); 8873f3c6f97SJerin Jacob 8883f3c6f97SJerin Jacob /* Choose optimum free threshold value for multipool case */ 8893f3c6f97SJerin Jacob if (!is_single_pool) { 8903f3c6f97SJerin Jacob txq->tx_free_thresh = (uint16_t) 8913f3c6f97SJerin Jacob (tx_conf->tx_free_thresh == NICVF_DEFAULT_TX_FREE_THRESH ? 8923f3c6f97SJerin Jacob NICVF_TX_FREE_MPOOL_THRESH : 8933f3c6f97SJerin Jacob tx_conf->tx_free_thresh); 8941c421f18SJerin Jacob txq->pool_free = nicvf_multi_pool_free_xmited_buffers; 8951c421f18SJerin Jacob } else { 8961c421f18SJerin Jacob txq->pool_free = nicvf_single_pool_free_xmited_buffers; 8973f3c6f97SJerin Jacob } 8983f3c6f97SJerin Jacob 8993f3c6f97SJerin Jacob /* Allocate software ring */ 9003f3c6f97SJerin Jacob txq->txbuffs = rte_zmalloc_socket("txq->txbuffs", 9013f3c6f97SJerin Jacob nb_desc * sizeof(struct rte_mbuf *), 9023f3c6f97SJerin Jacob RTE_CACHE_LINE_SIZE, nic->node); 9033f3c6f97SJerin Jacob 9043f3c6f97SJerin Jacob if (txq->txbuffs == NULL) { 9053f3c6f97SJerin Jacob nicvf_dev_tx_queue_release(txq); 9063f3c6f97SJerin Jacob return -ENOMEM; 9073f3c6f97SJerin Jacob } 9083f3c6f97SJerin Jacob 9093f3c6f97SJerin Jacob if (nicvf_qset_sq_alloc(nic, txq, qidx, nb_desc)) { 9103f3c6f97SJerin Jacob PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx); 9113f3c6f97SJerin Jacob nicvf_dev_tx_queue_release(txq); 9123f3c6f97SJerin Jacob return -ENOMEM; 9133f3c6f97SJerin Jacob } 9143f3c6f97SJerin Jacob 9153f3c6f97SJerin Jacob nicvf_tx_queue_reset(txq); 9163f3c6f97SJerin Jacob 9173f3c6f97SJerin Jacob PMD_TX_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p phys=0x%" PRIx64, 9183f3c6f97SJerin Jacob qidx, txq, nb_desc, txq->desc, txq->phys); 9193f3c6f97SJerin Jacob 9203f3c6f97SJerin Jacob dev->data->tx_queues[qidx] = txq; 9213f3c6f97SJerin Jacob dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED; 9223f3c6f97SJerin Jacob return 0; 
9233f3c6f97SJerin Jacob } 9243f3c6f97SJerin Jacob 92586b4eb42SJerin Jacob static inline void 92686b4eb42SJerin Jacob nicvf_rx_queue_release_mbufs(struct nicvf_rxq *rxq) 92786b4eb42SJerin Jacob { 92886b4eb42SJerin Jacob uint32_t rxq_cnt; 92986b4eb42SJerin Jacob uint32_t nb_pkts, released_pkts = 0; 93086b4eb42SJerin Jacob uint32_t refill_cnt = 0; 93186b4eb42SJerin Jacob struct rte_eth_dev *dev = rxq->nic->eth_dev; 93286b4eb42SJerin Jacob struct rte_mbuf *rx_pkts[NICVF_MAX_RX_FREE_THRESH]; 93386b4eb42SJerin Jacob 93486b4eb42SJerin Jacob if (dev->rx_pkt_burst == NULL) 93586b4eb42SJerin Jacob return; 93686b4eb42SJerin Jacob 93786b4eb42SJerin Jacob while ((rxq_cnt = nicvf_dev_rx_queue_count(dev, rxq->queue_id))) { 93886b4eb42SJerin Jacob nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts, 93986b4eb42SJerin Jacob NICVF_MAX_RX_FREE_THRESH); 94086b4eb42SJerin Jacob PMD_DRV_LOG(INFO, "nb_pkts=%d rxq_cnt=%d", nb_pkts, rxq_cnt); 94186b4eb42SJerin Jacob while (nb_pkts) { 94286b4eb42SJerin Jacob rte_pktmbuf_free_seg(rx_pkts[--nb_pkts]); 94386b4eb42SJerin Jacob released_pkts++; 94486b4eb42SJerin Jacob } 94586b4eb42SJerin Jacob } 94686b4eb42SJerin Jacob 94786b4eb42SJerin Jacob refill_cnt += nicvf_dev_rbdr_refill(dev, rxq->queue_id); 94886b4eb42SJerin Jacob PMD_DRV_LOG(INFO, "free_cnt=%d refill_cnt=%d", 94986b4eb42SJerin Jacob released_pkts, refill_cnt); 95086b4eb42SJerin Jacob } 95186b4eb42SJerin Jacob 952aa0d976eSJerin Jacob static void 953aa0d976eSJerin Jacob nicvf_rx_queue_reset(struct nicvf_rxq *rxq) 954aa0d976eSJerin Jacob { 955aa0d976eSJerin Jacob rxq->head = 0; 956aa0d976eSJerin Jacob rxq->available_space = 0; 957aa0d976eSJerin Jacob rxq->recv_buffers = 0; 958aa0d976eSJerin Jacob } 959aa0d976eSJerin Jacob 96086b4eb42SJerin Jacob static inline int 96186b4eb42SJerin Jacob nicvf_start_rx_queue(struct rte_eth_dev *dev, uint16_t qidx) 96286b4eb42SJerin Jacob { 96386b4eb42SJerin Jacob struct nicvf *nic = nicvf_pmd_priv(dev); 96486b4eb42SJerin Jacob struct nicvf_rxq *rxq; 96586b4eb42SJerin 
Jacob int ret; 96686b4eb42SJerin Jacob 96786b4eb42SJerin Jacob if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) 96886b4eb42SJerin Jacob return 0; 96986b4eb42SJerin Jacob 97086b4eb42SJerin Jacob /* Update rbdr pointer to all rxq */ 97186b4eb42SJerin Jacob rxq = dev->data->rx_queues[qidx]; 97286b4eb42SJerin Jacob rxq->shared_rbdr = nic->rbdr; 97386b4eb42SJerin Jacob 97486b4eb42SJerin Jacob ret = nicvf_qset_rq_config(nic, qidx, rxq); 97586b4eb42SJerin Jacob if (ret) { 97686b4eb42SJerin Jacob PMD_INIT_LOG(ERR, "Failed to configure rq %d %d", qidx, ret); 97786b4eb42SJerin Jacob goto config_rq_error; 97886b4eb42SJerin Jacob } 97986b4eb42SJerin Jacob ret = nicvf_qset_cq_config(nic, qidx, rxq); 98086b4eb42SJerin Jacob if (ret) { 98186b4eb42SJerin Jacob PMD_INIT_LOG(ERR, "Failed to configure cq %d %d", qidx, ret); 98286b4eb42SJerin Jacob goto config_cq_error; 98386b4eb42SJerin Jacob } 98486b4eb42SJerin Jacob 98586b4eb42SJerin Jacob dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED; 98686b4eb42SJerin Jacob return 0; 98786b4eb42SJerin Jacob 98886b4eb42SJerin Jacob config_cq_error: 98986b4eb42SJerin Jacob nicvf_qset_cq_reclaim(nic, qidx); 99086b4eb42SJerin Jacob config_rq_error: 99186b4eb42SJerin Jacob nicvf_qset_rq_reclaim(nic, qidx); 99286b4eb42SJerin Jacob return ret; 99386b4eb42SJerin Jacob } 99486b4eb42SJerin Jacob 99586b4eb42SJerin Jacob static inline int 99686b4eb42SJerin Jacob nicvf_stop_rx_queue(struct rte_eth_dev *dev, uint16_t qidx) 99786b4eb42SJerin Jacob { 99886b4eb42SJerin Jacob struct nicvf *nic = nicvf_pmd_priv(dev); 99986b4eb42SJerin Jacob struct nicvf_rxq *rxq; 100086b4eb42SJerin Jacob int ret, other_error; 100186b4eb42SJerin Jacob 100286b4eb42SJerin Jacob if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) 100386b4eb42SJerin Jacob return 0; 100486b4eb42SJerin Jacob 100586b4eb42SJerin Jacob ret = nicvf_qset_rq_reclaim(nic, qidx); 100686b4eb42SJerin Jacob if (ret) 100786b4eb42SJerin Jacob PMD_INIT_LOG(ERR, "Failed 
to reclaim rq %d %d", qidx, ret); 100886b4eb42SJerin Jacob 100986b4eb42SJerin Jacob other_error = ret; 101086b4eb42SJerin Jacob rxq = dev->data->rx_queues[qidx]; 101186b4eb42SJerin Jacob nicvf_rx_queue_release_mbufs(rxq); 101286b4eb42SJerin Jacob nicvf_rx_queue_reset(rxq); 101386b4eb42SJerin Jacob 101486b4eb42SJerin Jacob ret = nicvf_qset_cq_reclaim(nic, qidx); 101586b4eb42SJerin Jacob if (ret) 101686b4eb42SJerin Jacob PMD_INIT_LOG(ERR, "Failed to reclaim cq %d %d", qidx, ret); 101786b4eb42SJerin Jacob 101886b4eb42SJerin Jacob other_error |= ret; 101986b4eb42SJerin Jacob dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED; 102086b4eb42SJerin Jacob return other_error; 102186b4eb42SJerin Jacob } 102286b4eb42SJerin Jacob 1023aa0d976eSJerin Jacob static void 1024aa0d976eSJerin Jacob nicvf_dev_rx_queue_release(void *rx_queue) 1025aa0d976eSJerin Jacob { 1026aa0d976eSJerin Jacob struct nicvf_rxq *rxq = rx_queue; 1027aa0d976eSJerin Jacob 1028aa0d976eSJerin Jacob PMD_INIT_FUNC_TRACE(); 1029aa0d976eSJerin Jacob 1030aa0d976eSJerin Jacob if (rxq) 1031aa0d976eSJerin Jacob rte_free(rxq); 1032aa0d976eSJerin Jacob } 1033aa0d976eSJerin Jacob 1034aa0d976eSJerin Jacob static int 103586b4eb42SJerin Jacob nicvf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx) 103686b4eb42SJerin Jacob { 103786b4eb42SJerin Jacob int ret; 103886b4eb42SJerin Jacob 103986b4eb42SJerin Jacob ret = nicvf_start_rx_queue(dev, qidx); 104086b4eb42SJerin Jacob if (ret) 104186b4eb42SJerin Jacob return ret; 104286b4eb42SJerin Jacob 104386b4eb42SJerin Jacob ret = nicvf_configure_cpi(dev); 104486b4eb42SJerin Jacob if (ret) 104586b4eb42SJerin Jacob return ret; 104686b4eb42SJerin Jacob 104786b4eb42SJerin Jacob return nicvf_configure_rss_reta(dev); 104886b4eb42SJerin Jacob } 104986b4eb42SJerin Jacob 105086b4eb42SJerin Jacob static int 105186b4eb42SJerin Jacob nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx) 105286b4eb42SJerin Jacob { 105386b4eb42SJerin Jacob int ret; 105486b4eb42SJerin 
Jacob 105586b4eb42SJerin Jacob ret = nicvf_stop_rx_queue(dev, qidx); 105686b4eb42SJerin Jacob ret |= nicvf_configure_cpi(dev); 105786b4eb42SJerin Jacob ret |= nicvf_configure_rss_reta(dev); 105886b4eb42SJerin Jacob return ret; 105986b4eb42SJerin Jacob } 106086b4eb42SJerin Jacob 106186b4eb42SJerin Jacob static int 1062fc1f6c62SJerin Jacob nicvf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx) 1063fc1f6c62SJerin Jacob { 1064fc1f6c62SJerin Jacob return nicvf_start_tx_queue(dev, qidx); 1065fc1f6c62SJerin Jacob } 1066fc1f6c62SJerin Jacob 1067fc1f6c62SJerin Jacob static int 1068fc1f6c62SJerin Jacob nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx) 1069fc1f6c62SJerin Jacob { 1070fc1f6c62SJerin Jacob return nicvf_stop_tx_queue(dev, qidx); 1071fc1f6c62SJerin Jacob } 1072fc1f6c62SJerin Jacob 1073fc1f6c62SJerin Jacob static int 1074aa0d976eSJerin Jacob nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, 1075aa0d976eSJerin Jacob uint16_t nb_desc, unsigned int socket_id, 1076aa0d976eSJerin Jacob const struct rte_eth_rxconf *rx_conf, 1077aa0d976eSJerin Jacob struct rte_mempool *mp) 1078aa0d976eSJerin Jacob { 1079aa0d976eSJerin Jacob uint16_t rx_free_thresh; 1080aa0d976eSJerin Jacob struct nicvf_rxq *rxq; 1081aa0d976eSJerin Jacob struct nicvf *nic = nicvf_pmd_priv(dev); 1082aa0d976eSJerin Jacob 1083aa0d976eSJerin Jacob PMD_INIT_FUNC_TRACE(); 1084aa0d976eSJerin Jacob 1085aa0d976eSJerin Jacob /* Socket id check */ 1086aa0d976eSJerin Jacob if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node) 1087aa0d976eSJerin Jacob PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d", 1088aa0d976eSJerin Jacob socket_id, nic->node); 1089aa0d976eSJerin Jacob 1090aa0d976eSJerin Jacob /* Mempool memory should be contiguous */ 1091aa0d976eSJerin Jacob if (mp->nb_mem_chunks != 1) { 1092aa0d976eSJerin Jacob PMD_INIT_LOG(ERR, "Non contiguous mempool, check huge page sz"); 1093aa0d976eSJerin Jacob return -EINVAL; 1094aa0d976eSJerin Jacob } 
1095aa0d976eSJerin Jacob 1096aa0d976eSJerin Jacob /* Rx deferred start is not supported */ 1097aa0d976eSJerin Jacob if (rx_conf->rx_deferred_start) { 1098aa0d976eSJerin Jacob PMD_INIT_LOG(ERR, "Rx deferred start not supported"); 1099aa0d976eSJerin Jacob return -EINVAL; 1100aa0d976eSJerin Jacob } 1101aa0d976eSJerin Jacob 1102aa0d976eSJerin Jacob /* Roundup nb_desc to available qsize and validate max number of desc */ 1103aa0d976eSJerin Jacob nb_desc = nicvf_qsize_cq_roundup(nb_desc); 1104aa0d976eSJerin Jacob if (nb_desc == 0) { 1105aa0d976eSJerin Jacob PMD_INIT_LOG(ERR, "Value nb_desc beyond available hw cq qsize"); 1106aa0d976eSJerin Jacob return -EINVAL; 1107aa0d976eSJerin Jacob } 1108aa0d976eSJerin Jacob 1109aa0d976eSJerin Jacob /* Check rx_free_thresh upper bound */ 1110aa0d976eSJerin Jacob rx_free_thresh = (uint16_t)((rx_conf->rx_free_thresh) ? 1111aa0d976eSJerin Jacob rx_conf->rx_free_thresh : 1112aa0d976eSJerin Jacob NICVF_DEFAULT_RX_FREE_THRESH); 1113aa0d976eSJerin Jacob if (rx_free_thresh > NICVF_MAX_RX_FREE_THRESH || 1114aa0d976eSJerin Jacob rx_free_thresh >= nb_desc * .75) { 1115aa0d976eSJerin Jacob PMD_INIT_LOG(ERR, "rx_free_thresh greater than expected %d", 1116aa0d976eSJerin Jacob rx_free_thresh); 1117aa0d976eSJerin Jacob return -EINVAL; 1118aa0d976eSJerin Jacob } 1119aa0d976eSJerin Jacob 1120aa0d976eSJerin Jacob /* Free memory prior to re-allocation if needed */ 1121aa0d976eSJerin Jacob if (dev->data->rx_queues[qidx] != NULL) { 1122aa0d976eSJerin Jacob PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d", 1123aa0d976eSJerin Jacob qidx); 1124aa0d976eSJerin Jacob nicvf_dev_rx_queue_release(dev->data->rx_queues[qidx]); 1125aa0d976eSJerin Jacob dev->data->rx_queues[qidx] = NULL; 1126aa0d976eSJerin Jacob } 1127aa0d976eSJerin Jacob 1128aa0d976eSJerin Jacob /* Allocate rxq memory */ 1129aa0d976eSJerin Jacob rxq = rte_zmalloc_socket("ethdev rx queue", sizeof(struct nicvf_rxq), 1130aa0d976eSJerin Jacob RTE_CACHE_LINE_SIZE, nic->node); 
1131aa0d976eSJerin Jacob if (rxq == NULL) { 1132aa0d976eSJerin Jacob PMD_INIT_LOG(ERR, "Failed to allocate rxq=%d", qidx); 1133aa0d976eSJerin Jacob return -ENOMEM; 1134aa0d976eSJerin Jacob } 1135aa0d976eSJerin Jacob 1136aa0d976eSJerin Jacob rxq->nic = nic; 1137aa0d976eSJerin Jacob rxq->pool = mp; 1138aa0d976eSJerin Jacob rxq->queue_id = qidx; 1139aa0d976eSJerin Jacob rxq->port_id = dev->data->port_id; 1140aa0d976eSJerin Jacob rxq->rx_free_thresh = rx_free_thresh; 1141aa0d976eSJerin Jacob rxq->rx_drop_en = rx_conf->rx_drop_en; 1142aa0d976eSJerin Jacob rxq->cq_status = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_STATUS; 1143aa0d976eSJerin Jacob rxq->cq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_DOOR; 1144aa0d976eSJerin Jacob rxq->precharge_cnt = 0; 1145aa0d976eSJerin Jacob rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD; 1146aa0d976eSJerin Jacob 1147aa0d976eSJerin Jacob /* Alloc completion queue */ 1148aa0d976eSJerin Jacob if (nicvf_qset_cq_alloc(nic, rxq, rxq->queue_id, nb_desc)) { 1149aa0d976eSJerin Jacob PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id); 1150aa0d976eSJerin Jacob nicvf_dev_rx_queue_release(rxq); 1151aa0d976eSJerin Jacob return -ENOMEM; 1152aa0d976eSJerin Jacob } 1153aa0d976eSJerin Jacob 1154aa0d976eSJerin Jacob nicvf_rx_queue_reset(rxq); 1155aa0d976eSJerin Jacob 1156aa0d976eSJerin Jacob PMD_RX_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d) phy=%" PRIx64, 1157aa0d976eSJerin Jacob qidx, rxq, mp->name, nb_desc, 1158a0fd91ceSBruce Richardson rte_mempool_avail_count(mp), rxq->phys); 1159aa0d976eSJerin Jacob 1160aa0d976eSJerin Jacob dev->data->rx_queues[qidx] = rxq; 1161aa0d976eSJerin Jacob dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED; 1162aa0d976eSJerin Jacob return 0; 1163aa0d976eSJerin Jacob } 1164aa0d976eSJerin Jacob 1165dcd7b1e1SJerin Jacob static void 1166dcd7b1e1SJerin Jacob nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 1167dcd7b1e1SJerin Jacob { 1168dcd7b1e1SJerin Jacob struct nicvf 
*nic = nicvf_pmd_priv(dev); 1169dcd7b1e1SJerin Jacob 1170dcd7b1e1SJerin Jacob PMD_INIT_FUNC_TRACE(); 1171dcd7b1e1SJerin Jacob 1172dcd7b1e1SJerin Jacob dev_info->min_rx_bufsize = ETHER_MIN_MTU; 1173dcd7b1e1SJerin Jacob dev_info->max_rx_pktlen = NIC_HW_MAX_FRS; 1174dcd7b1e1SJerin Jacob dev_info->max_rx_queues = (uint16_t)MAX_RCV_QUEUES_PER_QS; 1175dcd7b1e1SJerin Jacob dev_info->max_tx_queues = (uint16_t)MAX_SND_QUEUES_PER_QS; 1176dcd7b1e1SJerin Jacob dev_info->max_mac_addrs = 1; 1177dcd7b1e1SJerin Jacob dev_info->max_vfs = dev->pci_dev->max_vfs; 1178dcd7b1e1SJerin Jacob 1179dcd7b1e1SJerin Jacob dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP; 1180dcd7b1e1SJerin Jacob dev_info->tx_offload_capa = 1181dcd7b1e1SJerin Jacob DEV_TX_OFFLOAD_IPV4_CKSUM | 1182dcd7b1e1SJerin Jacob DEV_TX_OFFLOAD_UDP_CKSUM | 1183dcd7b1e1SJerin Jacob DEV_TX_OFFLOAD_TCP_CKSUM | 1184dcd7b1e1SJerin Jacob DEV_TX_OFFLOAD_TCP_TSO | 1185dcd7b1e1SJerin Jacob DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; 1186dcd7b1e1SJerin Jacob 1187dcd7b1e1SJerin Jacob dev_info->reta_size = nic->rss_info.rss_size; 1188dcd7b1e1SJerin Jacob dev_info->hash_key_size = RSS_HASH_KEY_BYTE_SIZE; 1189dcd7b1e1SJerin Jacob dev_info->flow_type_rss_offloads = NICVF_RSS_OFFLOAD_PASS1; 1190dcd7b1e1SJerin Jacob if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) 1191dcd7b1e1SJerin Jacob dev_info->flow_type_rss_offloads |= NICVF_RSS_OFFLOAD_TUNNEL; 1192dcd7b1e1SJerin Jacob 1193dcd7b1e1SJerin Jacob dev_info->default_rxconf = (struct rte_eth_rxconf) { 1194dcd7b1e1SJerin Jacob .rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH, 1195dcd7b1e1SJerin Jacob .rx_drop_en = 0, 1196dcd7b1e1SJerin Jacob }; 1197dcd7b1e1SJerin Jacob 1198dcd7b1e1SJerin Jacob dev_info->default_txconf = (struct rte_eth_txconf) { 1199dcd7b1e1SJerin Jacob .tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH, 1200dcd7b1e1SJerin Jacob .txq_flags = 1201dcd7b1e1SJerin Jacob ETH_TXQ_FLAGS_NOMULTSEGS | 1202dcd7b1e1SJerin Jacob ETH_TXQ_FLAGS_NOREFCOUNT | 1203dcd7b1e1SJerin Jacob 
ETH_TXQ_FLAGS_NOMULTMEMP | 1204dcd7b1e1SJerin Jacob ETH_TXQ_FLAGS_NOVLANOFFL | 1205dcd7b1e1SJerin Jacob ETH_TXQ_FLAGS_NOXSUMSCTP, 1206dcd7b1e1SJerin Jacob }; 1207dcd7b1e1SJerin Jacob } 1208dcd7b1e1SJerin Jacob 12097413feeeSJerin Jacob static nicvf_phys_addr_t 12107413feeeSJerin Jacob rbdr_rte_mempool_get(void *opaque) 12117413feeeSJerin Jacob { 12127413feeeSJerin Jacob uint16_t qidx; 12137413feeeSJerin Jacob uintptr_t mbuf; 12147413feeeSJerin Jacob struct nicvf_rxq *rxq; 12157413feeeSJerin Jacob struct nicvf *nic = nicvf_pmd_priv((struct rte_eth_dev *)opaque); 12167413feeeSJerin Jacob 12177413feeeSJerin Jacob for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) { 12187413feeeSJerin Jacob rxq = nic->eth_dev->data->rx_queues[qidx]; 12197413feeeSJerin Jacob /* Maintain equal buffer count across all pools */ 12207413feeeSJerin Jacob if (rxq->precharge_cnt >= rxq->qlen_mask) 12217413feeeSJerin Jacob continue; 12227413feeeSJerin Jacob rxq->precharge_cnt++; 12237413feeeSJerin Jacob mbuf = (uintptr_t)rte_pktmbuf_alloc(rxq->pool); 12247413feeeSJerin Jacob if (mbuf) 12257413feeeSJerin Jacob return nicvf_mbuff_virt2phy(mbuf, rxq->mbuf_phys_off); 12267413feeeSJerin Jacob } 12277413feeeSJerin Jacob return 0; 12287413feeeSJerin Jacob } 12297413feeeSJerin Jacob 12307413feeeSJerin Jacob static int 12317413feeeSJerin Jacob nicvf_dev_start(struct rte_eth_dev *dev) 12327413feeeSJerin Jacob { 12337413feeeSJerin Jacob int ret; 12347413feeeSJerin Jacob uint16_t qidx; 12357413feeeSJerin Jacob uint32_t buffsz = 0, rbdrsz = 0; 12367413feeeSJerin Jacob uint32_t total_rxq_desc, nb_rbdr_desc, exp_buffs; 12377413feeeSJerin Jacob uint64_t mbuf_phys_off = 0; 12387413feeeSJerin Jacob struct nicvf_rxq *rxq; 12397413feeeSJerin Jacob struct rte_pktmbuf_pool_private *mbp_priv; 12407413feeeSJerin Jacob struct rte_mbuf *mbuf; 12417413feeeSJerin Jacob struct nicvf *nic = nicvf_pmd_priv(dev); 12427413feeeSJerin Jacob struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode; 
12437413feeeSJerin Jacob uint16_t mtu; 12447413feeeSJerin Jacob 12457413feeeSJerin Jacob PMD_INIT_FUNC_TRACE(); 12467413feeeSJerin Jacob 12477413feeeSJerin Jacob /* Userspace process exited without proper shutdown in last run */ 12487413feeeSJerin Jacob if (nicvf_qset_rbdr_active(nic, 0)) 12497413feeeSJerin Jacob nicvf_dev_stop(dev); 12507413feeeSJerin Jacob 12517413feeeSJerin Jacob /* 12527413feeeSJerin Jacob * Thunderx nicvf PMD can support more than one pool per port only when 12537413feeeSJerin Jacob * 1) Data payload size is same across all the pools in given port 12547413feeeSJerin Jacob * AND 12557413feeeSJerin Jacob * 2) All mbuffs in the pools are from the same hugepage 12567413feeeSJerin Jacob * AND 12577413feeeSJerin Jacob * 3) Mbuff metadata size is same across all the pools in given port 12587413feeeSJerin Jacob * 12597413feeeSJerin Jacob * This is to support existing application that uses multiple pool/port. 12607413feeeSJerin Jacob * But, the purpose of using multipool for QoS will not be addressed. 
12617413feeeSJerin Jacob * 12627413feeeSJerin Jacob */ 12637413feeeSJerin Jacob 12647413feeeSJerin Jacob /* Validate RBDR buff size */ 12657413feeeSJerin Jacob for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) { 12667413feeeSJerin Jacob rxq = dev->data->rx_queues[qidx]; 12677413feeeSJerin Jacob mbp_priv = rte_mempool_get_priv(rxq->pool); 12687413feeeSJerin Jacob buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM; 12697413feeeSJerin Jacob if (buffsz % 128) { 12707413feeeSJerin Jacob PMD_INIT_LOG(ERR, "rxbuf size must be multiply of 128"); 12717413feeeSJerin Jacob return -EINVAL; 12727413feeeSJerin Jacob } 12737413feeeSJerin Jacob if (rbdrsz == 0) 12747413feeeSJerin Jacob rbdrsz = buffsz; 12757413feeeSJerin Jacob if (rbdrsz != buffsz) { 12767413feeeSJerin Jacob PMD_INIT_LOG(ERR, "buffsz not same, qid=%d (%d/%d)", 12777413feeeSJerin Jacob qidx, rbdrsz, buffsz); 12787413feeeSJerin Jacob return -EINVAL; 12797413feeeSJerin Jacob } 12807413feeeSJerin Jacob } 12817413feeeSJerin Jacob 12827413feeeSJerin Jacob /* Validate mempool attributes */ 12837413feeeSJerin Jacob for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) { 12847413feeeSJerin Jacob rxq = dev->data->rx_queues[qidx]; 12857413feeeSJerin Jacob rxq->mbuf_phys_off = nicvf_mempool_phy_offset(rxq->pool); 12867413feeeSJerin Jacob mbuf = rte_pktmbuf_alloc(rxq->pool); 12877413feeeSJerin Jacob if (mbuf == NULL) { 12887413feeeSJerin Jacob PMD_INIT_LOG(ERR, "Failed allocate mbuf qid=%d pool=%s", 12897413feeeSJerin Jacob qidx, rxq->pool->name); 12907413feeeSJerin Jacob return -ENOMEM; 12917413feeeSJerin Jacob } 12927413feeeSJerin Jacob rxq->mbuf_phys_off -= nicvf_mbuff_meta_length(mbuf); 12937413feeeSJerin Jacob rxq->mbuf_phys_off -= RTE_PKTMBUF_HEADROOM; 12947413feeeSJerin Jacob rte_pktmbuf_free(mbuf); 12957413feeeSJerin Jacob 12967413feeeSJerin Jacob if (mbuf_phys_off == 0) 12977413feeeSJerin Jacob mbuf_phys_off = rxq->mbuf_phys_off; 12987413feeeSJerin Jacob if (mbuf_phys_off != 
rxq->mbuf_phys_off) { 12997413feeeSJerin Jacob PMD_INIT_LOG(ERR, "pool params not same,%s %" PRIx64, 13007413feeeSJerin Jacob rxq->pool->name, mbuf_phys_off); 13017413feeeSJerin Jacob return -EINVAL; 13027413feeeSJerin Jacob } 13037413feeeSJerin Jacob } 13047413feeeSJerin Jacob 13057413feeeSJerin Jacob /* Check the level of buffers in the pool */ 13067413feeeSJerin Jacob total_rxq_desc = 0; 13077413feeeSJerin Jacob for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) { 13087413feeeSJerin Jacob rxq = dev->data->rx_queues[qidx]; 13097413feeeSJerin Jacob /* Count total numbers of rxq descs */ 13107413feeeSJerin Jacob total_rxq_desc += rxq->qlen_mask + 1; 13117413feeeSJerin Jacob exp_buffs = RTE_MEMPOOL_CACHE_MAX_SIZE + rxq->rx_free_thresh; 13127413feeeSJerin Jacob exp_buffs *= nic->eth_dev->data->nb_rx_queues; 1313a0fd91ceSBruce Richardson if (rte_mempool_avail_count(rxq->pool) < exp_buffs) { 13147413feeeSJerin Jacob PMD_INIT_LOG(ERR, "Buff shortage in pool=%s (%d/%d)", 13157413feeeSJerin Jacob rxq->pool->name, 1316a0fd91ceSBruce Richardson rte_mempool_avail_count(rxq->pool), 13177413feeeSJerin Jacob exp_buffs); 13187413feeeSJerin Jacob return -ENOENT; 13197413feeeSJerin Jacob } 13207413feeeSJerin Jacob } 13217413feeeSJerin Jacob 13227413feeeSJerin Jacob /* Check RBDR desc overflow */ 13237413feeeSJerin Jacob ret = nicvf_qsize_rbdr_roundup(total_rxq_desc); 13247413feeeSJerin Jacob if (ret == 0) { 13257413feeeSJerin Jacob PMD_INIT_LOG(ERR, "Reached RBDR desc limit, reduce nr desc"); 13267413feeeSJerin Jacob return -ENOMEM; 13277413feeeSJerin Jacob } 13287413feeeSJerin Jacob 13297413feeeSJerin Jacob /* Enable qset */ 13307413feeeSJerin Jacob ret = nicvf_qset_config(nic); 13317413feeeSJerin Jacob if (ret) { 13327413feeeSJerin Jacob PMD_INIT_LOG(ERR, "Failed to enable qset %d", ret); 13337413feeeSJerin Jacob return ret; 13347413feeeSJerin Jacob } 13357413feeeSJerin Jacob 13367413feeeSJerin Jacob /* Allocate RBDR and RBDR ring desc */ 13377413feeeSJerin Jacob 
nb_rbdr_desc = nicvf_qsize_rbdr_roundup(total_rxq_desc); 13387413feeeSJerin Jacob ret = nicvf_qset_rbdr_alloc(nic, nb_rbdr_desc, rbdrsz); 13397413feeeSJerin Jacob if (ret) { 13407413feeeSJerin Jacob PMD_INIT_LOG(ERR, "Failed to allocate memory for rbdr alloc"); 13417413feeeSJerin Jacob goto qset_reclaim; 13427413feeeSJerin Jacob } 13437413feeeSJerin Jacob 13447413feeeSJerin Jacob /* Enable and configure RBDR registers */ 13457413feeeSJerin Jacob ret = nicvf_qset_rbdr_config(nic, 0); 13467413feeeSJerin Jacob if (ret) { 13477413feeeSJerin Jacob PMD_INIT_LOG(ERR, "Failed to configure rbdr %d", ret); 13487413feeeSJerin Jacob goto qset_rbdr_free; 13497413feeeSJerin Jacob } 13507413feeeSJerin Jacob 13517413feeeSJerin Jacob /* Fill rte_mempool buffers in RBDR pool and precharge it */ 13527413feeeSJerin Jacob ret = nicvf_qset_rbdr_precharge(nic, 0, rbdr_rte_mempool_get, 13537413feeeSJerin Jacob dev, total_rxq_desc); 13547413feeeSJerin Jacob if (ret) { 13557413feeeSJerin Jacob PMD_INIT_LOG(ERR, "Failed to fill rbdr %d", ret); 13567413feeeSJerin Jacob goto qset_rbdr_reclaim; 13577413feeeSJerin Jacob } 13587413feeeSJerin Jacob 13597413feeeSJerin Jacob PMD_DRV_LOG(INFO, "Filled %d out of %d entries in RBDR", 13607413feeeSJerin Jacob nic->rbdr->tail, nb_rbdr_desc); 13617413feeeSJerin Jacob 13627413feeeSJerin Jacob /* Configure RX queues */ 13637413feeeSJerin Jacob for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) { 13647413feeeSJerin Jacob ret = nicvf_start_rx_queue(dev, qidx); 13657413feeeSJerin Jacob if (ret) 13667413feeeSJerin Jacob goto start_rxq_error; 13677413feeeSJerin Jacob } 13687413feeeSJerin Jacob 13697413feeeSJerin Jacob /* Configure VLAN Strip */ 13707413feeeSJerin Jacob nicvf_vlan_hw_strip(nic, dev->data->dev_conf.rxmode.hw_vlan_strip); 13717413feeeSJerin Jacob 13727413feeeSJerin Jacob /* Configure TX queues */ 13737413feeeSJerin Jacob for (qidx = 0; qidx < nic->eth_dev->data->nb_tx_queues; qidx++) { 13747413feeeSJerin Jacob ret = 
nicvf_start_tx_queue(dev, qidx); 13757413feeeSJerin Jacob if (ret) 13767413feeeSJerin Jacob goto start_txq_error; 13777413feeeSJerin Jacob } 13787413feeeSJerin Jacob 13797413feeeSJerin Jacob /* Configure CPI algorithm */ 13807413feeeSJerin Jacob ret = nicvf_configure_cpi(dev); 13817413feeeSJerin Jacob if (ret) 13827413feeeSJerin Jacob goto start_txq_error; 13837413feeeSJerin Jacob 13847413feeeSJerin Jacob /* Configure RSS */ 13857413feeeSJerin Jacob ret = nicvf_configure_rss(dev); 13867413feeeSJerin Jacob if (ret) 13877413feeeSJerin Jacob goto qset_rss_error; 13887413feeeSJerin Jacob 13897413feeeSJerin Jacob /* Configure loopback */ 13907413feeeSJerin Jacob ret = nicvf_loopback_config(nic, dev->data->dev_conf.lpbk_mode); 13917413feeeSJerin Jacob if (ret) { 13927413feeeSJerin Jacob PMD_INIT_LOG(ERR, "Failed to configure loopback %d", ret); 13937413feeeSJerin Jacob goto qset_rss_error; 13947413feeeSJerin Jacob } 13957413feeeSJerin Jacob 13967413feeeSJerin Jacob /* Reset all statistics counters attached to this port */ 13977413feeeSJerin Jacob ret = nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, 0xFFFF, 0xFFFF); 13987413feeeSJerin Jacob if (ret) { 13997413feeeSJerin Jacob PMD_INIT_LOG(ERR, "Failed to reset stat counters %d", ret); 14007413feeeSJerin Jacob goto qset_rss_error; 14017413feeeSJerin Jacob } 14027413feeeSJerin Jacob 14037413feeeSJerin Jacob /* Setup scatter mode if needed by jumbo */ 14047413feeeSJerin Jacob if (dev->data->dev_conf.rxmode.max_rx_pkt_len + 14057413feeeSJerin Jacob 2 * VLAN_TAG_SIZE > buffsz) 14067413feeeSJerin Jacob dev->data->scattered_rx = 1; 14077413feeeSJerin Jacob if (rx_conf->enable_scatter) 14087413feeeSJerin Jacob dev->data->scattered_rx = 1; 14097413feeeSJerin Jacob 14107413feeeSJerin Jacob /* Setup MTU based on max_rx_pkt_len or default */ 14117413feeeSJerin Jacob mtu = dev->data->dev_conf.rxmode.jumbo_frame ? 
14127413feeeSJerin Jacob dev->data->dev_conf.rxmode.max_rx_pkt_len 14137413feeeSJerin Jacob - ETHER_HDR_LEN - ETHER_CRC_LEN 14147413feeeSJerin Jacob : ETHER_MTU; 14157413feeeSJerin Jacob 14167413feeeSJerin Jacob if (nicvf_dev_set_mtu(dev, mtu)) { 14177413feeeSJerin Jacob PMD_INIT_LOG(ERR, "Failed to set default mtu size"); 14187413feeeSJerin Jacob return -EBUSY; 14197413feeeSJerin Jacob } 14207413feeeSJerin Jacob 14217413feeeSJerin Jacob /* Configure callbacks based on scatter mode */ 14227413feeeSJerin Jacob nicvf_set_tx_function(dev); 14237413feeeSJerin Jacob nicvf_set_rx_function(dev); 14247413feeeSJerin Jacob 14257413feeeSJerin Jacob /* Done; Let PF make the BGX's RX and TX switches to ON position */ 14267413feeeSJerin Jacob nicvf_mbox_cfg_done(nic); 14277413feeeSJerin Jacob return 0; 14287413feeeSJerin Jacob 14297413feeeSJerin Jacob qset_rss_error: 14307413feeeSJerin Jacob nicvf_rss_term(nic); 14317413feeeSJerin Jacob start_txq_error: 14327413feeeSJerin Jacob for (qidx = 0; qidx < nic->eth_dev->data->nb_tx_queues; qidx++) 14337413feeeSJerin Jacob nicvf_stop_tx_queue(dev, qidx); 14347413feeeSJerin Jacob start_rxq_error: 14357413feeeSJerin Jacob for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) 14367413feeeSJerin Jacob nicvf_stop_rx_queue(dev, qidx); 14377413feeeSJerin Jacob qset_rbdr_reclaim: 14387413feeeSJerin Jacob nicvf_qset_rbdr_reclaim(nic, 0); 14397413feeeSJerin Jacob nicvf_rbdr_release_mbufs(nic); 14407413feeeSJerin Jacob qset_rbdr_free: 14417413feeeSJerin Jacob if (nic->rbdr) { 14427413feeeSJerin Jacob rte_free(nic->rbdr); 14437413feeeSJerin Jacob nic->rbdr = NULL; 14447413feeeSJerin Jacob } 14457413feeeSJerin Jacob qset_reclaim: 14467413feeeSJerin Jacob nicvf_qset_reclaim(nic); 14477413feeeSJerin Jacob return ret; 14487413feeeSJerin Jacob } 14497413feeeSJerin Jacob 14507413feeeSJerin Jacob static void 14517413feeeSJerin Jacob nicvf_dev_stop(struct rte_eth_dev *dev) 14527413feeeSJerin Jacob { 14537413feeeSJerin Jacob int ret; 
14547413feeeSJerin Jacob uint16_t qidx; 14557413feeeSJerin Jacob struct nicvf *nic = nicvf_pmd_priv(dev); 14567413feeeSJerin Jacob 14577413feeeSJerin Jacob PMD_INIT_FUNC_TRACE(); 14587413feeeSJerin Jacob 14597413feeeSJerin Jacob /* Let PF make the BGX's RX and TX switches to OFF position */ 14607413feeeSJerin Jacob nicvf_mbox_shutdown(nic); 14617413feeeSJerin Jacob 14627413feeeSJerin Jacob /* Disable loopback */ 14637413feeeSJerin Jacob ret = nicvf_loopback_config(nic, 0); 14647413feeeSJerin Jacob if (ret) 14657413feeeSJerin Jacob PMD_INIT_LOG(ERR, "Failed to disable loopback %d", ret); 14667413feeeSJerin Jacob 14677413feeeSJerin Jacob /* Disable VLAN Strip */ 14687413feeeSJerin Jacob nicvf_vlan_hw_strip(nic, 0); 14697413feeeSJerin Jacob 14707413feeeSJerin Jacob /* Reclaim sq */ 14717413feeeSJerin Jacob for (qidx = 0; qidx < dev->data->nb_tx_queues; qidx++) 14727413feeeSJerin Jacob nicvf_stop_tx_queue(dev, qidx); 14737413feeeSJerin Jacob 14747413feeeSJerin Jacob /* Reclaim rq */ 14757413feeeSJerin Jacob for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) 14767413feeeSJerin Jacob nicvf_stop_rx_queue(dev, qidx); 14777413feeeSJerin Jacob 14787413feeeSJerin Jacob /* Reclaim RBDR */ 14797413feeeSJerin Jacob ret = nicvf_qset_rbdr_reclaim(nic, 0); 14807413feeeSJerin Jacob if (ret) 14817413feeeSJerin Jacob PMD_INIT_LOG(ERR, "Failed to reclaim RBDR %d", ret); 14827413feeeSJerin Jacob 14837413feeeSJerin Jacob /* Move all charged buffers in RBDR back to pool */ 14847413feeeSJerin Jacob if (nic->rbdr != NULL) 14857413feeeSJerin Jacob nicvf_rbdr_release_mbufs(nic); 14867413feeeSJerin Jacob 14877413feeeSJerin Jacob /* Reclaim CPI configuration */ 14887413feeeSJerin Jacob if (!nic->sqs_mode) { 14897413feeeSJerin Jacob ret = nicvf_mbox_config_cpi(nic, 0); 14907413feeeSJerin Jacob if (ret) 14917413feeeSJerin Jacob PMD_INIT_LOG(ERR, "Failed to reclaim CPI config"); 14927413feeeSJerin Jacob } 14937413feeeSJerin Jacob 14947413feeeSJerin Jacob /* Disable qset */ 14957413feeeSJerin 
/*
 * Close the device: bring the port down via nicvf_dev_stop() and then
 * cancel the driver's periodic mailbox-polling alarm.
 */
static void
nicvf_dev_close(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	/* Full datapath teardown first, then stop background polling. */
	nicvf_dev_stop(dev);
	nicvf_periodic_alarm_stop(nic);
}
1540bc79615aSJerin Jacob if (rxmode->mq_mode != ETH_MQ_RX_NONE && 1541bc79615aSJerin Jacob rxmode->mq_mode != ETH_MQ_RX_RSS) { 1542bc79615aSJerin Jacob PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode); 1543bc79615aSJerin Jacob return -EINVAL; 1544bc79615aSJerin Jacob } 1545bc79615aSJerin Jacob 1546bc79615aSJerin Jacob if (!rxmode->hw_strip_crc) { 1547bc79615aSJerin Jacob PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip"); 1548bc79615aSJerin Jacob rxmode->hw_strip_crc = 1; 1549bc79615aSJerin Jacob } 1550bc79615aSJerin Jacob 1551bc79615aSJerin Jacob if (rxmode->hw_ip_checksum) { 1552bc79615aSJerin Jacob PMD_INIT_LOG(NOTICE, "Rxcksum not supported"); 1553bc79615aSJerin Jacob rxmode->hw_ip_checksum = 0; 1554bc79615aSJerin Jacob } 1555bc79615aSJerin Jacob 1556bc79615aSJerin Jacob if (rxmode->split_hdr_size) { 1557bc79615aSJerin Jacob PMD_INIT_LOG(INFO, "Rxmode does not support split header"); 1558bc79615aSJerin Jacob return -EINVAL; 1559bc79615aSJerin Jacob } 1560bc79615aSJerin Jacob 1561bc79615aSJerin Jacob if (rxmode->hw_vlan_filter) { 1562bc79615aSJerin Jacob PMD_INIT_LOG(INFO, "VLAN filter not supported"); 1563bc79615aSJerin Jacob return -EINVAL; 1564bc79615aSJerin Jacob } 1565bc79615aSJerin Jacob 1566bc79615aSJerin Jacob if (rxmode->hw_vlan_extend) { 1567bc79615aSJerin Jacob PMD_INIT_LOG(INFO, "VLAN extended not supported"); 1568bc79615aSJerin Jacob return -EINVAL; 1569bc79615aSJerin Jacob } 1570bc79615aSJerin Jacob 1571bc79615aSJerin Jacob if (rxmode->enable_lro) { 1572bc79615aSJerin Jacob PMD_INIT_LOG(INFO, "LRO not supported"); 1573bc79615aSJerin Jacob return -EINVAL; 1574bc79615aSJerin Jacob } 1575bc79615aSJerin Jacob 1576bc79615aSJerin Jacob if (conf->link_speeds & ETH_LINK_SPEED_FIXED) { 1577bc79615aSJerin Jacob PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported"); 1578bc79615aSJerin Jacob return -EINVAL; 1579bc79615aSJerin Jacob } 1580bc79615aSJerin Jacob 1581bc79615aSJerin Jacob if (conf->dcb_capability_en) { 1582bc79615aSJerin Jacob 
PMD_INIT_LOG(INFO, "DCB enable not supported"); 1583bc79615aSJerin Jacob return -EINVAL; 1584bc79615aSJerin Jacob } 1585bc79615aSJerin Jacob 1586bc79615aSJerin Jacob if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) { 1587bc79615aSJerin Jacob PMD_INIT_LOG(INFO, "Flow director not supported"); 1588bc79615aSJerin Jacob return -EINVAL; 1589bc79615aSJerin Jacob } 1590bc79615aSJerin Jacob 1591bc79615aSJerin Jacob PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64, 1592bc79615aSJerin Jacob dev->data->port_id, nicvf_hw_cap(nic)); 1593bc79615aSJerin Jacob 1594bc79615aSJerin Jacob return 0; 1595bc79615aSJerin Jacob } 1596bc79615aSJerin Jacob 1597e4387966SJerin Jacob /* Initialize and register driver with DPDK Application */ 1598e4387966SJerin Jacob static const struct eth_dev_ops nicvf_eth_dev_ops = { 1599bc79615aSJerin Jacob .dev_configure = nicvf_dev_configure, 16007413feeeSJerin Jacob .dev_start = nicvf_dev_start, 16017413feeeSJerin Jacob .dev_stop = nicvf_dev_stop, 16028fc70464SJerin Jacob .link_update = nicvf_dev_link_update, 16037413feeeSJerin Jacob .dev_close = nicvf_dev_close, 1604684fa771SJerin Jacob .stats_get = nicvf_dev_stats_get, 1605684fa771SJerin Jacob .stats_reset = nicvf_dev_stats_reset, 16066eae36eaSJerin Jacob .promiscuous_enable = nicvf_dev_promisc_enable, 1607dcd7b1e1SJerin Jacob .dev_infos_get = nicvf_dev_info_get, 16081c80e4fdSJerin Jacob .dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get, 160965d9804eSJerin Jacob .mtu_set = nicvf_dev_set_mtu, 161043362c6aSJerin Jacob .reta_update = nicvf_dev_reta_update, 161143362c6aSJerin Jacob .reta_query = nicvf_dev_reta_query, 161243362c6aSJerin Jacob .rss_hash_update = nicvf_dev_rss_hash_update, 161343362c6aSJerin Jacob .rss_hash_conf_get = nicvf_dev_rss_hash_conf_get, 161486b4eb42SJerin Jacob .rx_queue_start = nicvf_dev_rx_queue_start, 161586b4eb42SJerin Jacob .rx_queue_stop = nicvf_dev_rx_queue_stop, 1616fc1f6c62SJerin Jacob .tx_queue_start = nicvf_dev_tx_queue_start, 1617fc1f6c62SJerin 
Jacob .tx_queue_stop = nicvf_dev_tx_queue_stop, 1618aa0d976eSJerin Jacob .rx_queue_setup = nicvf_dev_rx_queue_setup, 1619aa0d976eSJerin Jacob .rx_queue_release = nicvf_dev_rx_queue_release, 1620da14e00cSJerin Jacob .rx_queue_count = nicvf_dev_rx_queue_count, 16213f3c6f97SJerin Jacob .tx_queue_setup = nicvf_dev_tx_queue_setup, 16223f3c6f97SJerin Jacob .tx_queue_release = nicvf_dev_tx_queue_release, 1623606ee746SJerin Jacob .get_reg = nicvf_dev_get_regs, 1624e4387966SJerin Jacob }; 1625e4387966SJerin Jacob 1626e4387966SJerin Jacob static int 1627e4387966SJerin Jacob nicvf_eth_dev_init(struct rte_eth_dev *eth_dev) 1628e4387966SJerin Jacob { 1629e4387966SJerin Jacob int ret; 1630e4387966SJerin Jacob struct rte_pci_device *pci_dev; 1631e4387966SJerin Jacob struct nicvf *nic = nicvf_pmd_priv(eth_dev); 1632e4387966SJerin Jacob 1633e4387966SJerin Jacob PMD_INIT_FUNC_TRACE(); 1634e4387966SJerin Jacob 1635e4387966SJerin Jacob eth_dev->dev_ops = &nicvf_eth_dev_ops; 1636e4387966SJerin Jacob 16377413feeeSJerin Jacob /* For secondary processes, the primary has done all the work */ 16387413feeeSJerin Jacob if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 16397413feeeSJerin Jacob /* Setup callbacks for secondary process */ 16407413feeeSJerin Jacob nicvf_set_tx_function(eth_dev); 16417413feeeSJerin Jacob nicvf_set_rx_function(eth_dev); 16427413feeeSJerin Jacob return 0; 16437413feeeSJerin Jacob } 16447413feeeSJerin Jacob 1645e4387966SJerin Jacob pci_dev = eth_dev->pci_dev; 1646e4387966SJerin Jacob rte_eth_copy_pci_info(eth_dev, pci_dev); 1647e4387966SJerin Jacob 1648e4387966SJerin Jacob nic->device_id = pci_dev->id.device_id; 1649e4387966SJerin Jacob nic->vendor_id = pci_dev->id.vendor_id; 1650e4387966SJerin Jacob nic->subsystem_device_id = pci_dev->id.subsystem_device_id; 1651e4387966SJerin Jacob nic->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id; 1652e4387966SJerin Jacob nic->eth_dev = eth_dev; 1653e4387966SJerin Jacob 1654e4387966SJerin Jacob PMD_INIT_LOG(DEBUG, "nicvf: 
device (%x:%x) %u:%u:%u:%u", 1655e4387966SJerin Jacob pci_dev->id.vendor_id, pci_dev->id.device_id, 1656e4387966SJerin Jacob pci_dev->addr.domain, pci_dev->addr.bus, 1657e4387966SJerin Jacob pci_dev->addr.devid, pci_dev->addr.function); 1658e4387966SJerin Jacob 1659e4387966SJerin Jacob nic->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr; 1660e4387966SJerin Jacob if (!nic->reg_base) { 1661e4387966SJerin Jacob PMD_INIT_LOG(ERR, "Failed to map BAR0"); 1662e4387966SJerin Jacob ret = -ENODEV; 1663e4387966SJerin Jacob goto fail; 1664e4387966SJerin Jacob } 1665e4387966SJerin Jacob 1666e4387966SJerin Jacob nicvf_disable_all_interrupts(nic); 1667e4387966SJerin Jacob 1668e4387966SJerin Jacob ret = nicvf_periodic_alarm_start(nic); 1669e4387966SJerin Jacob if (ret) { 1670e4387966SJerin Jacob PMD_INIT_LOG(ERR, "Failed to start period alarm"); 1671e4387966SJerin Jacob goto fail; 1672e4387966SJerin Jacob } 1673e4387966SJerin Jacob 1674e4387966SJerin Jacob ret = nicvf_mbox_check_pf_ready(nic); 1675e4387966SJerin Jacob if (ret) { 1676e4387966SJerin Jacob PMD_INIT_LOG(ERR, "Failed to get ready message from PF"); 1677e4387966SJerin Jacob goto alarm_fail; 1678e4387966SJerin Jacob } else { 1679e4387966SJerin Jacob PMD_INIT_LOG(INFO, 1680e4387966SJerin Jacob "node=%d vf=%d mode=%s sqs=%s loopback_supported=%s", 1681e4387966SJerin Jacob nic->node, nic->vf_id, 1682e4387966SJerin Jacob nic->tns_mode == NIC_TNS_MODE ? "tns" : "tns-bypass", 1683e4387966SJerin Jacob nic->sqs_mode ? "true" : "false", 1684e4387966SJerin Jacob nic->loopback_supported ? 
"true" : "false" 1685e4387966SJerin Jacob ); 1686e4387966SJerin Jacob } 1687e4387966SJerin Jacob 1688e4387966SJerin Jacob if (nic->sqs_mode) { 1689e4387966SJerin Jacob PMD_INIT_LOG(INFO, "Unsupported SQS VF detected, Detaching..."); 1690e4387966SJerin Jacob /* Detach port by returning Positive error number */ 1691e4387966SJerin Jacob ret = ENOTSUP; 1692e4387966SJerin Jacob goto alarm_fail; 1693e4387966SJerin Jacob } 1694e4387966SJerin Jacob 1695e4387966SJerin Jacob eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0); 1696e4387966SJerin Jacob if (eth_dev->data->mac_addrs == NULL) { 1697e4387966SJerin Jacob PMD_INIT_LOG(ERR, "Failed to allocate memory for mac addr"); 1698e4387966SJerin Jacob ret = -ENOMEM; 1699e4387966SJerin Jacob goto alarm_fail; 1700e4387966SJerin Jacob } 1701e4387966SJerin Jacob if (is_zero_ether_addr((struct ether_addr *)nic->mac_addr)) 1702e4387966SJerin Jacob eth_random_addr(&nic->mac_addr[0]); 1703e4387966SJerin Jacob 1704e4387966SJerin Jacob ether_addr_copy((struct ether_addr *)nic->mac_addr, 1705e4387966SJerin Jacob ð_dev->data->mac_addrs[0]); 1706e4387966SJerin Jacob 1707e4387966SJerin Jacob ret = nicvf_mbox_set_mac_addr(nic, nic->mac_addr); 1708e4387966SJerin Jacob if (ret) { 1709e4387966SJerin Jacob PMD_INIT_LOG(ERR, "Failed to set mac addr"); 1710e4387966SJerin Jacob goto malloc_fail; 1711e4387966SJerin Jacob } 1712e4387966SJerin Jacob 1713e4387966SJerin Jacob ret = nicvf_base_init(nic); 1714e4387966SJerin Jacob if (ret) { 1715e4387966SJerin Jacob PMD_INIT_LOG(ERR, "Failed to execute nicvf_base_init"); 1716e4387966SJerin Jacob goto malloc_fail; 1717e4387966SJerin Jacob } 1718e4387966SJerin Jacob 1719e4387966SJerin Jacob ret = nicvf_mbox_get_rss_size(nic); 1720e4387966SJerin Jacob if (ret) { 1721e4387966SJerin Jacob PMD_INIT_LOG(ERR, "Failed to get rss table size"); 1722e4387966SJerin Jacob goto malloc_fail; 1723e4387966SJerin Jacob } 1724e4387966SJerin Jacob 1725e4387966SJerin Jacob PMD_INIT_LOG(INFO, "Port %d (%x:%x) 
mac=%02x:%02x:%02x:%02x:%02x:%02x", 1726e4387966SJerin Jacob eth_dev->data->port_id, nic->vendor_id, nic->device_id, 1727e4387966SJerin Jacob nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2], 1728e4387966SJerin Jacob nic->mac_addr[3], nic->mac_addr[4], nic->mac_addr[5]); 1729e4387966SJerin Jacob 1730e4387966SJerin Jacob return 0; 1731e4387966SJerin Jacob 1732e4387966SJerin Jacob malloc_fail: 1733e4387966SJerin Jacob rte_free(eth_dev->data->mac_addrs); 1734e4387966SJerin Jacob alarm_fail: 1735e4387966SJerin Jacob nicvf_periodic_alarm_stop(nic); 1736e4387966SJerin Jacob fail: 1737e4387966SJerin Jacob return ret; 1738e4387966SJerin Jacob } 1739e4387966SJerin Jacob 1740e4387966SJerin Jacob static const struct rte_pci_id pci_id_nicvf_map[] = { 1741e4387966SJerin Jacob { 1742e4387966SJerin Jacob .class_id = RTE_CLASS_ANY_ID, 1743e4387966SJerin Jacob .vendor_id = PCI_VENDOR_ID_CAVIUM, 1744e4387966SJerin Jacob .device_id = PCI_DEVICE_ID_THUNDERX_PASS1_NICVF, 1745e4387966SJerin Jacob .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM, 1746e4387966SJerin Jacob .subsystem_device_id = PCI_SUB_DEVICE_ID_THUNDERX_PASS1_NICVF, 1747e4387966SJerin Jacob }, 1748e4387966SJerin Jacob { 1749e4387966SJerin Jacob .class_id = RTE_CLASS_ANY_ID, 1750e4387966SJerin Jacob .vendor_id = PCI_VENDOR_ID_CAVIUM, 1751e4387966SJerin Jacob .device_id = PCI_DEVICE_ID_THUNDERX_PASS2_NICVF, 1752e4387966SJerin Jacob .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM, 1753e4387966SJerin Jacob .subsystem_device_id = PCI_SUB_DEVICE_ID_THUNDERX_PASS2_NICVF, 1754e4387966SJerin Jacob }, 1755e4387966SJerin Jacob { 1756e4387966SJerin Jacob .vendor_id = 0, 1757e4387966SJerin Jacob }, 1758e4387966SJerin Jacob }; 1759e4387966SJerin Jacob 1760e4387966SJerin Jacob static struct eth_driver rte_nicvf_pmd = { 1761e4387966SJerin Jacob .pci_drv = { 1762e4387966SJerin Jacob .name = "rte_nicvf_pmd", 1763e4387966SJerin Jacob .id_table = pci_id_nicvf_map, 1764e4387966SJerin Jacob .drv_flags = RTE_PCI_DRV_NEED_MAPPING | 
RTE_PCI_DRV_INTR_LSC, 1765e4387966SJerin Jacob }, 1766e4387966SJerin Jacob .eth_dev_init = nicvf_eth_dev_init, 1767e4387966SJerin Jacob .dev_private_size = sizeof(struct nicvf), 1768e4387966SJerin Jacob }; 1769e4387966SJerin Jacob 1770e4387966SJerin Jacob static int 1771e4387966SJerin Jacob rte_nicvf_pmd_init(const char *name __rte_unused, const char *para __rte_unused) 1772e4387966SJerin Jacob { 1773e4387966SJerin Jacob PMD_INIT_FUNC_TRACE(); 1774e4387966SJerin Jacob PMD_INIT_LOG(INFO, "librte_pmd_thunderx nicvf version %s", 1775e4387966SJerin Jacob THUNDERX_NICVF_PMD_VERSION); 1776e4387966SJerin Jacob 1777e4387966SJerin Jacob rte_eth_driver_register(&rte_nicvf_pmd); 1778e4387966SJerin Jacob return 0; 1779e4387966SJerin Jacob } 1780e4387966SJerin Jacob 1781e4387966SJerin Jacob static struct rte_driver rte_nicvf_driver = { 1782e4387966SJerin Jacob .type = PMD_PDEV, 1783e4387966SJerin Jacob .init = rte_nicvf_pmd_init, 1784e4387966SJerin Jacob }; 1785e4387966SJerin Jacob 1786*2f45703cSPablo de Lara PMD_REGISTER_DRIVER(rte_nicvf_driver, net_thunderx); 1787*2f45703cSPablo de Lara DRIVER_REGISTER_PCI_TABLE(net_thunderx, pci_id_nicvf_map); 1788