/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium networks Ltd. 2016.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <sys/queue.h>
#include <sys/timerfd.h>

#include <rte_alarm.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_pci.h>
#include <rte_tailq.h>

#include "base/nicvf_plat.h"

#include "nicvf_ethdev.h"
#include "nicvf_rxtx.h"
#include "nicvf_logs.h"

static void nicvf_dev_stop(struct rte_eth_dev *dev);

static inline int
nicvf_atomic_write_link_status(struct rte_eth_dev *dev,
			       struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
		*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static inline void
nicvf_set_eth_link_status(struct nicvf *nic, struct rte_eth_link *link)
{
	link->link_status = nic->link_up;
	link->link_duplex = ETH_LINK_AUTONEG;
	if (nic->duplex == NICVF_HALF_DUPLEX)
		link->link_duplex = ETH_LINK_HALF_DUPLEX;
	else if (nic->duplex == NICVF_FULL_DUPLEX)
		link->link_duplex = ETH_LINK_FULL_DUPLEX;
	link->link_speed = nic->speed;
	link->link_autoneg = ETH_LINK_SPEED_AUTONEG;
}
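/*
 * Link-state changes reach the VF as a PF mailbox message
 * (NIC_MBOX_MSG_BGX_LINK_CHANGE); there is no dedicated link interrupt
 * delivered to the VF here. The callbacks below therefore poll for
 * mailbox interrupts from an EAL alarm that re-arms itself every
 * NICVF_INTR_POLL_INTERVAL_MS, for the lifetime of the port.
 */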
static void
nicvf_interrupt(void *arg)
{
	struct rte_eth_dev *dev = arg;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) {
		if (dev->data->dev_conf.intr_conf.lsc)
			nicvf_set_eth_link_status(nic, &dev->data->dev_link);
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
	}

	rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
				nicvf_interrupt, dev);
}

static void __rte_unused
nicvf_vf_interrupt(void *arg)
{
	struct nicvf *nic = arg;

	nicvf_reg_poll_interrupts(nic);

	rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
				nicvf_vf_interrupt, nic);
}

static int
nicvf_periodic_alarm_start(void (fn)(void *), void *arg)
{
	return rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000, fn, arg);
}

static int
nicvf_periodic_alarm_stop(void (fn)(void *), void *arg)
{
	return rte_eal_alarm_cancel(fn, arg);
}

/*
 * Return 0 means link status changed, -1 means not changed
 */
static int
nicvf_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	struct rte_eth_link link;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	memset(&link, 0, sizeof(link));
	nicvf_set_eth_link_status(nic, &link);
	return nicvf_atomic_write_link_status(dev, &link);
}
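/*
 * The MTU checks below reason in terms of the full L2 frame:
 * frame_size = mtu + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4), so the
 * standard 1500-byte MTU maps to a 1518-byte frame. Without scattered
 * Rx, frame_size plus two VLAN tags must fit in one Rx buffer; with
 * scattered Rx it must fit in NIC_HW_MAX_SEGS buffers.
 */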
static int
nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint32_t buffsz, frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	PMD_INIT_FUNC_TRACE();

	if (frame_size > NIC_HW_MAX_FRS)
		return -EINVAL;

	if (frame_size < NIC_HW_MIN_FRS)
		return -EINVAL;

	buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;

	/*
	 * Refuse mtu that requires the support of scattered packets
	 * when this feature has not been enabled before.
	 */
	if (!dev->data->scattered_rx &&
		(frame_size + 2 * VLAN_TAG_SIZE > buffsz))
		return -EINVAL;

	/* check <seg size> * <max_seg> >= max_frame */
	if (dev->data->scattered_rx &&
		(frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS))
		return -EINVAL;

	if (frame_size > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		dev->data->dev_conf.rxmode.jumbo_frame = 0;

	if (nicvf_mbox_update_hw_max_frs(nic, frame_size))
		return -EINVAL;

	/* Update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)frame_size;
	nic->mtu = mtu;
	return 0;
}

static int
nicvf_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	uint64_t *data = regs->data;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (data == NULL) {
		regs->length = nicvf_reg_get_count();
		regs->width = THUNDERX_REG_BYTES;
		return 0;
	}

	/* Support only full register dump */
	if ((regs->length == 0) ||
		(regs->length == (uint32_t)nicvf_reg_get_count())) {
		regs->version = nic->vendor_id << 16 | nic->device_id;
		nicvf_reg_dump(nic, data);
		return 0;
	}
	return -ENOTSUP;
}
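/*
 * Per-queue counters are exported only for the first
 * RTE_ETHDEV_QUEUE_STAT_CNTRS queues; queues beyond that limit still
 * show up in the aggregate port statistics read afterwards.
 */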
static void
nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	uint16_t qidx;
	struct nicvf_hw_rx_qstats rx_qstats;
	struct nicvf_hw_tx_qstats tx_qstats;
	struct nicvf_hw_stats port_stats;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	/* Reading per RX ring stats */
	for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
		if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		nicvf_hw_get_rx_qstats(nic, &rx_qstats, qidx);
		stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
		stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
	}

	/* Reading per TX ring stats */
	for (qidx = 0; qidx < dev->data->nb_tx_queues; qidx++) {
		if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		nicvf_hw_get_tx_qstats(nic, &tx_qstats, qidx);
		stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
		stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
	}

	nicvf_hw_get_stats(nic, &port_stats);
	stats->ibytes = port_stats.rx_bytes;
	stats->ipackets = port_stats.rx_ucast_frames;
	stats->ipackets += port_stats.rx_bcast_frames;
	stats->ipackets += port_stats.rx_mcast_frames;
	stats->ierrors = port_stats.rx_l2_errors;
	stats->imissed = port_stats.rx_drop_red;
	stats->imissed += port_stats.rx_drop_overrun;
	stats->imissed += port_stats.rx_drop_bcast;
	stats->imissed += port_stats.rx_drop_mcast;
	stats->imissed += port_stats.rx_drop_l3_bcast;
	stats->imissed += port_stats.rx_drop_l3_mcast;

	stats->obytes = port_stats.tx_bytes_ok;
	stats->opackets = port_stats.tx_ucast_frames_ok;
	stats->opackets += port_stats.tx_bcast_frames_ok;
	stats->opackets += port_stats.tx_mcast_frames_ok;
	stats->oerrors = port_stats.tx_drops;
}

static const uint32_t *
nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	size_t copied;
	static uint32_t ptypes[32];
	struct nicvf *nic = nicvf_pmd_priv(dev);
	static const uint32_t ptypes_common[] = {
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
	};
	static const uint32_t ptypes_tunnel[] = {
		RTE_PTYPE_TUNNEL_GRE,
		RTE_PTYPE_TUNNEL_GENEVE,
		RTE_PTYPE_TUNNEL_VXLAN,
		RTE_PTYPE_TUNNEL_NVGRE,
	};
	static const uint32_t ptypes_end = RTE_PTYPE_UNKNOWN;

	copied = sizeof(ptypes_common);
	memcpy(ptypes, ptypes_common, copied);
	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
		memcpy((char *)ptypes + copied, ptypes_tunnel,
			sizeof(ptypes_tunnel));
		copied += sizeof(ptypes_tunnel);
	}

	memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end));
	if (dev->rx_pkt_burst == nicvf_recv_pkts ||
		dev->rx_pkt_burst == nicvf_recv_pkts_multiseg)
		return ptypes;

	return NULL;
}
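/*
 * The mailbox reset request takes counter bitmaps: the leading masks
 * (0x3FFF, 0x1F) select the port-level Rx/Tx counters, while rxqs/txqs
 * carry one two-bit field per queue, 0x3 selecting both of that
 * queue's hardware counters (the byte and packet stats read above),
 * hence the (0x3 << (i * 2)) accumulation below.
 */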
static void
nicvf_dev_stats_reset(struct rte_eth_dev *dev)
{
	int i;
	uint16_t rxqs = 0, txqs = 0;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		rxqs |= (0x3 << (i * 2));
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		txqs |= (0x3 << (i * 2));

	nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, rxqs, txqs);
}

/* Promiscuous mode enabled by default in LMAC to VF 1:1 map configuration */
static void
nicvf_dev_promisc_enable(struct rte_eth_dev *dev __rte_unused)
{
}

static inline uint64_t
nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss)
{
	uint64_t nic_rss = 0;

	if (ethdev_rss & ETH_RSS_IPV4)
		nic_rss |= RSS_IP_ENA;

	if (ethdev_rss & ETH_RSS_IPV6)
		nic_rss |= RSS_IP_ENA;

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP)
		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP)
		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP)
		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP)
		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);

	if (ethdev_rss & ETH_RSS_PORT)
		nic_rss |= RSS_L2_EXTENDED_HASH_ENA;

	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
		if (ethdev_rss & ETH_RSS_VXLAN)
			nic_rss |= RSS_TUN_VXLAN_ENA;

		if (ethdev_rss & ETH_RSS_GENEVE)
			nic_rss |= RSS_TUN_GENEVE_ENA;

		if (ethdev_rss & ETH_RSS_NVGRE)
			nic_rss |= RSS_TUN_NVGRE_ENA;
	}

	return nic_rss;
}

static inline uint64_t
nicvf_rss_nic_to_ethdev(struct nicvf *nic, uint64_t nic_rss)
{
	uint64_t ethdev_rss = 0;

	if (nic_rss & RSS_IP_ENA)
		ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6);

	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA))
		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP |
				ETH_RSS_NONFRAG_IPV6_TCP);

	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA))
		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP |
				ETH_RSS_NONFRAG_IPV6_UDP);

	if (nic_rss & RSS_L2_EXTENDED_HASH_ENA)
		ethdev_rss |= ETH_RSS_PORT;

	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
		if (nic_rss & RSS_TUN_VXLAN_ENA)
			ethdev_rss |= ETH_RSS_VXLAN;

		if (nic_rss & RSS_TUN_GENEVE_ENA)
			ethdev_rss |= ETH_RSS_GENEVE;

		if (nic_rss & RSS_TUN_NVGRE_ENA)
			ethdev_rss |= ETH_RSS_NVGRE;
	}
	return ethdev_rss;
}
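/*
 * RETA helpers: the ethdev API hands the indirection table over in
 * groups of RTE_RETA_GROUP_SIZE entries, with a per-group bitmask
 * selecting which entries are valid. Only full
 * NIC_MAX_RSS_IDR_TBL_SIZE-entry tables are accepted, matching the
 * size of the hardware indirection table.
 */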
static int
nicvf_dev_reta_query(struct rte_eth_dev *dev,
		     struct rte_eth_rss_reta_entry64 *reta_conf,
		     uint16_t reta_size)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
	int ret, i, j;

	if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
		RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
			"(%d) doesn't match the number hardware can support "
			"(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
		return -EINVAL;
	}

	ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
	if (ret)
		return ret;

	/* Copy RETA table */
	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = tbl[j];
	}

	return 0;
}

static int
nicvf_dev_reta_update(struct rte_eth_dev *dev,
		      struct rte_eth_rss_reta_entry64 *reta_conf,
		      uint16_t reta_size)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
	int ret, i, j;

	if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
		RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
			"(%d) doesn't match the number hardware can support "
			"(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
		return -EINVAL;
	}

	ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
	if (ret)
		return ret;

	/* Copy RETA table */
	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				tbl[j] = reta_conf[i].reta[j];
	}

	return nicvf_rss_reta_update(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
}
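/*
 * The hardware hash key has a fixed size (RSS_HASH_KEY_BYTE_SIZE), so
 * the get path always reports that length and the update path rejects
 * any other key length.
 */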
static int
nicvf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (rss_conf->rss_key)
		nicvf_rss_get_key(nic, rss_conf->rss_key);

	rss_conf->rss_key_len = RSS_HASH_KEY_BYTE_SIZE;
	rss_conf->rss_hf = nicvf_rss_nic_to_ethdev(nic, nicvf_rss_get_cfg(nic));
	return 0;
}

static int
nicvf_dev_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint64_t nic_rss;

	if (rss_conf->rss_key &&
		rss_conf->rss_key_len != RSS_HASH_KEY_BYTE_SIZE) {
		RTE_LOG(ERR, PMD, "Hash key size mismatch %d",
				rss_conf->rss_key_len);
		return -EINVAL;
	}

	if (rss_conf->rss_key)
		nicvf_rss_set_key(nic, rss_conf->rss_key);

	nic_rss = nicvf_rss_ethdev_to_nic(nic, rss_conf->rss_hf);
	nicvf_rss_set_cfg(nic, nic_rss);
	return 0;
}

static int
nicvf_qset_cq_alloc(struct nicvf *nic, struct nicvf_rxq *rxq, uint16_t qidx,
		    uint32_t desc_cnt)
{
	const struct rte_memzone *rz;
	uint32_t ring_size = CMP_QUEUE_SZ_MAX * sizeof(union cq_entry_t);

	rz = rte_eth_dma_zone_reserve(nic->eth_dev, "cq_ring", qidx, ring_size,
				      NICVF_CQ_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for cq hw ring");
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	rxq->phys = rz->phys_addr;
	rxq->desc = rz->addr;
	rxq->qlen_mask = desc_cnt - 1;

	return 0;
}
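/*
 * Note: the CQ/SQ/RBDR memzones here are reserved at the maximum ring
 * size (CMP_QUEUE_SZ_MAX and friends) rather than at desc_cnt entries.
 * rte_eth_dma_zone_reserve() hands back the already-reserved zone on a
 * repeat call, so sizing for the maximum likely lets a queue be set up
 * again with a larger descriptor count without re-reserving; only
 * qlen_mask tracks the count actually in use.
 */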
static int
nicvf_qset_sq_alloc(struct nicvf *nic, struct nicvf_txq *sq, uint16_t qidx,
		    uint32_t desc_cnt)
{
	const struct rte_memzone *rz;
	uint32_t ring_size = SND_QUEUE_SZ_MAX * sizeof(union sq_entry_t);

	rz = rte_eth_dma_zone_reserve(nic->eth_dev, "sq", qidx, ring_size,
				      NICVF_SQ_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for sq hw ring");
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	sq->phys = rz->phys_addr;
	sq->desc = rz->addr;
	sq->qlen_mask = desc_cnt - 1;

	return 0;
}

static int
nicvf_qset_rbdr_alloc(struct nicvf *nic, uint32_t desc_cnt, uint32_t buffsz)
{
	struct nicvf_rbdr *rbdr;
	const struct rte_memzone *rz;
	uint32_t ring_size;

	assert(nic->rbdr == NULL);
	rbdr = rte_zmalloc_socket("rbdr", sizeof(struct nicvf_rbdr),
				  RTE_CACHE_LINE_SIZE, nic->node);
	if (rbdr == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr");
		return -ENOMEM;
	}

	ring_size = sizeof(struct rbdr_entry_t) * RBDR_QUEUE_SZ_MAX;
	rz = rte_eth_dma_zone_reserve(nic->eth_dev, "rbdr", 0, ring_size,
				      NICVF_RBDR_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr desc ring");
		/* Don't leak the rbdr context on failure */
		rte_free(rbdr);
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	rbdr->phys = rz->phys_addr;
	rbdr->tail = 0;
	rbdr->next_tail = 0;
	rbdr->desc = rz->addr;
	rbdr->buffsz = buffsz;
	rbdr->qlen_mask = desc_cnt - 1;
	rbdr->rbdr_status =
		nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_STATUS0;
	rbdr->rbdr_door =
		nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_DOOR;

	nic->rbdr = rbdr;
	return 0;
}
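/*
 * RBDR entries hold only physical buffer addresses, so releasing one
 * means mapping the address back to its mbuf via a cached phys-to-virt
 * delta (mbuf_phys_off). The buffer is returned to the first queue that
 * still has precharged buffers outstanding; this is safe because
 * nicvf_dev_start() enforces an identical offset across all pools on
 * the port.
 */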
static void
nicvf_rbdr_release_mbuf(struct nicvf *nic, nicvf_phys_addr_t phy)
{
	uint16_t qidx;
	void *obj;
	struct nicvf_rxq *rxq;

	for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) {
		rxq = nic->eth_dev->data->rx_queues[qidx];
		if (rxq->precharge_cnt) {
			obj = (void *)nicvf_mbuff_phy2virt(phy,
							   rxq->mbuf_phys_off);
			rte_mempool_put(rxq->pool, obj);
			rxq->precharge_cnt--;
			break;
		}
	}
}

static inline void
nicvf_rbdr_release_mbufs(struct nicvf *nic)
{
	uint32_t qlen_mask, head;
	struct rbdr_entry_t *entry;
	struct nicvf_rbdr *rbdr = nic->rbdr;

	qlen_mask = rbdr->qlen_mask;
	head = rbdr->head;
	while (head != rbdr->tail) {
		entry = rbdr->desc + head;
		nicvf_rbdr_release_mbuf(nic, entry->full_addr);
		head++;
		head = head & qlen_mask;
	}
}

static inline void
nicvf_tx_queue_release_mbufs(struct nicvf_txq *txq)
{
	uint32_t head;

	head = txq->head;
	while (head != txq->tail) {
		if (txq->txbuffs[head]) {
			rte_pktmbuf_free_seg(txq->txbuffs[head]);
			txq->txbuffs[head] = NULL;
		}
		head++;
		head = head & txq->qlen_mask;
	}
}

static void
nicvf_tx_queue_reset(struct nicvf_txq *txq)
{
	uint32_t txq_desc_cnt = txq->qlen_mask + 1;

	memset(txq->desc, 0, sizeof(union sq_entry_t) * txq_desc_cnt);
	memset(txq->txbuffs, 0, sizeof(struct rte_mbuf *) * txq_desc_cnt);
	txq->tail = 0;
	txq->head = 0;
	txq->xmit_bufs = 0;
}

static inline int
nicvf_start_tx_queue(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct nicvf_txq *txq;
	int ret;

	if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	txq = dev->data->tx_queues[qidx];
	txq->pool = NULL;
	ret = nicvf_qset_sq_config(nicvf_pmd_priv(dev), qidx, txq);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure sq %d %d", qidx, ret);
		goto config_sq_error;
	}

	dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
	return ret;

config_sq_error:
	nicvf_qset_sq_reclaim(nicvf_pmd_priv(dev), qidx);
	return ret;
}

static inline int
nicvf_stop_tx_queue(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct nicvf_txq *txq;
	int ret;

	if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;

	ret = nicvf_qset_sq_reclaim(nicvf_pmd_priv(dev), qidx);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim sq %d %d", qidx, ret);

	txq = dev->data->tx_queues[qidx];
	nicvf_tx_queue_release_mbufs(txq);
	nicvf_tx_queue_reset(txq);
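/*
 * The CPI configuration is derived from the number of Rx queues
 * currently in the STARTED state, so it is recomputed every time an
 * individual Rx queue is started or stopped.
 */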
	dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return ret;
}

static inline int
nicvf_configure_cpi(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint16_t qidx, qcnt;
	int ret;

	/* Count started rx queues */
	for (qidx = qcnt = 0; qidx < dev->data->nb_rx_queues; qidx++)
		if (dev->data->rx_queue_state[qidx] ==
		    RTE_ETH_QUEUE_STATE_STARTED)
			qcnt++;

	nic->cpi_alg = CPI_ALG_NONE;
	ret = nicvf_mbox_config_cpi(nic, qcnt);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to configure CPI %d", ret);

	return ret;
}

static inline int
nicvf_configure_rss(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint64_t rsshf;
	int ret = -EINVAL;

	rsshf = nicvf_rss_ethdev_to_nic(nic,
			dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);
	PMD_DRV_LOG(INFO, "mode=%d rx_queues=%d loopback=%d rsshf=0x%" PRIx64,
		dev->data->dev_conf.rxmode.mq_mode,
		nic->eth_dev->data->nb_rx_queues,
		nic->eth_dev->data->dev_conf.lpbk_mode, rsshf);

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
		ret = nicvf_rss_term(nic);
	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		ret = nicvf_rss_config(nic,
				nic->eth_dev->data->nb_rx_queues, rsshf);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret);

	return ret;
}
static int
nicvf_configure_rss_reta(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	unsigned int idx, qmap_size;
	uint8_t qmap[RTE_MAX_QUEUES_PER_PORT];
	uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];

	if (nic->cpi_alg != CPI_ALG_NONE)
		return -EINVAL;

	/* Prepare queue map */
	for (idx = 0, qmap_size = 0; idx < dev->data->nb_rx_queues; idx++) {
		if (dev->data->rx_queue_state[idx] ==
				RTE_ETH_QUEUE_STATE_STARTED)
			qmap[qmap_size++] = idx;
	}

	/* No started Rx queue: avoid the modulo-by-zero below */
	if (qmap_size == 0)
		return 0;

	/* Update default RSS RETA */
	for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
		default_reta[idx] = qmap[idx % qmap_size];

	return nicvf_rss_reta_update(nic, default_reta,
				     NIC_MAX_RSS_IDR_TBL_SIZE);
}

static void
nicvf_dev_tx_queue_release(void *sq)
{
	struct nicvf_txq *txq;

	PMD_INIT_FUNC_TRACE();

	txq = (struct nicvf_txq *)sq;
	if (txq) {
		if (txq->txbuffs != NULL) {
			nicvf_tx_queue_release_mbufs(txq);
			rte_free(txq->txbuffs);
			txq->txbuffs = NULL;
		}
		rte_free(txq);
	}
}

static void
nicvf_set_tx_function(struct rte_eth_dev *dev)
{
	struct nicvf_txq *txq = NULL;
	size_t i;
	bool multiseg = false;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if ((txq->txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS) == 0) {
			multiseg = true;
			break;
		}
	}

	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
	if (multiseg) {
		PMD_DRV_LOG(DEBUG, "Using multi-segment tx callback");
		dev->tx_pkt_burst = nicvf_xmit_pkts_multiseg;
	} else {
		PMD_DRV_LOG(DEBUG, "Using single-segment tx callback");
		dev->tx_pkt_burst = nicvf_xmit_pkts;
	}

	/* txq stays NULL when no Tx queue has been set up yet */
	if (txq == NULL)
		return;

	if (txq->pool_free == nicvf_single_pool_free_xmited_buffers)
		PMD_DRV_LOG(DEBUG, "Using single-mempool tx free method");
	else
		PMD_DRV_LOG(DEBUG, "Using multi-mempool tx free method");
}

static void
nicvf_set_rx_function(struct rte_eth_dev *dev)
{
	if (dev->data->scattered_rx) {
		PMD_DRV_LOG(DEBUG, "Using multi-segment rx callback");
		dev->rx_pkt_burst = nicvf_recv_pkts_multiseg;
	} else {
		PMD_DRV_LOG(DEBUG, "Using single-segment rx callback");
		dev->rx_pkt_burst = nicvf_recv_pkts;
	}
}
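/*
 * A queue qualifies for the "single pool" Tx free path when the
 * application promises, via ETH_TXQ_FLAGS_NOREFCOUNT and
 * ETH_TXQ_FLAGS_NOMULTMEMP, that transmitted mbufs are non-refcounted
 * and come from a single mempool; completed buffers can then be
 * returned straight to that pool (nicvf_single_pool_free_xmited_buffers)
 * instead of going through the generic per-mbuf free path, which also
 * allows a smaller default free threshold.
 */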
static int
nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
			 uint16_t nb_desc, unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	uint16_t tx_free_thresh;
	uint8_t is_single_pool;
	struct nicvf_txq *txq;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	/* Socket id check */
	if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
			    socket_id, nic->node);

	/* Tx deferred start is not supported */
	if (tx_conf->tx_deferred_start) {
		PMD_INIT_LOG(ERR, "Tx deferred start not supported");
		return -EINVAL;
	}

	/* Roundup nb_desc to available qsize and validate max number of desc */
	nb_desc = nicvf_qsize_sq_roundup(nb_desc);
	if (nb_desc == 0) {
		PMD_INIT_LOG(ERR, "Value of nb_desc beyond available sq qsize");
		return -EINVAL;
	}

	/* Validate tx_free_thresh */
	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
				    tx_conf->tx_free_thresh :
				    NICVF_DEFAULT_TX_FREE_THRESH);

	if (tx_free_thresh > (nb_desc) ||
	    tx_free_thresh > NICVF_MAX_TX_FREE_THRESH) {
		PMD_INIT_LOG(ERR,
			"tx_free_thresh must be less than the number of TX "
			"descriptors. (tx_free_thresh=%u port=%d "
			"queue=%d)", (unsigned int)tx_free_thresh,
			(int)dev->data->port_id, (int)qidx);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->tx_queues[qidx] != NULL) {
		PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
			   qidx);
		nicvf_dev_tx_queue_release(dev->data->tx_queues[qidx]);
		dev->data->tx_queues[qidx] = NULL;
	}

	/* Allocating tx queue data structure */
	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nicvf_txq),
				 RTE_CACHE_LINE_SIZE, nic->node);
	if (txq == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate txq=%d", qidx);
		return -ENOMEM;
	}

	txq->nic = nic;
	txq->queue_id = qidx;
	txq->tx_free_thresh = tx_free_thresh;
	txq->txq_flags = tx_conf->txq_flags;
	txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
	txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
	is_single_pool = (txq->txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT &&
			  txq->txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP);
	/* Choose optimum free threshold value for multipool case */
	if (!is_single_pool) {
		txq->tx_free_thresh = (uint16_t)
		(tx_conf->tx_free_thresh == NICVF_DEFAULT_TX_FREE_THRESH ?
				NICVF_TX_FREE_MPOOL_THRESH :
				tx_conf->tx_free_thresh);
		txq->pool_free = nicvf_multi_pool_free_xmited_buffers;
	} else {
		txq->pool_free = nicvf_single_pool_free_xmited_buffers;
	}

	/* Allocate software ring */
	txq->txbuffs = rte_zmalloc_socket("txq->txbuffs",
				nb_desc * sizeof(struct rte_mbuf *),
				RTE_CACHE_LINE_SIZE, nic->node);

	if (txq->txbuffs == NULL) {
		nicvf_dev_tx_queue_release(txq);
		return -ENOMEM;
	}

	if (nicvf_qset_sq_alloc(nic, txq, qidx, nb_desc)) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx);
		nicvf_dev_tx_queue_release(txq);
		return -ENOMEM;
	}

	nicvf_tx_queue_reset(txq);

	PMD_TX_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p phys=0x%" PRIx64,
		   qidx, txq, nb_desc, txq->desc, txq->phys);

	dev->data->tx_queues[qidx] = txq;
	dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

static inline void
nicvf_rx_queue_release_mbufs(struct nicvf_rxq *rxq)
{
	uint32_t rxq_cnt;
	uint32_t nb_pkts, released_pkts = 0;
	uint32_t refill_cnt = 0;
	struct rte_eth_dev *dev = rxq->nic->eth_dev;
	struct rte_mbuf *rx_pkts[NICVF_MAX_RX_FREE_THRESH];

	if (dev->rx_pkt_burst == NULL)
		return;

	while ((rxq_cnt = nicvf_dev_rx_queue_count(dev, rxq->queue_id))) {
		nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts,
					    NICVF_MAX_RX_FREE_THRESH);
		PMD_DRV_LOG(INFO, "nb_pkts=%d  rxq_cnt=%d", nb_pkts, rxq_cnt);
		while (nb_pkts) {
			rte_pktmbuf_free_seg(rx_pkts[--nb_pkts]);
			released_pkts++;
		}
	}

	refill_cnt += nicvf_dev_rbdr_refill(dev, rxq->queue_id);
	PMD_DRV_LOG(INFO, "free_cnt=%d  refill_cnt=%d",
		    released_pkts, refill_cnt);
}

static void
nicvf_rx_queue_reset(struct nicvf_rxq *rxq)
{
	rxq->head = 0;
	rxq->available_space = 0;
	rxq->recv_buffers = 0;
}
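/*
 * Starting an Rx queue points it at the shared RBDR, then configures
 * the receive queue (RQ) followed by its completion queue (CQ); on a
 * CQ failure the already-configured RQ is reclaimed, so the queue is
 * left fully stopped rather than half-started.
 */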
static inline int
nicvf_start_rx_queue(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	struct nicvf_rxq *rxq;
	int ret;

	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	/* Update rbdr pointer to all rxq */
	rxq = dev->data->rx_queues[qidx];
	rxq->shared_rbdr = nic->rbdr;

	ret = nicvf_qset_rq_config(nic, qidx, rxq);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure rq %d %d", qidx, ret);
		goto config_rq_error;
	}
	ret = nicvf_qset_cq_config(nic, qidx, rxq);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure cq %d %d", qidx, ret);
		goto config_cq_error;
	}

	dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;

config_cq_error:
	nicvf_qset_cq_reclaim(nic, qidx);
config_rq_error:
	nicvf_qset_rq_reclaim(nic, qidx);
	return ret;
}

static inline int
nicvf_stop_rx_queue(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	struct nicvf_rxq *rxq;
	int ret, other_error;

	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;

	ret = nicvf_qset_rq_reclaim(nic, qidx);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim rq %d %d", qidx, ret);

	other_error = ret;
	rxq = dev->data->rx_queues[qidx];
	nicvf_rx_queue_release_mbufs(rxq);
	nicvf_rx_queue_reset(rxq);

	ret = nicvf_qset_cq_reclaim(nic, qidx);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim cq %d %d", qidx, ret);

	other_error |= ret;
	dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return other_error;
}

static void
nicvf_dev_rx_queue_release(void *rx_queue)
{
	PMD_INIT_FUNC_TRACE();

	rte_free(rx_queue);
}
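/*
 * The ethdev-level entry points below chain the raw queue start/stop
 * with a CPI and default-RETA refresh, keeping traffic steered only at
 * queues that are actually running; the stop path OR-s the individual
 * return codes together so a steering-refresh failure is still
 * reported to the caller.
 */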
static int
nicvf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	int ret;

	ret = nicvf_start_rx_queue(dev, qidx);
	if (ret)
		return ret;

	ret = nicvf_configure_cpi(dev);
	if (ret)
		return ret;

	return nicvf_configure_rss_reta(dev);
}

static int
nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	int ret;

	ret = nicvf_stop_rx_queue(dev, qidx);
	ret |= nicvf_configure_cpi(dev);
	ret |= nicvf_configure_rss_reta(dev);
	return ret;
}

static int
nicvf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	return nicvf_start_tx_queue(dev, qidx);
}

static int
nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	return nicvf_stop_tx_queue(dev, qidx);
}
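/*
 * Rx buffers are handed to hardware by physical address, so the Rx
 * mempool must sit in a single, physically contiguous memory chunk;
 * both constraints are validated up front in the setup routine below,
 * before any queue state is allocated.
 */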
static int
nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
			 uint16_t nb_desc, unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	uint16_t rx_free_thresh;
	struct nicvf_rxq *rxq;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	/* Socket id check */
	if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
			    socket_id, nic->node);

	/* Mempool memory must be contiguous, so must be one memory segment */
	if (mp->nb_mem_chunks != 1) {
		PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
		return -EINVAL;
	}

	/* Mempool memory must be physically contiguous */
	if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG) {
		PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous");
		return -EINVAL;
	}

	/* Rx deferred start is not supported */
	if (rx_conf->rx_deferred_start) {
		PMD_INIT_LOG(ERR, "Rx deferred start not supported");
		return -EINVAL;
	}

	/* Roundup nb_desc to available qsize and validate max number of desc */
	nb_desc = nicvf_qsize_cq_roundup(nb_desc);
	if (nb_desc == 0) {
		PMD_INIT_LOG(ERR, "Value nb_desc beyond available hw cq qsize");
		return -EINVAL;
	}

	/* Check rx_free_thresh upper bound */
	rx_free_thresh = (uint16_t)((rx_conf->rx_free_thresh) ?
				    rx_conf->rx_free_thresh :
				    NICVF_DEFAULT_RX_FREE_THRESH);
	if (rx_free_thresh > NICVF_MAX_RX_FREE_THRESH ||
	    rx_free_thresh >= nb_desc * .75) {
		PMD_INIT_LOG(ERR, "rx_free_thresh greater than expected %d",
			     rx_free_thresh);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed */
	if (dev->data->rx_queues[qidx] != NULL) {
		PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
			   qidx);
		nicvf_dev_rx_queue_release(dev->data->rx_queues[qidx]);
		dev->data->rx_queues[qidx] = NULL;
	}

	/* Allocate rxq memory */
	rxq = rte_zmalloc_socket("ethdev rx queue", sizeof(struct nicvf_rxq),
				 RTE_CACHE_LINE_SIZE, nic->node);
	if (rxq == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate rxq=%d", qidx);
		return -ENOMEM;
	}

	rxq->nic = nic;
	rxq->pool = mp;
	rxq->queue_id = qidx;
	rxq->port_id = dev->data->port_id;
	rxq->rx_free_thresh = rx_free_thresh;
	rxq->rx_drop_en = rx_conf->rx_drop_en;
	rxq->cq_status = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_STATUS;
	rxq->cq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_DOOR;
	rxq->precharge_cnt = 0;

	if (nicvf_hw_cap(nic) & NICVF_CAP_CQE_RX2)
		rxq->rbptr_offset = NICVF_CQE_RX2_RBPTR_WORD;
	else
		rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;

	/* Alloc completion queue */
	if (nicvf_qset_cq_alloc(nic, rxq, rxq->queue_id, nb_desc)) {
		PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id);
		nicvf_dev_rx_queue_release(rxq);
		return -ENOMEM;
	}

	nicvf_rx_queue_reset(rxq);

	PMD_RX_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d) phy=%" PRIx64,
		   qidx, rxq, mp->name, nb_desc,
		   rte_mempool_avail_count(mp), rxq->phys);

	dev->data->rx_queues[qidx] = rxq;
	dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

static void
nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	dev_info->min_rx_bufsize = ETHER_MIN_MTU;
	dev_info->max_rx_pktlen = NIC_HW_MAX_FRS;
	dev_info->max_rx_queues = (uint16_t)MAX_RCV_QUEUES_PER_QS;
	dev_info->max_tx_queues = (uint16_t)MAX_SND_QUEUES_PER_QS;
	dev_info->max_mac_addrs = 1;
	dev_info->max_vfs = dev->pci_dev->max_vfs;

	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;

	dev_info->reta_size = nic->rss_info.rss_size;
	dev_info->hash_key_size = RSS_HASH_KEY_BYTE_SIZE;
	dev_info->flow_type_rss_offloads = NICVF_RSS_OFFLOAD_PASS1;
	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING)
		dev_info->flow_type_rss_offloads |= NICVF_RSS_OFFLOAD_TUNNEL;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
		.txq_flags =
			ETH_TXQ_FLAGS_NOMULTSEGS |
			ETH_TXQ_FLAGS_NOREFCOUNT |
			ETH_TXQ_FLAGS_NOMULTMEMP |
			ETH_TXQ_FLAGS_NOVLANOFFL |
			ETH_TXQ_FLAGS_NOXSUMSCTP,
	};
}

static nicvf_phys_addr_t
rbdr_rte_mempool_get(void *dev, void *opaque)
{
	uint16_t qidx;
	uintptr_t mbuf;
	struct nicvf_rxq *rxq;
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)dev;
	struct nicvf *nic __rte_unused = (struct nicvf *)opaque;

	for (qidx = 0; qidx < eth_dev->data->nb_rx_queues; qidx++) {
		rxq = eth_dev->data->rx_queues[qidx];
		/* Maintain equal buffer count across all pools */
		if (rxq->precharge_cnt >= rxq->qlen_mask)
			continue;
		rxq->precharge_cnt++;
		mbuf = (uintptr_t)rte_pktmbuf_alloc(rxq->pool);
		if (mbuf)
			return nicvf_mbuff_virt2phy(mbuf, rxq->mbuf_phys_off);
	}
	return 0;
}
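
/*
 * rbdr_rte_mempool_get() above is the buffer supplier handed to
 * nicvf_qset_rbdr_precharge() in nicvf_dev_start() below: each call takes
 * one mbuf from the first Rx queue pool that has not yet contributed
 * qlen_mask buffers (keeping the pools evenly drained) and returns the
 * corresponding physical address to charge into the RBDR, or 0 once all
 * pools are exhausted.
 */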

static int
nicvf_dev_start(struct rte_eth_dev *dev)
{
	int ret;
	uint16_t qidx;
	uint32_t buffsz = 0, rbdrsz = 0;
	uint32_t total_rxq_desc, nb_rbdr_desc, exp_buffs;
	uint64_t mbuf_phys_off = 0;
	struct nicvf_rxq *rxq;
	struct rte_pktmbuf_pool_private *mbp_priv;
	struct rte_mbuf *mbuf;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
	uint16_t mtu;

	PMD_INIT_FUNC_TRACE();

	/* The userspace process exited without a proper shutdown in the last run */
	if (nicvf_qset_rbdr_active(nic, 0))
		nicvf_dev_stop(dev);

	/*
	 * The ThunderX nicvf PMD supports more than one pool per port only
	 * when:
	 * 1) the data payload size is the same across all pools of the port,
	 * AND
	 * 2) all mbufs in the pools come from the same hugepage,
	 * AND
	 * 3) the mbuf metadata size is the same across all pools of the port.
	 *
	 * This accommodates existing applications that use multiple pools per
	 * port; using multiple pools for QoS purposes is not addressed.
	 */

	/* Validate RBDR buff size */
	for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) {
		rxq = dev->data->rx_queues[qidx];
		mbp_priv = rte_mempool_get_priv(rxq->pool);
		buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
		if (buffsz % 128) {
			PMD_INIT_LOG(ERR, "rxbuf size must be a multiple of 128");
			return -EINVAL;
		}
		if (rbdrsz == 0)
			rbdrsz = buffsz;
		if (rbdrsz != buffsz) {
			PMD_INIT_LOG(ERR, "buffsz not same, qid=%d (%d/%d)",
				     qidx, rbdrsz, buffsz);
			return -EINVAL;
		}
	}

	/* Validate mempool attributes */
	for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) {
		rxq = dev->data->rx_queues[qidx];
		rxq->mbuf_phys_off = nicvf_mempool_phy_offset(rxq->pool);
		mbuf = rte_pktmbuf_alloc(rxq->pool);
		if (mbuf == NULL) {
			PMD_INIT_LOG(ERR, "Failed allocate mbuf qid=%d pool=%s",
				     qidx, rxq->pool->name);
			return -ENOMEM;
		}
		rxq->mbuf_phys_off -= nicvf_mbuff_meta_length(mbuf);
		rxq->mbuf_phys_off -= RTE_PKTMBUF_HEADROOM;
		rte_pktmbuf_free(mbuf);

		if (mbuf_phys_off == 0)
			mbuf_phys_off = rxq->mbuf_phys_off;
		if (mbuf_phys_off != rxq->mbuf_phys_off) {
			PMD_INIT_LOG(ERR, "pool params not same,%s %" PRIx64,
				     rxq->pool->name, mbuf_phys_off);
			return -EINVAL;
		}
	}
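
	/*
	 * Every pool must yield the same mbuf_phys_off: rbdr_rte_mempool_get()
	 * above converts an mbuf virtual address to the physical address
	 * handed to the hardware using just this single per-port offset, so
	 * all pools have to agree on it. This only holds when metadata length
	 * and headroom match across pools (condition 3 of the multipool rules
	 * above).
	 */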

	/* Check the level of buffers in the pool */
	total_rxq_desc = 0;
	for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) {
		rxq = dev->data->rx_queues[qidx];
		/* Count total numbers of rxq descs */
		total_rxq_desc += rxq->qlen_mask + 1;
		exp_buffs = RTE_MEMPOOL_CACHE_MAX_SIZE + rxq->rx_free_thresh;
		exp_buffs *= nic->eth_dev->data->nb_rx_queues;
		if (rte_mempool_avail_count(rxq->pool) < exp_buffs) {
			PMD_INIT_LOG(ERR, "Buff shortage in pool=%s (%d/%d)",
				     rxq->pool->name,
				     rte_mempool_avail_count(rxq->pool),
				     exp_buffs);
			return -ENOENT;
		}
	}

	/* Check RBDR desc overflow */
	ret = nicvf_qsize_rbdr_roundup(total_rxq_desc);
	if (ret == 0) {
		PMD_INIT_LOG(ERR, "Reached RBDR desc limit, reduce nr desc");
		return -ENOMEM;
	}

	/* Enable qset */
	ret = nicvf_qset_config(nic);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to enable qset %d", ret);
		return ret;
	}

	/* Allocate RBDR and RBDR ring desc */
	nb_rbdr_desc = nicvf_qsize_rbdr_roundup(total_rxq_desc);
	ret = nicvf_qset_rbdr_alloc(nic, nb_rbdr_desc, rbdrsz);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory for rbdr alloc");
		goto qset_reclaim;
	}

	/* Enable and configure RBDR registers */
	ret = nicvf_qset_rbdr_config(nic, 0);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure rbdr %d", ret);
		goto qset_rbdr_free;
	}

	/* Fill rte_mempool buffers in RBDR pool and precharge it */
	ret = nicvf_qset_rbdr_precharge(dev, nic, 0, rbdr_rte_mempool_get,
					total_rxq_desc);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to fill rbdr %d", ret);
		goto qset_rbdr_reclaim;
	}

	PMD_DRV_LOG(INFO, "Filled %d out of %d entries in RBDR",
		    nic->rbdr->tail, nb_rbdr_desc);

	/* Configure RX queues */
	for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) {
		ret = nicvf_start_rx_queue(dev, qidx);
		if (ret)
			goto start_rxq_error;
	}
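
	/*
	 * Any failure from this point (and the Rx loop above) unwinds via the
	 * goto labels at the bottom of this function, releasing queues, the
	 * RBDR and finally the qset in reverse order of their setup.
	 */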

	/* Configure VLAN strip */
	nicvf_vlan_hw_strip(nic, dev->data->dev_conf.rxmode.hw_vlan_strip);

	/* Configure TX queues */
	for (qidx = 0; qidx < nic->eth_dev->data->nb_tx_queues; qidx++) {
		ret = nicvf_start_tx_queue(dev, qidx);
		if (ret)
			goto start_txq_error;
	}

	/* Configure CPI algorithm */
	ret = nicvf_configure_cpi(dev);
	if (ret)
		goto start_txq_error;

	/* Configure RSS */
	ret = nicvf_configure_rss(dev);
	if (ret)
		goto qset_rss_error;

	/* Configure loopback */
	ret = nicvf_loopback_config(nic, dev->data->dev_conf.lpbk_mode);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure loopback %d", ret);
		goto qset_rss_error;
	}

	/* Reset all statistics counters attached to this port */
	ret = nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, 0xFFFF, 0xFFFF);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to reset stat counters %d", ret);
		goto qset_rss_error;
	}

	/* Set up scatter mode if needed for jumbo frames */
	if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
			2 * VLAN_TAG_SIZE > buffsz)
		dev->data->scattered_rx = 1;
	if (rx_conf->enable_scatter)
		dev->data->scattered_rx = 1;
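
	/*
	 * Illustrative numbers for the check above: with 2048-byte pool
	 * buffers (buffsz) and max_rx_pkt_len = 9000, the frame cannot fit
	 * in a single receive buffer once 2 * VLAN_TAG_SIZE is accounted
	 * for, so scattered_rx is enabled and nicvf_set_rx_function() below
	 * will pick a multi-segment receive handler.
	 */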

	/* Set up the MTU based on max_rx_pkt_len or the default */
	mtu = dev->data->dev_conf.rxmode.jumbo_frame ?
		dev->data->dev_conf.rxmode.max_rx_pkt_len
			- ETHER_HDR_LEN - ETHER_CRC_LEN
		: ETHER_MTU;

	if (nicvf_dev_set_mtu(dev, mtu)) {
		PMD_INIT_LOG(ERR, "Failed to set default mtu size");
		return -EBUSY;
	}

	/* Configure callbacks based on scatter mode */
	nicvf_set_tx_function(dev);
	nicvf_set_rx_function(dev);

	/* Done; let the PF switch the BGX's RX and TX to the ON position */
	nicvf_mbox_cfg_done(nic);
	return 0;

qset_rss_error:
	nicvf_rss_term(nic);
start_txq_error:
	for (qidx = 0; qidx < nic->eth_dev->data->nb_tx_queues; qidx++)
		nicvf_stop_tx_queue(dev, qidx);
start_rxq_error:
	for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++)
		nicvf_stop_rx_queue(dev, qidx);
qset_rbdr_reclaim:
	nicvf_qset_rbdr_reclaim(nic, 0);
	nicvf_rbdr_release_mbufs(nic);
qset_rbdr_free:
	if (nic->rbdr) {
		rte_free(nic->rbdr);
		nic->rbdr = NULL;
	}
qset_reclaim:
	nicvf_qset_reclaim(nic);
	return ret;
}
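
/*
 * nicvf_dev_stop() is not only the .dev_stop handler: nicvf_dev_start()
 * calls it to clean up after a previous run that exited without a proper
 * shutdown, and nicvf_dev_close() calls it on teardown, so it must
 * tolerate partially initialised state (hence the NULL checks around
 * nic->rbdr).
 */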

static void
nicvf_dev_stop(struct rte_eth_dev *dev)
{
	int ret;
	uint16_t qidx;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	/* Let the PF switch the BGX's RX and TX to the OFF position */
	nicvf_mbox_shutdown(nic);

	/* Disable loopback */
	ret = nicvf_loopback_config(nic, 0);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to disable loopback %d", ret);

	/* Disable VLAN strip */
	nicvf_vlan_hw_strip(nic, 0);

	/* Reclaim sq */
	for (qidx = 0; qidx < dev->data->nb_tx_queues; qidx++)
		nicvf_stop_tx_queue(dev, qidx);

	/* Reclaim rq */
	for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++)
		nicvf_stop_rx_queue(dev, qidx);

	/* Reclaim RBDR */
	ret = nicvf_qset_rbdr_reclaim(nic, 0);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim RBDR %d", ret);

	/* Move all charged buffers in RBDR back to their pools */
	if (nic->rbdr != NULL)
		nicvf_rbdr_release_mbufs(nic);

	/* Reclaim CPI configuration */
	if (!nic->sqs_mode) {
		ret = nicvf_mbox_config_cpi(nic, 0);
		if (ret)
			PMD_INIT_LOG(ERR, "Failed to reclaim CPI config");
	}

	/* Disable qset */
	ret = nicvf_qset_reclaim(nic);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to disable qset %d", ret);

	/* Disable all interrupts */
	nicvf_disable_all_interrupts(nic);

	/* Free RBDR SW structure */
	if (nic->rbdr) {
		rte_free(nic->rbdr);
		nic->rbdr = NULL;
	}
}

static void
nicvf_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	nicvf_dev_stop(dev);
	nicvf_periodic_alarm_stop(nicvf_interrupt, dev);
}
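
/*
 * A minimal rte_eth_conf that passes every check in nicvf_dev_configure()
 * below (a sketch against the ethdev config layout used in this file; all
 * omitted fields are zero):
 *
 *	static const struct rte_eth_conf conf = {
 *		.rxmode = {
 *			.mq_mode = ETH_MQ_RX_RSS,
 *			.hw_strip_crc = 1,
 *		},
 *	};
 *
 * DCB/VMDq Tx modes, Rx checksum offload, header split, VLAN
 * filtering/extension, LRO, fixed link speeds, DCB and flow director are
 * all rejected or overridden.
 */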

static int
nicvf_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *conf = &dev->data->dev_conf;
	struct rte_eth_rxmode *rxmode = &conf->rxmode;
	struct rte_eth_txmode *txmode = &conf->txmode;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	if (!rte_eal_has_hugepages()) {
		PMD_INIT_LOG(INFO, "Huge pages are not configured");
		return -EINVAL;
	}

	if (txmode->mq_mode) {
		PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported");
		return -EINVAL;
	}

	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
		rxmode->mq_mode != ETH_MQ_RX_RSS) {
		PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
		return -EINVAL;
	}

	if (!rxmode->hw_strip_crc) {
		PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
		rxmode->hw_strip_crc = 1;
	}

	if (rxmode->hw_ip_checksum) {
		PMD_INIT_LOG(NOTICE, "Rxcksum not supported");
		rxmode->hw_ip_checksum = 0;
	}

	if (rxmode->split_hdr_size) {
		PMD_INIT_LOG(INFO, "Rxmode does not support split header");
		return -EINVAL;
	}

	if (rxmode->hw_vlan_filter) {
		PMD_INIT_LOG(INFO, "VLAN filter not supported");
		return -EINVAL;
	}

	if (rxmode->hw_vlan_extend) {
		PMD_INIT_LOG(INFO, "VLAN extended not supported");
		return -EINVAL;
	}

	if (rxmode->enable_lro) {
		PMD_INIT_LOG(INFO, "LRO not supported");
		return -EINVAL;
	}

	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
		PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported");
		return -EINVAL;
	}

	if (conf->dcb_capability_en) {
		PMD_INIT_LOG(INFO, "DCB enable not supported");
		return -EINVAL;
	}

	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		PMD_INIT_LOG(INFO, "Flow director not supported");
		return -EINVAL;
	}

	PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
		     dev->data->port_id, nicvf_hw_cap(nic));

	return 0;
}

/* Initialize and register the driver with the DPDK application */
static const struct eth_dev_ops nicvf_eth_dev_ops = {
	.dev_configure = nicvf_dev_configure,
	.dev_start = nicvf_dev_start,
	.dev_stop = nicvf_dev_stop,
	.link_update = nicvf_dev_link_update,
	.dev_close = nicvf_dev_close,
	.stats_get = nicvf_dev_stats_get,
	.stats_reset = nicvf_dev_stats_reset,
	.promiscuous_enable = nicvf_dev_promisc_enable,
	.dev_infos_get = nicvf_dev_info_get,
	.dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
	.mtu_set = nicvf_dev_set_mtu,
	.reta_update = nicvf_dev_reta_update,
	.reta_query = nicvf_dev_reta_query,
	.rss_hash_update = nicvf_dev_rss_hash_update,
	.rss_hash_conf_get = nicvf_dev_rss_hash_conf_get,
	.rx_queue_start = nicvf_dev_rx_queue_start,
	.rx_queue_stop = nicvf_dev_rx_queue_stop,
	.tx_queue_start = nicvf_dev_tx_queue_start,
	.tx_queue_stop = nicvf_dev_tx_queue_stop,
	.rx_queue_setup = nicvf_dev_rx_queue_setup,
	.rx_queue_release = nicvf_dev_rx_queue_release,
	.rx_queue_count = nicvf_dev_rx_queue_count,
	.tx_queue_setup = nicvf_dev_tx_queue_setup,
	.tx_queue_release = nicvf_dev_tx_queue_release,
	.get_reg = nicvf_dev_get_regs,
};
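
/*
 * Rough application-side view of the ops table above (a hedged sketch of
 * the usual ethdev call sequence of this DPDK generation; error handling
 * and a populated mempool `mp` are assumed). Each call lands in the
 * corresponding handler above:
 *
 *	rte_eth_dev_configure(port, 1, 1, &conf);	// nicvf_dev_configure
 *	rte_eth_rx_queue_setup(port, 0, 512,
 *			rte_socket_id(), NULL, mp);	// nicvf_dev_rx_queue_setup
 *	rte_eth_tx_queue_setup(port, 0, 512,
 *			rte_socket_id(), NULL);		// nicvf_dev_tx_queue_setup
 *	rte_eth_dev_start(port);			// nicvf_dev_start
 */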

static int
nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	int ret;
	struct rte_pci_device *pci_dev;
	struct nicvf *nic = nicvf_pmd_priv(eth_dev);

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &nicvf_eth_dev_ops;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/* Setup callbacks for secondary process */
		nicvf_set_tx_function(eth_dev);
		nicvf_set_rx_function(eth_dev);
		return 0;
	}

	pci_dev = eth_dev->pci_dev;
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	nic->device_id = pci_dev->id.device_id;
	nic->vendor_id = pci_dev->id.vendor_id;
	nic->subsystem_device_id = pci_dev->id.subsystem_device_id;
	nic->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	nic->eth_dev = eth_dev;

	PMD_INIT_LOG(DEBUG, "nicvf: device (%x:%x) %u:%u:%u:%u",
		     pci_dev->id.vendor_id, pci_dev->id.device_id,
		     pci_dev->addr.domain, pci_dev->addr.bus,
		     pci_dev->addr.devid, pci_dev->addr.function);

	nic->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
	if (!nic->reg_base) {
		PMD_INIT_LOG(ERR, "Failed to map BAR0");
		ret = -ENODEV;
		goto fail;
	}

	nicvf_disable_all_interrupts(nic);

	ret = nicvf_periodic_alarm_start(nicvf_interrupt, eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to start periodic alarm");
		goto fail;
	}

	ret = nicvf_mbox_check_pf_ready(nic);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to get ready message from PF");
		goto alarm_fail;
	} else {
		PMD_INIT_LOG(INFO,
			"node=%d vf=%d mode=%s sqs=%s loopback_supported=%s",
			nic->node, nic->vf_id,
			nic->tns_mode == NIC_TNS_MODE ? "tns" : "tns-bypass",
			nic->sqs_mode ? "true" : "false",
			nic->loopback_supported ? "true" : "false");
	}
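
	/*
	 * SQS (secondary qset) VFs are meant to extend another VF's queue
	 * set and carry no usable link of their own; this PMD rejects them
	 * here. Per the existing comment below, the positive return value
	 * asks the probe code to detach the port rather than report a hard
	 * initialization failure.
	 */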
"true" : "false" 1702e4387966SJerin Jacob ); 1703e4387966SJerin Jacob } 1704e4387966SJerin Jacob 1705e4387966SJerin Jacob if (nic->sqs_mode) { 1706e4387966SJerin Jacob PMD_INIT_LOG(INFO, "Unsupported SQS VF detected, Detaching..."); 1707e4387966SJerin Jacob /* Detach port by returning Positive error number */ 1708e4387966SJerin Jacob ret = ENOTSUP; 1709e4387966SJerin Jacob goto alarm_fail; 1710e4387966SJerin Jacob } 1711e4387966SJerin Jacob 1712e4387966SJerin Jacob eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0); 1713e4387966SJerin Jacob if (eth_dev->data->mac_addrs == NULL) { 1714e4387966SJerin Jacob PMD_INIT_LOG(ERR, "Failed to allocate memory for mac addr"); 1715e4387966SJerin Jacob ret = -ENOMEM; 1716e4387966SJerin Jacob goto alarm_fail; 1717e4387966SJerin Jacob } 1718e4387966SJerin Jacob if (is_zero_ether_addr((struct ether_addr *)nic->mac_addr)) 1719e4387966SJerin Jacob eth_random_addr(&nic->mac_addr[0]); 1720e4387966SJerin Jacob 1721e4387966SJerin Jacob ether_addr_copy((struct ether_addr *)nic->mac_addr, 1722e4387966SJerin Jacob ð_dev->data->mac_addrs[0]); 1723e4387966SJerin Jacob 1724e4387966SJerin Jacob ret = nicvf_mbox_set_mac_addr(nic, nic->mac_addr); 1725e4387966SJerin Jacob if (ret) { 1726e4387966SJerin Jacob PMD_INIT_LOG(ERR, "Failed to set mac addr"); 1727e4387966SJerin Jacob goto malloc_fail; 1728e4387966SJerin Jacob } 1729e4387966SJerin Jacob 1730e4387966SJerin Jacob ret = nicvf_base_init(nic); 1731e4387966SJerin Jacob if (ret) { 1732e4387966SJerin Jacob PMD_INIT_LOG(ERR, "Failed to execute nicvf_base_init"); 1733e4387966SJerin Jacob goto malloc_fail; 1734e4387966SJerin Jacob } 1735e4387966SJerin Jacob 1736e4387966SJerin Jacob PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x", 1737e4387966SJerin Jacob eth_dev->data->port_id, nic->vendor_id, nic->device_id, 1738e4387966SJerin Jacob nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2], 1739e4387966SJerin Jacob nic->mac_addr[3], nic->mac_addr[4], nic->mac_addr[5]); 1740e4387966SJerin Jacob 1741e4387966SJerin Jacob return 0; 1742e4387966SJerin Jacob 1743e4387966SJerin Jacob malloc_fail: 1744e4387966SJerin Jacob rte_free(eth_dev->data->mac_addrs); 1745e4387966SJerin Jacob alarm_fail: 1746*f141adcaSKamil Rytarowski nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev); 1747e4387966SJerin Jacob fail: 1748e4387966SJerin Jacob return ret; 1749e4387966SJerin Jacob } 1750e4387966SJerin Jacob 1751e4387966SJerin Jacob static const struct rte_pci_id pci_id_nicvf_map[] = { 1752e4387966SJerin Jacob { 1753e4387966SJerin Jacob .class_id = RTE_CLASS_ANY_ID, 1754e4387966SJerin Jacob .vendor_id = PCI_VENDOR_ID_CAVIUM, 1755398a1be1SJerin Jacob .device_id = PCI_DEVICE_ID_THUNDERX_CN88XX_PASS1_NICVF, 1756e4387966SJerin Jacob .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM, 1757398a1be1SJerin Jacob .subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS1_NICVF, 1758e4387966SJerin Jacob }, 1759e4387966SJerin Jacob { 1760e4387966SJerin Jacob .class_id = RTE_CLASS_ANY_ID, 1761e4387966SJerin Jacob .vendor_id = PCI_VENDOR_ID_CAVIUM, 1762398a1be1SJerin Jacob .device_id = PCI_DEVICE_ID_THUNDERX_NICVF, 1763e4387966SJerin Jacob .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM, 1764398a1be1SJerin Jacob .subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF, 1765e4387966SJerin Jacob }, 1766e4387966SJerin Jacob { 1767b72a7768SJerin Jacob .class_id = RTE_CLASS_ANY_ID, 1768b72a7768SJerin Jacob .vendor_id = PCI_VENDOR_ID_CAVIUM, 1769b72a7768SJerin Jacob .device_id = PCI_DEVICE_ID_THUNDERX_NICVF, 1770b72a7768SJerin Jacob 

static const struct rte_pci_id pci_id_nicvf_map[] = {
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_CN88XX_PASS1_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS1_NICVF,
	},
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF,
	},
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN81XX_NICVF,
	},
	{
		.vendor_id = 0,
	},
};

static struct eth_driver rte_nicvf_pmd = {
	.pci_drv = {
		.id_table = pci_id_nicvf_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = nicvf_eth_dev_init,
	.dev_private_size = sizeof(struct nicvf),
};

DRIVER_REGISTER_PCI(net_thunderx, rte_nicvf_pmd.pci_drv);
DRIVER_REGISTER_PCI_TABLE(net_thunderx, pci_id_nicvf_map);