/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium networks Ltd. 2016.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <sys/queue.h>
#include <sys/timerfd.h>

#include <rte_alarm.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_pci.h>
#include <rte_tailq.h>

#include "base/nicvf_plat.h"

#include "nicvf_ethdev.h"

#include "nicvf_logs.h"

static inline int
nicvf_atomic_write_link_status(struct rte_eth_dev *dev,
			       struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
		*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static inline void
nicvf_set_eth_link_status(struct nicvf *nic, struct rte_eth_link *link)
{
	link->link_status = nic->link_up;
	link->link_duplex = ETH_LINK_AUTONEG;
	if (nic->duplex == NICVF_HALF_DUPLEX)
		link->link_duplex = ETH_LINK_HALF_DUPLEX;
	else if (nic->duplex == NICVF_FULL_DUPLEX)
		link->link_duplex = ETH_LINK_FULL_DUPLEX;
	link->link_speed = nic->speed;
	link->link_autoneg = ETH_LINK_SPEED_AUTONEG;
}
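
/*
 * The VF does not get a dedicated link-status interrupt; link changes are
 * signalled by the PF through the mailbox. Poll the interrupt/mailbox
 * registers from an EAL alarm callback and re-arm the alarm on every pass.
 */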
static void
nicvf_interrupt(void *arg)
{
	struct nicvf *nic = arg;

	if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) {
		if (nic->eth_dev->data->dev_conf.intr_conf.lsc)
			nicvf_set_eth_link_status(nic,
					&nic->eth_dev->data->dev_link);
		_rte_eth_dev_callback_process(nic->eth_dev,
					      RTE_ETH_EVENT_INTR_LSC);
	}

	rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
				nicvf_interrupt, nic);
}

static int
nicvf_periodic_alarm_start(struct nicvf *nic)
{
	return rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
					nicvf_interrupt, nic);
}

static int
nicvf_periodic_alarm_stop(struct nicvf *nic)
{
	return rte_eal_alarm_cancel(nicvf_interrupt, nic);
}

/*
 * Return 0 means link status changed, -1 means not changed
 */
static int
nicvf_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	struct rte_eth_link link;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	memset(&link, 0, sizeof(link));
	nicvf_set_eth_link_status(nic, &link);
	return nicvf_atomic_write_link_status(dev, &link);
}

static int
nicvf_dev_get_reg_length(struct rte_eth_dev *dev __rte_unused)
{
	return nicvf_reg_get_count();
}

static int
nicvf_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	uint64_t *data = regs->data;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (data == NULL)
		return -EINVAL;

	/* Support only full register dump */
	if ((regs->length == 0) ||
	    (regs->length == (uint32_t)nicvf_reg_get_count())) {
		regs->version = nic->vendor_id << 16 | nic->device_id;
		nicvf_reg_dump(nic, data);
		return 0;
	}
	return -ENOTSUP;
}
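
/*
 * Translate between generic ethdev ETH_RSS_* flags and the NIC's RSS_*_ENA
 * hash-field enable bits. Tunnel hash types (VXLAN, Geneve, NVGRE) are
 * mapped only when the hardware advertises tunnel parsing capability.
 */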
static inline uint64_t
nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss)
{
	uint64_t nic_rss = 0;

	if (ethdev_rss & ETH_RSS_IPV4)
		nic_rss |= RSS_IP_ENA;

	if (ethdev_rss & ETH_RSS_IPV6)
		nic_rss |= RSS_IP_ENA;

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP)
		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP)
		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP)
		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP)
		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);

	if (ethdev_rss & ETH_RSS_PORT)
		nic_rss |= RSS_L2_EXTENDED_HASH_ENA;

	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
		if (ethdev_rss & ETH_RSS_VXLAN)
			nic_rss |= RSS_TUN_VXLAN_ENA;

		if (ethdev_rss & ETH_RSS_GENEVE)
			nic_rss |= RSS_TUN_GENEVE_ENA;

		if (ethdev_rss & ETH_RSS_NVGRE)
			nic_rss |= RSS_TUN_NVGRE_ENA;
	}

	return nic_rss;
}

static inline uint64_t
nicvf_rss_nic_to_ethdev(struct nicvf *nic, uint64_t nic_rss)
{
	uint64_t ethdev_rss = 0;

	if (nic_rss & RSS_IP_ENA)
		ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6);

	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA))
		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP |
				ETH_RSS_NONFRAG_IPV6_TCP);

	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA))
		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP |
				ETH_RSS_NONFRAG_IPV6_UDP);

	if (nic_rss & RSS_L2_EXTENDED_HASH_ENA)
		ethdev_rss |= ETH_RSS_PORT;

	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
		if (nic_rss & RSS_TUN_VXLAN_ENA)
			ethdev_rss |= ETH_RSS_VXLAN;

		if (nic_rss & RSS_TUN_GENEVE_ENA)
			ethdev_rss |= ETH_RSS_GENEVE;

		if (nic_rss & RSS_TUN_NVGRE_ENA)
			ethdev_rss |= ETH_RSS_NVGRE;
	}
	return ethdev_rss;
}
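
/*
 * The ethdev RETA API passes the table as groups of RTE_RETA_GROUP_SIZE
 * entries, each with a validity mask, so the flat hardware table tbl[] is
 * indexed as (i * RTE_RETA_GROUP_SIZE) + j while walking the groups.
 */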
static int
nicvf_dev_reta_query(struct rte_eth_dev *dev,
		     struct rte_eth_rss_reta_entry64 *reta_conf,
		     uint16_t reta_size)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
	int ret, i, j;

	if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
		RTE_LOG(ERR, PMD, "The size of the configured hash lookup "
			"table (%d) doesn't match the number of entries "
			"hardware supports (%d)",
			reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
		return -EINVAL;
	}

	ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
	if (ret)
		return ret;

	/* Copy RETA table; tbl[] is flat, reta_conf[] is in 64-entry groups */
	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] =
					tbl[(i * RTE_RETA_GROUP_SIZE) + j];
	}

	return 0;
}

static int
nicvf_dev_reta_update(struct rte_eth_dev *dev,
		      struct rte_eth_rss_reta_entry64 *reta_conf,
		      uint16_t reta_size)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
	int ret, i, j;

	if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
		RTE_LOG(ERR, PMD, "The size of the configured hash lookup "
			"table (%d) doesn't match the number of entries "
			"hardware supports (%d)",
			reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
		return -EINVAL;
	}

	ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
	if (ret)
		return ret;

	/* Copy RETA table; tbl[] is flat, reta_conf[] is in 64-entry groups */
	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				tbl[(i * RTE_RETA_GROUP_SIZE) + j] =
					reta_conf[i].reta[j];
	}

	return nicvf_rss_reta_update(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
}

static int
nicvf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (rss_conf->rss_key)
		nicvf_rss_get_key(nic, rss_conf->rss_key);

	rss_conf->rss_key_len = RSS_HASH_KEY_BYTE_SIZE;
	rss_conf->rss_hf = nicvf_rss_nic_to_ethdev(nic, nicvf_rss_get_cfg(nic));
	return 0;
}

static int
nicvf_dev_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint64_t nic_rss;

	if (rss_conf->rss_key &&
	    rss_conf->rss_key_len != RSS_HASH_KEY_BYTE_SIZE) {
		RTE_LOG(ERR, PMD, "Hash key size mismatch %d",
				rss_conf->rss_key_len);
		return -EINVAL;
	}

	if (rss_conf->rss_key)
		nicvf_rss_set_key(nic, rss_conf->rss_key);

	nic_rss = nicvf_rss_ethdev_to_nic(nic, rss_conf->rss_hf);
	nicvf_rss_set_cfg(nic, nic_rss);
	return 0;
}

static int
nicvf_qset_cq_alloc(struct nicvf *nic, struct nicvf_rxq *rxq, uint16_t qidx,
		    uint32_t desc_cnt)
{
	const struct rte_memzone *rz;
	uint32_t ring_size = desc_cnt * sizeof(union cq_entry_t);

	rz = rte_eth_dma_zone_reserve(nic->eth_dev, "cq_ring", qidx, ring_size,
					NICVF_CQ_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for cq hw ring");
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	rxq->phys = rz->phys_addr;
	rxq->desc = rz->addr;
	rxq->qlen_mask = desc_cnt - 1;

	return 0;
}

static int
nicvf_qset_sq_alloc(struct nicvf *nic, struct nicvf_txq *sq, uint16_t qidx,
		    uint32_t desc_cnt)
{
	const struct rte_memzone *rz;
	uint32_t ring_size = desc_cnt * sizeof(union sq_entry_t);

	rz = rte_eth_dma_zone_reserve(nic->eth_dev, "sq", qidx, ring_size,
					NICVF_SQ_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for sq hw ring");
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	sq->phys = rz->phys_addr;
	sq->desc = rz->addr;
	sq->qlen_mask = desc_cnt - 1;

	return 0;
}
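
/*
 * Tx queue teardown helpers: free any mbufs still referenced by the
 * software ring between head and tail, then clear the descriptor ring and
 * reset the ring indices so the queue can be reused after stop/start.
 */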
static inline void
nicvf_tx_queue_release_mbufs(struct nicvf_txq *txq)
{
	uint32_t head;

	head = txq->head;
	while (head != txq->tail) {
		if (txq->txbuffs[head]) {
			rte_pktmbuf_free_seg(txq->txbuffs[head]);
			txq->txbuffs[head] = NULL;
		}
		head++;
		head = head & txq->qlen_mask;
	}
}

static void
nicvf_tx_queue_reset(struct nicvf_txq *txq)
{
	uint32_t txq_desc_cnt = txq->qlen_mask + 1;

	memset(txq->desc, 0, sizeof(union sq_entry_t) * txq_desc_cnt);
	memset(txq->txbuffs, 0, sizeof(struct rte_mbuf *) * txq_desc_cnt);
	txq->tail = 0;
	txq->head = 0;
	txq->xmit_bufs = 0;
}

static void
nicvf_dev_tx_queue_release(void *sq)
{
	struct nicvf_txq *txq;

	PMD_INIT_FUNC_TRACE();

	txq = (struct nicvf_txq *)sq;
	if (txq) {
		if (txq->txbuffs != NULL) {
			nicvf_tx_queue_release_mbufs(txq);
			rte_free(txq->txbuffs);
			txq->txbuffs = NULL;
		}
		rte_free(txq);
	}
}

static int
nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
			 uint16_t nb_desc, unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	uint16_t tx_free_thresh;
	uint8_t is_single_pool;
	struct nicvf_txq *txq;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	/* Socket id check */
	if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
				socket_id, nic->node);

	/* Tx deferred start is not supported */
	if (tx_conf->tx_deferred_start) {
		PMD_INIT_LOG(ERR, "Tx deferred start not supported");
		return -EINVAL;
	}

	/* Roundup nb_desc to available qsize and validate max number of desc */
	nb_desc = nicvf_qsize_sq_roundup(nb_desc);
	if (nb_desc == 0) {
		PMD_INIT_LOG(ERR, "Value of nb_desc beyond available sq qsize");
		return -EINVAL;
	}

	/* Validate tx_free_thresh */
	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
				tx_conf->tx_free_thresh :
				NICVF_DEFAULT_TX_FREE_THRESH);

	if (tx_free_thresh > (nb_desc) ||
	    tx_free_thresh > NICVF_MAX_TX_FREE_THRESH) {
		PMD_INIT_LOG(ERR,
			"tx_free_thresh must be less than the number of TX "
			"descriptors. (tx_free_thresh=%u port=%d "
			"queue=%d)", (unsigned int)tx_free_thresh,
			(int)dev->data->port_id, (int)qidx);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->tx_queues[qidx] != NULL) {
		PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
				qidx);
		nicvf_dev_tx_queue_release(dev->data->tx_queues[qidx]);
		dev->data->tx_queues[qidx] = NULL;
	}

	/* Allocating tx queue data structure */
	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nicvf_txq),
					RTE_CACHE_LINE_SIZE, nic->node);
	if (txq == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate txq=%d", qidx);
		return -ENOMEM;
	}

	txq->nic = nic;
	txq->queue_id = qidx;
	txq->tx_free_thresh = tx_free_thresh;
	txq->txq_flags = tx_conf->txq_flags;
	txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
	txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
	is_single_pool = (txq->txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT &&
			  txq->txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP);

	/* Choose optimum free threshold value for multipool case */
	if (!is_single_pool) {
		txq->tx_free_thresh = (uint16_t)
		(tx_conf->tx_free_thresh == NICVF_DEFAULT_TX_FREE_THRESH ?
				NICVF_TX_FREE_MPOOL_THRESH :
				tx_conf->tx_free_thresh);
	}

	/* Allocate software ring */
	txq->txbuffs = rte_zmalloc_socket("txq->txbuffs",
				nb_desc * sizeof(struct rte_mbuf *),
				RTE_CACHE_LINE_SIZE, nic->node);

	if (txq->txbuffs == NULL) {
		nicvf_dev_tx_queue_release(txq);
		return -ENOMEM;
	}

	if (nicvf_qset_sq_alloc(nic, txq, qidx, nb_desc)) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx);
		nicvf_dev_tx_queue_release(txq);
		return -ENOMEM;
	}

	nicvf_tx_queue_reset(txq);

	PMD_TX_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p phys=0x%" PRIx64,
			qidx, txq, nb_desc, txq->desc, txq->phys);

	dev->data->tx_queues[qidx] = txq;
	dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}
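
/*
 * Rx queue release frees only the queue structure; the completion queue
 * ring reserved via rte_eth_dma_zone_reserve() stays with the memzone
 * framework and is not returned here.
 */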
static void
nicvf_rx_queue_reset(struct nicvf_rxq *rxq)
{
	rxq->head = 0;
	rxq->available_space = 0;
	rxq->recv_buffers = 0;
}

static void
nicvf_dev_rx_queue_release(void *rx_queue)
{
	struct nicvf_rxq *rxq = rx_queue;

	PMD_INIT_FUNC_TRACE();

	if (rxq)
		rte_free(rxq);
}
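
/*
 * Rx queue setup: validate the configuration, allocate the queue structure
 * on the device's NUMA node and reserve the completion queue (CQ) ring
 * that delivers receive notifications for this queue.
 */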
static int
nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
			 uint16_t nb_desc, unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	uint16_t rx_free_thresh;
	struct nicvf_rxq *rxq;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	/* Socket id check */
	if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
				socket_id, nic->node);

	/* Mempool memory should be contiguous */
	if (mp->nb_mem_chunks != 1) {
		PMD_INIT_LOG(ERR, "Non contiguous mempool, check huge page sz");
		return -EINVAL;
	}

	/* Rx deferred start is not supported */
	if (rx_conf->rx_deferred_start) {
		PMD_INIT_LOG(ERR, "Rx deferred start not supported");
		return -EINVAL;
	}

	/* Roundup nb_desc to available qsize and validate max number of desc */
	nb_desc = nicvf_qsize_cq_roundup(nb_desc);
	if (nb_desc == 0) {
		PMD_INIT_LOG(ERR, "Value nb_desc beyond available hw cq qsize");
		return -EINVAL;
	}

	/* Check rx_free_thresh upper bound */
	rx_free_thresh = (uint16_t)((rx_conf->rx_free_thresh) ?
				rx_conf->rx_free_thresh :
				NICVF_DEFAULT_RX_FREE_THRESH);
	if (rx_free_thresh > NICVF_MAX_RX_FREE_THRESH ||
	    rx_free_thresh >= nb_desc * 0.75) {
		PMD_INIT_LOG(ERR, "rx_free_thresh greater than expected %d",
				rx_free_thresh);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed */
	if (dev->data->rx_queues[qidx] != NULL) {
		PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
				qidx);
		nicvf_dev_rx_queue_release(dev->data->rx_queues[qidx]);
		dev->data->rx_queues[qidx] = NULL;
	}

	/* Allocate rxq memory */
	rxq = rte_zmalloc_socket("ethdev rx queue", sizeof(struct nicvf_rxq),
					RTE_CACHE_LINE_SIZE, nic->node);
	if (rxq == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate rxq=%d", qidx);
		return -ENOMEM;
	}

	rxq->nic = nic;
	rxq->pool = mp;
	rxq->queue_id = qidx;
	rxq->port_id = dev->data->port_id;
	rxq->rx_free_thresh = rx_free_thresh;
	rxq->rx_drop_en = rx_conf->rx_drop_en;
	rxq->cq_status = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_STATUS;
	rxq->cq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_DOOR;
	rxq->precharge_cnt = 0;
	rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;

	/* Alloc completion queue */
	if (nicvf_qset_cq_alloc(nic, rxq, rxq->queue_id, nb_desc)) {
		PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id);
		nicvf_dev_rx_queue_release(rxq);
		return -ENOMEM;
	}

	nicvf_rx_queue_reset(rxq);

	PMD_RX_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d) phy=%" PRIx64,
			qidx, rxq, mp->name, nb_desc,
			rte_mempool_count(mp), rxq->phys);

	dev->data->rx_queues[qidx] = rxq;
	dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

static void
nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	dev_info->min_rx_bufsize = ETHER_MIN_MTU;
	dev_info->max_rx_pktlen = NIC_HW_MAX_FRS;
	dev_info->max_rx_queues = (uint16_t)MAX_RCV_QUEUES_PER_QS;
	dev_info->max_tx_queues = (uint16_t)MAX_SND_QUEUES_PER_QS;
	dev_info->max_mac_addrs = 1;
	dev_info->max_vfs = dev->pci_dev->max_vfs;

	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;

	dev_info->reta_size = nic->rss_info.rss_size;
	dev_info->hash_key_size = RSS_HASH_KEY_BYTE_SIZE;
	dev_info->flow_type_rss_offloads = NICVF_RSS_OFFLOAD_PASS1;
	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING)
		dev_info->flow_type_rss_offloads |= NICVF_RSS_OFFLOAD_TUNNEL;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
		.txq_flags =
			ETH_TXQ_FLAGS_NOMULTSEGS |
			ETH_TXQ_FLAGS_NOREFCOUNT |
			ETH_TXQ_FLAGS_NOMULTMEMP |
			ETH_TXQ_FLAGS_NOVLANOFFL |
			ETH_TXQ_FLAGS_NOXSUMSCTP,
	};
}
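
/*
 * Validate the requested configuration and reject modes this PMD does not
 * implement (Tx DCB/VMDq, VLAN filter/extend, LRO, fixed link speed, DCB,
 * flow director). CRC stripping and Rx checksum settings are forced to
 * what the hardware actually does.
 */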
static int
nicvf_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *conf = &dev->data->dev_conf;
	struct rte_eth_rxmode *rxmode = &conf->rxmode;
	struct rte_eth_txmode *txmode = &conf->txmode;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	if (!rte_eal_has_hugepages()) {
		PMD_INIT_LOG(INFO, "Huge page is not configured");
		return -EINVAL;
	}

	if (txmode->mq_mode) {
		PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported");
		return -EINVAL;
	}

	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
		rxmode->mq_mode != ETH_MQ_RX_RSS) {
		PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
		return -EINVAL;
	}

	if (!rxmode->hw_strip_crc) {
		PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
		rxmode->hw_strip_crc = 1;
	}

	if (rxmode->hw_ip_checksum) {
		PMD_INIT_LOG(NOTICE, "Rxcksum not supported");
		rxmode->hw_ip_checksum = 0;
	}

	if (rxmode->split_hdr_size) {
		PMD_INIT_LOG(INFO, "Rxmode does not support split header");
		return -EINVAL;
	}

	if (rxmode->hw_vlan_filter) {
		PMD_INIT_LOG(INFO, "VLAN filter not supported");
		return -EINVAL;
	}

	if (rxmode->hw_vlan_extend) {
		PMD_INIT_LOG(INFO, "VLAN extended not supported");
		return -EINVAL;
	}

	if (rxmode->enable_lro) {
		PMD_INIT_LOG(INFO, "LRO not supported");
		return -EINVAL;
	}

	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
		PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported");
		return -EINVAL;
	}

	if (conf->dcb_capability_en) {
		PMD_INIT_LOG(INFO, "DCB enable not supported");
		return -EINVAL;
	}

	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		PMD_INIT_LOG(INFO, "Flow director not supported");
		return -EINVAL;
	}

	PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
		dev->data->port_id, nicvf_hw_cap(nic));

	return 0;
}

/* Initialize and register driver with DPDK Application */
static const struct eth_dev_ops nicvf_eth_dev_ops = {
	.dev_configure            = nicvf_dev_configure,
	.link_update              = nicvf_dev_link_update,
	.dev_infos_get            = nicvf_dev_info_get,
	.reta_update              = nicvf_dev_reta_update,
	.reta_query               = nicvf_dev_reta_query,
	.rss_hash_update          = nicvf_dev_rss_hash_update,
	.rss_hash_conf_get        = nicvf_dev_rss_hash_conf_get,
	.rx_queue_setup           = nicvf_dev_rx_queue_setup,
	.rx_queue_release         = nicvf_dev_rx_queue_release,
	.tx_queue_setup           = nicvf_dev_tx_queue_setup,
	.tx_queue_release         = nicvf_dev_tx_queue_release,
	.get_reg_length           = nicvf_dev_get_reg_length,
	.get_reg                  = nicvf_dev_get_regs,
};
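
/*
 * Per-port initialization: map BAR0, start the mailbox polling alarm,
 * wait for the PF ready handshake, program the MAC address and query the
 * RSS table size before the port is handed to the application.
 */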
static int
nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	int ret;
	struct rte_pci_device *pci_dev;
	struct nicvf *nic = nicvf_pmd_priv(eth_dev);

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &nicvf_eth_dev_ops;

	pci_dev = eth_dev->pci_dev;
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	nic->device_id = pci_dev->id.device_id;
	nic->vendor_id = pci_dev->id.vendor_id;
	nic->subsystem_device_id = pci_dev->id.subsystem_device_id;
	nic->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	nic->eth_dev = eth_dev;

	PMD_INIT_LOG(DEBUG, "nicvf: device (%x:%x) %u:%u:%u:%u",
			pci_dev->id.vendor_id, pci_dev->id.device_id,
			pci_dev->addr.domain, pci_dev->addr.bus,
			pci_dev->addr.devid, pci_dev->addr.function);

	nic->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
	if (!nic->reg_base) {
		PMD_INIT_LOG(ERR, "Failed to map BAR0");
		ret = -ENODEV;
		goto fail;
	}

	nicvf_disable_all_interrupts(nic);

	ret = nicvf_periodic_alarm_start(nic);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to start period alarm");
		goto fail;
	}

	ret = nicvf_mbox_check_pf_ready(nic);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to get ready message from PF");
		goto alarm_fail;
	} else {
		PMD_INIT_LOG(INFO,
			"node=%d vf=%d mode=%s sqs=%s loopback_supported=%s",
			nic->node, nic->vf_id,
			nic->tns_mode == NIC_TNS_MODE ? "tns" : "tns-bypass",
			nic->sqs_mode ? "true" : "false",
			nic->loopback_supported ? "true" : "false"
			);
	}

	if (nic->sqs_mode) {
		PMD_INIT_LOG(INFO, "Unsupported SQS VF detected, Detaching...");
		/* Detach port by returning positive error number */
		ret = ENOTSUP;
		goto alarm_fail;
	}

	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory for mac addr");
		ret = -ENOMEM;
		goto alarm_fail;
	}
	if (is_zero_ether_addr((struct ether_addr *)nic->mac_addr))
		eth_random_addr(&nic->mac_addr[0]);

	ether_addr_copy((struct ether_addr *)nic->mac_addr,
			&eth_dev->data->mac_addrs[0]);

	ret = nicvf_mbox_set_mac_addr(nic, nic->mac_addr);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to set mac addr");
		goto malloc_fail;
	}

	ret = nicvf_base_init(nic);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to execute nicvf_base_init");
		goto malloc_fail;
	}

	ret = nicvf_mbox_get_rss_size(nic);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to get rss table size");
		goto malloc_fail;
	}

	PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x",
		eth_dev->data->port_id, nic->vendor_id, nic->device_id,
		nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2],
		nic->mac_addr[3], nic->mac_addr[4], nic->mac_addr[5]);

	return 0;

malloc_fail:
	rte_free(eth_dev->data->mac_addrs);
alarm_fail:
	nicvf_periodic_alarm_stop(nic);
fail:
	return ret;
}
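
/* PCI IDs of ThunderX pass1 and pass2 NIC VF devices handled by this PMD */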
static const struct rte_pci_id pci_id_nicvf_map[] = {
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_PASS1_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_THUNDERX_PASS1_NICVF,
	},
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_PASS2_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_THUNDERX_PASS2_NICVF,
	},
	{
		.vendor_id = 0,
	},
};

static struct eth_driver rte_nicvf_pmd = {
	.pci_drv = {
		.name = "rte_nicvf_pmd",
		.id_table = pci_id_nicvf_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	},
	.eth_dev_init = nicvf_eth_dev_init,
	.dev_private_size = sizeof(struct nicvf),
};

static int
rte_nicvf_pmd_init(const char *name __rte_unused, const char *para __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	PMD_INIT_LOG(INFO, "librte_pmd_thunderx nicvf version %s",
			THUNDERX_NICVF_PMD_VERSION);

	rte_eth_driver_register(&rte_nicvf_pmd);
	return 0;
}

static struct rte_driver rte_nicvf_driver = {
	.name = "nicvf_driver",
	.type = PMD_PDEV,
	.init = rte_nicvf_pmd_init,
};

PMD_REGISTER_DRIVER(rte_nicvf_driver);