/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium networks Ltd. 2016.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <sys/queue.h>
#include <sys/timerfd.h>

#include <rte_alarm.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_pci.h>
#include <rte_tailq.h>

#include "base/nicvf_plat.h"

#include "nicvf_ethdev.h"
#include "nicvf_rxtx.h"
#include "nicvf_logs.h"

static inline int
nicvf_atomic_write_link_status(struct rte_eth_dev *dev,
			       struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
		*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static inline void
nicvf_set_eth_link_status(struct nicvf *nic, struct rte_eth_link *link)
{
	link->link_status = nic->link_up;
	link->link_duplex = ETH_LINK_AUTONEG;
	if (nic->duplex == NICVF_HALF_DUPLEX)
		link->link_duplex = ETH_LINK_HALF_DUPLEX;
	else if (nic->duplex == NICVF_FULL_DUPLEX)
		link->link_duplex = ETH_LINK_FULL_DUPLEX;
	link->link_speed = nic->speed;
	link->link_autoneg = ETH_LINK_SPEED_AUTONEG;
}
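
/*
 * Periodic alarm callback: polls the interrupt/mailbox status and, on a
 * BGX link change, publishes the new link state (when LSC interrupts are
 * configured) and runs the LSC callbacks. Re-arms itself so polling
 * continues for the lifetime of the device.
 */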
static void
nicvf_interrupt(void *arg)
{
	struct nicvf *nic = arg;

	if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) {
		if (nic->eth_dev->data->dev_conf.intr_conf.lsc)
			nicvf_set_eth_link_status(nic,
					&nic->eth_dev->data->dev_link);
		_rte_eth_dev_callback_process(nic->eth_dev,
				RTE_ETH_EVENT_INTR_LSC);
	}

	rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
				nicvf_interrupt, nic);
}

static int
nicvf_periodic_alarm_start(struct nicvf *nic)
{
	return rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
					nicvf_interrupt, nic);
}

static int
nicvf_periodic_alarm_stop(struct nicvf *nic)
{
	return rte_eal_alarm_cancel(nicvf_interrupt, nic);
}

/*
 * Return 0 means link status changed, -1 means not changed
 */
static int
nicvf_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	struct rte_eth_link link;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	memset(&link, 0, sizeof(link));
	nicvf_set_eth_link_status(nic, &link);
	return nicvf_atomic_write_link_status(dev, &link);
}

static int
nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint32_t buffsz, frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	PMD_INIT_FUNC_TRACE();

	if (frame_size > NIC_HW_MAX_FRS)
		return -EINVAL;

	if (frame_size < NIC_HW_MIN_FRS)
		return -EINVAL;

	buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;

	/*
	 * Refuse mtu that requires the support of scattered packets
	 * when this feature has not been enabled before.
	 */
	if (!dev->data->scattered_rx &&
		(frame_size + 2 * VLAN_TAG_SIZE > buffsz))
		return -EINVAL;

	/* check <seg size> * <max_seg> >= max_frame */
	if (dev->data->scattered_rx &&
		(frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS))
		return -EINVAL;

	if (frame_size > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		dev->data->dev_conf.rxmode.jumbo_frame = 0;

	if (nicvf_mbox_update_hw_max_frs(nic, frame_size))
		return -EINVAL;

	/* Update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)frame_size;
	nic->mtu = mtu;
	return 0;
}

static int
nicvf_dev_get_reg_length(struct rte_eth_dev *dev __rte_unused)
{
	return nicvf_reg_get_count();
}

static int
nicvf_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	uint64_t *data = regs->data;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (data == NULL)
		return -EINVAL;

	/* Support only full register dump */
	if ((regs->length == 0) ||
		(regs->length == (uint32_t)nicvf_reg_get_count())) {
		regs->version = nic->vendor_id << 16 | nic->device_id;
		nicvf_reg_dump(nic, data);
		return 0;
	}
	return -ENOTSUP;
}
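
/*
 * Retrieve basic statistics: per-queue Rx/Tx counters are read first
 * (capped at RTE_ETHDEV_QUEUE_STAT_CNTRS), followed by the aggregate
 * hardware port counters.
 */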
static void
nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	uint16_t qidx;
	struct nicvf_hw_rx_qstats rx_qstats;
	struct nicvf_hw_tx_qstats tx_qstats;
	struct nicvf_hw_stats port_stats;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	/* Reading per RX ring stats */
	for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
		if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		nicvf_hw_get_rx_qstats(nic, &rx_qstats, qidx);
		stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
		stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
	}

	/* Reading per TX ring stats */
	for (qidx = 0; qidx < dev->data->nb_tx_queues; qidx++) {
		if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		nicvf_hw_get_tx_qstats(nic, &tx_qstats, qidx);
		stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
		stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
	}

	nicvf_hw_get_stats(nic, &port_stats);
	stats->ibytes = port_stats.rx_bytes;
	stats->ipackets = port_stats.rx_ucast_frames;
	stats->ipackets += port_stats.rx_bcast_frames;
	stats->ipackets += port_stats.rx_mcast_frames;
	stats->ierrors = port_stats.rx_l2_errors;
	stats->imissed = port_stats.rx_drop_red;
	stats->imissed += port_stats.rx_drop_overrun;
	stats->imissed += port_stats.rx_drop_bcast;
	stats->imissed += port_stats.rx_drop_mcast;
	stats->imissed += port_stats.rx_drop_l3_bcast;
	stats->imissed += port_stats.rx_drop_l3_mcast;

	stats->obytes = port_stats.tx_bytes_ok;
	stats->opackets = port_stats.tx_ucast_frames_ok;
	stats->opackets += port_stats.tx_bcast_frames_ok;
	stats->opackets += port_stats.tx_mcast_frames_ok;
	stats->oerrors = port_stats.tx_drops;
}

static const uint32_t *
nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	size_t copied;
	static uint32_t ptypes[32];
	struct nicvf *nic = nicvf_pmd_priv(dev);
	static const uint32_t ptypes_pass1[] = {
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
	};
	static const uint32_t ptypes_pass2[] = {
		RTE_PTYPE_TUNNEL_GRE,
		RTE_PTYPE_TUNNEL_GENEVE,
		RTE_PTYPE_TUNNEL_VXLAN,
		RTE_PTYPE_TUNNEL_NVGRE,
	};
	static const uint32_t ptypes_end = RTE_PTYPE_UNKNOWN;

	copied = sizeof(ptypes_pass1);
	memcpy(ptypes, ptypes_pass1, copied);
	if (nicvf_hw_version(nic) == NICVF_PASS2) {
		memcpy((char *)ptypes + copied, ptypes_pass2,
			sizeof(ptypes_pass2));
		copied += sizeof(ptypes_pass2);
	}

	memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end));
	if (dev->rx_pkt_burst == nicvf_recv_pkts ||
		dev->rx_pkt_burst == nicvf_recv_pkts_multiseg)
		return ptypes;

	return NULL;
}

static void
nicvf_dev_stats_reset(struct rte_eth_dev *dev)
{
	int i;
	uint16_t rxqs = 0, txqs = 0;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		rxqs |= (0x3 << (i * 2));
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		txqs |= (0x3 << (i * 2));

	nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, rxqs, txqs);
}

/* Promiscuous mode enabled by default in LMAC to VF 1:1 map configuration */
static void
nicvf_dev_promisc_enable(struct rte_eth_dev *dev __rte_unused)
{
}

static inline uint64_t
nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss)
{
	uint64_t nic_rss = 0;

	if (ethdev_rss & ETH_RSS_IPV4)
		nic_rss |= RSS_IP_ENA;

	if (ethdev_rss & ETH_RSS_IPV6)
		nic_rss |= RSS_IP_ENA;

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP)
		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP)
		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP)
		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP)
		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);

	if (ethdev_rss & ETH_RSS_PORT)
		nic_rss |= RSS_L2_EXTENDED_HASH_ENA;

	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
		if (ethdev_rss & ETH_RSS_VXLAN)
			nic_rss |= RSS_TUN_VXLAN_ENA;

		if (ethdev_rss & ETH_RSS_GENEVE)
			nic_rss |= RSS_TUN_GENEVE_ENA;

		if (ethdev_rss & ETH_RSS_NVGRE)
			nic_rss |= RSS_TUN_NVGRE_ENA;
	}

	return nic_rss;
}
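
/* Inverse of nicvf_rss_ethdev_to_nic(): translate hardware RSS flags back
 * into ethdev ETH_RSS_* bits.
 */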
static inline uint64_t
nicvf_rss_nic_to_ethdev(struct nicvf *nic, uint64_t nic_rss)
{
	uint64_t ethdev_rss = 0;

	if (nic_rss & RSS_IP_ENA)
		ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6);

	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA))
		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP |
				ETH_RSS_NONFRAG_IPV6_TCP);

	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA))
		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP |
				ETH_RSS_NONFRAG_IPV6_UDP);

	if (nic_rss & RSS_L2_EXTENDED_HASH_ENA)
		ethdev_rss |= ETH_RSS_PORT;

	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
		if (nic_rss & RSS_TUN_VXLAN_ENA)
			ethdev_rss |= ETH_RSS_VXLAN;

		if (nic_rss & RSS_TUN_GENEVE_ENA)
			ethdev_rss |= ETH_RSS_GENEVE;

		if (nic_rss & RSS_TUN_NVGRE_ENA)
			ethdev_rss |= ETH_RSS_NVGRE;
	}
	return ethdev_rss;
}

static int
nicvf_dev_reta_query(struct rte_eth_dev *dev,
		     struct rte_eth_rss_reta_entry64 *reta_conf,
		     uint16_t reta_size)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
	int ret, i, j;

	if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
		RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
			"(%d) doesn't match the number hardware can support "
			"(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
		return -EINVAL;
	}

	ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
	if (ret)
		return ret;

	/* Copy RETA table */
	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = tbl[j];
	}

	return 0;
}

static int
nicvf_dev_reta_update(struct rte_eth_dev *dev,
		      struct rte_eth_rss_reta_entry64 *reta_conf,
		      uint16_t reta_size)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
	int ret, i, j;

	if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
		RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
			"(%d) doesn't match the number hardware can support "
			"(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
		return -EINVAL;
	}

	ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
	if (ret)
		return ret;

	/* Copy RETA table */
	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				tbl[j] = reta_conf[i].reta[j];
	}

	return nicvf_rss_reta_update(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
}

static int
nicvf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (rss_conf->rss_key)
		nicvf_rss_get_key(nic, rss_conf->rss_key);

	rss_conf->rss_key_len = RSS_HASH_KEY_BYTE_SIZE;
	rss_conf->rss_hf = nicvf_rss_nic_to_ethdev(nic, nicvf_rss_get_cfg(nic));
	return 0;
}
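
/* Program the RSS hash key, if one is supplied, and translate the
 * requested ETH_RSS_* flags into the hardware RSS configuration.
 */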
static int
nicvf_dev_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint64_t nic_rss;

	if (rss_conf->rss_key &&
		rss_conf->rss_key_len != RSS_HASH_KEY_BYTE_SIZE) {
		RTE_LOG(ERR, PMD, "Hash key size mismatch %d",
				rss_conf->rss_key_len);
		return -EINVAL;
	}

	if (rss_conf->rss_key)
		nicvf_rss_set_key(nic, rss_conf->rss_key);

	nic_rss = nicvf_rss_ethdev_to_nic(nic, rss_conf->rss_hf);
	nicvf_rss_set_cfg(nic, nic_rss);
	return 0;
}

static int
nicvf_qset_cq_alloc(struct nicvf *nic, struct nicvf_rxq *rxq, uint16_t qidx,
		    uint32_t desc_cnt)
{
	const struct rte_memzone *rz;
	uint32_t ring_size = desc_cnt * sizeof(union cq_entry_t);

	rz = rte_eth_dma_zone_reserve(nic->eth_dev, "cq_ring", qidx, ring_size,
					NICVF_CQ_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for cq hw ring");
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	rxq->phys = rz->phys_addr;
	rxq->desc = rz->addr;
	rxq->qlen_mask = desc_cnt - 1;

	return 0;
}

static int
nicvf_qset_sq_alloc(struct nicvf *nic, struct nicvf_txq *sq, uint16_t qidx,
		    uint32_t desc_cnt)
{
	const struct rte_memzone *rz;
	uint32_t ring_size = desc_cnt * sizeof(union sq_entry_t);

	rz = rte_eth_dma_zone_reserve(nic->eth_dev, "sq", qidx, ring_size,
					NICVF_SQ_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for sq hw ring");
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	sq->phys = rz->phys_addr;
	sq->desc = rz->addr;
	sq->qlen_mask = desc_cnt - 1;

	return 0;
}

static inline void
nicvf_tx_queue_release_mbufs(struct nicvf_txq *txq)
{
	uint32_t head;

	head = txq->head;
	while (head != txq->tail) {
		if (txq->txbuffs[head]) {
			rte_pktmbuf_free_seg(txq->txbuffs[head]);
			txq->txbuffs[head] = NULL;
		}
		head++;
		head = head & txq->qlen_mask;
	}
}

static void
nicvf_tx_queue_reset(struct nicvf_txq *txq)
{
	uint32_t txq_desc_cnt = txq->qlen_mask + 1;

	memset(txq->desc, 0, sizeof(union sq_entry_t) * txq_desc_cnt);
	memset(txq->txbuffs, 0, sizeof(struct rte_mbuf *) * txq_desc_cnt);
	txq->tail = 0;
	txq->head = 0;
	txq->xmit_bufs = 0;
}
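
/* Configure and start a send queue. On configuration failure the queue
 * set resources are reclaimed and the queue is left in the stopped state.
 */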
static inline int
nicvf_start_tx_queue(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct nicvf_txq *txq;
	int ret;

	if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	txq = dev->data->tx_queues[qidx];
	txq->pool = NULL;
	ret = nicvf_qset_sq_config(nicvf_pmd_priv(dev), qidx, txq);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure sq %d %d", qidx, ret);
		goto config_sq_error;
	}

	dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
	return ret;

config_sq_error:
	nicvf_qset_sq_reclaim(nicvf_pmd_priv(dev), qidx);
	return ret;
}

static inline int
nicvf_stop_tx_queue(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct nicvf_txq *txq;
	int ret;

	if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;

	ret = nicvf_qset_sq_reclaim(nicvf_pmd_priv(dev), qidx);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim sq %d %d", qidx, ret);

	txq = dev->data->tx_queues[qidx];
	nicvf_tx_queue_release_mbufs(txq);
	nicvf_tx_queue_reset(txq);

	dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return ret;
}

static inline int
nicvf_configure_cpi(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint16_t qidx, qcnt;
	int ret;

	/* Count started rx queues */
	for (qidx = qcnt = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++)
		if (dev->data->rx_queue_state[qidx] ==
			RTE_ETH_QUEUE_STATE_STARTED)
			qcnt++;

	nic->cpi_alg = CPI_ALG_NONE;
	ret = nicvf_mbox_config_cpi(nic, qcnt);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to configure CPI %d", ret);

	return ret;
}

static int
nicvf_configure_rss_reta(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	unsigned int idx, qmap_size;
	uint8_t qmap[RTE_MAX_QUEUES_PER_PORT];
	uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];

	if (nic->cpi_alg != CPI_ALG_NONE)
		return -EINVAL;

	/* Prepare queue map */
	for (idx = 0, qmap_size = 0; idx < dev->data->nb_rx_queues; idx++) {
		if (dev->data->rx_queue_state[idx] ==
				RTE_ETH_QUEUE_STATE_STARTED)
			qmap[qmap_size++] = idx;
	}

	/* Update default RSS RETA */
	for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
		default_reta[idx] = qmap[idx % qmap_size];

	return nicvf_rss_reta_update(nic, default_reta,
				NIC_MAX_RSS_IDR_TBL_SIZE);
}
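
/* Release a Tx queue: free any mbufs still held in the software ring,
 * then the ring itself and the queue structure.
 */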
static void
nicvf_dev_tx_queue_release(void *sq)
{
	struct nicvf_txq *txq;

	PMD_INIT_FUNC_TRACE();

	txq = (struct nicvf_txq *)sq;
	if (txq) {
		if (txq->txbuffs != NULL) {
			nicvf_tx_queue_release_mbufs(txq);
			rte_free(txq->txbuffs);
			txq->txbuffs = NULL;
		}
		rte_free(txq);
	}
}

static int
nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
			 uint16_t nb_desc, unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	uint16_t tx_free_thresh;
	uint8_t is_single_pool;
	struct nicvf_txq *txq;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	/* Socket id check */
	if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
				socket_id, nic->node);

	/* Tx deferred start is not supported */
	if (tx_conf->tx_deferred_start) {
		PMD_INIT_LOG(ERR, "Tx deferred start not supported");
		return -EINVAL;
	}

	/* Roundup nb_desc to available qsize and validate max number of desc */
	nb_desc = nicvf_qsize_sq_roundup(nb_desc);
	if (nb_desc == 0) {
		PMD_INIT_LOG(ERR, "Value of nb_desc beyond available sq qsize");
		return -EINVAL;
	}

	/* Validate tx_free_thresh */
	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
				tx_conf->tx_free_thresh :
				NICVF_DEFAULT_TX_FREE_THRESH);

	if (tx_free_thresh > nb_desc ||
		tx_free_thresh > NICVF_MAX_TX_FREE_THRESH) {
		PMD_INIT_LOG(ERR,
			"tx_free_thresh must be less than the number of TX "
			"descriptors. (tx_free_thresh=%u port=%d "
			"queue=%d)", (unsigned int)tx_free_thresh,
			(int)dev->data->port_id, (int)qidx);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->tx_queues[qidx] != NULL) {
		PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
				qidx);
		nicvf_dev_tx_queue_release(dev->data->tx_queues[qidx]);
		dev->data->tx_queues[qidx] = NULL;
	}

	/* Allocating tx queue data structure */
	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nicvf_txq),
					RTE_CACHE_LINE_SIZE, nic->node);
	if (txq == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate txq=%d", qidx);
		return -ENOMEM;
	}

	txq->nic = nic;
	txq->queue_id = qidx;
	txq->tx_free_thresh = tx_free_thresh;
	txq->txq_flags = tx_conf->txq_flags;
	txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
	txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
	is_single_pool = (txq->txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT &&
			txq->txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP);

	/* Choose optimum free threshold value for multipool case */
	if (!is_single_pool) {
		txq->tx_free_thresh = (uint16_t)
		(tx_conf->tx_free_thresh == NICVF_DEFAULT_TX_FREE_THRESH ?
				NICVF_TX_FREE_MPOOL_THRESH :
				tx_conf->tx_free_thresh);
		txq->pool_free = nicvf_multi_pool_free_xmited_buffers;
	} else {
		txq->pool_free = nicvf_single_pool_free_xmited_buffers;
	}

	/* Allocate software ring */
	txq->txbuffs = rte_zmalloc_socket("txq->txbuffs",
				nb_desc * sizeof(struct rte_mbuf *),
				RTE_CACHE_LINE_SIZE, nic->node);

	if (txq->txbuffs == NULL) {
		nicvf_dev_tx_queue_release(txq);
		return -ENOMEM;
	}

	if (nicvf_qset_sq_alloc(nic, txq, qidx, nb_desc)) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx);
		nicvf_dev_tx_queue_release(txq);
		return -ENOMEM;
	}

	nicvf_tx_queue_reset(txq);

	PMD_TX_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p phys=0x%" PRIx64,
			qidx, txq, nb_desc, txq->desc, txq->phys);

	dev->data->tx_queues[qidx] = txq;
	dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

static inline void
nicvf_rx_queue_release_mbufs(struct nicvf_rxq *rxq)
{
	uint32_t rxq_cnt;
	uint32_t nb_pkts, released_pkts = 0;
	uint32_t refill_cnt = 0;
	struct rte_eth_dev *dev = rxq->nic->eth_dev;
	struct rte_mbuf *rx_pkts[NICVF_MAX_RX_FREE_THRESH];

	if (dev->rx_pkt_burst == NULL)
		return;

	while ((rxq_cnt = nicvf_dev_rx_queue_count(dev, rxq->queue_id))) {
		nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts,
					NICVF_MAX_RX_FREE_THRESH);
		PMD_DRV_LOG(INFO, "nb_pkts=%d rxq_cnt=%d", nb_pkts, rxq_cnt);
		while (nb_pkts) {
			rte_pktmbuf_free_seg(rx_pkts[--nb_pkts]);
			released_pkts++;
		}
	}

	refill_cnt += nicvf_dev_rbdr_refill(dev, rxq->queue_id);
	PMD_DRV_LOG(INFO, "free_cnt=%d refill_cnt=%d",
			released_pkts, refill_cnt);
}

static void
nicvf_rx_queue_reset(struct nicvf_rxq *rxq)
{
	rxq->head = 0;
	rxq->available_space = 0;
	rxq->recv_buffers = 0;
}
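
/* Configure and start a receive queue and its completion queue; on
 * failure, unwind whatever was configured before the error.
 */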
static inline int
nicvf_start_rx_queue(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	struct nicvf_rxq *rxq;
	int ret;

	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	/* Update rbdr pointer to all rxq */
	rxq = dev->data->rx_queues[qidx];
	rxq->shared_rbdr = nic->rbdr;

	ret = nicvf_qset_rq_config(nic, qidx, rxq);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure rq %d %d", qidx, ret);
		goto config_rq_error;
	}
	ret = nicvf_qset_cq_config(nic, qidx, rxq);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure cq %d %d", qidx, ret);
		goto config_cq_error;
	}

	dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;

config_cq_error:
	nicvf_qset_cq_reclaim(nic, qidx);
config_rq_error:
	nicvf_qset_rq_reclaim(nic, qidx);
	return ret;
}

static inline int
nicvf_stop_rx_queue(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	struct nicvf_rxq *rxq;
	int ret, other_error;

	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;

	ret = nicvf_qset_rq_reclaim(nic, qidx);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim rq %d %d", qidx, ret);

	other_error = ret;
	rxq = dev->data->rx_queues[qidx];
	nicvf_rx_queue_release_mbufs(rxq);
	nicvf_rx_queue_reset(rxq);

	ret = nicvf_qset_cq_reclaim(nic, qidx);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim cq %d %d", qidx, ret);

	other_error |= ret;
	dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return other_error;
}

static void
nicvf_dev_rx_queue_release(void *rx_queue)
{
	struct nicvf_rxq *rxq = rx_queue;

	PMD_INIT_FUNC_TRACE();

	if (rxq)
		rte_free(rxq);
}

static int
nicvf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	int ret;

	ret = nicvf_start_rx_queue(dev, qidx);
	if (ret)
		return ret;

	ret = nicvf_configure_cpi(dev);
	if (ret)
		return ret;

	return nicvf_configure_rss_reta(dev);
}

static int
nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	int ret;

	ret = nicvf_stop_rx_queue(dev, qidx);
	ret |= nicvf_configure_cpi(dev);
	ret |= nicvf_configure_rss_reta(dev);
	return ret;
}

static int
nicvf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	return nicvf_start_tx_queue(dev, qidx);
}

static int
nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	return nicvf_stop_tx_queue(dev, qidx);
}
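
/* Rx queue setup: validate mempool contiguity, descriptor count and
 * rx_free_thresh, then allocate the queue structure and its completion
 * queue ring.
 */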
static int
nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
			 uint16_t nb_desc, unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	uint16_t rx_free_thresh;
	struct nicvf_rxq *rxq;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	/* Socket id check */
	if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
				socket_id, nic->node);

	/* Mempool memory should be contiguous */
	if (mp->nb_mem_chunks != 1) {
		PMD_INIT_LOG(ERR, "Non contiguous mempool, check huge page sz");
		return -EINVAL;
	}

	/* Rx deferred start is not supported */
	if (rx_conf->rx_deferred_start) {
		PMD_INIT_LOG(ERR, "Rx deferred start not supported");
		return -EINVAL;
	}

	/* Roundup nb_desc to available qsize and validate max number of desc */
	nb_desc = nicvf_qsize_cq_roundup(nb_desc);
	if (nb_desc == 0) {
		PMD_INIT_LOG(ERR, "Value nb_desc beyond available hw cq qsize");
		return -EINVAL;
	}

	/* Check rx_free_thresh upper bound */
	rx_free_thresh = (uint16_t)((rx_conf->rx_free_thresh) ?
				rx_conf->rx_free_thresh :
				NICVF_DEFAULT_RX_FREE_THRESH);
	if (rx_free_thresh > NICVF_MAX_RX_FREE_THRESH ||
		rx_free_thresh >= nb_desc * .75) {
		PMD_INIT_LOG(ERR, "rx_free_thresh greater than expected %d",
				rx_free_thresh);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed */
	if (dev->data->rx_queues[qidx] != NULL) {
		PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
				qidx);
		nicvf_dev_rx_queue_release(dev->data->rx_queues[qidx]);
		dev->data->rx_queues[qidx] = NULL;
	}

	/* Allocate rxq memory */
	rxq = rte_zmalloc_socket("ethdev rx queue", sizeof(struct nicvf_rxq),
					RTE_CACHE_LINE_SIZE, nic->node);
	if (rxq == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate rxq=%d", qidx);
		return -ENOMEM;
	}

	rxq->nic = nic;
	rxq->pool = mp;
	rxq->queue_id = qidx;
	rxq->port_id = dev->data->port_id;
	rxq->rx_free_thresh = rx_free_thresh;
	rxq->rx_drop_en = rx_conf->rx_drop_en;
	rxq->cq_status = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_STATUS;
	rxq->cq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_DOOR;
	rxq->precharge_cnt = 0;
	rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;

	/* Alloc completion queue */
	if (nicvf_qset_cq_alloc(nic, rxq, rxq->queue_id, nb_desc)) {
		PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id);
		nicvf_dev_rx_queue_release(rxq);
		return -ENOMEM;
	}

	nicvf_rx_queue_reset(rxq);

	PMD_RX_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d) phy=%" PRIx64,
			qidx, rxq, mp->name, nb_desc,
			rte_mempool_count(mp), rxq->phys);

	dev->data->rx_queues[qidx] = rxq;
	dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

static void
nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	dev_info->min_rx_bufsize = ETHER_MIN_MTU;
	dev_info->max_rx_pktlen = NIC_HW_MAX_FRS;
	dev_info->max_rx_queues = (uint16_t)MAX_RCV_QUEUES_PER_QS;
	dev_info->max_tx_queues = (uint16_t)MAX_SND_QUEUES_PER_QS;
	dev_info->max_mac_addrs = 1;
	dev_info->max_vfs = dev->pci_dev->max_vfs;

	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;

	dev_info->reta_size = nic->rss_info.rss_size;
	dev_info->hash_key_size = RSS_HASH_KEY_BYTE_SIZE;
	dev_info->flow_type_rss_offloads = NICVF_RSS_OFFLOAD_PASS1;
	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING)
		dev_info->flow_type_rss_offloads |= NICVF_RSS_OFFLOAD_TUNNEL;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
		.txq_flags =
			ETH_TXQ_FLAGS_NOMULTSEGS |
			ETH_TXQ_FLAGS_NOREFCOUNT |
			ETH_TXQ_FLAGS_NOMULTMEMP |
			ETH_TXQ_FLAGS_NOVLANOFFL |
			ETH_TXQ_FLAGS_NOXSUMSCTP,
	};
}
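
/* Validate the requested configuration against PMD capabilities.
 * Unsupported modes fail with -EINVAL, except CRC stripping and Rx
 * checksum, which are forced to supported values with a notice.
 */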
static int
nicvf_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *conf = &dev->data->dev_conf;
	struct rte_eth_rxmode *rxmode = &conf->rxmode;
	struct rte_eth_txmode *txmode = &conf->txmode;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	if (!rte_eal_has_hugepages()) {
		PMD_INIT_LOG(INFO, "Huge page is not configured");
		return -EINVAL;
	}

	if (txmode->mq_mode) {
		PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported");
		return -EINVAL;
	}

	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
		rxmode->mq_mode != ETH_MQ_RX_RSS) {
		PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
		return -EINVAL;
	}

	if (!rxmode->hw_strip_crc) {
		PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
		rxmode->hw_strip_crc = 1;
	}

	if (rxmode->hw_ip_checksum) {
		PMD_INIT_LOG(NOTICE, "Rxcksum not supported");
		rxmode->hw_ip_checksum = 0;
	}

	if (rxmode->split_hdr_size) {
		PMD_INIT_LOG(INFO, "Rxmode does not support split header");
		return -EINVAL;
	}

	if (rxmode->hw_vlan_filter) {
		PMD_INIT_LOG(INFO, "VLAN filter not supported");
		return -EINVAL;
	}

	if (rxmode->hw_vlan_extend) {
		PMD_INIT_LOG(INFO, "VLAN extended not supported");
		return -EINVAL;
	}

	if (rxmode->enable_lro) {
		PMD_INIT_LOG(INFO, "LRO not supported");
		return -EINVAL;
	}

	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
		PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported");
		return -EINVAL;
	}

	if (conf->dcb_capability_en) {
		PMD_INIT_LOG(INFO, "DCB enable not supported");
		return -EINVAL;
	}

	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		PMD_INIT_LOG(INFO, "Flow director not supported");
		return -EINVAL;
	}

	PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
		dev->data->port_id, nicvf_hw_cap(nic));

	return 0;
}

/* Initialize and register driver with DPDK Application */
static const struct eth_dev_ops nicvf_eth_dev_ops = {
	.dev_configure            = nicvf_dev_configure,
	.link_update              = nicvf_dev_link_update,
	.stats_get                = nicvf_dev_stats_get,
	.stats_reset              = nicvf_dev_stats_reset,
	.promiscuous_enable       = nicvf_dev_promisc_enable,
	.dev_infos_get            = nicvf_dev_info_get,
	.dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
	.mtu_set                  = nicvf_dev_set_mtu,
	.reta_update              = nicvf_dev_reta_update,
	.reta_query               = nicvf_dev_reta_query,
	.rss_hash_update          = nicvf_dev_rss_hash_update,
	.rss_hash_conf_get        = nicvf_dev_rss_hash_conf_get,
	.rx_queue_start           = nicvf_dev_rx_queue_start,
	.rx_queue_stop            = nicvf_dev_rx_queue_stop,
	.tx_queue_start           = nicvf_dev_tx_queue_start,
	.tx_queue_stop            = nicvf_dev_tx_queue_stop,
	.rx_queue_setup           = nicvf_dev_rx_queue_setup,
	.rx_queue_release         = nicvf_dev_rx_queue_release,
	.rx_queue_count           = nicvf_dev_rx_queue_count,
	.tx_queue_setup           = nicvf_dev_tx_queue_setup,
	.tx_queue_release         = nicvf_dev_tx_queue_release,
	.get_reg_length           = nicvf_dev_get_reg_length,
	.get_reg                  = nicvf_dev_get_regs,
};
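
/* Per-device initialization: map BAR0, start the mailbox polling alarm,
 * sync with the PF and program the initial MAC address.
 */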
static int
nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	int ret;
	struct rte_pci_device *pci_dev;
	struct nicvf *nic = nicvf_pmd_priv(eth_dev);

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &nicvf_eth_dev_ops;

	pci_dev = eth_dev->pci_dev;
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	nic->device_id = pci_dev->id.device_id;
	nic->vendor_id = pci_dev->id.vendor_id;
	nic->subsystem_device_id = pci_dev->id.subsystem_device_id;
	nic->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	nic->eth_dev = eth_dev;

	PMD_INIT_LOG(DEBUG, "nicvf: device (%x:%x) %u:%u:%u:%u",
			pci_dev->id.vendor_id, pci_dev->id.device_id,
			pci_dev->addr.domain, pci_dev->addr.bus,
			pci_dev->addr.devid, pci_dev->addr.function);

	nic->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
	if (!nic->reg_base) {
		PMD_INIT_LOG(ERR, "Failed to map BAR0");
		ret = -ENODEV;
		goto fail;
	}

	nicvf_disable_all_interrupts(nic);

	ret = nicvf_periodic_alarm_start(nic);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to start periodic alarm");
		goto fail;
	}

	ret = nicvf_mbox_check_pf_ready(nic);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to get ready message from PF");
		goto alarm_fail;
	} else {
		PMD_INIT_LOG(INFO,
			"node=%d vf=%d mode=%s sqs=%s loopback_supported=%s",
			nic->node, nic->vf_id,
			nic->tns_mode == NIC_TNS_MODE ? "tns" : "tns-bypass",
			nic->sqs_mode ? "true" : "false",
			nic->loopback_supported ? "true" : "false");
	}

	if (nic->sqs_mode) {
		PMD_INIT_LOG(INFO, "Unsupported SQS VF detected, detaching...");
		/* Detach port by returning positive error number */
		ret = ENOTSUP;
		goto alarm_fail;
	}

	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory for mac addr");
		ret = -ENOMEM;
		goto alarm_fail;
	}
	if (is_zero_ether_addr((struct ether_addr *)nic->mac_addr))
		eth_random_addr(&nic->mac_addr[0]);

	ether_addr_copy((struct ether_addr *)nic->mac_addr,
			&eth_dev->data->mac_addrs[0]);

	ret = nicvf_mbox_set_mac_addr(nic, nic->mac_addr);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to set mac addr");
		goto malloc_fail;
	}

	ret = nicvf_base_init(nic);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to execute nicvf_base_init");
		goto malloc_fail;
	}

	ret = nicvf_mbox_get_rss_size(nic);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to get rss table size");
		goto malloc_fail;
	}

	PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x",
		eth_dev->data->port_id, nic->vendor_id, nic->device_id,
		nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2],
		nic->mac_addr[3], nic->mac_addr[4], nic->mac_addr[5]);

	return 0;

malloc_fail:
	rte_free(eth_dev->data->mac_addrs);
alarm_fail:
	nicvf_periodic_alarm_stop(nic);
fail:
	return ret;
}

static const struct rte_pci_id pci_id_nicvf_map[] = {
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_PASS1_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_THUNDERX_PASS1_NICVF,
	},
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_PASS2_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_THUNDERX_PASS2_NICVF,
	},
	{
		.vendor_id = 0,
	},
};
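
/* Registration glue: tie the PCI id table and the per-device init hook
 * into a DPDK ethdev driver.
 */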
static struct eth_driver rte_nicvf_pmd = {
	.pci_drv = {
		.name = "rte_nicvf_pmd",
		.id_table = pci_id_nicvf_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	},
	.eth_dev_init = nicvf_eth_dev_init,
	.dev_private_size = sizeof(struct nicvf),
};

static int
rte_nicvf_pmd_init(const char *name __rte_unused, const char *para __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	PMD_INIT_LOG(INFO, "librte_pmd_thunderx nicvf version %s",
			THUNDERX_NICVF_PMD_VERSION);

	rte_eth_driver_register(&rte_nicvf_pmd);
	return 0;
}

static struct rte_driver rte_nicvf_driver = {
	.name = "nicvf_driver",
	.type = PMD_PDEV,
	.init = rte_nicvf_pmd_init,
};

PMD_REGISTER_DRIVER(rte_nicvf_driver);