1 /* 2 * BSD LICENSE 3 * 4 * Copyright (C) Cavium networks Ltd. 2016. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 10 * * Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * * Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in 14 * the documentation and/or other materials provided with the 15 * distribution. 16 * * Neither the name of Cavium networks nor the names of its 17 * contributors may be used to endorse or promote products derived 18 * from this software without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 #include <assert.h> 34 #include <stdio.h> 35 #include <stdbool.h> 36 #include <errno.h> 37 #include <stdint.h> 38 #include <string.h> 39 #include <unistd.h> 40 #include <stdarg.h> 41 #include <inttypes.h> 42 #include <netinet/in.h> 43 #include <sys/queue.h> 44 45 #include <rte_alarm.h> 46 #include <rte_atomic.h> 47 #include <rte_branch_prediction.h> 48 #include <rte_byteorder.h> 49 #include <rte_common.h> 50 #include <rte_cycles.h> 51 #include <rte_debug.h> 52 #include <rte_dev.h> 53 #include <rte_eal.h> 54 #include <rte_ether.h> 55 #include <rte_ethdev.h> 56 #include <rte_ethdev_pci.h> 57 #include <rte_interrupts.h> 58 #include <rte_log.h> 59 #include <rte_memory.h> 60 #include <rte_memzone.h> 61 #include <rte_malloc.h> 62 #include <rte_random.h> 63 #include <rte_pci.h> 64 #include <rte_tailq.h> 65 66 #include "base/nicvf_plat.h" 67 68 #include "nicvf_ethdev.h" 69 #include "nicvf_rxtx.h" 70 #include "nicvf_svf.h" 71 #include "nicvf_logs.h" 72 73 static void nicvf_dev_stop(struct rte_eth_dev *dev); 74 static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup); 75 static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic, 76 bool cleanup); 77 78 static inline int 79 nicvf_atomic_write_link_status(struct rte_eth_dev *dev, 80 struct rte_eth_link *link) 81 { 82 struct rte_eth_link *dst = &dev->data->dev_link; 83 struct rte_eth_link *src = link; 84 85 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, 86 *(uint64_t *)src) == 0) 87 return -1; 88 89 return 0; 90 } 91 92 static inline void 93 nicvf_set_eth_link_status(struct nicvf *nic, struct rte_eth_link *link) 94 { 95 link->link_status = nic->link_up; 96 link->link_duplex = ETH_LINK_AUTONEG; 97 if (nic->duplex == NICVF_HALF_DUPLEX) 98 link->link_duplex = ETH_LINK_HALF_DUPLEX; 99 else if (nic->duplex == NICVF_FULL_DUPLEX) 100 link->link_duplex = ETH_LINK_FULL_DUPLEX; 101 link->link_speed = nic->speed; 102 link->link_autoneg = ETH_LINK_SPEED_AUTONEG; 103 } 104 105 static void 106 nicvf_interrupt(void *arg) 107 { 108 struct rte_eth_dev *dev = arg; 109 struct nicvf *nic = nicvf_pmd_priv(dev); 110 111 if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) { 112 if (dev->data->dev_conf.intr_conf.lsc) 113 nicvf_set_eth_link_status(nic, &dev->data->dev_link); 114 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); 115 } 116 117 rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000, 118 nicvf_interrupt, dev); 119 } 120 121 static void 122 nicvf_vf_interrupt(void *arg) 123 { 124 struct nicvf *nic = arg; 125 126 nicvf_reg_poll_interrupts(nic); 127 128 rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000, 129 nicvf_vf_interrupt, nic); 130 } 131 132 static int 133 nicvf_periodic_alarm_start(void (fn)(void *), void *arg) 134 { 135 return rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000, fn, arg); 136 } 137 138 static int 139 nicvf_periodic_alarm_stop(void (fn)(void *), void *arg) 140 { 141 return rte_eal_alarm_cancel(fn, arg); 142 } 143 144 /* 145 * Return 0 means link status changed, -1 means not changed 146 */ 147 static int 148 nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) 149 { 150 #define CHECK_INTERVAL 100 /* 100ms */ 151 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ 152 struct rte_eth_link link; 153 struct nicvf *nic = nicvf_pmd_priv(dev); 154 int i; 155 156 PMD_INIT_FUNC_TRACE(); 157 158 if (wait_to_complete) { 159 /* rte_eth_link_get() might need to wait up to 9 seconds */ 160 for (i = 0; i < MAX_CHECK_TIME; i++) { 161 memset(&link, 0, sizeof(link)); 162 nicvf_set_eth_link_status(nic, &link); 163 if (link.link_status) 164 break; 165 rte_delay_ms(CHECK_INTERVAL); 166 } 167 } else { 168 memset(&link, 0, sizeof(link)); 169 nicvf_set_eth_link_status(nic, &link); 170 } 171 return nicvf_atomic_write_link_status(dev, &link); 172 } 173 174 static int 175 nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) 176 { 177 struct nicvf *nic = nicvf_pmd_priv(dev); 178 uint32_t buffsz, frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 179 size_t i; 180 181 PMD_INIT_FUNC_TRACE(); 182 183 if (frame_size > NIC_HW_MAX_FRS) 184 return -EINVAL; 185 186 if (frame_size < NIC_HW_MIN_FRS) 187 return -EINVAL; 188 189 buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM; 190 191 /* 192 * Refuse mtu that requires the support of scattered packets 193 * when this feature has not been enabled before. 194 */ 195 if (!dev->data->scattered_rx && 196 (frame_size + 2 * VLAN_TAG_SIZE > buffsz)) 197 return -EINVAL; 198 199 /* check <seg size> * <max_seg> >= max_frame */ 200 if (dev->data->scattered_rx && 201 (frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS)) 202 return -EINVAL; 203 204 if (frame_size > ETHER_MAX_LEN) 205 dev->data->dev_conf.rxmode.jumbo_frame = 1; 206 else 207 dev->data->dev_conf.rxmode.jumbo_frame = 0; 208 209 if (nicvf_mbox_update_hw_max_frs(nic, frame_size)) 210 return -EINVAL; 211 212 /* Update max frame size */ 213 dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)frame_size; 214 nic->mtu = mtu; 215 216 for (i = 0; i < nic->sqs_count; i++) 217 nic->snicvf[i]->mtu = mtu; 218 219 return 0; 220 } 221 222 static int 223 nicvf_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs) 224 { 225 uint64_t *data = regs->data; 226 struct nicvf *nic = nicvf_pmd_priv(dev); 227 228 if (data == NULL) { 229 regs->length = nicvf_reg_get_count(); 230 regs->width = THUNDERX_REG_BYTES; 231 return 0; 232 } 233 234 /* Support only full register dump */ 235 if ((regs->length == 0) || 236 (regs->length == (uint32_t)nicvf_reg_get_count())) { 237 regs->version = nic->vendor_id << 16 | nic->device_id; 238 nicvf_reg_dump(nic, data); 239 return 0; 240 } 241 return -ENOTSUP; 242 } 243 244 static void 245 nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 246 { 247 uint16_t qidx; 248 struct nicvf_hw_rx_qstats rx_qstats; 249 struct nicvf_hw_tx_qstats tx_qstats; 250 struct nicvf_hw_stats port_stats; 251 struct nicvf *nic = nicvf_pmd_priv(dev); 252 uint16_t rx_start, rx_end; 253 uint16_t tx_start, tx_end; 254 size_t i; 255 256 /* RX queue indices for the first VF */ 257 nicvf_rx_range(dev, nic, &rx_start, &rx_end); 258 259 /* Reading per RX ring stats */ 260 for (qidx = rx_start; qidx <= rx_end; qidx++) { 261 if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS) 262 break; 263 264 nicvf_hw_get_rx_qstats(nic, &rx_qstats, qidx); 265 stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes; 266 stats->q_ipackets[qidx] = rx_qstats.q_rx_packets; 267 } 268 269 /* TX queue indices for the first VF */ 270 nicvf_tx_range(dev, nic, &tx_start, &tx_end); 271 272 /* Reading per TX ring stats */ 273 for (qidx = tx_start; qidx <= tx_end; qidx++) { 274 if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS) 275 break; 276 277 nicvf_hw_get_tx_qstats(nic, &tx_qstats, qidx); 278 stats->q_obytes[qidx] = tx_qstats.q_tx_bytes; 279 stats->q_opackets[qidx] = tx_qstats.q_tx_packets; 280 } 281 282 for (i = 0; i < nic->sqs_count; i++) { 283 struct nicvf *snic = nic->snicvf[i]; 284 285 if (snic == NULL) 286 break; 287 288 /* RX queue indices for a secondary VF */ 289 nicvf_rx_range(dev, snic, &rx_start, &rx_end); 290 291 /* Reading per RX ring stats */ 292 for (qidx = rx_start; qidx <= rx_end; qidx++) { 293 if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS) 294 break; 295 296 nicvf_hw_get_rx_qstats(snic, &rx_qstats, 297 qidx % MAX_RCV_QUEUES_PER_QS); 298 stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes; 299 stats->q_ipackets[qidx] = rx_qstats.q_rx_packets; 300 } 301 302 /* TX queue indices for a secondary VF */ 303 nicvf_tx_range(dev, snic, &tx_start, &tx_end); 304 /* Reading per TX ring stats */ 305 for (qidx = tx_start; qidx <= tx_end; qidx++) { 306 if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS) 307 break; 308 309 nicvf_hw_get_tx_qstats(snic, &tx_qstats, 310 qidx % MAX_SND_QUEUES_PER_QS); 311 stats->q_obytes[qidx] = tx_qstats.q_tx_bytes; 312 stats->q_opackets[qidx] = tx_qstats.q_tx_packets; 313 } 314 } 315 316 nicvf_hw_get_stats(nic, &port_stats); 317 stats->ibytes = port_stats.rx_bytes; 318 stats->ipackets = port_stats.rx_ucast_frames; 319 stats->ipackets += port_stats.rx_bcast_frames; 320 stats->ipackets += port_stats.rx_mcast_frames; 321 stats->ierrors = port_stats.rx_l2_errors; 322 stats->imissed = port_stats.rx_drop_red; 323 stats->imissed += port_stats.rx_drop_overrun; 324 stats->imissed += port_stats.rx_drop_bcast; 325 stats->imissed += port_stats.rx_drop_mcast; 326 stats->imissed += port_stats.rx_drop_l3_bcast; 327 stats->imissed += port_stats.rx_drop_l3_mcast; 328 329 stats->obytes = port_stats.tx_bytes_ok; 330 stats->opackets = port_stats.tx_ucast_frames_ok; 331 stats->opackets += port_stats.tx_bcast_frames_ok; 332 stats->opackets += port_stats.tx_mcast_frames_ok; 333 stats->oerrors = port_stats.tx_drops; 334 } 335 336 static const uint32_t * 337 nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev) 338 { 339 size_t copied; 340 static uint32_t ptypes[32]; 341 struct nicvf *nic = nicvf_pmd_priv(dev); 342 static const uint32_t ptypes_common[] = { 343 RTE_PTYPE_L3_IPV4, 344 RTE_PTYPE_L3_IPV4_EXT, 345 RTE_PTYPE_L3_IPV6, 346 RTE_PTYPE_L3_IPV6_EXT, 347 RTE_PTYPE_L4_TCP, 348 RTE_PTYPE_L4_UDP, 349 RTE_PTYPE_L4_FRAG, 350 }; 351 static const uint32_t ptypes_tunnel[] = { 352 RTE_PTYPE_TUNNEL_GRE, 353 RTE_PTYPE_TUNNEL_GENEVE, 354 RTE_PTYPE_TUNNEL_VXLAN, 355 RTE_PTYPE_TUNNEL_NVGRE, 356 }; 357 static const uint32_t ptypes_end = RTE_PTYPE_UNKNOWN; 358 359 copied = sizeof(ptypes_common); 360 memcpy(ptypes, ptypes_common, copied); 361 if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) { 362 memcpy((char *)ptypes + copied, ptypes_tunnel, 363 sizeof(ptypes_tunnel)); 364 copied += sizeof(ptypes_tunnel); 365 } 366 367 memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end)); 368 if (dev->rx_pkt_burst == nicvf_recv_pkts || 369 dev->rx_pkt_burst == nicvf_recv_pkts_multiseg) 370 return ptypes; 371 372 return NULL; 373 } 374 375 static void 376 nicvf_dev_stats_reset(struct rte_eth_dev *dev) 377 { 378 int i; 379 uint16_t rxqs = 0, txqs = 0; 380 struct nicvf *nic = nicvf_pmd_priv(dev); 381 uint16_t rx_start, rx_end; 382 uint16_t tx_start, tx_end; 383 384 /* Reset all primary nic counters */ 385 nicvf_rx_range(dev, nic, &rx_start, &rx_end); 386 for (i = rx_start; i <= rx_end; i++) 387 rxqs |= (0x3 << (i * 2)); 388 389 nicvf_tx_range(dev, nic, &tx_start, &tx_end); 390 for (i = tx_start; i <= tx_end; i++) 391 txqs |= (0x3 << (i * 2)); 392 393 nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, rxqs, txqs); 394 395 /* Reset secondary nic queue counters */ 396 for (i = 0; i < nic->sqs_count; i++) { 397 struct nicvf *snic = nic->snicvf[i]; 398 if (snic == NULL) 399 break; 400 401 nicvf_rx_range(dev, snic, &rx_start, &rx_end); 402 for (i = rx_start; i <= rx_end; i++) 403 rxqs |= (0x3 << ((i % MAX_CMP_QUEUES_PER_QS) * 2)); 404 405 nicvf_tx_range(dev, snic, &tx_start, &tx_end); 406 for (i = tx_start; i <= tx_end; i++) 407 txqs |= (0x3 << ((i % MAX_SND_QUEUES_PER_QS) * 2)); 408 409 nicvf_mbox_reset_stat_counters(snic, 0, 0, rxqs, txqs); 410 } 411 } 412 413 /* Promiscuous mode enabled by default in LMAC to VF 1:1 map configuration */ 414 static void 415 nicvf_dev_promisc_enable(struct rte_eth_dev *dev __rte_unused) 416 { 417 } 418 419 static inline uint64_t 420 nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss) 421 { 422 uint64_t nic_rss = 0; 423 424 if (ethdev_rss & ETH_RSS_IPV4) 425 nic_rss |= RSS_IP_ENA; 426 427 if (ethdev_rss & ETH_RSS_IPV6) 428 nic_rss |= RSS_IP_ENA; 429 430 if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP) 431 nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA); 432 433 if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP) 434 nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA); 435 436 if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP) 437 nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA); 438 439 if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP) 440 nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA); 441 442 if (ethdev_rss & ETH_RSS_PORT) 443 nic_rss |= RSS_L2_EXTENDED_HASH_ENA; 444 445 if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) { 446 if (ethdev_rss & ETH_RSS_VXLAN) 447 nic_rss |= RSS_TUN_VXLAN_ENA; 448 449 if (ethdev_rss & ETH_RSS_GENEVE) 450 nic_rss |= RSS_TUN_GENEVE_ENA; 451 452 if (ethdev_rss & ETH_RSS_NVGRE) 453 nic_rss |= RSS_TUN_NVGRE_ENA; 454 } 455 456 return nic_rss; 457 } 458 459 static inline uint64_t 460 nicvf_rss_nic_to_ethdev(struct nicvf *nic, uint64_t nic_rss) 461 { 462 uint64_t ethdev_rss = 0; 463 464 if (nic_rss & RSS_IP_ENA) 465 ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6); 466 467 if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA)) 468 ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP | 469 ETH_RSS_NONFRAG_IPV6_TCP); 470 471 if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA)) 472 ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP | 473 ETH_RSS_NONFRAG_IPV6_UDP); 474 475 if (nic_rss & RSS_L2_EXTENDED_HASH_ENA) 476 ethdev_rss |= ETH_RSS_PORT; 477 478 if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) { 479 if (nic_rss & RSS_TUN_VXLAN_ENA) 480 ethdev_rss |= ETH_RSS_VXLAN; 481 482 if (nic_rss & RSS_TUN_GENEVE_ENA) 483 ethdev_rss |= ETH_RSS_GENEVE; 484 485 if (nic_rss & RSS_TUN_NVGRE_ENA) 486 ethdev_rss |= ETH_RSS_NVGRE; 487 } 488 return ethdev_rss; 489 } 490 491 static int 492 nicvf_dev_reta_query(struct rte_eth_dev *dev, 493 struct rte_eth_rss_reta_entry64 *reta_conf, 494 uint16_t reta_size) 495 { 496 struct nicvf *nic = nicvf_pmd_priv(dev); 497 uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE]; 498 int ret, i, j; 499 500 if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) { 501 RTE_LOG(ERR, PMD, "The size of hash lookup table configured " 502 "(%d) doesn't match the number hardware can supported " 503 "(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE); 504 return -EINVAL; 505 } 506 507 ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE); 508 if (ret) 509 return ret; 510 511 /* Copy RETA table */ 512 for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) { 513 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) 514 if ((reta_conf[i].mask >> j) & 0x01) 515 reta_conf[i].reta[j] = tbl[j]; 516 } 517 518 return 0; 519 } 520 521 static int 522 nicvf_dev_reta_update(struct rte_eth_dev *dev, 523 struct rte_eth_rss_reta_entry64 *reta_conf, 524 uint16_t reta_size) 525 { 526 struct nicvf *nic = nicvf_pmd_priv(dev); 527 uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE]; 528 int ret, i, j; 529 530 if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) { 531 RTE_LOG(ERR, PMD, "The size of hash lookup table configured " 532 "(%d) doesn't match the number hardware can supported " 533 "(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE); 534 return -EINVAL; 535 } 536 537 ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE); 538 if (ret) 539 return ret; 540 541 /* Copy RETA table */ 542 for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) { 543 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) 544 if ((reta_conf[i].mask >> j) & 0x01) 545 tbl[j] = reta_conf[i].reta[j]; 546 } 547 548 return nicvf_rss_reta_update(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE); 549 } 550 551 static int 552 nicvf_dev_rss_hash_conf_get(struct rte_eth_dev *dev, 553 struct rte_eth_rss_conf *rss_conf) 554 { 555 struct nicvf *nic = nicvf_pmd_priv(dev); 556 557 if (rss_conf->rss_key) 558 nicvf_rss_get_key(nic, rss_conf->rss_key); 559 560 rss_conf->rss_key_len = RSS_HASH_KEY_BYTE_SIZE; 561 rss_conf->rss_hf = nicvf_rss_nic_to_ethdev(nic, nicvf_rss_get_cfg(nic)); 562 return 0; 563 } 564 565 static int 566 nicvf_dev_rss_hash_update(struct rte_eth_dev *dev, 567 struct rte_eth_rss_conf *rss_conf) 568 { 569 struct nicvf *nic = nicvf_pmd_priv(dev); 570 uint64_t nic_rss; 571 572 if (rss_conf->rss_key && 573 rss_conf->rss_key_len != RSS_HASH_KEY_BYTE_SIZE) { 574 RTE_LOG(ERR, PMD, "Hash key size mismatch %d", 575 rss_conf->rss_key_len); 576 return -EINVAL; 577 } 578 579 if (rss_conf->rss_key) 580 nicvf_rss_set_key(nic, rss_conf->rss_key); 581 582 nic_rss = nicvf_rss_ethdev_to_nic(nic, rss_conf->rss_hf); 583 nicvf_rss_set_cfg(nic, nic_rss); 584 return 0; 585 } 586 587 static int 588 nicvf_qset_cq_alloc(struct rte_eth_dev *dev, struct nicvf *nic, 589 struct nicvf_rxq *rxq, uint16_t qidx, uint32_t desc_cnt) 590 { 591 const struct rte_memzone *rz; 592 uint32_t ring_size = CMP_QUEUE_SZ_MAX * sizeof(union cq_entry_t); 593 594 rz = rte_eth_dma_zone_reserve(dev, "cq_ring", 595 nicvf_netdev_qidx(nic, qidx), ring_size, 596 NICVF_CQ_BASE_ALIGN_BYTES, nic->node); 597 if (rz == NULL) { 598 PMD_INIT_LOG(ERR, "Failed to allocate mem for cq hw ring"); 599 return -ENOMEM; 600 } 601 602 memset(rz->addr, 0, ring_size); 603 604 rxq->phys = rz->phys_addr; 605 rxq->desc = rz->addr; 606 rxq->qlen_mask = desc_cnt - 1; 607 608 return 0; 609 } 610 611 static int 612 nicvf_qset_sq_alloc(struct rte_eth_dev *dev, struct nicvf *nic, 613 struct nicvf_txq *sq, uint16_t qidx, uint32_t desc_cnt) 614 { 615 const struct rte_memzone *rz; 616 uint32_t ring_size = SND_QUEUE_SZ_MAX * sizeof(union sq_entry_t); 617 618 rz = rte_eth_dma_zone_reserve(dev, "sq", 619 nicvf_netdev_qidx(nic, qidx), ring_size, 620 NICVF_SQ_BASE_ALIGN_BYTES, nic->node); 621 if (rz == NULL) { 622 PMD_INIT_LOG(ERR, "Failed allocate mem for sq hw ring"); 623 return -ENOMEM; 624 } 625 626 memset(rz->addr, 0, ring_size); 627 628 sq->phys = rz->phys_addr; 629 sq->desc = rz->addr; 630 sq->qlen_mask = desc_cnt - 1; 631 632 return 0; 633 } 634 635 static int 636 nicvf_qset_rbdr_alloc(struct rte_eth_dev *dev, struct nicvf *nic, 637 uint32_t desc_cnt, uint32_t buffsz) 638 { 639 struct nicvf_rbdr *rbdr; 640 const struct rte_memzone *rz; 641 uint32_t ring_size; 642 643 assert(nic->rbdr == NULL); 644 rbdr = rte_zmalloc_socket("rbdr", sizeof(struct nicvf_rbdr), 645 RTE_CACHE_LINE_SIZE, nic->node); 646 if (rbdr == NULL) { 647 PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr"); 648 return -ENOMEM; 649 } 650 651 ring_size = sizeof(struct rbdr_entry_t) * RBDR_QUEUE_SZ_MAX; 652 rz = rte_eth_dma_zone_reserve(dev, "rbdr", 653 nicvf_netdev_qidx(nic, 0), ring_size, 654 NICVF_RBDR_BASE_ALIGN_BYTES, nic->node); 655 if (rz == NULL) { 656 PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr desc ring"); 657 return -ENOMEM; 658 } 659 660 memset(rz->addr, 0, ring_size); 661 662 rbdr->phys = rz->phys_addr; 663 rbdr->tail = 0; 664 rbdr->next_tail = 0; 665 rbdr->desc = rz->addr; 666 rbdr->buffsz = buffsz; 667 rbdr->qlen_mask = desc_cnt - 1; 668 rbdr->rbdr_status = 669 nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_STATUS0; 670 rbdr->rbdr_door = 671 nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_DOOR; 672 673 nic->rbdr = rbdr; 674 return 0; 675 } 676 677 static void 678 nicvf_rbdr_release_mbuf(struct rte_eth_dev *dev, struct nicvf *nic, 679 nicvf_phys_addr_t phy) 680 { 681 uint16_t qidx; 682 void *obj; 683 struct nicvf_rxq *rxq; 684 uint16_t rx_start, rx_end; 685 686 /* Get queue ranges for this VF */ 687 nicvf_rx_range(dev, nic, &rx_start, &rx_end); 688 689 for (qidx = rx_start; qidx <= rx_end; qidx++) { 690 rxq = dev->data->rx_queues[qidx]; 691 if (rxq->precharge_cnt) { 692 obj = (void *)nicvf_mbuff_phy2virt(phy, 693 rxq->mbuf_phys_off); 694 rte_mempool_put(rxq->pool, obj); 695 rxq->precharge_cnt--; 696 break; 697 } 698 } 699 } 700 701 static inline void 702 nicvf_rbdr_release_mbufs(struct rte_eth_dev *dev, struct nicvf *nic) 703 { 704 uint32_t qlen_mask, head; 705 struct rbdr_entry_t *entry; 706 struct nicvf_rbdr *rbdr = nic->rbdr; 707 708 qlen_mask = rbdr->qlen_mask; 709 head = rbdr->head; 710 while (head != rbdr->tail) { 711 entry = rbdr->desc + head; 712 nicvf_rbdr_release_mbuf(dev, nic, entry->full_addr); 713 head++; 714 head = head & qlen_mask; 715 } 716 } 717 718 static inline void 719 nicvf_tx_queue_release_mbufs(struct nicvf_txq *txq) 720 { 721 uint32_t head; 722 723 head = txq->head; 724 while (head != txq->tail) { 725 if (txq->txbuffs[head]) { 726 rte_pktmbuf_free_seg(txq->txbuffs[head]); 727 txq->txbuffs[head] = NULL; 728 } 729 head++; 730 head = head & txq->qlen_mask; 731 } 732 } 733 734 static void 735 nicvf_tx_queue_reset(struct nicvf_txq *txq) 736 { 737 uint32_t txq_desc_cnt = txq->qlen_mask + 1; 738 739 memset(txq->desc, 0, sizeof(union sq_entry_t) * txq_desc_cnt); 740 memset(txq->txbuffs, 0, sizeof(struct rte_mbuf *) * txq_desc_cnt); 741 txq->tail = 0; 742 txq->head = 0; 743 txq->xmit_bufs = 0; 744 } 745 746 static inline int 747 nicvf_vf_start_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic, 748 uint16_t qidx) 749 { 750 struct nicvf_txq *txq; 751 int ret; 752 753 assert(qidx < MAX_SND_QUEUES_PER_QS); 754 755 if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] == 756 RTE_ETH_QUEUE_STATE_STARTED) 757 return 0; 758 759 txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)]; 760 txq->pool = NULL; 761 ret = nicvf_qset_sq_config(nic, qidx, txq); 762 if (ret) { 763 PMD_INIT_LOG(ERR, "Failed to configure sq VF%d %d %d", 764 nic->vf_id, qidx, ret); 765 goto config_sq_error; 766 } 767 768 dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] = 769 RTE_ETH_QUEUE_STATE_STARTED; 770 return ret; 771 772 config_sq_error: 773 nicvf_qset_sq_reclaim(nic, qidx); 774 return ret; 775 } 776 777 static inline int 778 nicvf_vf_stop_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic, 779 uint16_t qidx) 780 { 781 struct nicvf_txq *txq; 782 int ret; 783 784 assert(qidx < MAX_SND_QUEUES_PER_QS); 785 786 if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] == 787 RTE_ETH_QUEUE_STATE_STOPPED) 788 return 0; 789 790 ret = nicvf_qset_sq_reclaim(nic, qidx); 791 if (ret) 792 PMD_INIT_LOG(ERR, "Failed to reclaim sq VF%d %d %d", 793 nic->vf_id, qidx, ret); 794 795 txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)]; 796 nicvf_tx_queue_release_mbufs(txq); 797 nicvf_tx_queue_reset(txq); 798 799 dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] = 800 RTE_ETH_QUEUE_STATE_STOPPED; 801 return ret; 802 } 803 804 static inline int 805 nicvf_configure_cpi(struct rte_eth_dev *dev) 806 { 807 struct nicvf *nic = nicvf_pmd_priv(dev); 808 uint16_t qidx, qcnt; 809 int ret; 810 811 /* Count started rx queues */ 812 for (qidx = qcnt = 0; qidx < dev->data->nb_rx_queues; qidx++) 813 if (dev->data->rx_queue_state[qidx] == 814 RTE_ETH_QUEUE_STATE_STARTED) 815 qcnt++; 816 817 nic->cpi_alg = CPI_ALG_NONE; 818 ret = nicvf_mbox_config_cpi(nic, qcnt); 819 if (ret) 820 PMD_INIT_LOG(ERR, "Failed to configure CPI %d", ret); 821 822 return ret; 823 } 824 825 static inline int 826 nicvf_configure_rss(struct rte_eth_dev *dev) 827 { 828 struct nicvf *nic = nicvf_pmd_priv(dev); 829 uint64_t rsshf; 830 int ret = -EINVAL; 831 832 rsshf = nicvf_rss_ethdev_to_nic(nic, 833 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf); 834 PMD_DRV_LOG(INFO, "mode=%d rx_queues=%d loopback=%d rsshf=0x%" PRIx64, 835 dev->data->dev_conf.rxmode.mq_mode, 836 dev->data->nb_rx_queues, 837 dev->data->dev_conf.lpbk_mode, rsshf); 838 839 if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE) 840 ret = nicvf_rss_term(nic); 841 else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) 842 ret = nicvf_rss_config(nic, dev->data->nb_rx_queues, rsshf); 843 if (ret) 844 PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret); 845 846 return ret; 847 } 848 849 static int 850 nicvf_configure_rss_reta(struct rte_eth_dev *dev) 851 { 852 struct nicvf *nic = nicvf_pmd_priv(dev); 853 unsigned int idx, qmap_size; 854 uint8_t qmap[RTE_MAX_QUEUES_PER_PORT]; 855 uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE]; 856 857 if (nic->cpi_alg != CPI_ALG_NONE) 858 return -EINVAL; 859 860 /* Prepare queue map */ 861 for (idx = 0, qmap_size = 0; idx < dev->data->nb_rx_queues; idx++) { 862 if (dev->data->rx_queue_state[idx] == 863 RTE_ETH_QUEUE_STATE_STARTED) 864 qmap[qmap_size++] = idx; 865 } 866 867 /* Update default RSS RETA */ 868 for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++) 869 default_reta[idx] = qmap[idx % qmap_size]; 870 871 return nicvf_rss_reta_update(nic, default_reta, 872 NIC_MAX_RSS_IDR_TBL_SIZE); 873 } 874 875 static void 876 nicvf_dev_tx_queue_release(void *sq) 877 { 878 struct nicvf_txq *txq; 879 880 PMD_INIT_FUNC_TRACE(); 881 882 txq = (struct nicvf_txq *)sq; 883 if (txq) { 884 if (txq->txbuffs != NULL) { 885 nicvf_tx_queue_release_mbufs(txq); 886 rte_free(txq->txbuffs); 887 txq->txbuffs = NULL; 888 } 889 rte_free(txq); 890 } 891 } 892 893 static void 894 nicvf_set_tx_function(struct rte_eth_dev *dev) 895 { 896 struct nicvf_txq *txq; 897 size_t i; 898 bool multiseg = false; 899 900 for (i = 0; i < dev->data->nb_tx_queues; i++) { 901 txq = dev->data->tx_queues[i]; 902 if ((txq->txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS) == 0) { 903 multiseg = true; 904 break; 905 } 906 } 907 908 /* Use a simple Tx queue (no offloads, no multi segs) if possible */ 909 if (multiseg) { 910 PMD_DRV_LOG(DEBUG, "Using multi-segment tx callback"); 911 dev->tx_pkt_burst = nicvf_xmit_pkts_multiseg; 912 } else { 913 PMD_DRV_LOG(DEBUG, "Using single-segment tx callback"); 914 dev->tx_pkt_burst = nicvf_xmit_pkts; 915 } 916 917 if (txq->pool_free == nicvf_single_pool_free_xmited_buffers) 918 PMD_DRV_LOG(DEBUG, "Using single-mempool tx free method"); 919 else 920 PMD_DRV_LOG(DEBUG, "Using multi-mempool tx free method"); 921 } 922 923 static void 924 nicvf_set_rx_function(struct rte_eth_dev *dev) 925 { 926 if (dev->data->scattered_rx) { 927 PMD_DRV_LOG(DEBUG, "Using multi-segment rx callback"); 928 dev->rx_pkt_burst = nicvf_recv_pkts_multiseg; 929 } else { 930 PMD_DRV_LOG(DEBUG, "Using single-segment rx callback"); 931 dev->rx_pkt_burst = nicvf_recv_pkts; 932 } 933 } 934 935 static int 936 nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, 937 uint16_t nb_desc, unsigned int socket_id, 938 const struct rte_eth_txconf *tx_conf) 939 { 940 uint16_t tx_free_thresh; 941 uint8_t is_single_pool; 942 struct nicvf_txq *txq; 943 struct nicvf *nic = nicvf_pmd_priv(dev); 944 945 PMD_INIT_FUNC_TRACE(); 946 947 if (qidx >= MAX_SND_QUEUES_PER_QS) 948 nic = nic->snicvf[qidx / MAX_SND_QUEUES_PER_QS - 1]; 949 950 qidx = qidx % MAX_SND_QUEUES_PER_QS; 951 952 /* Socket id check */ 953 if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node) 954 PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d", 955 socket_id, nic->node); 956 957 /* Tx deferred start is not supported */ 958 if (tx_conf->tx_deferred_start) { 959 PMD_INIT_LOG(ERR, "Tx deferred start not supported"); 960 return -EINVAL; 961 } 962 963 /* Roundup nb_desc to available qsize and validate max number of desc */ 964 nb_desc = nicvf_qsize_sq_roundup(nb_desc); 965 if (nb_desc == 0) { 966 PMD_INIT_LOG(ERR, "Value of nb_desc beyond available sq qsize"); 967 return -EINVAL; 968 } 969 970 /* Validate tx_free_thresh */ 971 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ? 972 tx_conf->tx_free_thresh : 973 NICVF_DEFAULT_TX_FREE_THRESH); 974 975 if (tx_free_thresh > (nb_desc) || 976 tx_free_thresh > NICVF_MAX_TX_FREE_THRESH) { 977 PMD_INIT_LOG(ERR, 978 "tx_free_thresh must be less than the number of TX " 979 "descriptors. (tx_free_thresh=%u port=%d " 980 "queue=%d)", (unsigned int)tx_free_thresh, 981 (int)dev->data->port_id, (int)qidx); 982 return -EINVAL; 983 } 984 985 /* Free memory prior to re-allocation if needed. */ 986 if (dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) { 987 PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d", 988 nicvf_netdev_qidx(nic, qidx)); 989 nicvf_dev_tx_queue_release( 990 dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)]); 991 dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL; 992 } 993 994 /* Allocating tx queue data structure */ 995 txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nicvf_txq), 996 RTE_CACHE_LINE_SIZE, nic->node); 997 if (txq == NULL) { 998 PMD_INIT_LOG(ERR, "Failed to allocate txq=%d", 999 nicvf_netdev_qidx(nic, qidx)); 1000 return -ENOMEM; 1001 } 1002 1003 txq->nic = nic; 1004 txq->queue_id = qidx; 1005 txq->tx_free_thresh = tx_free_thresh; 1006 txq->txq_flags = tx_conf->txq_flags; 1007 txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD; 1008 txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR; 1009 is_single_pool = (txq->txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT && 1010 txq->txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP); 1011 1012 /* Choose optimum free threshold value for multipool case */ 1013 if (!is_single_pool) { 1014 txq->tx_free_thresh = (uint16_t) 1015 (tx_conf->tx_free_thresh == NICVF_DEFAULT_TX_FREE_THRESH ? 1016 NICVF_TX_FREE_MPOOL_THRESH : 1017 tx_conf->tx_free_thresh); 1018 txq->pool_free = nicvf_multi_pool_free_xmited_buffers; 1019 } else { 1020 txq->pool_free = nicvf_single_pool_free_xmited_buffers; 1021 } 1022 1023 /* Allocate software ring */ 1024 txq->txbuffs = rte_zmalloc_socket("txq->txbuffs", 1025 nb_desc * sizeof(struct rte_mbuf *), 1026 RTE_CACHE_LINE_SIZE, nic->node); 1027 1028 if (txq->txbuffs == NULL) { 1029 nicvf_dev_tx_queue_release(txq); 1030 return -ENOMEM; 1031 } 1032 1033 if (nicvf_qset_sq_alloc(dev, nic, txq, qidx, nb_desc)) { 1034 PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx); 1035 nicvf_dev_tx_queue_release(txq); 1036 return -ENOMEM; 1037 } 1038 1039 nicvf_tx_queue_reset(txq); 1040 1041 PMD_TX_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p phys=0x%" PRIx64, 1042 nicvf_netdev_qidx(nic, qidx), txq, nb_desc, txq->desc, 1043 txq->phys); 1044 1045 dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq; 1046 dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] = 1047 RTE_ETH_QUEUE_STATE_STOPPED; 1048 return 0; 1049 } 1050 1051 static inline void 1052 nicvf_rx_queue_release_mbufs(struct rte_eth_dev *dev, struct nicvf_rxq *rxq) 1053 { 1054 uint32_t rxq_cnt; 1055 uint32_t nb_pkts, released_pkts = 0; 1056 uint32_t refill_cnt = 0; 1057 struct rte_mbuf *rx_pkts[NICVF_MAX_RX_FREE_THRESH]; 1058 1059 if (dev->rx_pkt_burst == NULL) 1060 return; 1061 1062 while ((rxq_cnt = nicvf_dev_rx_queue_count(dev, 1063 nicvf_netdev_qidx(rxq->nic, rxq->queue_id)))) { 1064 nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts, 1065 NICVF_MAX_RX_FREE_THRESH); 1066 PMD_DRV_LOG(INFO, "nb_pkts=%d rxq_cnt=%d", nb_pkts, rxq_cnt); 1067 while (nb_pkts) { 1068 rte_pktmbuf_free_seg(rx_pkts[--nb_pkts]); 1069 released_pkts++; 1070 } 1071 } 1072 1073 1074 refill_cnt += nicvf_dev_rbdr_refill(dev, 1075 nicvf_netdev_qidx(rxq->nic, rxq->queue_id)); 1076 1077 PMD_DRV_LOG(INFO, "free_cnt=%d refill_cnt=%d", 1078 released_pkts, refill_cnt); 1079 } 1080 1081 static void 1082 nicvf_rx_queue_reset(struct nicvf_rxq *rxq) 1083 { 1084 rxq->head = 0; 1085 rxq->available_space = 0; 1086 rxq->recv_buffers = 0; 1087 } 1088 1089 static inline int 1090 nicvf_vf_start_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic, 1091 uint16_t qidx) 1092 { 1093 struct nicvf_rxq *rxq; 1094 int ret; 1095 1096 assert(qidx < MAX_RCV_QUEUES_PER_QS); 1097 1098 if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] == 1099 RTE_ETH_QUEUE_STATE_STARTED) 1100 return 0; 1101 1102 /* Update rbdr pointer to all rxq */ 1103 rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)]; 1104 rxq->shared_rbdr = nic->rbdr; 1105 1106 ret = nicvf_qset_rq_config(nic, qidx, rxq); 1107 if (ret) { 1108 PMD_INIT_LOG(ERR, "Failed to configure rq VF%d %d %d", 1109 nic->vf_id, qidx, ret); 1110 goto config_rq_error; 1111 } 1112 ret = nicvf_qset_cq_config(nic, qidx, rxq); 1113 if (ret) { 1114 PMD_INIT_LOG(ERR, "Failed to configure cq VF%d %d %d", 1115 nic->vf_id, qidx, ret); 1116 goto config_cq_error; 1117 } 1118 1119 dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] = 1120 RTE_ETH_QUEUE_STATE_STARTED; 1121 return 0; 1122 1123 config_cq_error: 1124 nicvf_qset_cq_reclaim(nic, qidx); 1125 config_rq_error: 1126 nicvf_qset_rq_reclaim(nic, qidx); 1127 return ret; 1128 } 1129 1130 static inline int 1131 nicvf_vf_stop_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic, 1132 uint16_t qidx) 1133 { 1134 struct nicvf_rxq *rxq; 1135 int ret, other_error; 1136 1137 if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] == 1138 RTE_ETH_QUEUE_STATE_STOPPED) 1139 return 0; 1140 1141 ret = nicvf_qset_rq_reclaim(nic, qidx); 1142 if (ret) 1143 PMD_INIT_LOG(ERR, "Failed to reclaim rq VF%d %d %d", 1144 nic->vf_id, qidx, ret); 1145 1146 other_error = ret; 1147 rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)]; 1148 nicvf_rx_queue_release_mbufs(dev, rxq); 1149 nicvf_rx_queue_reset(rxq); 1150 1151 ret = nicvf_qset_cq_reclaim(nic, qidx); 1152 if (ret) 1153 PMD_INIT_LOG(ERR, "Failed to reclaim cq VF%d %d %d", 1154 nic->vf_id, qidx, ret); 1155 1156 other_error |= ret; 1157 dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] = 1158 RTE_ETH_QUEUE_STATE_STOPPED; 1159 return other_error; 1160 } 1161 1162 static void 1163 nicvf_dev_rx_queue_release(void *rx_queue) 1164 { 1165 PMD_INIT_FUNC_TRACE(); 1166 1167 rte_free(rx_queue); 1168 } 1169 1170 static int 1171 nicvf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx) 1172 { 1173 struct nicvf *nic = nicvf_pmd_priv(dev); 1174 int ret; 1175 1176 if (qidx >= MAX_RCV_QUEUES_PER_QS) 1177 nic = nic->snicvf[(qidx / MAX_RCV_QUEUES_PER_QS - 1)]; 1178 1179 qidx = qidx % MAX_RCV_QUEUES_PER_QS; 1180 1181 ret = nicvf_vf_start_rx_queue(dev, nic, qidx); 1182 if (ret) 1183 return ret; 1184 1185 ret = nicvf_configure_cpi(dev); 1186 if (ret) 1187 return ret; 1188 1189 return nicvf_configure_rss_reta(dev); 1190 } 1191 1192 static int 1193 nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx) 1194 { 1195 int ret; 1196 struct nicvf *nic = nicvf_pmd_priv(dev); 1197 1198 if (qidx >= MAX_SND_QUEUES_PER_QS) 1199 nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)]; 1200 1201 qidx = qidx % MAX_RCV_QUEUES_PER_QS; 1202 1203 ret = nicvf_vf_stop_rx_queue(dev, nic, qidx); 1204 ret |= nicvf_configure_cpi(dev); 1205 ret |= nicvf_configure_rss_reta(dev); 1206 return ret; 1207 } 1208 1209 static int 1210 nicvf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx) 1211 { 1212 struct nicvf *nic = nicvf_pmd_priv(dev); 1213 1214 if (qidx >= MAX_SND_QUEUES_PER_QS) 1215 nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)]; 1216 1217 qidx = qidx % MAX_SND_QUEUES_PER_QS; 1218 1219 return nicvf_vf_start_tx_queue(dev, nic, qidx); 1220 } 1221 1222 static int 1223 nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx) 1224 { 1225 struct nicvf *nic = nicvf_pmd_priv(dev); 1226 1227 if (qidx >= MAX_SND_QUEUES_PER_QS) 1228 nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)]; 1229 1230 qidx = qidx % MAX_SND_QUEUES_PER_QS; 1231 1232 return nicvf_vf_stop_tx_queue(dev, nic, qidx); 1233 } 1234 1235 1236 static int 1237 nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, 1238 uint16_t nb_desc, unsigned int socket_id, 1239 const struct rte_eth_rxconf *rx_conf, 1240 struct rte_mempool *mp) 1241 { 1242 uint16_t rx_free_thresh; 1243 struct nicvf_rxq *rxq; 1244 struct nicvf *nic = nicvf_pmd_priv(dev); 1245 1246 PMD_INIT_FUNC_TRACE(); 1247 1248 if (qidx >= MAX_RCV_QUEUES_PER_QS) 1249 nic = nic->snicvf[qidx / MAX_RCV_QUEUES_PER_QS - 1]; 1250 1251 qidx = qidx % MAX_RCV_QUEUES_PER_QS; 1252 1253 /* Socket id check */ 1254 if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node) 1255 PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d", 1256 socket_id, nic->node); 1257 1258 /* Mempool memory must be contiguous, so must be one memory segment*/ 1259 if (mp->nb_mem_chunks != 1) { 1260 PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages"); 1261 return -EINVAL; 1262 } 1263 1264 /* Mempool memory must be physically contiguous */ 1265 if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG) { 1266 PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous"); 1267 return -EINVAL; 1268 } 1269 1270 /* Rx deferred start is not supported */ 1271 if (rx_conf->rx_deferred_start) { 1272 PMD_INIT_LOG(ERR, "Rx deferred start not supported"); 1273 return -EINVAL; 1274 } 1275 1276 /* Roundup nb_desc to available qsize and validate max number of desc */ 1277 nb_desc = nicvf_qsize_cq_roundup(nb_desc); 1278 if (nb_desc == 0) { 1279 PMD_INIT_LOG(ERR, "Value nb_desc beyond available hw cq qsize"); 1280 return -EINVAL; 1281 } 1282 1283 /* Check rx_free_thresh upper bound */ 1284 rx_free_thresh = (uint16_t)((rx_conf->rx_free_thresh) ? 1285 rx_conf->rx_free_thresh : 1286 NICVF_DEFAULT_RX_FREE_THRESH); 1287 if (rx_free_thresh > NICVF_MAX_RX_FREE_THRESH || 1288 rx_free_thresh >= nb_desc * .75) { 1289 PMD_INIT_LOG(ERR, "rx_free_thresh greater than expected %d", 1290 rx_free_thresh); 1291 return -EINVAL; 1292 } 1293 1294 /* Free memory prior to re-allocation if needed */ 1295 if (dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) { 1296 PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d", 1297 nicvf_netdev_qidx(nic, qidx)); 1298 nicvf_dev_rx_queue_release( 1299 dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)]); 1300 dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL; 1301 } 1302 1303 /* Allocate rxq memory */ 1304 rxq = rte_zmalloc_socket("ethdev rx queue", sizeof(struct nicvf_rxq), 1305 RTE_CACHE_LINE_SIZE, nic->node); 1306 if (rxq == NULL) { 1307 PMD_INIT_LOG(ERR, "Failed to allocate rxq=%d", 1308 nicvf_netdev_qidx(nic, qidx)); 1309 return -ENOMEM; 1310 } 1311 1312 rxq->nic = nic; 1313 rxq->pool = mp; 1314 rxq->queue_id = qidx; 1315 rxq->port_id = dev->data->port_id; 1316 rxq->rx_free_thresh = rx_free_thresh; 1317 rxq->rx_drop_en = rx_conf->rx_drop_en; 1318 rxq->cq_status = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_STATUS; 1319 rxq->cq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_DOOR; 1320 rxq->precharge_cnt = 0; 1321 1322 if (nicvf_hw_cap(nic) & NICVF_CAP_CQE_RX2) 1323 rxq->rbptr_offset = NICVF_CQE_RX2_RBPTR_WORD; 1324 else 1325 rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD; 1326 1327 1328 /* Alloc completion queue */ 1329 if (nicvf_qset_cq_alloc(dev, nic, rxq, rxq->queue_id, nb_desc)) { 1330 PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id); 1331 nicvf_dev_rx_queue_release(rxq); 1332 return -ENOMEM; 1333 } 1334 1335 nicvf_rx_queue_reset(rxq); 1336 1337 PMD_RX_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d) phy=%" PRIx64, 1338 nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc, 1339 rte_mempool_avail_count(mp), rxq->phys); 1340 1341 dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq; 1342 dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] = 1343 RTE_ETH_QUEUE_STATE_STOPPED; 1344 return 0; 1345 } 1346 1347 static void 1348 nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 1349 { 1350 struct nicvf *nic = nicvf_pmd_priv(dev); 1351 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device); 1352 1353 PMD_INIT_FUNC_TRACE(); 1354 1355 dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device); 1356 1357 dev_info->min_rx_bufsize = ETHER_MIN_MTU; 1358 dev_info->max_rx_pktlen = NIC_HW_MAX_FRS; 1359 dev_info->max_rx_queues = 1360 (uint16_t)MAX_RCV_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1); 1361 dev_info->max_tx_queues = 1362 (uint16_t)MAX_SND_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1); 1363 dev_info->max_mac_addrs = 1; 1364 dev_info->max_vfs = pci_dev->max_vfs; 1365 1366 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP; 1367 dev_info->tx_offload_capa = 1368 DEV_TX_OFFLOAD_IPV4_CKSUM | 1369 DEV_TX_OFFLOAD_UDP_CKSUM | 1370 DEV_TX_OFFLOAD_TCP_CKSUM | 1371 DEV_TX_OFFLOAD_TCP_TSO | 1372 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; 1373 1374 dev_info->reta_size = nic->rss_info.rss_size; 1375 dev_info->hash_key_size = RSS_HASH_KEY_BYTE_SIZE; 1376 dev_info->flow_type_rss_offloads = NICVF_RSS_OFFLOAD_PASS1; 1377 if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) 1378 dev_info->flow_type_rss_offloads |= NICVF_RSS_OFFLOAD_TUNNEL; 1379 1380 dev_info->default_rxconf = (struct rte_eth_rxconf) { 1381 .rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH, 1382 .rx_drop_en = 0, 1383 }; 1384 1385 dev_info->default_txconf = (struct rte_eth_txconf) { 1386 .tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH, 1387 .txq_flags = 1388 ETH_TXQ_FLAGS_NOMULTSEGS | 1389 ETH_TXQ_FLAGS_NOREFCOUNT | 1390 ETH_TXQ_FLAGS_NOMULTMEMP | 1391 ETH_TXQ_FLAGS_NOVLANOFFL | 1392 ETH_TXQ_FLAGS_NOXSUMSCTP, 1393 }; 1394 } 1395 1396 static nicvf_phys_addr_t 1397 rbdr_rte_mempool_get(void *dev, void *opaque) 1398 { 1399 uint16_t qidx; 1400 uintptr_t mbuf; 1401 struct nicvf_rxq *rxq; 1402 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)dev; 1403 struct nicvf *nic = (struct nicvf *)opaque; 1404 uint16_t rx_start, rx_end; 1405 1406 /* Get queue ranges for this VF */ 1407 nicvf_rx_range(eth_dev, nic, &rx_start, &rx_end); 1408 1409 for (qidx = rx_start; qidx <= rx_end; qidx++) { 1410 rxq = eth_dev->data->rx_queues[qidx]; 1411 /* Maintain equal buffer count across all pools */ 1412 if (rxq->precharge_cnt >= rxq->qlen_mask) 1413 continue; 1414 rxq->precharge_cnt++; 1415 mbuf = (uintptr_t)rte_pktmbuf_alloc(rxq->pool); 1416 if (mbuf) 1417 return nicvf_mbuff_virt2phy(mbuf, rxq->mbuf_phys_off); 1418 } 1419 return 0; 1420 } 1421 1422 static int 1423 nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz) 1424 { 1425 int ret; 1426 uint16_t qidx, data_off; 1427 uint32_t total_rxq_desc, nb_rbdr_desc, exp_buffs; 1428 uint64_t mbuf_phys_off = 0; 1429 struct nicvf_rxq *rxq; 1430 struct rte_mbuf *mbuf; 1431 uint16_t rx_start, rx_end; 1432 uint16_t tx_start, tx_end; 1433 1434 PMD_INIT_FUNC_TRACE(); 1435 1436 /* Userspace process exited without proper shutdown in last run */ 1437 if (nicvf_qset_rbdr_active(nic, 0)) 1438 nicvf_vf_stop(dev, nic, false); 1439 1440 /* Get queue ranges for this VF */ 1441 nicvf_rx_range(dev, nic, &rx_start, &rx_end); 1442 1443 /* 1444 * Thunderx nicvf PMD can support more than one pool per port only when 1445 * 1) Data payload size is same across all the pools in given port 1446 * AND 1447 * 2) All mbuffs in the pools are from the same hugepage 1448 * AND 1449 * 3) Mbuff metadata size is same across all the pools in given port 1450 * 1451 * This is to support existing application that uses multiple pool/port. 1452 * But, the purpose of using multipool for QoS will not be addressed. 1453 * 1454 */ 1455 1456 /* Validate mempool attributes */ 1457 for (qidx = rx_start; qidx <= rx_end; qidx++) { 1458 rxq = dev->data->rx_queues[qidx]; 1459 rxq->mbuf_phys_off = nicvf_mempool_phy_offset(rxq->pool); 1460 mbuf = rte_pktmbuf_alloc(rxq->pool); 1461 if (mbuf == NULL) { 1462 PMD_INIT_LOG(ERR, "Failed allocate mbuf VF%d qid=%d " 1463 "pool=%s", 1464 nic->vf_id, qidx, rxq->pool->name); 1465 return -ENOMEM; 1466 } 1467 data_off = nicvf_mbuff_meta_length(mbuf); 1468 data_off += RTE_PKTMBUF_HEADROOM; 1469 rte_pktmbuf_free(mbuf); 1470 1471 if (data_off % RTE_CACHE_LINE_SIZE) { 1472 PMD_INIT_LOG(ERR, "%s: unaligned data_off=%d delta=%d", 1473 rxq->pool->name, data_off, 1474 data_off % RTE_CACHE_LINE_SIZE); 1475 return -EINVAL; 1476 } 1477 rxq->mbuf_phys_off -= data_off; 1478 1479 if (mbuf_phys_off == 0) 1480 mbuf_phys_off = rxq->mbuf_phys_off; 1481 if (mbuf_phys_off != rxq->mbuf_phys_off) { 1482 PMD_INIT_LOG(ERR, "pool params not same,%s VF%d %" 1483 PRIx64, rxq->pool->name, nic->vf_id, 1484 mbuf_phys_off); 1485 return -EINVAL; 1486 } 1487 } 1488 1489 /* Check the level of buffers in the pool */ 1490 total_rxq_desc = 0; 1491 for (qidx = rx_start; qidx <= rx_end; qidx++) { 1492 rxq = dev->data->rx_queues[qidx]; 1493 /* Count total numbers of rxq descs */ 1494 total_rxq_desc += rxq->qlen_mask + 1; 1495 exp_buffs = RTE_MEMPOOL_CACHE_MAX_SIZE + rxq->rx_free_thresh; 1496 exp_buffs *= dev->data->nb_rx_queues; 1497 if (rte_mempool_avail_count(rxq->pool) < exp_buffs) { 1498 PMD_INIT_LOG(ERR, "Buff shortage in pool=%s (%d/%d)", 1499 rxq->pool->name, 1500 rte_mempool_avail_count(rxq->pool), 1501 exp_buffs); 1502 return -ENOENT; 1503 } 1504 } 1505 1506 /* Check RBDR desc overflow */ 1507 ret = nicvf_qsize_rbdr_roundup(total_rxq_desc); 1508 if (ret == 0) { 1509 PMD_INIT_LOG(ERR, "Reached RBDR desc limit, reduce nr desc " 1510 "VF%d", nic->vf_id); 1511 return -ENOMEM; 1512 } 1513 1514 /* Enable qset */ 1515 ret = nicvf_qset_config(nic); 1516 if (ret) { 1517 PMD_INIT_LOG(ERR, "Failed to enable qset %d VF%d", ret, 1518 nic->vf_id); 1519 return ret; 1520 } 1521 1522 /* Allocate RBDR and RBDR ring desc */ 1523 nb_rbdr_desc = nicvf_qsize_rbdr_roundup(total_rxq_desc); 1524 ret = nicvf_qset_rbdr_alloc(dev, nic, nb_rbdr_desc, rbdrsz); 1525 if (ret) { 1526 PMD_INIT_LOG(ERR, "Failed to allocate memory for rbdr alloc " 1527 "VF%d", nic->vf_id); 1528 goto qset_reclaim; 1529 } 1530 1531 /* Enable and configure RBDR registers */ 1532 ret = nicvf_qset_rbdr_config(nic, 0); 1533 if (ret) { 1534 PMD_INIT_LOG(ERR, "Failed to configure rbdr %d VF%d", ret, 1535 nic->vf_id); 1536 goto qset_rbdr_free; 1537 } 1538 1539 /* Fill rte_mempool buffers in RBDR pool and precharge it */ 1540 ret = nicvf_qset_rbdr_precharge(dev, nic, 0, rbdr_rte_mempool_get, 1541 total_rxq_desc); 1542 if (ret) { 1543 PMD_INIT_LOG(ERR, "Failed to fill rbdr %d VF%d", ret, 1544 nic->vf_id); 1545 goto qset_rbdr_reclaim; 1546 } 1547 1548 PMD_DRV_LOG(INFO, "Filled %d out of %d entries in RBDR VF%d", 1549 nic->rbdr->tail, nb_rbdr_desc, nic->vf_id); 1550 1551 /* Configure VLAN Strip */ 1552 nicvf_vlan_hw_strip(nic, dev->data->dev_conf.rxmode.hw_vlan_strip); 1553 1554 /* Based on the packet type(IPv4 or IPv6), the nicvf HW aligns L3 data 1555 * to the 64bit memory address. 1556 * The alignment creates a hole in mbuf(between the end of headroom and 1557 * packet data start). The new revision of the HW provides an option to 1558 * disable the L3 alignment feature and make mbuf layout looks 1559 * more like other NICs. For better application compatibility, disabling 1560 * l3 alignment feature on the hardware revisions it supports 1561 */ 1562 nicvf_apad_config(nic, false); 1563 1564 /* Get queue ranges for this VF */ 1565 nicvf_tx_range(dev, nic, &tx_start, &tx_end); 1566 1567 /* Configure TX queues */ 1568 for (qidx = tx_start; qidx <= tx_end; qidx++) { 1569 ret = nicvf_vf_start_tx_queue(dev, nic, 1570 qidx % MAX_SND_QUEUES_PER_QS); 1571 if (ret) 1572 goto start_txq_error; 1573 } 1574 1575 /* Configure RX queues */ 1576 for (qidx = rx_start; qidx <= rx_end; qidx++) { 1577 ret = nicvf_vf_start_rx_queue(dev, nic, 1578 qidx % MAX_RCV_QUEUES_PER_QS); 1579 if (ret) 1580 goto start_rxq_error; 1581 } 1582 1583 if (!nic->sqs_mode) { 1584 /* Configure CPI algorithm */ 1585 ret = nicvf_configure_cpi(dev); 1586 if (ret) 1587 goto start_txq_error; 1588 1589 ret = nicvf_mbox_get_rss_size(nic); 1590 if (ret) { 1591 PMD_INIT_LOG(ERR, "Failed to get rss table size"); 1592 goto qset_rss_error; 1593 } 1594 1595 /* Configure RSS */ 1596 ret = nicvf_configure_rss(dev); 1597 if (ret) 1598 goto qset_rss_error; 1599 } 1600 1601 /* Done; Let PF make the BGX's RX and TX switches to ON position */ 1602 nicvf_mbox_cfg_done(nic); 1603 return 0; 1604 1605 qset_rss_error: 1606 nicvf_rss_term(nic); 1607 start_rxq_error: 1608 for (qidx = rx_start; qidx <= rx_end; qidx++) 1609 nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS); 1610 start_txq_error: 1611 for (qidx = tx_start; qidx <= tx_end; qidx++) 1612 nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS); 1613 qset_rbdr_reclaim: 1614 nicvf_qset_rbdr_reclaim(nic, 0); 1615 nicvf_rbdr_release_mbufs(dev, nic); 1616 qset_rbdr_free: 1617 if (nic->rbdr) { 1618 rte_free(nic->rbdr); 1619 nic->rbdr = NULL; 1620 } 1621 qset_reclaim: 1622 nicvf_qset_reclaim(nic); 1623 return ret; 1624 } 1625 1626 static int 1627 nicvf_dev_start(struct rte_eth_dev *dev) 1628 { 1629 uint16_t qidx; 1630 int ret; 1631 size_t i; 1632 struct nicvf *nic = nicvf_pmd_priv(dev); 1633 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode; 1634 uint16_t mtu; 1635 uint32_t buffsz = 0, rbdrsz = 0; 1636 struct rte_pktmbuf_pool_private *mbp_priv; 1637 struct nicvf_rxq *rxq; 1638 1639 PMD_INIT_FUNC_TRACE(); 1640 1641 /* This function must be called for a primary device */ 1642 assert_primary(nic); 1643 1644 /* Validate RBDR buff size */ 1645 for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) { 1646 rxq = dev->data->rx_queues[qidx]; 1647 mbp_priv = rte_mempool_get_priv(rxq->pool); 1648 buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM; 1649 if (buffsz % 128) { 1650 PMD_INIT_LOG(ERR, "rxbuf size must be multiply of 128"); 1651 return -EINVAL; 1652 } 1653 if (rbdrsz == 0) 1654 rbdrsz = buffsz; 1655 if (rbdrsz != buffsz) { 1656 PMD_INIT_LOG(ERR, "buffsz not same, qidx=%d (%d/%d)", 1657 qidx, rbdrsz, buffsz); 1658 return -EINVAL; 1659 } 1660 } 1661 1662 /* Configure loopback */ 1663 ret = nicvf_loopback_config(nic, dev->data->dev_conf.lpbk_mode); 1664 if (ret) { 1665 PMD_INIT_LOG(ERR, "Failed to configure loopback %d", ret); 1666 return ret; 1667 } 1668 1669 /* Reset all statistics counters attached to this port */ 1670 ret = nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, 0xFFFF, 0xFFFF); 1671 if (ret) { 1672 PMD_INIT_LOG(ERR, "Failed to reset stat counters %d", ret); 1673 return ret; 1674 } 1675 1676 /* Setup scatter mode if needed by jumbo */ 1677 if (dev->data->dev_conf.rxmode.max_rx_pkt_len + 1678 2 * VLAN_TAG_SIZE > buffsz) 1679 dev->data->scattered_rx = 1; 1680 if (rx_conf->enable_scatter) 1681 dev->data->scattered_rx = 1; 1682 1683 /* Setup MTU based on max_rx_pkt_len or default */ 1684 mtu = dev->data->dev_conf.rxmode.jumbo_frame ? 1685 dev->data->dev_conf.rxmode.max_rx_pkt_len 1686 - ETHER_HDR_LEN - ETHER_CRC_LEN 1687 : ETHER_MTU; 1688 1689 if (nicvf_dev_set_mtu(dev, mtu)) { 1690 PMD_INIT_LOG(ERR, "Failed to set default mtu size"); 1691 return -EBUSY; 1692 } 1693 1694 ret = nicvf_vf_start(dev, nic, rbdrsz); 1695 if (ret != 0) 1696 return ret; 1697 1698 for (i = 0; i < nic->sqs_count; i++) { 1699 assert(nic->snicvf[i]); 1700 1701 ret = nicvf_vf_start(dev, nic->snicvf[i], rbdrsz); 1702 if (ret != 0) 1703 return ret; 1704 } 1705 1706 /* Configure callbacks based on scatter mode */ 1707 nicvf_set_tx_function(dev); 1708 nicvf_set_rx_function(dev); 1709 1710 return 0; 1711 } 1712 1713 static void 1714 nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup) 1715 { 1716 size_t i; 1717 int ret; 1718 struct nicvf *nic = nicvf_pmd_priv(dev); 1719 1720 PMD_INIT_FUNC_TRACE(); 1721 1722 /* Teardown secondary vf first */ 1723 for (i = 0; i < nic->sqs_count; i++) { 1724 if (!nic->snicvf[i]) 1725 continue; 1726 1727 nicvf_vf_stop(dev, nic->snicvf[i], cleanup); 1728 } 1729 1730 /* Stop the primary VF now */ 1731 nicvf_vf_stop(dev, nic, cleanup); 1732 1733 /* Disable loopback */ 1734 ret = nicvf_loopback_config(nic, 0); 1735 if (ret) 1736 PMD_INIT_LOG(ERR, "Failed to disable loopback %d", ret); 1737 1738 /* Reclaim CPI configuration */ 1739 ret = nicvf_mbox_config_cpi(nic, 0); 1740 if (ret) 1741 PMD_INIT_LOG(ERR, "Failed to reclaim CPI config %d", ret); 1742 } 1743 1744 static void 1745 nicvf_dev_stop(struct rte_eth_dev *dev) 1746 { 1747 PMD_INIT_FUNC_TRACE(); 1748 1749 nicvf_dev_stop_cleanup(dev, false); 1750 } 1751 1752 static void 1753 nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic, bool cleanup) 1754 { 1755 int ret; 1756 uint16_t qidx; 1757 uint16_t tx_start, tx_end; 1758 uint16_t rx_start, rx_end; 1759 1760 PMD_INIT_FUNC_TRACE(); 1761 1762 if (cleanup) { 1763 /* Let PF make the BGX's RX and TX switches to OFF position */ 1764 nicvf_mbox_shutdown(nic); 1765 } 1766 1767 /* Disable VLAN Strip */ 1768 nicvf_vlan_hw_strip(nic, 0); 1769 1770 /* Get queue ranges for this VF */ 1771 nicvf_tx_range(dev, nic, &tx_start, &tx_end); 1772 1773 for (qidx = tx_start; qidx <= tx_end; qidx++) 1774 nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS); 1775 1776 /* Get queue ranges for this VF */ 1777 nicvf_rx_range(dev, nic, &rx_start, &rx_end); 1778 1779 /* Reclaim rq */ 1780 for (qidx = rx_start; qidx <= rx_end; qidx++) 1781 nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS); 1782 1783 /* Reclaim RBDR */ 1784 ret = nicvf_qset_rbdr_reclaim(nic, 0); 1785 if (ret) 1786 PMD_INIT_LOG(ERR, "Failed to reclaim RBDR %d", ret); 1787 1788 /* Move all charged buffers in RBDR back to pool */ 1789 if (nic->rbdr != NULL) 1790 nicvf_rbdr_release_mbufs(dev, nic); 1791 1792 /* Disable qset */ 1793 ret = nicvf_qset_reclaim(nic); 1794 if (ret) 1795 PMD_INIT_LOG(ERR, "Failed to disable qset %d", ret); 1796 1797 /* Disable all interrupts */ 1798 nicvf_disable_all_interrupts(nic); 1799 1800 /* Free RBDR SW structure */ 1801 if (nic->rbdr) { 1802 rte_free(nic->rbdr); 1803 nic->rbdr = NULL; 1804 } 1805 } 1806 1807 static void 1808 nicvf_dev_close(struct rte_eth_dev *dev) 1809 { 1810 size_t i; 1811 struct nicvf *nic = nicvf_pmd_priv(dev); 1812 1813 PMD_INIT_FUNC_TRACE(); 1814 1815 nicvf_dev_stop_cleanup(dev, true); 1816 nicvf_periodic_alarm_stop(nicvf_interrupt, dev); 1817 1818 for (i = 0; i < nic->sqs_count; i++) { 1819 if (!nic->snicvf[i]) 1820 continue; 1821 1822 nicvf_periodic_alarm_stop(nicvf_vf_interrupt, nic->snicvf[i]); 1823 } 1824 } 1825 1826 static int 1827 nicvf_request_sqs(struct nicvf *nic) 1828 { 1829 size_t i; 1830 1831 assert_primary(nic); 1832 assert(nic->sqs_count > 0); 1833 assert(nic->sqs_count <= MAX_SQS_PER_VF); 1834 1835 /* Set no of Rx/Tx queues in each of the SQsets */ 1836 for (i = 0; i < nic->sqs_count; i++) { 1837 if (nicvf_svf_empty()) 1838 rte_panic("Cannot assign sufficient number of " 1839 "secondary queues to primary VF%" PRIu8 "\n", 1840 nic->vf_id); 1841 1842 nic->snicvf[i] = nicvf_svf_pop(); 1843 nic->snicvf[i]->sqs_id = i; 1844 } 1845 1846 return nicvf_mbox_request_sqs(nic); 1847 } 1848 1849 static int 1850 nicvf_dev_configure(struct rte_eth_dev *dev) 1851 { 1852 struct rte_eth_dev_data *data = dev->data; 1853 struct rte_eth_conf *conf = &data->dev_conf; 1854 struct rte_eth_rxmode *rxmode = &conf->rxmode; 1855 struct rte_eth_txmode *txmode = &conf->txmode; 1856 struct nicvf *nic = nicvf_pmd_priv(dev); 1857 uint8_t cqcount; 1858 1859 PMD_INIT_FUNC_TRACE(); 1860 1861 if (!rte_eal_has_hugepages()) { 1862 PMD_INIT_LOG(INFO, "Huge page is not configured"); 1863 return -EINVAL; 1864 } 1865 1866 if (txmode->mq_mode) { 1867 PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported"); 1868 return -EINVAL; 1869 } 1870 1871 if (rxmode->mq_mode != ETH_MQ_RX_NONE && 1872 rxmode->mq_mode != ETH_MQ_RX_RSS) { 1873 PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode); 1874 return -EINVAL; 1875 } 1876 1877 if (!rxmode->hw_strip_crc) { 1878 PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip"); 1879 rxmode->hw_strip_crc = 1; 1880 } 1881 1882 if (rxmode->hw_ip_checksum) { 1883 PMD_INIT_LOG(NOTICE, "Rxcksum not supported"); 1884 rxmode->hw_ip_checksum = 0; 1885 } 1886 1887 if (rxmode->split_hdr_size) { 1888 PMD_INIT_LOG(INFO, "Rxmode does not support split header"); 1889 return -EINVAL; 1890 } 1891 1892 if (rxmode->hw_vlan_filter) { 1893 PMD_INIT_LOG(INFO, "VLAN filter not supported"); 1894 return -EINVAL; 1895 } 1896 1897 if (rxmode->hw_vlan_extend) { 1898 PMD_INIT_LOG(INFO, "VLAN extended not supported"); 1899 return -EINVAL; 1900 } 1901 1902 if (rxmode->enable_lro) { 1903 PMD_INIT_LOG(INFO, "LRO not supported"); 1904 return -EINVAL; 1905 } 1906 1907 if (conf->link_speeds & ETH_LINK_SPEED_FIXED) { 1908 PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported"); 1909 return -EINVAL; 1910 } 1911 1912 if (conf->dcb_capability_en) { 1913 PMD_INIT_LOG(INFO, "DCB enable not supported"); 1914 return -EINVAL; 1915 } 1916 1917 if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) { 1918 PMD_INIT_LOG(INFO, "Flow director not supported"); 1919 return -EINVAL; 1920 } 1921 1922 assert_primary(nic); 1923 NICVF_STATIC_ASSERT(MAX_RCV_QUEUES_PER_QS == MAX_SND_QUEUES_PER_QS); 1924 cqcount = RTE_MAX(data->nb_tx_queues, data->nb_rx_queues); 1925 if (cqcount > MAX_RCV_QUEUES_PER_QS) { 1926 nic->sqs_count = RTE_ALIGN_CEIL(cqcount, MAX_RCV_QUEUES_PER_QS); 1927 nic->sqs_count = (nic->sqs_count / MAX_RCV_QUEUES_PER_QS) - 1; 1928 } else { 1929 nic->sqs_count = 0; 1930 } 1931 1932 assert(nic->sqs_count <= MAX_SQS_PER_VF); 1933 1934 if (nic->sqs_count > 0) { 1935 if (nicvf_request_sqs(nic)) { 1936 rte_panic("Cannot assign sufficient number of " 1937 "secondary queues to PORT%d VF%" PRIu8 "\n", 1938 dev->data->port_id, nic->vf_id); 1939 } 1940 } 1941 1942 PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64, 1943 dev->data->port_id, nicvf_hw_cap(nic)); 1944 1945 return 0; 1946 } 1947 1948 /* Initialize and register driver with DPDK Application */ 1949 static const struct eth_dev_ops nicvf_eth_dev_ops = { 1950 .dev_configure = nicvf_dev_configure, 1951 .dev_start = nicvf_dev_start, 1952 .dev_stop = nicvf_dev_stop, 1953 .link_update = nicvf_dev_link_update, 1954 .dev_close = nicvf_dev_close, 1955 .stats_get = nicvf_dev_stats_get, 1956 .stats_reset = nicvf_dev_stats_reset, 1957 .promiscuous_enable = nicvf_dev_promisc_enable, 1958 .dev_infos_get = nicvf_dev_info_get, 1959 .dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get, 1960 .mtu_set = nicvf_dev_set_mtu, 1961 .reta_update = nicvf_dev_reta_update, 1962 .reta_query = nicvf_dev_reta_query, 1963 .rss_hash_update = nicvf_dev_rss_hash_update, 1964 .rss_hash_conf_get = nicvf_dev_rss_hash_conf_get, 1965 .rx_queue_start = nicvf_dev_rx_queue_start, 1966 .rx_queue_stop = nicvf_dev_rx_queue_stop, 1967 .tx_queue_start = nicvf_dev_tx_queue_start, 1968 .tx_queue_stop = nicvf_dev_tx_queue_stop, 1969 .rx_queue_setup = nicvf_dev_rx_queue_setup, 1970 .rx_queue_release = nicvf_dev_rx_queue_release, 1971 .rx_queue_count = nicvf_dev_rx_queue_count, 1972 .tx_queue_setup = nicvf_dev_tx_queue_setup, 1973 .tx_queue_release = nicvf_dev_tx_queue_release, 1974 .get_reg = nicvf_dev_get_regs, 1975 }; 1976 1977 static int 1978 nicvf_eth_dev_init(struct rte_eth_dev *eth_dev) 1979 { 1980 int ret; 1981 struct rte_pci_device *pci_dev; 1982 struct nicvf *nic = nicvf_pmd_priv(eth_dev); 1983 1984 PMD_INIT_FUNC_TRACE(); 1985 1986 eth_dev->dev_ops = &nicvf_eth_dev_ops; 1987 1988 /* For secondary processes, the primary has done all the work */ 1989 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 1990 if (nic) { 1991 /* Setup callbacks for secondary process */ 1992 nicvf_set_tx_function(eth_dev); 1993 nicvf_set_rx_function(eth_dev); 1994 return 0; 1995 } else { 1996 /* If nic == NULL than it is secondary function 1997 * so ethdev need to be released by caller */ 1998 return ENOTSUP; 1999 } 2000 } 2001 2002 pci_dev = RTE_DEV_TO_PCI(eth_dev->device); 2003 rte_eth_copy_pci_info(eth_dev, pci_dev); 2004 2005 nic->device_id = pci_dev->id.device_id; 2006 nic->vendor_id = pci_dev->id.vendor_id; 2007 nic->subsystem_device_id = pci_dev->id.subsystem_device_id; 2008 nic->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id; 2009 2010 PMD_INIT_LOG(DEBUG, "nicvf: device (%x:%x) %u:%u:%u:%u", 2011 pci_dev->id.vendor_id, pci_dev->id.device_id, 2012 pci_dev->addr.domain, pci_dev->addr.bus, 2013 pci_dev->addr.devid, pci_dev->addr.function); 2014 2015 nic->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr; 2016 if (!nic->reg_base) { 2017 PMD_INIT_LOG(ERR, "Failed to map BAR0"); 2018 ret = -ENODEV; 2019 goto fail; 2020 } 2021 2022 nicvf_disable_all_interrupts(nic); 2023 2024 ret = nicvf_periodic_alarm_start(nicvf_interrupt, eth_dev); 2025 if (ret) { 2026 PMD_INIT_LOG(ERR, "Failed to start period alarm"); 2027 goto fail; 2028 } 2029 2030 ret = nicvf_mbox_check_pf_ready(nic); 2031 if (ret) { 2032 PMD_INIT_LOG(ERR, "Failed to get ready message from PF"); 2033 goto alarm_fail; 2034 } else { 2035 PMD_INIT_LOG(INFO, 2036 "node=%d vf=%d mode=%s sqs=%s loopback_supported=%s", 2037 nic->node, nic->vf_id, 2038 nic->tns_mode == NIC_TNS_MODE ? "tns" : "tns-bypass", 2039 nic->sqs_mode ? "true" : "false", 2040 nic->loopback_supported ? "true" : "false" 2041 ); 2042 } 2043 2044 ret = nicvf_base_init(nic); 2045 if (ret) { 2046 PMD_INIT_LOG(ERR, "Failed to execute nicvf_base_init"); 2047 goto malloc_fail; 2048 } 2049 2050 if (nic->sqs_mode) { 2051 /* Push nic to stack of secondary vfs */ 2052 nicvf_svf_push(nic); 2053 2054 /* Steal nic pointer from the device for further reuse */ 2055 eth_dev->data->dev_private = NULL; 2056 2057 nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev); 2058 ret = nicvf_periodic_alarm_start(nicvf_vf_interrupt, nic); 2059 if (ret) { 2060 PMD_INIT_LOG(ERR, "Failed to start period alarm"); 2061 goto fail; 2062 } 2063 2064 /* Detach port by returning postive error number */ 2065 return ENOTSUP; 2066 } 2067 2068 eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0); 2069 if (eth_dev->data->mac_addrs == NULL) { 2070 PMD_INIT_LOG(ERR, "Failed to allocate memory for mac addr"); 2071 ret = -ENOMEM; 2072 goto alarm_fail; 2073 } 2074 if (is_zero_ether_addr((struct ether_addr *)nic->mac_addr)) 2075 eth_random_addr(&nic->mac_addr[0]); 2076 2077 ether_addr_copy((struct ether_addr *)nic->mac_addr, 2078 ð_dev->data->mac_addrs[0]); 2079 2080 ret = nicvf_mbox_set_mac_addr(nic, nic->mac_addr); 2081 if (ret) { 2082 PMD_INIT_LOG(ERR, "Failed to set mac addr"); 2083 goto malloc_fail; 2084 } 2085 2086 PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x", 2087 eth_dev->data->port_id, nic->vendor_id, nic->device_id, 2088 nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2], 2089 nic->mac_addr[3], nic->mac_addr[4], nic->mac_addr[5]); 2090 2091 return 0; 2092 2093 malloc_fail: 2094 rte_free(eth_dev->data->mac_addrs); 2095 alarm_fail: 2096 nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev); 2097 fail: 2098 return ret; 2099 } 2100 2101 static const struct rte_pci_id pci_id_nicvf_map[] = { 2102 { 2103 .class_id = RTE_CLASS_ANY_ID, 2104 .vendor_id = PCI_VENDOR_ID_CAVIUM, 2105 .device_id = PCI_DEVICE_ID_THUNDERX_CN88XX_PASS1_NICVF, 2106 .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM, 2107 .subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS1_NICVF, 2108 }, 2109 { 2110 .class_id = RTE_CLASS_ANY_ID, 2111 .vendor_id = PCI_VENDOR_ID_CAVIUM, 2112 .device_id = PCI_DEVICE_ID_THUNDERX_NICVF, 2113 .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM, 2114 .subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF, 2115 }, 2116 { 2117 .class_id = RTE_CLASS_ANY_ID, 2118 .vendor_id = PCI_VENDOR_ID_CAVIUM, 2119 .device_id = PCI_DEVICE_ID_THUNDERX_NICVF, 2120 .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM, 2121 .subsystem_device_id = PCI_SUB_DEVICE_ID_CN81XX_NICVF, 2122 }, 2123 { 2124 .class_id = RTE_CLASS_ANY_ID, 2125 .vendor_id = PCI_VENDOR_ID_CAVIUM, 2126 .device_id = PCI_DEVICE_ID_THUNDERX_NICVF, 2127 .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM, 2128 .subsystem_device_id = PCI_SUB_DEVICE_ID_CN83XX_NICVF, 2129 }, 2130 { 2131 .vendor_id = 0, 2132 }, 2133 }; 2134 2135 static int nicvf_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 2136 struct rte_pci_device *pci_dev) 2137 { 2138 return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct nicvf), 2139 nicvf_eth_dev_init); 2140 } 2141 2142 static int nicvf_eth_pci_remove(struct rte_pci_device *pci_dev) 2143 { 2144 return rte_eth_dev_pci_generic_remove(pci_dev, NULL); 2145 } 2146 2147 static struct rte_pci_driver rte_nicvf_pmd = { 2148 .id_table = pci_id_nicvf_map, 2149 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 2150 .probe = nicvf_eth_pci_probe, 2151 .remove = nicvf_eth_pci_remove, 2152 }; 2153 2154 RTE_PMD_REGISTER_PCI(net_thunderx, rte_nicvf_pmd); 2155 RTE_PMD_REGISTER_PCI_TABLE(net_thunderx, pci_id_nicvf_map); 2156 RTE_PMD_REGISTER_KMOD_DEP(net_thunderx, "* igb_uio | uio_pci_generic | vfio"); 2157