/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium networks Ltd. 2016.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <sys/queue.h>

#include <rte_alarm.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_pci.h>
#include <rte_tailq.h>

#include "base/nicvf_plat.h"

#include "nicvf_ethdev.h"
#include "nicvf_rxtx.h"
#include "nicvf_svf.h"
#include "nicvf_logs.h"

static void nicvf_dev_stop(struct rte_eth_dev *dev);
static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup);
static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic,
			  bool cleanup);

static inline int
nicvf_atomic_write_link_status(struct rte_eth_dev *dev,
			       struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
		*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static inline void
nicvf_set_eth_link_status(struct nicvf *nic, struct rte_eth_link *link)
{
	link->link_status = nic->link_up;
	link->link_duplex = ETH_LINK_AUTONEG;
	if (nic->duplex == NICVF_HALF_DUPLEX)
		link->link_duplex = ETH_LINK_HALF_DUPLEX;
	else if (nic->duplex == NICVF_FULL_DUPLEX)
		link->link_duplex = ETH_LINK_FULL_DUPLEX;
	link->link_speed = nic->speed;
	link->link_autoneg = ETH_LINK_SPEED_AUTONEG;
}
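
/*
 * Note: nicvf_atomic_write_link_status() publishes the whole link record
 * with one 64-bit compare-and-set, so a concurrent reader never observes
 * a torn speed/duplex/status combination. Illustrative sketch only,
 * assuming struct rte_eth_link fits in a single 64-bit word (which the
 * casts above already rely on):
 *
 *	struct rte_eth_link snap = {
 *		.link_speed = ETH_SPEED_NUM_10G,
 *		.link_duplex = ETH_LINK_FULL_DUPLEX,
 *		.link_status = ETH_LINK_UP,
 *	};
 *	nicvf_atomic_write_link_status(dev, &snap);
 */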

static void
nicvf_interrupt(void *arg)
{
	struct rte_eth_dev *dev = arg;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) {
		if (dev->data->dev_conf.intr_conf.lsc)
			nicvf_set_eth_link_status(nic, &dev->data->dev_link);
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					      NULL);
	}

	rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
			  nicvf_interrupt, dev);
}

static void
nicvf_vf_interrupt(void *arg)
{
	struct nicvf *nic = arg;

	nicvf_reg_poll_interrupts(nic);

	rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
			  nicvf_vf_interrupt, nic);
}

static int
nicvf_periodic_alarm_start(void (fn)(void *), void *arg)
{
	return rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000, fn, arg);
}

static int
nicvf_periodic_alarm_stop(void (fn)(void *), void *arg)
{
	return rte_eal_alarm_cancel(fn, arg);
}

/*
 * Return 0 means link status changed, -1 means not changed
 */
static int
nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
#define CHECK_INTERVAL 100  /* 100ms */
#define MAX_CHECK_TIME 90   /* 9s (90 * 100ms) in total */
	struct rte_eth_link link;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	int i;

	PMD_INIT_FUNC_TRACE();

	if (wait_to_complete) {
		/* rte_eth_link_get() might need to wait up to 9 seconds */
		for (i = 0; i < MAX_CHECK_TIME; i++) {
			memset(&link, 0, sizeof(link));
			nicvf_set_eth_link_status(nic, &link);
			if (link.link_status)
				break;
			rte_delay_ms(CHECK_INTERVAL);
		}
	} else {
		memset(&link, 0, sizeof(link));
		nicvf_set_eth_link_status(nic, &link);
	}
	return nicvf_atomic_write_link_status(dev, &link);
}
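
/*
 * Usage sketch (illustrative only): an application reaches
 * nicvf_dev_link_update() through the ethdev layer, either blocking for
 * up to the 9 s poll budget above or returning immediately:
 *
 *	struct rte_eth_link link;
 *
 *	rte_eth_link_get(port_id, &link);         // wait_to_complete = 1
 *	rte_eth_link_get_nowait(port_id, &link);  // wait_to_complete = 0
 *	if (link.link_status == ETH_LINK_UP)
 *		printf("port %u up at %u Mbps\n", port_id, link.link_speed);
 */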

static int
nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint32_t buffsz, frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
	size_t i;

	PMD_INIT_FUNC_TRACE();

	if (frame_size > NIC_HW_MAX_FRS)
		return -EINVAL;

	if (frame_size < NIC_HW_MIN_FRS)
		return -EINVAL;

	buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;

	/*
	 * Refuse mtu that requires the support of scattered packets
	 * when this feature has not been enabled before.
	 */
	if (!dev->data->scattered_rx &&
	    (frame_size + 2 * VLAN_TAG_SIZE > buffsz))
		return -EINVAL;

	/* check <seg size> * <max_seg>  >=  max_frame */
	if (dev->data->scattered_rx &&
	    (frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS))
		return -EINVAL;

	if (frame_size > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		dev->data->dev_conf.rxmode.jumbo_frame = 0;

	if (nicvf_mbox_update_hw_max_frs(nic, frame_size))
		return -EINVAL;

	/* Update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)frame_size;
	nic->mtu = mtu;

	for (i = 0; i < nic->sqs_count; i++)
		nic->snicvf[i]->mtu = mtu;

	return 0;
}

static int
nicvf_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	uint64_t *data = regs->data;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (data == NULL) {
		regs->length = nicvf_reg_get_count();
		regs->width = THUNDERX_REG_BYTES;
		return 0;
	}

	/* Support only full register dump */
	if ((regs->length == 0) ||
	    (regs->length == (uint32_t)nicvf_reg_get_count())) {
		regs->version = nic->vendor_id << 16 | nic->device_id;
		nicvf_reg_dump(nic, data);
		return 0;
	}
	return -ENOTSUP;
}
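
/*
 * Usage sketch (illustrative only): rte_eth_dev_get_reg_info() is a
 * two-step protocol. A first call with data == NULL only reports the
 * dump geometry; the caller then allocates the buffer and asks again for
 * the full dump:
 *
 *	struct rte_dev_reg_info info = { .data = NULL };
 *
 *	rte_eth_dev_get_reg_info(port_id, &info);  // fills length/width
 *	info.data = malloc(info.length * info.width);
 *	if (info.data != NULL)
 *		rte_eth_dev_get_reg_info(port_id, &info);  // full dump
 */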

static void
nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	uint16_t qidx;
	struct nicvf_hw_rx_qstats rx_qstats;
	struct nicvf_hw_tx_qstats tx_qstats;
	struct nicvf_hw_stats port_stats;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint16_t rx_start, rx_end;
	uint16_t tx_start, tx_end;
	size_t i;

	/* RX queue indices for the first VF */
	nicvf_rx_range(dev, nic, &rx_start, &rx_end);

	/* Reading per RX ring stats */
	for (qidx = rx_start; qidx <= rx_end; qidx++) {
		if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		nicvf_hw_get_rx_qstats(nic, &rx_qstats, qidx);
		stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
		stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
	}

	/* TX queue indices for the first VF */
	nicvf_tx_range(dev, nic, &tx_start, &tx_end);

	/* Reading per TX ring stats */
	for (qidx = tx_start; qidx <= tx_end; qidx++) {
		if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		nicvf_hw_get_tx_qstats(nic, &tx_qstats, qidx);
		stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
		stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
	}

	for (i = 0; i < nic->sqs_count; i++) {
		struct nicvf *snic = nic->snicvf[i];

		if (snic == NULL)
			break;

		/* RX queue indices for a secondary VF */
		nicvf_rx_range(dev, snic, &rx_start, &rx_end);

		/* Reading per RX ring stats */
		for (qidx = rx_start; qidx <= rx_end; qidx++) {
			if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
				break;

			nicvf_hw_get_rx_qstats(snic, &rx_qstats,
					       qidx % MAX_RCV_QUEUES_PER_QS);
			stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
			stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
		}

		/* TX queue indices for a secondary VF */
		nicvf_tx_range(dev, snic, &tx_start, &tx_end);
		/* Reading per TX ring stats */
		for (qidx = tx_start; qidx <= tx_end; qidx++) {
			if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
				break;

			nicvf_hw_get_tx_qstats(snic, &tx_qstats,
					       qidx % MAX_SND_QUEUES_PER_QS);
			stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
			stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
		}
	}

	nicvf_hw_get_stats(nic, &port_stats);
	stats->ibytes = port_stats.rx_bytes;
	stats->ipackets = port_stats.rx_ucast_frames;
	stats->ipackets += port_stats.rx_bcast_frames;
	stats->ipackets += port_stats.rx_mcast_frames;
	stats->ierrors = port_stats.rx_l2_errors;
	stats->imissed = port_stats.rx_drop_red;
	stats->imissed += port_stats.rx_drop_overrun;
	stats->imissed += port_stats.rx_drop_bcast;
	stats->imissed += port_stats.rx_drop_mcast;
	stats->imissed += port_stats.rx_drop_l3_bcast;
	stats->imissed += port_stats.rx_drop_l3_mcast;

	stats->obytes = port_stats.tx_bytes_ok;
	stats->opackets = port_stats.tx_ucast_frames_ok;
	stats->opackets += port_stats.tx_bcast_frames_ok;
	stats->opackets += port_stats.tx_mcast_frames_ok;
	stats->oerrors = port_stats.tx_drops;
}

static const uint32_t *
nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	size_t copied;
	static uint32_t ptypes[32];
	struct nicvf *nic = nicvf_pmd_priv(dev);
	static const uint32_t ptypes_common[] = {
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
	};
	static const uint32_t ptypes_tunnel[] = {
		RTE_PTYPE_TUNNEL_GRE,
		RTE_PTYPE_TUNNEL_GENEVE,
		RTE_PTYPE_TUNNEL_VXLAN,
		RTE_PTYPE_TUNNEL_NVGRE,
	};
	static const uint32_t ptypes_end = RTE_PTYPE_UNKNOWN;

	copied = sizeof(ptypes_common);
	memcpy(ptypes, ptypes_common, copied);
	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
		memcpy((char *)ptypes + copied, ptypes_tunnel,
		       sizeof(ptypes_tunnel));
		copied += sizeof(ptypes_tunnel);
	}

	memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end));
	if (dev->rx_pkt_burst == nicvf_recv_pkts ||
	    dev->rx_pkt_burst == nicvf_recv_pkts_multiseg)
		return ptypes;

	return NULL;
}

static void
nicvf_dev_stats_reset(struct rte_eth_dev *dev)
{
	int i, j;
	uint16_t rxqs = 0, txqs = 0;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint16_t rx_start, rx_end;
	uint16_t tx_start, tx_end;

	/* Reset all primary nic counters */
	nicvf_rx_range(dev, nic, &rx_start, &rx_end);
	for (i = rx_start; i <= rx_end; i++)
		rxqs |= (0x3 << (i * 2));

	nicvf_tx_range(dev, nic, &tx_start, &tx_end);
	for (i = tx_start; i <= tx_end; i++)
		txqs |= (0x3 << (i * 2));

	nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, rxqs, txqs);

	/* Reset secondary nic queue counters */
	for (i = 0; i < nic->sqs_count; i++) {
		struct nicvf *snic = nic->snicvf[i];
		if (snic == NULL)
			break;

		/* Use a separate index for the per-queue loops; reusing
		 * 'i' here would corrupt the outer iteration over
		 * sqs_count.
		 */
		nicvf_rx_range(dev, snic, &rx_start, &rx_end);
		for (j = rx_start; j <= rx_end; j++)
			rxqs |= (0x3 << ((j % MAX_CMP_QUEUES_PER_QS) * 2));

		nicvf_tx_range(dev, snic, &tx_start, &tx_end);
		for (j = tx_start; j <= tx_end; j++)
			txqs |= (0x3 << ((j % MAX_SND_QUEUES_PER_QS) * 2));

		nicvf_mbox_reset_stat_counters(snic, 0, 0, rxqs, txqs);
	}
}

/* Promiscuous mode enabled by default in LMAC to VF 1:1 map configuration */
static void
nicvf_dev_promisc_enable(struct rte_eth_dev *dev __rte_unused)
{
}

static inline uint64_t
nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss)
{
	uint64_t nic_rss = 0;

	if (ethdev_rss & ETH_RSS_IPV4)
		nic_rss |= RSS_IP_ENA;

	if (ethdev_rss & ETH_RSS_IPV6)
		nic_rss |= RSS_IP_ENA;

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP)
		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP)
		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP)
		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP)
		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);

	if (ethdev_rss & ETH_RSS_PORT)
		nic_rss |= RSS_L2_EXTENDED_HASH_ENA;

	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
		if (ethdev_rss & ETH_RSS_VXLAN)
			nic_rss |= RSS_TUN_VXLAN_ENA;

		if (ethdev_rss & ETH_RSS_GENEVE)
			nic_rss |= RSS_TUN_GENEVE_ENA;

		if (ethdev_rss & ETH_RSS_NVGRE)
			nic_rss |= RSS_TUN_NVGRE_ENA;
	}

	return nic_rss;
}

static inline uint64_t
nicvf_rss_nic_to_ethdev(struct nicvf *nic, uint64_t nic_rss)
{
	uint64_t ethdev_rss = 0;

	if (nic_rss & RSS_IP_ENA)
		ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6);

	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA))
		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP |
			       ETH_RSS_NONFRAG_IPV6_TCP);

	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA))
		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP |
			       ETH_RSS_NONFRAG_IPV6_UDP);

	if (nic_rss & RSS_L2_EXTENDED_HASH_ENA)
		ethdev_rss |= ETH_RSS_PORT;

	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
		if (nic_rss & RSS_TUN_VXLAN_ENA)
			ethdev_rss |= ETH_RSS_VXLAN;

		if (nic_rss & RSS_TUN_GENEVE_ENA)
			ethdev_rss |= ETH_RSS_GENEVE;

		if (nic_rss & RSS_TUN_NVGRE_ENA)
			ethdev_rss |= ETH_RSS_NVGRE;
	}
	return ethdev_rss;
}

static int
nicvf_dev_reta_query(struct rte_eth_dev *dev,
		     struct rte_eth_rss_reta_entry64 *reta_conf,
		     uint16_t reta_size)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
	int ret, i, j;

	if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
		RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
			"(%d) doesn't match the number hardware can support "
			"(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
		return -EINVAL;
	}

	ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
	if (ret)
		return ret;

	/* Copy RETA table */
	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = tbl[j];
	}

	return 0;
}

static int
nicvf_dev_reta_update(struct rte_eth_dev *dev,
		      struct rte_eth_rss_reta_entry64 *reta_conf,
		      uint16_t reta_size)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
	int ret, i, j;

	if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
		RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
			"(%d) doesn't match the number hardware can support "
			"(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
		return -EINVAL;
	}

	ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
	if (ret)
		return ret;

	/* Copy RETA table */
	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				tbl[j] = reta_conf[i].reta[j];
	}

	return nicvf_rss_reta_update(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
}
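
/*
 * Usage sketch (illustrative only): the RETA is addressed in groups of
 * RTE_RETA_GROUP_SIZE entries, and each mask bit selects one entry to
 * touch. E.g. steering the first two table entries to queues 0 and 1,
 * assuming the 64-entry table that the NIC_MAX_RSS_IDR_TBL_SIZE checks
 * above require:
 *
 *	struct rte_eth_rss_reta_entry64 conf[1];
 *
 *	memset(conf, 0, sizeof(conf));
 *	conf[0].mask = 0x3;	// update entries 0 and 1 only
 *	conf[0].reta[0] = 0;
 *	conf[0].reta[1] = 1;
 *	rte_eth_dev_rss_reta_update(port_id, conf, NIC_MAX_RSS_IDR_TBL_SIZE);
 */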

static int
nicvf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (rss_conf->rss_key)
		nicvf_rss_get_key(nic, rss_conf->rss_key);

	rss_conf->rss_key_len = RSS_HASH_KEY_BYTE_SIZE;
	rss_conf->rss_hf = nicvf_rss_nic_to_ethdev(nic, nicvf_rss_get_cfg(nic));
	return 0;
}

static int
nicvf_dev_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint64_t nic_rss;

	if (rss_conf->rss_key &&
	    rss_conf->rss_key_len != RSS_HASH_KEY_BYTE_SIZE) {
		RTE_LOG(ERR, PMD, "Hash key size mismatch %d",
			rss_conf->rss_key_len);
		return -EINVAL;
	}

	if (rss_conf->rss_key)
		nicvf_rss_set_key(nic, rss_conf->rss_key);

	nic_rss = nicvf_rss_ethdev_to_nic(nic, rss_conf->rss_hf);
	nicvf_rss_set_cfg(nic, nic_rss);
	return 0;
}

static int
nicvf_qset_cq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
		    struct nicvf_rxq *rxq, uint16_t qidx, uint32_t desc_cnt)
{
	const struct rte_memzone *rz;
	uint32_t ring_size = CMP_QUEUE_SZ_MAX * sizeof(union cq_entry_t);

	rz = rte_eth_dma_zone_reserve(dev, "cq_ring",
				      nicvf_netdev_qidx(nic, qidx), ring_size,
				      NICVF_CQ_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for cq hw ring");
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	rxq->phys = rz->phys_addr;
	rxq->desc = rz->addr;
	rxq->qlen_mask = desc_cnt - 1;

	return 0;
}

static int
nicvf_qset_sq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
		    struct nicvf_txq *sq, uint16_t qidx, uint32_t desc_cnt)
{
	const struct rte_memzone *rz;
	uint32_t ring_size = SND_QUEUE_SZ_MAX * sizeof(union sq_entry_t);

	rz = rte_eth_dma_zone_reserve(dev, "sq",
				      nicvf_netdev_qidx(nic, qidx), ring_size,
				      NICVF_SQ_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for sq hw ring");
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	sq->phys = rz->phys_addr;
	sq->desc = rz->addr;
	sq->qlen_mask = desc_cnt - 1;

	return 0;
}

static int
nicvf_qset_rbdr_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
		      uint32_t desc_cnt, uint32_t buffsz)
{
	struct nicvf_rbdr *rbdr;
	const struct rte_memzone *rz;
	uint32_t ring_size;

	assert(nic->rbdr == NULL);
	rbdr = rte_zmalloc_socket("rbdr", sizeof(struct nicvf_rbdr),
				  RTE_CACHE_LINE_SIZE, nic->node);
	if (rbdr == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr");
		return -ENOMEM;
	}

	ring_size = sizeof(struct rbdr_entry_t) * RBDR_QUEUE_SZ_MAX;
	rz = rte_eth_dma_zone_reserve(dev, "rbdr",
				      nicvf_netdev_qidx(nic, 0), ring_size,
				      NICVF_RBDR_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr desc ring");
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	rbdr->phys = rz->phys_addr;
	rbdr->tail = 0;
	rbdr->next_tail = 0;
	rbdr->desc = rz->addr;
	rbdr->buffsz = buffsz;
	rbdr->qlen_mask = desc_cnt - 1;
	rbdr->rbdr_status =
		nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_STATUS0;
	rbdr->rbdr_door =
		nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_DOOR;

	nic->rbdr = rbdr;
	return 0;
}
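
/*
 * All three rings above are sized to a power of two so that a simple
 * mask, qlen_mask = desc_cnt - 1, implements wrap-around without a
 * modulo. Illustrative sketch:
 *
 *	uint32_t head = 0;
 *	head = (head + 1) & rxq->qlen_mask;	// instead of (head + 1) % desc_cnt
 *
 * This is why the setup paths round nb_desc up with the
 * nicvf_qsize_*_roundup() helpers before the masks are derived.
 */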

static void
nicvf_rbdr_release_mbuf(struct rte_eth_dev *dev, struct nicvf *nic,
			nicvf_phys_addr_t phy)
{
	uint16_t qidx;
	void *obj;
	struct nicvf_rxq *rxq;
	uint16_t rx_start, rx_end;

	/* Get queue ranges for this VF */
	nicvf_rx_range(dev, nic, &rx_start, &rx_end);

	for (qidx = rx_start; qidx <= rx_end; qidx++) {
		rxq = dev->data->rx_queues[qidx];
		if (rxq->precharge_cnt) {
			obj = (void *)nicvf_mbuff_phy2virt(phy,
							   rxq->mbuf_phys_off);
			rte_mempool_put(rxq->pool, obj);
			rxq->precharge_cnt--;
			break;
		}
	}
}

static inline void
nicvf_rbdr_release_mbufs(struct rte_eth_dev *dev, struct nicvf *nic)
{
	uint32_t qlen_mask, head;
	struct rbdr_entry_t *entry;
	struct nicvf_rbdr *rbdr = nic->rbdr;

	qlen_mask = rbdr->qlen_mask;
	head = rbdr->head;
	while (head != rbdr->tail) {
		entry = rbdr->desc + head;
		nicvf_rbdr_release_mbuf(dev, nic, entry->full_addr);
		head++;
		head = head & qlen_mask;
	}
}

static inline void
nicvf_tx_queue_release_mbufs(struct nicvf_txq *txq)
{
	uint32_t head;

	head = txq->head;
	while (head != txq->tail) {
		if (txq->txbuffs[head]) {
			rte_pktmbuf_free_seg(txq->txbuffs[head]);
			txq->txbuffs[head] = NULL;
		}
		head++;
		head = head & txq->qlen_mask;
	}
}

static void
nicvf_tx_queue_reset(struct nicvf_txq *txq)
{
	uint32_t txq_desc_cnt = txq->qlen_mask + 1;

	memset(txq->desc, 0, sizeof(union sq_entry_t) * txq_desc_cnt);
	memset(txq->txbuffs, 0, sizeof(struct rte_mbuf *) * txq_desc_cnt);
	txq->tail = 0;
	txq->head = 0;
	txq->xmit_bufs = 0;
}

static inline int
nicvf_vf_start_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
			uint16_t qidx)
{
	struct nicvf_txq *txq;
	int ret;

	assert(qidx < MAX_SND_QUEUES_PER_QS);

	if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
	    RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
	txq->pool = NULL;
	ret = nicvf_qset_sq_config(nic, qidx, txq);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure sq VF%d %d %d",
			     nic->vf_id, qidx, ret);
		goto config_sq_error;
	}

	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STARTED;
	return ret;

config_sq_error:
	nicvf_qset_sq_reclaim(nic, qidx);
	return ret;
}

static inline int
nicvf_vf_stop_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
		       uint16_t qidx)
{
	struct nicvf_txq *txq;
	int ret;

	assert(qidx < MAX_SND_QUEUES_PER_QS);

	if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
	    RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;

	ret = nicvf_qset_sq_reclaim(nic, qidx);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim sq VF%d %d %d",
			     nic->vf_id, qidx, ret);

	txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
	nicvf_tx_queue_release_mbufs(txq);
	nicvf_tx_queue_reset(txq);

	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STOPPED;
	return ret;
}

static inline int
nicvf_configure_cpi(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint16_t qidx, qcnt;
	int ret;

	/* Count started rx queues */
	for (qidx = qcnt = 0; qidx < dev->data->nb_rx_queues; qidx++)
		if (dev->data->rx_queue_state[qidx] ==
		    RTE_ETH_QUEUE_STATE_STARTED)
			qcnt++;

	nic->cpi_alg = CPI_ALG_NONE;
	ret = nicvf_mbox_config_cpi(nic, qcnt);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to configure CPI %d", ret);

	return ret;
}

static inline int
nicvf_configure_rss(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint64_t rsshf;
	int ret = -EINVAL;

	rsshf = nicvf_rss_ethdev_to_nic(nic,
			dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);
	PMD_DRV_LOG(INFO, "mode=%d rx_queues=%d loopback=%d rsshf=0x%" PRIx64,
		    dev->data->dev_conf.rxmode.mq_mode,
		    dev->data->nb_rx_queues,
		    dev->data->dev_conf.lpbk_mode, rsshf);

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
		ret = nicvf_rss_term(nic);
	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		ret = nicvf_rss_config(nic, dev->data->nb_rx_queues, rsshf);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret);

	return ret;
}

static int
nicvf_configure_rss_reta(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	unsigned int idx, qmap_size;
	uint8_t qmap[RTE_MAX_QUEUES_PER_PORT];
	uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];

	if (nic->cpi_alg != CPI_ALG_NONE)
		return -EINVAL;

	/* Prepare queue map */
	for (idx = 0, qmap_size = 0; idx < dev->data->nb_rx_queues; idx++) {
		if (dev->data->rx_queue_state[idx] ==
		    RTE_ETH_QUEUE_STATE_STARTED)
			qmap[qmap_size++] = idx;
	}

	/* Update default RSS RETA */
	for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
		default_reta[idx] = qmap[idx % qmap_size];

	return nicvf_rss_reta_update(nic, default_reta,
				     NIC_MAX_RSS_IDR_TBL_SIZE);
}

static void
nicvf_dev_tx_queue_release(void *sq)
{
	struct nicvf_txq *txq;

	PMD_INIT_FUNC_TRACE();

	txq = (struct nicvf_txq *)sq;
	if (txq) {
		if (txq->txbuffs != NULL) {
			nicvf_tx_queue_release_mbufs(txq);
			rte_free(txq->txbuffs);
			txq->txbuffs = NULL;
		}
		rte_free(txq);
	}
}

static void
nicvf_set_tx_function(struct rte_eth_dev *dev)
{
	struct nicvf_txq *txq = NULL;
	size_t i;
	bool multiseg = false;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if ((txq->txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS) == 0) {
			multiseg = true;
			break;
		}
	}

	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
	if (multiseg) {
		PMD_DRV_LOG(DEBUG, "Using multi-segment tx callback");
		dev->tx_pkt_burst = nicvf_xmit_pkts_multiseg;
	} else {
		PMD_DRV_LOG(DEBUG, "Using single-segment tx callback");
		dev->tx_pkt_burst = nicvf_xmit_pkts;
	}

	/* txq is NULL only when no Tx queue has been set up yet */
	if (txq == NULL)
		return;

	if (txq->pool_free == nicvf_single_pool_free_xmited_buffers)
		PMD_DRV_LOG(DEBUG, "Using single-mempool tx free method");
	else
		PMD_DRV_LOG(DEBUG, "Using multi-mempool tx free method");
}

static void
nicvf_set_rx_function(struct rte_eth_dev *dev)
{
	if (dev->data->scattered_rx) {
		PMD_DRV_LOG(DEBUG, "Using multi-segment rx callback");
		dev->rx_pkt_burst = nicvf_recv_pkts_multiseg;
	} else {
		PMD_DRV_LOG(DEBUG, "Using single-segment rx callback");
		dev->rx_pkt_burst = nicvf_recv_pkts;
	}
}
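
/*
 * Queue-index mapping used throughout the setup/start/stop paths below
 * (illustrative): the ethdev-level queue id is folded onto a (VF, local
 * queue) pair, with the first MAX_SND_QUEUES_PER_QS queues on the primary
 * VF and each following block on one secondary queue set. E.g. with 8
 * queues per qset, ethdev queue 11 maps to:
 *
 *	nic  = nic->snicvf[11 / MAX_SND_QUEUES_PER_QS - 1];	// snicvf[0]
 *	qidx = 11 % MAX_SND_QUEUES_PER_QS;			// local queue 3
 *
 * nicvf_netdev_qidx() performs the inverse translation.
 */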

static int
nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
			 uint16_t nb_desc, unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	uint16_t tx_free_thresh;
	uint8_t is_single_pool;
	struct nicvf_txq *txq;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	if (qidx >= MAX_SND_QUEUES_PER_QS)
		nic = nic->snicvf[qidx / MAX_SND_QUEUES_PER_QS - 1];

	qidx = qidx % MAX_SND_QUEUES_PER_QS;

	/* Socket id check */
	if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
			    socket_id, nic->node);

	/* Tx deferred start is not supported */
	if (tx_conf->tx_deferred_start) {
		PMD_INIT_LOG(ERR, "Tx deferred start not supported");
		return -EINVAL;
	}

	/* Roundup nb_desc to available qsize and validate max number of desc */
	nb_desc = nicvf_qsize_sq_roundup(nb_desc);
	if (nb_desc == 0) {
		PMD_INIT_LOG(ERR, "Value of nb_desc beyond available sq qsize");
		return -EINVAL;
	}

	/* Validate tx_free_thresh */
	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
				    tx_conf->tx_free_thresh :
				    NICVF_DEFAULT_TX_FREE_THRESH);

	if (tx_free_thresh > (nb_desc) ||
	    tx_free_thresh > NICVF_MAX_TX_FREE_THRESH) {
		PMD_INIT_LOG(ERR,
			"tx_free_thresh must be less than the number of TX "
			"descriptors. (tx_free_thresh=%u port=%d "
			"queue=%d)", (unsigned int)tx_free_thresh,
			(int)dev->data->port_id, (int)qidx);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
		PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
			   nicvf_netdev_qidx(nic, qidx));
		nicvf_dev_tx_queue_release(
			dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)]);
		dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
	}

	/* Allocating tx queue data structure */
	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nicvf_txq),
				 RTE_CACHE_LINE_SIZE, nic->node);
	if (txq == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate txq=%d",
			     nicvf_netdev_qidx(nic, qidx));
		return -ENOMEM;
	}

	txq->nic = nic;
	txq->queue_id = qidx;
	txq->tx_free_thresh = tx_free_thresh;
	txq->txq_flags = tx_conf->txq_flags;
	txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
	txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
	is_single_pool = (txq->txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT &&
			  txq->txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP);

	/* Choose optimum free threshold value for multipool case */
	if (!is_single_pool) {
		txq->tx_free_thresh = (uint16_t)
			(tx_conf->tx_free_thresh == NICVF_DEFAULT_TX_FREE_THRESH ?
			 NICVF_TX_FREE_MPOOL_THRESH :
			 tx_conf->tx_free_thresh);
		txq->pool_free = nicvf_multi_pool_free_xmited_buffers;
	} else {
		txq->pool_free = nicvf_single_pool_free_xmited_buffers;
	}

	/* Allocate software ring */
	txq->txbuffs = rte_zmalloc_socket("txq->txbuffs",
					  nb_desc * sizeof(struct rte_mbuf *),
					  RTE_CACHE_LINE_SIZE, nic->node);

	if (txq->txbuffs == NULL) {
		nicvf_dev_tx_queue_release(txq);
		return -ENOMEM;
	}

	if (nicvf_qset_sq_alloc(dev, nic, txq, qidx, nb_desc)) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx);
		nicvf_dev_tx_queue_release(txq);
		return -ENOMEM;
	}

	nicvf_tx_queue_reset(txq);

	PMD_TX_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p phys=0x%" PRIx64,
		   nicvf_netdev_qidx(nic, qidx), txq, nb_desc, txq->desc,
		   txq->phys);

	dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq;
	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

static inline void
nicvf_rx_queue_release_mbufs(struct rte_eth_dev *dev, struct nicvf_rxq *rxq)
{
	uint32_t rxq_cnt;
	uint32_t nb_pkts, released_pkts = 0;
	uint32_t refill_cnt = 0;
	struct rte_mbuf *rx_pkts[NICVF_MAX_RX_FREE_THRESH];

	if (dev->rx_pkt_burst == NULL)
		return;

	while ((rxq_cnt = nicvf_dev_rx_queue_count(dev,
				nicvf_netdev_qidx(rxq->nic, rxq->queue_id)))) {
		nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts,
					    NICVF_MAX_RX_FREE_THRESH);
		PMD_DRV_LOG(INFO, "nb_pkts=%d rxq_cnt=%d", nb_pkts, rxq_cnt);
		while (nb_pkts) {
			rte_pktmbuf_free_seg(rx_pkts[--nb_pkts]);
			released_pkts++;
		}
	}

	refill_cnt += nicvf_dev_rbdr_refill(dev,
			nicvf_netdev_qidx(rxq->nic, rxq->queue_id));

	PMD_DRV_LOG(INFO, "free_cnt=%d  refill_cnt=%d",
		    released_pkts, refill_cnt);
}

static void
nicvf_rx_queue_reset(struct nicvf_rxq *rxq)
{
	rxq->head = 0;
	rxq->available_space = 0;
	rxq->recv_buffers = 0;
}

static inline int
nicvf_vf_start_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
			uint16_t qidx)
{
	struct nicvf_rxq *rxq;
	int ret;

	assert(qidx < MAX_RCV_QUEUES_PER_QS);

	if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
	    RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	/* Update rbdr pointer to all rxq */
	rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
	rxq->shared_rbdr = nic->rbdr;

	ret = nicvf_qset_rq_config(nic, qidx, rxq);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure rq VF%d %d %d",
			     nic->vf_id, qidx, ret);
		goto config_rq_error;
	}
	ret = nicvf_qset_cq_config(nic, qidx, rxq);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure cq VF%d %d %d",
			     nic->vf_id, qidx, ret);
		goto config_cq_error;
	}

	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STARTED;
	return 0;

config_cq_error:
	nicvf_qset_cq_reclaim(nic, qidx);
config_rq_error:
	nicvf_qset_rq_reclaim(nic, qidx);
	return ret;
}

static inline int
nicvf_vf_stop_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
		       uint16_t qidx)
{
	struct nicvf_rxq *rxq;
	int ret, other_error;

	if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
	    RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;

	ret = nicvf_qset_rq_reclaim(nic, qidx);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim rq VF%d %d %d",
			     nic->vf_id, qidx, ret);

	other_error = ret;
	rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
	nicvf_rx_queue_release_mbufs(dev, rxq);
	nicvf_rx_queue_reset(rxq);

	ret = nicvf_qset_cq_reclaim(nic, qidx);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim cq VF%d %d %d",
			     nic->vf_id, qidx, ret);

	other_error |= ret;
	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STOPPED;
	return other_error;
}

static void
nicvf_dev_rx_queue_release(void *rx_queue)
{
	PMD_INIT_FUNC_TRACE();

	rte_free(rx_queue);
}

static int
nicvf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	int ret;

	if (qidx >= MAX_RCV_QUEUES_PER_QS)
		nic = nic->snicvf[(qidx / MAX_RCV_QUEUES_PER_QS - 1)];

	qidx = qidx % MAX_RCV_QUEUES_PER_QS;

	ret = nicvf_vf_start_rx_queue(dev, nic, qidx);
	if (ret)
		return ret;

	ret = nicvf_configure_cpi(dev);
	if (ret)
		return ret;

	return nicvf_configure_rss_reta(dev);
}

static int
nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	int ret;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	/* Use the Rx-queue constants here; MAX_RCV_QUEUES_PER_QS and
	 * MAX_SND_QUEUES_PER_QS are asserted equal at configure time.
	 */
	if (qidx >= MAX_RCV_QUEUES_PER_QS)
		nic = nic->snicvf[(qidx / MAX_RCV_QUEUES_PER_QS - 1)];

	qidx = qidx % MAX_RCV_QUEUES_PER_QS;

	ret = nicvf_vf_stop_rx_queue(dev, nic, qidx);
	ret |= nicvf_configure_cpi(dev);
	ret |= nicvf_configure_rss_reta(dev);
	return ret;
}

static int
nicvf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (qidx >= MAX_SND_QUEUES_PER_QS)
		nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];

	qidx = qidx % MAX_SND_QUEUES_PER_QS;

	return nicvf_vf_start_tx_queue(dev, nic, qidx);
}

static int
nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (qidx >= MAX_SND_QUEUES_PER_QS)
		nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];

	qidx = qidx % MAX_SND_QUEUES_PER_QS;

	return nicvf_vf_stop_tx_queue(dev, nic, qidx);
}

static inline void
nicvf_rxq_mbuf_setup(struct nicvf_rxq *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def;

	RTE_BUILD_BUG_ON(sizeof(union mbuf_initializer) != 8);
	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* Prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer.value = *(uint64_t *)p;
}
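
/*
 * The 64-bit snapshot captured above covers the mbuf fields that overlay
 * rearm_data (refcnt, nb_segs, port, data_off). The Rx path can then
 * re-arm a freshly received mbuf with a single 64-bit store instead of
 * four field writes. Illustrative sketch, not the actual nicvf_rxtx.c
 * code:
 *
 *	*(uint64_t *)(&mbuf->rearm_data) = rxq->mbuf_initializer.value;
 */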

static int
nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
			 uint16_t nb_desc, unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	uint16_t rx_free_thresh;
	struct nicvf_rxq *rxq;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	if (qidx >= MAX_RCV_QUEUES_PER_QS)
		nic = nic->snicvf[qidx / MAX_RCV_QUEUES_PER_QS - 1];

	qidx = qidx % MAX_RCV_QUEUES_PER_QS;

	/* Socket id check */
	if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
			    socket_id, nic->node);

	/* Mempool memory must be contiguous, so must be one memory segment */
	if (mp->nb_mem_chunks != 1) {
		PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
		return -EINVAL;
	}

	/* Mempool memory must be physically contiguous */
	if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG) {
		PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous");
		return -EINVAL;
	}

	/* Rx deferred start is not supported */
	if (rx_conf->rx_deferred_start) {
		PMD_INIT_LOG(ERR, "Rx deferred start not supported");
		return -EINVAL;
	}

	/* Roundup nb_desc to available qsize and validate max number of desc */
	nb_desc = nicvf_qsize_cq_roundup(nb_desc);
	if (nb_desc == 0) {
		PMD_INIT_LOG(ERR, "Value nb_desc beyond available hw cq qsize");
		return -EINVAL;
	}

	/* Check rx_free_thresh upper bound */
	rx_free_thresh = (uint16_t)((rx_conf->rx_free_thresh) ?
				    rx_conf->rx_free_thresh :
				    NICVF_DEFAULT_RX_FREE_THRESH);
	if (rx_free_thresh > NICVF_MAX_RX_FREE_THRESH ||
	    rx_free_thresh >= nb_desc * 0.75) {
		PMD_INIT_LOG(ERR, "rx_free_thresh greater than expected %d",
			     rx_free_thresh);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed */
	if (dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
		PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
			   nicvf_netdev_qidx(nic, qidx));
		nicvf_dev_rx_queue_release(
			dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)]);
		dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
	}

	/* Allocate rxq memory */
	rxq = rte_zmalloc_socket("ethdev rx queue", sizeof(struct nicvf_rxq),
				 RTE_CACHE_LINE_SIZE, nic->node);
	if (rxq == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate rxq=%d",
			     nicvf_netdev_qidx(nic, qidx));
		return -ENOMEM;
	}

	rxq->nic = nic;
	rxq->pool = mp;
	rxq->queue_id = qidx;
	rxq->port_id = dev->data->port_id;
	rxq->rx_free_thresh = rx_free_thresh;
	rxq->rx_drop_en = rx_conf->rx_drop_en;
	rxq->cq_status = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_STATUS;
	rxq->cq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_DOOR;
	rxq->precharge_cnt = 0;

	if (nicvf_hw_cap(nic) & NICVF_CAP_CQE_RX2)
		rxq->rbptr_offset = NICVF_CQE_RX2_RBPTR_WORD;
	else
		rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;

	nicvf_rxq_mbuf_setup(rxq);

	/* Alloc completion queue */
	if (nicvf_qset_cq_alloc(dev, nic, rxq, rxq->queue_id, nb_desc)) {
		PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id);
		nicvf_dev_rx_queue_release(rxq);
		return -ENOMEM;
	}

	nicvf_rx_queue_reset(rxq);

	PMD_RX_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d) phy=%" PRIx64,
		   nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
		   rte_mempool_avail_count(mp), rxq->phys);

	dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

static void
nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);

	PMD_INIT_FUNC_TRACE();

	dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);

	dev_info->min_rx_bufsize = ETHER_MIN_MTU;
	dev_info->max_rx_pktlen = NIC_HW_MAX_FRS;
	dev_info->max_rx_queues =
		(uint16_t)MAX_RCV_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
	dev_info->max_tx_queues =
		(uint16_t)MAX_SND_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
	dev_info->max_mac_addrs = 1;
	dev_info->max_vfs = pci_dev->max_vfs;

	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM  |
		DEV_TX_OFFLOAD_UDP_CKSUM   |
		DEV_TX_OFFLOAD_TCP_CKSUM   |
		DEV_TX_OFFLOAD_TCP_TSO     |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;

	dev_info->reta_size = nic->rss_info.rss_size;
	dev_info->hash_key_size = RSS_HASH_KEY_BYTE_SIZE;
	dev_info->flow_type_rss_offloads = NICVF_RSS_OFFLOAD_PASS1;
	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING)
		dev_info->flow_type_rss_offloads |= NICVF_RSS_OFFLOAD_TUNNEL;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
		.txq_flags =
			ETH_TXQ_FLAGS_NOMULTSEGS  |
			ETH_TXQ_FLAGS_NOREFCOUNT  |
			ETH_TXQ_FLAGS_NOMULTMEMP  |
			ETH_TXQ_FLAGS_NOVLANOFFL  |
			ETH_TXQ_FLAGS_NOXSUMSCTP,
	};
}

static nicvf_phys_addr_t
rbdr_rte_mempool_get(void *dev, void *opaque)
{
	uint16_t qidx;
	uintptr_t mbuf;
	struct nicvf_rxq *rxq;
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)dev;
	struct nicvf *nic = (struct nicvf *)opaque;
	uint16_t rx_start, rx_end;

	/* Get queue ranges for this VF */
	nicvf_rx_range(eth_dev, nic, &rx_start, &rx_end);

	for (qidx = rx_start; qidx <= rx_end; qidx++) {
		rxq = eth_dev->data->rx_queues[qidx];
		/* Maintain equal buffer count across all pools */
		if (rxq->precharge_cnt >= rxq->qlen_mask)
			continue;
		rxq->precharge_cnt++;
		mbuf = (uintptr_t)rte_pktmbuf_alloc(rxq->pool);
		if (mbuf)
			return nicvf_mbuff_virt2phy(mbuf, rxq->mbuf_phys_off);
	}
	return 0;
}
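
/*
 * rbdr_rte_mempool_get() is the buffer handout callback passed to
 * nicvf_qset_rbdr_precharge() below: each invocation is expected to
 * return the physical address of one fresh receive buffer, or 0 once the
 * pools are exhausted. Walking all rxqs while capping precharge_cnt at
 * qlen_mask keeps the buffer drain even across pools when queues use
 * different mempools.
 */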

static int
nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
{
	int ret;
	uint16_t qidx, data_off;
	uint32_t total_rxq_desc, nb_rbdr_desc, exp_buffs;
	uint64_t mbuf_phys_off = 0;
	struct nicvf_rxq *rxq;
	struct rte_mbuf *mbuf;
	uint16_t rx_start, rx_end;
	uint16_t tx_start, tx_end;

	PMD_INIT_FUNC_TRACE();

	/* Userspace process exited without proper shutdown in last run */
	if (nicvf_qset_rbdr_active(nic, 0))
		nicvf_vf_stop(dev, nic, false);

	/* Get queue ranges for this VF */
	nicvf_rx_range(dev, nic, &rx_start, &rx_end);

	/*
	 * Thunderx nicvf PMD can support more than one pool per port only when
	 * 1) Data payload size is same across all the pools in given port
	 *    AND
	 * 2) All mbufs in the pools are from the same hugepage
	 *    AND
	 * 3) Mbuf metadata size is same across all the pools in given port
	 *
	 * This is to support existing applications that use multiple pools
	 * per port. But the purpose of using a multipool for QoS is not
	 * addressed.
	 */

	/* Validate mempool attributes */
	for (qidx = rx_start; qidx <= rx_end; qidx++) {
		rxq = dev->data->rx_queues[qidx];
		rxq->mbuf_phys_off = nicvf_mempool_phy_offset(rxq->pool);
		mbuf = rte_pktmbuf_alloc(rxq->pool);
		if (mbuf == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate mbuf VF%d qid=%d "
				     "pool=%s",
				     nic->vf_id, qidx, rxq->pool->name);
			return -ENOMEM;
		}
		data_off = nicvf_mbuff_meta_length(mbuf);
		data_off += RTE_PKTMBUF_HEADROOM;
		rte_pktmbuf_free(mbuf);

		if (data_off % RTE_CACHE_LINE_SIZE) {
			PMD_INIT_LOG(ERR, "%s: unaligned data_off=%d delta=%d",
				     rxq->pool->name, data_off,
				     data_off % RTE_CACHE_LINE_SIZE);
			return -EINVAL;
		}
		rxq->mbuf_phys_off -= data_off;

		if (mbuf_phys_off == 0)
			mbuf_phys_off = rxq->mbuf_phys_off;
		if (mbuf_phys_off != rxq->mbuf_phys_off) {
			PMD_INIT_LOG(ERR, "pool params not same, %s VF%d %"
				     PRIx64, rxq->pool->name, nic->vf_id,
				     mbuf_phys_off);
			return -EINVAL;
		}
	}

	/* Check the level of buffers in the pool */
	total_rxq_desc = 0;
	for (qidx = rx_start; qidx <= rx_end; qidx++) {
		rxq = dev->data->rx_queues[qidx];
		/* Count total numbers of rxq descs */
		total_rxq_desc += rxq->qlen_mask + 1;
		exp_buffs = RTE_MEMPOOL_CACHE_MAX_SIZE + rxq->rx_free_thresh;
		exp_buffs *= dev->data->nb_rx_queues;
		if (rte_mempool_avail_count(rxq->pool) < exp_buffs) {
			PMD_INIT_LOG(ERR, "Buff shortage in pool=%s (%d/%d)",
				     rxq->pool->name,
				     rte_mempool_avail_count(rxq->pool),
				     exp_buffs);
			return -ENOENT;
		}
	}

	/* Check RBDR desc overflow */
	ret = nicvf_qsize_rbdr_roundup(total_rxq_desc);
	if (ret == 0) {
		PMD_INIT_LOG(ERR, "Reached RBDR desc limit, reduce nr desc "
			     "VF%d", nic->vf_id);
		return -ENOMEM;
	}

	/* Enable qset */
	ret = nicvf_qset_config(nic);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to enable qset %d VF%d", ret,
			     nic->vf_id);
		return ret;
	}

	/* Allocate RBDR and RBDR ring desc */
	nb_rbdr_desc = nicvf_qsize_rbdr_roundup(total_rxq_desc);
	ret = nicvf_qset_rbdr_alloc(dev, nic, nb_rbdr_desc, rbdrsz);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory for rbdr alloc "
			     "VF%d", nic->vf_id);
		goto qset_reclaim;
	}

	/* Enable and configure RBDR registers */
	ret = nicvf_qset_rbdr_config(nic, 0);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure rbdr %d VF%d", ret,
			     nic->vf_id);
		goto qset_rbdr_free;
	}

	/* Fill rte_mempool buffers in RBDR pool and precharge it */
	ret = nicvf_qset_rbdr_precharge(dev, nic, 0, rbdr_rte_mempool_get,
					total_rxq_desc);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to fill rbdr %d VF%d", ret,
			     nic->vf_id);
		goto qset_rbdr_reclaim;
	}

	PMD_DRV_LOG(INFO, "Filled %d out of %d entries in RBDR VF%d",
		    nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);

	/* Configure VLAN Strip */
	nicvf_vlan_hw_strip(nic, dev->data->dev_conf.rxmode.hw_vlan_strip);

	/* Based on the packet type (IPv4 or IPv6), the nicvf HW aligns L3
	 * data to a 64-bit memory address.
	 * The alignment creates a hole in the mbuf (between the end of the
	 * headroom and the packet data start). The new revision of the HW
	 * provides an option to disable the L3 alignment feature and make the
	 * mbuf layout look more like other NICs. For better application
	 * compatibility, disable the L3 alignment feature on the hardware
	 * revisions that support it.
	 */
	nicvf_apad_config(nic, false);

	/* Get queue ranges for this VF */
	nicvf_tx_range(dev, nic, &tx_start, &tx_end);

	/* Configure TX queues */
	for (qidx = tx_start; qidx <= tx_end; qidx++) {
		ret = nicvf_vf_start_tx_queue(dev, nic,
					      qidx % MAX_SND_QUEUES_PER_QS);
		if (ret)
			goto start_txq_error;
	}

	/* Configure RX queues */
	for (qidx = rx_start; qidx <= rx_end; qidx++) {
		ret = nicvf_vf_start_rx_queue(dev, nic,
					      qidx % MAX_RCV_QUEUES_PER_QS);
		if (ret)
			goto start_rxq_error;
	}

	if (!nic->sqs_mode) {
		/* Configure CPI algorithm */
		ret = nicvf_configure_cpi(dev);
		if (ret)
			goto start_txq_error;

		ret = nicvf_mbox_get_rss_size(nic);
		if (ret) {
			PMD_INIT_LOG(ERR, "Failed to get rss table size");
			goto qset_rss_error;
		}

		/* Configure RSS */
		ret = nicvf_configure_rss(dev);
		if (ret)
			goto qset_rss_error;
	}

	/* Done; Let PF make the BGX's RX and TX switches to ON position */
	nicvf_mbox_cfg_done(nic);
	return 0;

qset_rss_error:
	nicvf_rss_term(nic);
start_rxq_error:
	for (qidx = rx_start; qidx <= rx_end; qidx++)
		nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);
start_txq_error:
	for (qidx = tx_start; qidx <= tx_end; qidx++)
		nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);
qset_rbdr_reclaim:
	nicvf_qset_rbdr_reclaim(nic, 0);
	nicvf_rbdr_release_mbufs(dev, nic);
qset_rbdr_free:
	if (nic->rbdr) {
		rte_free(nic->rbdr);
		nic->rbdr = NULL;
	}
qset_reclaim:
	nicvf_qset_reclaim(nic);
	return ret;
}

static int
nicvf_dev_start(struct rte_eth_dev *dev)
{
	uint16_t qidx;
	int ret;
	size_t i;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
	uint16_t mtu;
	uint32_t buffsz = 0, rbdrsz = 0;
	struct rte_pktmbuf_pool_private *mbp_priv;
	struct nicvf_rxq *rxq;

	PMD_INIT_FUNC_TRACE();

	/* This function must be called for a primary device */
	assert_primary(nic);

	/* Validate RBDR buff size */
	for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
		rxq = dev->data->rx_queues[qidx];
		mbp_priv = rte_mempool_get_priv(rxq->pool);
		buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
		if (buffsz % 128) {
			PMD_INIT_LOG(ERR, "rxbuf size must be multiple of 128");
			return -EINVAL;
		}
		if (rbdrsz == 0)
			rbdrsz = buffsz;
		if (rbdrsz != buffsz) {
			PMD_INIT_LOG(ERR, "buffsz not same, qidx=%d (%d/%d)",
				     qidx, rbdrsz, buffsz);
			return -EINVAL;
		}
	}

	/* Configure loopback */
	ret = nicvf_loopback_config(nic, dev->data->dev_conf.lpbk_mode);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure loopback %d", ret);
		return ret;
	}

	/* Reset all statistics counters attached to this port */
	ret = nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, 0xFFFF, 0xFFFF);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to reset stat counters %d", ret);
		return ret;
	}

	/* Setup scatter mode if needed by jumbo */
	if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
	    2 * VLAN_TAG_SIZE > buffsz)
		dev->data->scattered_rx = 1;
	if (rx_conf->enable_scatter)
		dev->data->scattered_rx = 1;

	/* Setup MTU based on max_rx_pkt_len or default */
	mtu = dev->data->dev_conf.rxmode.jumbo_frame ?
		dev->data->dev_conf.rxmode.max_rx_pkt_len
			- ETHER_HDR_LEN - ETHER_CRC_LEN
		: ETHER_MTU;

	if (nicvf_dev_set_mtu(dev, mtu)) {
		PMD_INIT_LOG(ERR, "Failed to set default mtu size");
		return -EBUSY;
	}

	ret = nicvf_vf_start(dev, nic, rbdrsz);
	if (ret != 0)
		return ret;

	for (i = 0; i < nic->sqs_count; i++) {
		assert(nic->snicvf[i]);

		ret = nicvf_vf_start(dev, nic->snicvf[i], rbdrsz);
		if (ret != 0)
			return ret;
	}

	/* Configure callbacks based on scatter mode */
	nicvf_set_tx_function(dev);
	nicvf_set_rx_function(dev);

	return 0;
}
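
/*
 * Worked example for the scatter/MTU setup above (illustrative numbers,
 * assuming the default 128-byte RTE_PKTMBUF_HEADROOM): with a 2048-byte
 * mempool data room, buffsz = 2048 - 128 = 1920 bytes, which passes the
 * multiple-of-128 check. A jumbo max_rx_pkt_len of 9000 gives
 * 9000 + 2 * VLAN_TAG_SIZE > 1920, so scattered_rx is enabled, and the
 * derived MTU becomes 9000 - ETHER_HDR_LEN - ETHER_CRC_LEN = 8982.
 */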

static void
nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup)
{
	size_t i;
	int ret;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	/* Teardown secondary vf first */
	for (i = 0; i < nic->sqs_count; i++) {
		if (!nic->snicvf[i])
			continue;

		nicvf_vf_stop(dev, nic->snicvf[i], cleanup);
	}

	/* Stop the primary VF now */
	nicvf_vf_stop(dev, nic, cleanup);

	/* Disable loopback */
	ret = nicvf_loopback_config(nic, 0);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to disable loopback %d", ret);

	/* Reclaim CPI configuration */
	ret = nicvf_mbox_config_cpi(nic, 0);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim CPI config %d", ret);
}

static void
nicvf_dev_stop(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	nicvf_dev_stop_cleanup(dev, false);
}

static void
nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic, bool cleanup)
{
	int ret;
	uint16_t qidx;
	uint16_t tx_start, tx_end;
	uint16_t rx_start, rx_end;

	PMD_INIT_FUNC_TRACE();

	if (cleanup) {
		/* Let PF make the BGX's RX and TX switches to OFF position */
		nicvf_mbox_shutdown(nic);
	}

	/* Disable VLAN Strip */
	nicvf_vlan_hw_strip(nic, 0);

	/* Get queue ranges for this VF */
	nicvf_tx_range(dev, nic, &tx_start, &tx_end);

	for (qidx = tx_start; qidx <= tx_end; qidx++)
		nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);

	/* Get queue ranges for this VF */
	nicvf_rx_range(dev, nic, &rx_start, &rx_end);

	/* Reclaim rq */
	for (qidx = rx_start; qidx <= rx_end; qidx++)
		nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);

	/* Reclaim RBDR */
	ret = nicvf_qset_rbdr_reclaim(nic, 0);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim RBDR %d", ret);

	/* Move all charged buffers in RBDR back to pool */
	if (nic->rbdr != NULL)
		nicvf_rbdr_release_mbufs(dev, nic);

	/* Disable qset */
	ret = nicvf_qset_reclaim(nic);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to disable qset %d", ret);

	/* Disable all interrupts */
	nicvf_disable_all_interrupts(nic);

	/* Free RBDR SW structure */
	if (nic->rbdr) {
		rte_free(nic->rbdr);
		nic->rbdr = NULL;
	}
}

static void
nicvf_dev_close(struct rte_eth_dev *dev)
{
	size_t i;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	nicvf_dev_stop_cleanup(dev, true);
	nicvf_periodic_alarm_stop(nicvf_interrupt, dev);

	for (i = 0; i < nic->sqs_count; i++) {
		if (!nic->snicvf[i])
			continue;

		nicvf_periodic_alarm_stop(nicvf_vf_interrupt, nic->snicvf[i]);
	}
}

static int
nicvf_request_sqs(struct nicvf *nic)
{
	size_t i;

	assert_primary(nic);
	assert(nic->sqs_count > 0);
	assert(nic->sqs_count <= MAX_SQS_PER_VF);

	/* Set the number of Rx/Tx queues in each of the SQsets */
	for (i = 0; i < nic->sqs_count; i++) {
		if (nicvf_svf_empty())
			rte_panic("Cannot assign sufficient number of "
				  "secondary queues to primary VF%" PRIu8 "\n",
				  nic->vf_id);

		nic->snicvf[i] = nicvf_svf_pop();
		nic->snicvf[i]->sqs_id = i;
	}

	return nicvf_mbox_request_sqs(nic);
}
static int
nicvf_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_conf *conf = &data->dev_conf;
	struct rte_eth_rxmode *rxmode = &conf->rxmode;
	struct rte_eth_txmode *txmode = &conf->txmode;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint8_t cqcount;

	PMD_INIT_FUNC_TRACE();

	if (!rte_eal_has_hugepages()) {
		PMD_INIT_LOG(INFO, "Huge page is not configured");
		return -EINVAL;
	}

	if (txmode->mq_mode) {
		PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported");
		return -EINVAL;
	}

	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
	    rxmode->mq_mode != ETH_MQ_RX_RSS) {
		PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
		return -EINVAL;
	}

	if (!rxmode->hw_strip_crc) {
		PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
		rxmode->hw_strip_crc = 1;
	}

	if (rxmode->hw_ip_checksum) {
		PMD_INIT_LOG(NOTICE, "Rxcksum not supported");
		rxmode->hw_ip_checksum = 0;
	}

	if (rxmode->split_hdr_size) {
		PMD_INIT_LOG(INFO, "Rxmode does not support split header");
		return -EINVAL;
	}

	if (rxmode->hw_vlan_filter) {
		PMD_INIT_LOG(INFO, "VLAN filter not supported");
		return -EINVAL;
	}

	if (rxmode->hw_vlan_extend) {
		PMD_INIT_LOG(INFO, "VLAN extended not supported");
		return -EINVAL;
	}

	if (rxmode->enable_lro) {
		PMD_INIT_LOG(INFO, "LRO not supported");
		return -EINVAL;
	}

	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
		PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported");
		return -EINVAL;
	}

	if (conf->dcb_capability_en) {
		PMD_INIT_LOG(INFO, "DCB enable not supported");
		return -EINVAL;
	}

	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		PMD_INIT_LOG(INFO, "Flow director not supported");
		return -EINVAL;
	}

	assert_primary(nic);
	NICVF_STATIC_ASSERT(MAX_RCV_QUEUES_PER_QS == MAX_SND_QUEUES_PER_QS);
	cqcount = RTE_MAX(data->nb_tx_queues, data->nb_rx_queues);
	if (cqcount > MAX_RCV_QUEUES_PER_QS) {
		nic->sqs_count = RTE_ALIGN_CEIL(cqcount, MAX_RCV_QUEUES_PER_QS);
		nic->sqs_count = (nic->sqs_count / MAX_RCV_QUEUES_PER_QS) - 1;
	} else {
		nic->sqs_count = 0;
	}

	assert(nic->sqs_count <= MAX_SQS_PER_VF);

	if (nic->sqs_count > 0) {
		if (nicvf_request_sqs(nic)) {
			rte_panic("Cannot assign sufficient number of "
				  "secondary queues to PORT%d VF%" PRIu8 "\n",
				  dev->data->port_id, nic->vf_id);
		}
	}

	PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
		dev->data->port_id, nicvf_hw_cap(nic));

	return 0;
}

/* Initialize and register driver with DPDK Application */
static const struct eth_dev_ops nicvf_eth_dev_ops = {
	.dev_configure = nicvf_dev_configure,
	.dev_start = nicvf_dev_start,
	.dev_stop = nicvf_dev_stop,
	.link_update = nicvf_dev_link_update,
	.dev_close = nicvf_dev_close,
	.stats_get = nicvf_dev_stats_get,
	.stats_reset = nicvf_dev_stats_reset,
	.promiscuous_enable = nicvf_dev_promisc_enable,
	.dev_infos_get = nicvf_dev_info_get,
	.dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
	.mtu_set = nicvf_dev_set_mtu,
	.reta_update = nicvf_dev_reta_update,
	.reta_query = nicvf_dev_reta_query,
	.rss_hash_update = nicvf_dev_rss_hash_update,
	.rss_hash_conf_get = nicvf_dev_rss_hash_conf_get,
	.rx_queue_start = nicvf_dev_rx_queue_start,
	.rx_queue_stop = nicvf_dev_rx_queue_stop,
	.tx_queue_start = nicvf_dev_tx_queue_start,
	.tx_queue_stop = nicvf_dev_tx_queue_stop,
	.rx_queue_setup = nicvf_dev_rx_queue_setup,
	.rx_queue_release = nicvf_dev_rx_queue_release,
	.rx_queue_count = nicvf_dev_rx_queue_count,
	.tx_queue_setup = nicvf_dev_tx_queue_setup,
	.tx_queue_release = nicvf_dev_tx_queue_release,
	.get_reg = nicvf_dev_get_regs,
};

static int
nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	int ret;
	struct rte_pci_device *pci_dev;
	struct nicvf *nic = nicvf_pmd_priv(eth_dev);

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &nicvf_eth_dev_ops;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (nic) {
			/* Setup callbacks for secondary process */
			nicvf_set_tx_function(eth_dev);
			nicvf_set_rx_function(eth_dev);
			return 0;
		} else {
			/* If nic == NULL then this port is a secondary Qset
			 * VF, so the ethdev needs to be released by the caller */
			return ENOTSUP;
		}
	}

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	nic->device_id = pci_dev->id.device_id;
	nic->vendor_id = pci_dev->id.vendor_id;
	nic->subsystem_device_id = pci_dev->id.subsystem_device_id;
	nic->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	PMD_INIT_LOG(DEBUG, "nicvf: device (%x:%x) %u:%u:%u:%u",
			pci_dev->id.vendor_id, pci_dev->id.device_id,
			pci_dev->addr.domain, pci_dev->addr.bus,
			pci_dev->addr.devid, pci_dev->addr.function);

	nic->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
	if (!nic->reg_base) {
		PMD_INIT_LOG(ERR, "Failed to map BAR0");
		ret = -ENODEV;
		goto fail;
	}

	nicvf_disable_all_interrupts(nic);

	ret = nicvf_periodic_alarm_start(nicvf_interrupt, eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to start periodic alarm");
		goto fail;
	}

	ret = nicvf_mbox_check_pf_ready(nic);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to get ready message from PF");
		goto alarm_fail;
	} else {
		PMD_INIT_LOG(INFO,
			"node=%d vf=%d mode=%s sqs=%s loopback_supported=%s",
			nic->node, nic->vf_id,
			nic->tns_mode == NIC_TNS_MODE ? "tns" : "tns-bypass",
			nic->sqs_mode ? "true" : "false",
			nic->loopback_supported ? "true" : "false"
		);
	}

	ret = nicvf_base_init(nic);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to execute nicvf_base_init");
		goto malloc_fail;
	}

	if (nic->sqs_mode) {
		/* Push nic to stack of secondary vfs */
		nicvf_svf_push(nic);

		/* Steal nic pointer from the device for further reuse */
		eth_dev->data->dev_private = NULL;

		nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
		ret = nicvf_periodic_alarm_start(nicvf_vf_interrupt, nic);
		if (ret) {
			PMD_INIT_LOG(ERR, "Failed to start periodic alarm");
			goto fail;
		}

		/* Detach port by returning positive error number */
		return ENOTSUP;
	}

	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory for mac addr");
		ret = -ENOMEM;
		goto alarm_fail;
	}
	if (is_zero_ether_addr((struct ether_addr *)nic->mac_addr))
		eth_random_addr(&nic->mac_addr[0]);

	ether_addr_copy((struct ether_addr *)nic->mac_addr,
			&eth_dev->data->mac_addrs[0]);

	ret = nicvf_mbox_set_mac_addr(nic, nic->mac_addr);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to set mac addr");
		goto malloc_fail;
	}

	PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x",
		eth_dev->data->port_id, nic->vendor_id, nic->device_id,
		nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2],
		nic->mac_addr[3], nic->mac_addr[4], nic->mac_addr[5]);

	return 0;

malloc_fail:
	rte_free(eth_dev->data->mac_addrs);
alarm_fail:
	nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
fail:
	return ret;
}

static const struct rte_pci_id pci_id_nicvf_map[] = {
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_CN88XX_PASS1_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS1_NICVF,
	},
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF,
	},
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN81XX_NICVF,
	},
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN83XX_NICVF,
	},
	{
		.vendor_id = 0,
	},
};

static int nicvf_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct nicvf),
		nicvf_eth_dev_init);
}

static int nicvf_eth_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
}

static struct rte_pci_driver rte_nicvf_pmd = {
	.id_table = pci_id_nicvf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = nicvf_eth_pci_probe,
	.remove = nicvf_eth_pci_remove,
};
RTE_PMD_REGISTER_PCI(net_thunderx, rte_nicvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_thunderx, pci_id_nicvf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_thunderx, "* igb_uio | uio_pci_generic | vfio");
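/*
 * The macros above register the PMD with the EAL under the name
 * "net_thunderx", export its PCI ID table and declare that the device
 * must be bound to igb_uio, uio_pci_generic or vfio before it can be
 * probed. Illustrative workflow only (the PCI address is a placeholder):
 *   usertools/dpdk-devbind.py --bind=vfio-pci 0002:01:00.1
 * after which a DPDK application such as testpmd picks up the port via
 * nicvf_eth_pci_probe().
 */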