/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <sys/queue.h>

#include <rte_alarm.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_tailq.h>

#include "base/nicvf_plat.h"

#include "nicvf_ethdev.h"
#include "nicvf_rxtx.h"
#include "nicvf_svf.h"
#include "nicvf_logs.h"

int nicvf_logtype_mbox;
int nicvf_logtype_init;
int nicvf_logtype_driver;

static void nicvf_dev_stop(struct rte_eth_dev *dev);
static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup);
static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic,
			  bool cleanup);

RTE_INIT(nicvf_init_log);
static void
nicvf_init_log(void)
{
	nicvf_logtype_mbox = rte_log_register("pmd.nicvf.mbox");
	if (nicvf_logtype_mbox >= 0)
		rte_log_set_level(nicvf_logtype_mbox, RTE_LOG_NOTICE);

	nicvf_logtype_init = rte_log_register("pmd.nicvf.init");
	if (nicvf_logtype_init >= 0)
		rte_log_set_level(nicvf_logtype_init, RTE_LOG_NOTICE);

	nicvf_logtype_driver = rte_log_register("pmd.nicvf.driver");
	if (nicvf_logtype_driver >= 0)
		rte_log_set_level(nicvf_logtype_driver, RTE_LOG_NOTICE);
}

static inline int
nicvf_atomic_write_link_status(struct rte_eth_dev *dev,
			       struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
		*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static inline void
nicvf_set_eth_link_status(struct nicvf *nic, struct rte_eth_link *link)
{
	link->link_status = nic->link_up;
	link->link_duplex = ETH_LINK_AUTONEG;
	if (nic->duplex == NICVF_HALF_DUPLEX)
		link->link_duplex = ETH_LINK_HALF_DUPLEX;
	else if (nic->duplex == NICVF_FULL_DUPLEX)
		link->link_duplex = ETH_LINK_FULL_DUPLEX;
	link->link_speed = nic->speed;
	link->link_autoneg = ETH_LINK_AUTONEG;
}

static void
nicvf_interrupt(void *arg)
{
	struct rte_eth_dev *dev = arg;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) {
		if (dev->data->dev_conf.intr_conf.lsc)
			nicvf_set_eth_link_status(nic, &dev->data->dev_link);
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					      NULL);
	}

	rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
			  nicvf_interrupt, dev);
}

static void
nicvf_vf_interrupt(void *arg)
{
	struct nicvf *nic = arg;

	nicvf_reg_poll_interrupts(nic);

	rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
			  nicvf_vf_interrupt, nic);
}

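/*
 * The VF has no dedicated link-state interrupt; mailbox events from the PF
 * are detected by polling. The callbacks above re-arm themselves on every
 * invocation, every NICVF_INTR_POLL_INTERVAL_MS; the helpers below only
 * start and cancel that polling.
 */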
static int
nicvf_periodic_alarm_start(void (fn)(void *), void *arg)
{
	return rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000, fn, arg);
}

static int
nicvf_periodic_alarm_stop(void (fn)(void *), void *arg)
{
	return rte_eal_alarm_cancel(fn, arg);
}

/*
 * Return 0 means link status changed, -1 means not changed
 */
static int
nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
#define CHECK_INTERVAL 100  /* 100ms */
#define MAX_CHECK_TIME 90   /* 9s (90 * 100ms) in total */
	struct rte_eth_link link;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	int i;

	PMD_INIT_FUNC_TRACE();

	if (wait_to_complete) {
		/* rte_eth_link_get() might need to wait up to 9 seconds */
		for (i = 0; i < MAX_CHECK_TIME; i++) {
			memset(&link, 0, sizeof(link));
			nicvf_set_eth_link_status(nic, &link);
			if (link.link_status)
				break;
			rte_delay_ms(CHECK_INTERVAL);
		}
	} else {
		memset(&link, 0, sizeof(link));
		nicvf_set_eth_link_status(nic, &link);
	}
	return nicvf_atomic_write_link_status(dev, &link);
}

static int
nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint32_t buffsz, frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
	size_t i;
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;

	PMD_INIT_FUNC_TRACE();

	if (frame_size > NIC_HW_MAX_FRS)
		return -EINVAL;

	if (frame_size < NIC_HW_MIN_FRS)
		return -EINVAL;

	buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;

	/*
	 * Refuse mtu that requires the support of scattered packets
	 * when this feature has not been enabled before.
	 */
	if (!dev->data->scattered_rx &&
		(frame_size + 2 * VLAN_TAG_SIZE > buffsz))
		return -EINVAL;

	/* check <seg size> * <max_seg> >= max_frame */
	if (dev->data->scattered_rx &&
		(frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS))
		return -EINVAL;

	if (frame_size > ETHER_MAX_LEN)
		rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;

	if (nicvf_mbox_update_hw_max_frs(nic, frame_size))
		return -EINVAL;

	/* Update max frame size */
	rxmode->max_rx_pkt_len = (uint32_t)frame_size;
	nic->mtu = mtu;

	for (i = 0; i < nic->sqs_count; i++)
		nic->snicvf[i]->mtu = mtu;

	return 0;
}

static int
nicvf_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	uint64_t *data = regs->data;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (data == NULL) {
		regs->length = nicvf_reg_get_count();
		regs->width = THUNDERX_REG_BYTES;
		return 0;
	}

	/* Support only full register dump */
	if ((regs->length == 0) ||
		(regs->length == (uint32_t)nicvf_reg_get_count())) {
		regs->version = nic->vendor_id << 16 | nic->device_id;
		nicvf_reg_dump(nic, data);
		return 0;
	}
	return -ENOTSUP;
}

static int
nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	uint16_t qidx;
	struct nicvf_hw_rx_qstats rx_qstats;
	struct nicvf_hw_tx_qstats tx_qstats;
	struct nicvf_hw_stats port_stats;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint16_t rx_start, rx_end;
	uint16_t tx_start, tx_end;
	size_t i;

	/* RX queue indices for the first VF */
	nicvf_rx_range(dev, nic, &rx_start, &rx_end);

	/* Reading per RX ring stats */
	for (qidx = rx_start; qidx <= rx_end; qidx++) {
		if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		nicvf_hw_get_rx_qstats(nic, &rx_qstats, qidx);
		stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
		stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
	}

	/* TX queue indices for the first VF */
	nicvf_tx_range(dev, nic, &tx_start, &tx_end);

	/* Reading per TX ring stats */
	for (qidx = tx_start; qidx <= tx_end; qidx++) {
		if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		nicvf_hw_get_tx_qstats(nic, &tx_qstats, qidx);
		stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
		stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
	}

	for (i = 0; i < nic->sqs_count; i++) {
		struct nicvf *snic = nic->snicvf[i];

		if (snic == NULL)
			break;

		/* RX queue indices for a secondary VF */
		nicvf_rx_range(dev, snic, &rx_start, &rx_end);

		/* Reading per RX ring stats */
		for (qidx = rx_start; qidx <= rx_end; qidx++) {
			if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
				break;

			nicvf_hw_get_rx_qstats(snic, &rx_qstats,
					       qidx % MAX_RCV_QUEUES_PER_QS);
			stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
			stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
		}

		/* TX queue indices for a secondary VF */
		nicvf_tx_range(dev, snic, &tx_start, &tx_end);
		/* Reading per TX ring stats */
		for (qidx = tx_start; qidx <= tx_end; qidx++) {
			if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
				break;

			nicvf_hw_get_tx_qstats(snic, &tx_qstats,
					       qidx % MAX_SND_QUEUES_PER_QS);
			stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
			stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
		}
	}

	nicvf_hw_get_stats(nic, &port_stats);
	stats->ibytes = port_stats.rx_bytes;
	stats->ipackets = port_stats.rx_ucast_frames;
	stats->ipackets += port_stats.rx_bcast_frames;
	stats->ipackets += port_stats.rx_mcast_frames;
	stats->ierrors = port_stats.rx_l2_errors;
	stats->imissed = port_stats.rx_drop_red;
	stats->imissed += port_stats.rx_drop_overrun;
	stats->imissed += port_stats.rx_drop_bcast;
	stats->imissed += port_stats.rx_drop_mcast;
	stats->imissed += port_stats.rx_drop_l3_bcast;
	stats->imissed += port_stats.rx_drop_l3_mcast;

	stats->obytes = port_stats.tx_bytes_ok;
	stats->opackets = port_stats.tx_ucast_frames_ok;
	stats->opackets += port_stats.tx_bcast_frames_ok;
	stats->opackets += port_stats.tx_mcast_frames_ok;
	stats->oerrors = port_stats.tx_drops;

	return 0;
}

static const uint32_t *
nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	size_t copied;
	static uint32_t ptypes[32];
	struct nicvf *nic = nicvf_pmd_priv(dev);
	static const uint32_t ptypes_common[] = {
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
	};
	static const uint32_t ptypes_tunnel[] = {
		RTE_PTYPE_TUNNEL_GRE,
		RTE_PTYPE_TUNNEL_GENEVE,
		RTE_PTYPE_TUNNEL_VXLAN,
		RTE_PTYPE_TUNNEL_NVGRE,
	};
	static const uint32_t ptypes_end = RTE_PTYPE_UNKNOWN;

	copied = sizeof(ptypes_common);
	memcpy(ptypes, ptypes_common, copied);
	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
		memcpy((char *)ptypes + copied, ptypes_tunnel,
			sizeof(ptypes_tunnel));
		copied += sizeof(ptypes_tunnel);
	}

	memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end));
	if (dev->rx_pkt_burst == nicvf_recv_pkts ||
	    dev->rx_pkt_burst == nicvf_recv_pkts_multiseg)
		return ptypes;

	return NULL;
}

static void
nicvf_dev_stats_reset(struct rte_eth_dev *dev)
{
	int i;
	uint16_t rxqs = 0, txqs = 0;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint16_t rx_start, rx_end;
	uint16_t tx_start, tx_end;

	/* Reset all primary nic counters */
	nicvf_rx_range(dev, nic, &rx_start, &rx_end);
	for (i = rx_start; i <= rx_end; i++)
		rxqs |= (0x3 << (i * 2));

	nicvf_tx_range(dev, nic, &tx_start, &tx_end);
	for (i = tx_start; i <= tx_end; i++)
		txqs |= (0x3 << (i * 2));

	nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, rxqs, txqs);

	/* Reset secondary nic queue counters */
	for (i = 0; i < nic->sqs_count; i++) {
		struct nicvf *snic = nic->snicvf[i];
		int j;

		if (snic == NULL)
			break;

		/* Use a separate index so the outer SQS loop counter
		 * is not clobbered.
		 */
		nicvf_rx_range(dev, snic, &rx_start, &rx_end);
		for (j = rx_start; j <= rx_end; j++)
			rxqs |= (0x3 << ((j % MAX_CMP_QUEUES_PER_QS) * 2));

		nicvf_tx_range(dev, snic, &tx_start, &tx_end);
		for (j = tx_start; j <= tx_end; j++)
			txqs |= (0x3 << ((j % MAX_SND_QUEUES_PER_QS) * 2));

		nicvf_mbox_reset_stat_counters(snic, 0, 0, rxqs, txqs);
	}
}

/* Promiscuous mode enabled by default in LMAC to VF 1:1 map configuration */
static void
nicvf_dev_promisc_enable(struct rte_eth_dev *dev __rte_unused)
{
}

static inline uint64_t
nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss)
{
	uint64_t nic_rss = 0;

	if (ethdev_rss & ETH_RSS_IPV4)
		nic_rss |= RSS_IP_ENA;

	if (ethdev_rss & ETH_RSS_IPV6)
		nic_rss |= RSS_IP_ENA;

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP)
		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP)
		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP)
		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);

	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP)
		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);

	if (ethdev_rss & ETH_RSS_PORT)
		nic_rss |= RSS_L2_EXTENDED_HASH_ENA;

	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
		if (ethdev_rss & ETH_RSS_VXLAN)
			nic_rss |= RSS_TUN_VXLAN_ENA;

		if (ethdev_rss & ETH_RSS_GENEVE)
			nic_rss |= RSS_TUN_GENEVE_ENA;

		if (ethdev_rss & ETH_RSS_NVGRE)
			nic_rss |= RSS_TUN_NVGRE_ENA;
	}

	return nic_rss;
}

static inline uint64_t
nicvf_rss_nic_to_ethdev(struct nicvf *nic, uint64_t nic_rss)
{
	uint64_t ethdev_rss = 0;

	if (nic_rss & RSS_IP_ENA)
		ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6);

	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA))
		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP |
				ETH_RSS_NONFRAG_IPV6_TCP);

	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA))
		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP |
				ETH_RSS_NONFRAG_IPV6_UDP);

	if (nic_rss & RSS_L2_EXTENDED_HASH_ENA)
		ethdev_rss |= ETH_RSS_PORT;

	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
		if (nic_rss & RSS_TUN_VXLAN_ENA)
			ethdev_rss |= ETH_RSS_VXLAN;

		if (nic_rss & RSS_TUN_GENEVE_ENA)
			ethdev_rss |= ETH_RSS_GENEVE;

		if (nic_rss & RSS_TUN_NVGRE_ENA)
			ethdev_rss |= ETH_RSS_NVGRE;
	}
	return ethdev_rss;
}

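/*
 * RETA handling: the hardware indirection table has a fixed size of
 * NIC_MAX_RSS_IDR_TBL_SIZE entries, so the query and update handlers below
 * only accept a reta_size that matches it exactly and always operate on the
 * full table.
 */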
static int
nicvf_dev_reta_query(struct rte_eth_dev *dev,
		     struct rte_eth_rss_reta_entry64 *reta_conf,
		     uint16_t reta_size)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
	int ret, i, j;

	if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
		RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
			"(%d) doesn't match the number hardware can support "
			"(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
		return -EINVAL;
	}

	ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
	if (ret)
		return ret;

	/* Copy RETA table */
	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = tbl[j];
	}

	return 0;
}

static int
nicvf_dev_reta_update(struct rte_eth_dev *dev,
		      struct rte_eth_rss_reta_entry64 *reta_conf,
		      uint16_t reta_size)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
	int ret, i, j;

	if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
		RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
			"(%d) doesn't match the number hardware can support "
			"(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
		return -EINVAL;
	}

	ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
	if (ret)
		return ret;

	/* Copy RETA table */
	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				tbl[j] = reta_conf[i].reta[j];
	}

	return nicvf_rss_reta_update(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
}

static int
nicvf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (rss_conf->rss_key)
		nicvf_rss_get_key(nic, rss_conf->rss_key);

	rss_conf->rss_key_len = RSS_HASH_KEY_BYTE_SIZE;
	rss_conf->rss_hf = nicvf_rss_nic_to_ethdev(nic, nicvf_rss_get_cfg(nic));
	return 0;
}

static int
nicvf_dev_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint64_t nic_rss;

	if (rss_conf->rss_key &&
		rss_conf->rss_key_len != RSS_HASH_KEY_BYTE_SIZE) {
		RTE_LOG(ERR, PMD, "Hash key size mismatch %d",
			rss_conf->rss_key_len);
		return -EINVAL;
	}

	if (rss_conf->rss_key)
		nicvf_rss_set_key(nic, rss_conf->rss_key);

	nic_rss = nicvf_rss_ethdev_to_nic(nic, rss_conf->rss_hf);
	nicvf_rss_set_cfg(nic, nic_rss);
	return 0;
}

static int
nicvf_qset_cq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
		    struct nicvf_rxq *rxq, uint16_t qidx, uint32_t desc_cnt)
{
	const struct rte_memzone *rz;
	uint32_t ring_size = CMP_QUEUE_SZ_MAX * sizeof(union cq_entry_t);

	rz = rte_eth_dma_zone_reserve(dev, "cq_ring",
				      nicvf_netdev_qidx(nic, qidx), ring_size,
				      NICVF_CQ_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for cq hw ring");
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	rxq->phys = rz->iova;
	rxq->desc = rz->addr;
	rxq->qlen_mask = desc_cnt - 1;

	return 0;
}

static int
nicvf_qset_sq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
		    struct nicvf_txq *sq, uint16_t qidx, uint32_t desc_cnt)
{
	const struct rte_memzone *rz;
	uint32_t ring_size = SND_QUEUE_SZ_MAX * sizeof(union sq_entry_t);

	rz = rte_eth_dma_zone_reserve(dev, "sq",
				      nicvf_netdev_qidx(nic, qidx), ring_size,
				      NICVF_SQ_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for sq hw ring");
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	sq->phys = rz->iova;
	sq->desc = rz->addr;
	sq->qlen_mask = desc_cnt - 1;

	return 0;
}

static int
nicvf_qset_rbdr_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
		      uint32_t desc_cnt, uint32_t buffsz)
{
	struct nicvf_rbdr *rbdr;
	const struct rte_memzone *rz;
	uint32_t ring_size;

	assert(nic->rbdr == NULL);
	rbdr = rte_zmalloc_socket("rbdr", sizeof(struct nicvf_rbdr),
				  RTE_CACHE_LINE_SIZE, nic->node);
	if (rbdr == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr");
		return -ENOMEM;
	}

	ring_size = sizeof(struct rbdr_entry_t) * RBDR_QUEUE_SZ_MAX;
	rz = rte_eth_dma_zone_reserve(dev, "rbdr",
				      nicvf_netdev_qidx(nic, 0), ring_size,
				      NICVF_RBDR_BASE_ALIGN_BYTES, nic->node);
	if (rz == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr desc ring");
		return -ENOMEM;
	}

	memset(rz->addr, 0, ring_size);

	rbdr->phys = rz->iova;
	rbdr->tail = 0;
	rbdr->next_tail = 0;
	rbdr->desc = rz->addr;
	rbdr->buffsz = buffsz;
	rbdr->qlen_mask = desc_cnt - 1;
	rbdr->rbdr_status =
		nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_STATUS0;
	rbdr->rbdr_door =
		nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_DOOR;

	nic->rbdr = rbdr;
	return 0;
}

static void
nicvf_rbdr_release_mbuf(struct rte_eth_dev *dev, struct nicvf *nic,
			nicvf_iova_addr_t phy)
{
	uint16_t qidx;
	void *obj;
	struct nicvf_rxq *rxq;
	uint16_t rx_start, rx_end;

	/* Get queue ranges for this VF */
	nicvf_rx_range(dev, nic, &rx_start, &rx_end);

	for (qidx = rx_start; qidx <= rx_end; qidx++) {
		rxq = dev->data->rx_queues[qidx];
		if (rxq->precharge_cnt) {
			obj = (void *)nicvf_mbuff_phy2virt(phy,
							   rxq->mbuf_phys_off);
			rte_mempool_put(rxq->pool, obj);
			rxq->precharge_cnt--;
			break;
		}
	}
}

static inline void
nicvf_rbdr_release_mbufs(struct rte_eth_dev *dev, struct nicvf *nic)
{
	uint32_t qlen_mask, head;
	struct rbdr_entry_t *entry;
	struct nicvf_rbdr *rbdr = nic->rbdr;

	qlen_mask = rbdr->qlen_mask;
	head = rbdr->head;
	while (head != rbdr->tail) {
		entry = rbdr->desc + head;
		nicvf_rbdr_release_mbuf(dev, nic, entry->full_addr);
		head++;
		head = head & qlen_mask;
	}
}

static inline void
nicvf_tx_queue_release_mbufs(struct nicvf_txq *txq)
{
	uint32_t head;

	head = txq->head;
	while (head != txq->tail) {
		if (txq->txbuffs[head]) {
			rte_pktmbuf_free_seg(txq->txbuffs[head]);
			txq->txbuffs[head] = NULL;
		}
		head++;
		head = head & txq->qlen_mask;
	}
}

static void
nicvf_tx_queue_reset(struct nicvf_txq *txq)
{
	uint32_t txq_desc_cnt = txq->qlen_mask + 1;

	memset(txq->desc, 0, sizeof(union sq_entry_t) * txq_desc_cnt);
	memset(txq->txbuffs, 0, sizeof(struct rte_mbuf *) * txq_desc_cnt);
	txq->tail = 0;
	txq->head = 0;
	txq->xmit_bufs = 0;
}

static inline int
nicvf_vf_start_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
			uint16_t qidx)
{
	struct nicvf_txq *txq;
	int ret;

	assert(qidx < MAX_SND_QUEUES_PER_QS);

	if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
		RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
	txq->pool = NULL;
	ret = nicvf_qset_sq_config(nic, qidx, txq);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure sq VF%d %d %d",
			     nic->vf_id, qidx, ret);
		goto config_sq_error;
	}

	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STARTED;
	return ret;

config_sq_error:
	nicvf_qset_sq_reclaim(nic, qidx);
	return ret;
}

static inline int
nicvf_vf_stop_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
		       uint16_t qidx)
{
	struct nicvf_txq *txq;
	int ret;

	assert(qidx < MAX_SND_QUEUES_PER_QS);

	if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
		RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;

	ret = nicvf_qset_sq_reclaim(nic, qidx);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim sq VF%d %d %d",
			     nic->vf_id, qidx, ret);

	txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
	nicvf_tx_queue_release_mbufs(txq);
	nicvf_tx_queue_reset(txq);

	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STOPPED;
	return ret;
}

static inline int
nicvf_configure_cpi(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint16_t qidx, qcnt;
	int ret;

	/* Count started rx queues */
	for (qidx = qcnt = 0; qidx < dev->data->nb_rx_queues; qidx++)
		if (dev->data->rx_queue_state[qidx] ==
			RTE_ETH_QUEUE_STATE_STARTED)
			qcnt++;

	nic->cpi_alg = CPI_ALG_NONE;
	ret = nicvf_mbox_config_cpi(nic, qcnt);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to configure CPI %d", ret);

	return ret;
}

static inline int
nicvf_configure_rss(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint64_t rsshf;
	int ret = -EINVAL;

	rsshf = nicvf_rss_ethdev_to_nic(nic,
			dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);
	PMD_DRV_LOG(INFO, "mode=%d rx_queues=%d loopback=%d rsshf=0x%" PRIx64,
		    dev->data->dev_conf.rxmode.mq_mode,
		    dev->data->nb_rx_queues,
		    dev->data->dev_conf.lpbk_mode, rsshf);

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
		ret = nicvf_rss_term(nic);
	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		ret = nicvf_rss_config(nic, dev->data->nb_rx_queues, rsshf);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret);

	return ret;
}

static int
nicvf_configure_rss_reta(struct rte_eth_dev *dev)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	unsigned int idx, qmap_size;
	uint8_t qmap[RTE_MAX_QUEUES_PER_PORT];
	uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];

	if (nic->cpi_alg != CPI_ALG_NONE)
		return -EINVAL;

	/* Prepare queue map */
	for (idx = 0, qmap_size = 0; idx < dev->data->nb_rx_queues; idx++) {
		if (dev->data->rx_queue_state[idx] ==
			RTE_ETH_QUEUE_STATE_STARTED)
			qmap[qmap_size++] = idx;
	}

	/* Update default RSS RETA */
	for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
		default_reta[idx] = qmap[idx % qmap_size];

	return nicvf_rss_reta_update(nic, default_reta,
				     NIC_MAX_RSS_IDR_TBL_SIZE);
}

static void
nicvf_dev_tx_queue_release(void *sq)
{
	struct nicvf_txq *txq;

	PMD_INIT_FUNC_TRACE();

	txq = (struct nicvf_txq *)sq;
	if (txq) {
		if (txq->txbuffs != NULL) {
			nicvf_tx_queue_release_mbufs(txq);
			rte_free(txq->txbuffs);
			txq->txbuffs = NULL;
		}
		rte_free(txq);
	}
}

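/*
 * Burst-callback selection: the Tx path switches to the multi-segment
 * routine as soon as any queue enables DEV_TX_OFFLOAD_MULTI_SEGS, and the
 * Rx path follows dev->data->scattered_rx. Both helpers are invoked again
 * from nicvf_dev_start() once the final queue configuration is known.
 */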
static void
nicvf_set_tx_function(struct rte_eth_dev *dev)
{
	struct nicvf_txq *txq;
	size_t i;
	bool multiseg = false;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
			multiseg = true;
			break;
		}
	}

	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
	if (multiseg) {
		PMD_DRV_LOG(DEBUG, "Using multi-segment tx callback");
		dev->tx_pkt_burst = nicvf_xmit_pkts_multiseg;
	} else {
		PMD_DRV_LOG(DEBUG, "Using single-segment tx callback");
		dev->tx_pkt_burst = nicvf_xmit_pkts;
	}

	if (txq->pool_free == nicvf_single_pool_free_xmited_buffers)
		PMD_DRV_LOG(DEBUG, "Using single-mempool tx free method");
	else
		PMD_DRV_LOG(DEBUG, "Using multi-mempool tx free method");
}

static void
nicvf_set_rx_function(struct rte_eth_dev *dev)
{
	if (dev->data->scattered_rx) {
		PMD_DRV_LOG(DEBUG, "Using multi-segment rx callback");
		dev->rx_pkt_burst = nicvf_recv_pkts_multiseg;
	} else {
		PMD_DRV_LOG(DEBUG, "Using single-segment rx callback");
		dev->rx_pkt_burst = nicvf_recv_pkts;
	}
}

static int
nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
			 uint16_t nb_desc, unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	uint16_t tx_free_thresh;
	bool is_single_pool;
	struct nicvf_txq *txq;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint64_t conf_offloads, offload_capa, unsupported_offloads;

	PMD_INIT_FUNC_TRACE();

	if (qidx >= MAX_SND_QUEUES_PER_QS)
		nic = nic->snicvf[qidx / MAX_SND_QUEUES_PER_QS - 1];

	qidx = qidx % MAX_SND_QUEUES_PER_QS;

	/* Socket id check */
	if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
			    socket_id, nic->node);

	conf_offloads = tx_conf->offloads;
	offload_capa = NICVF_TX_OFFLOAD_CAPA;

	unsupported_offloads = conf_offloads & ~offload_capa;
	if (unsupported_offloads) {
		PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported. "
			"Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
			unsupported_offloads, conf_offloads, offload_capa);
		return -ENOTSUP;
	}

	/* Tx deferred start is not supported */
	if (tx_conf->tx_deferred_start) {
		PMD_INIT_LOG(ERR, "Tx deferred start not supported");
		return -EINVAL;
	}

	/* Roundup nb_desc to available qsize and validate max number of desc */
	nb_desc = nicvf_qsize_sq_roundup(nb_desc);
	if (nb_desc == 0) {
		PMD_INIT_LOG(ERR, "Value of nb_desc beyond available sq qsize");
		return -EINVAL;
	}

	/* Validate tx_free_thresh */
	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
				    tx_conf->tx_free_thresh :
				    NICVF_DEFAULT_TX_FREE_THRESH);

	if (tx_free_thresh > (nb_desc) ||
		tx_free_thresh > NICVF_MAX_TX_FREE_THRESH) {
		PMD_INIT_LOG(ERR,
			"tx_free_thresh must be less than the number of TX "
			"descriptors. (tx_free_thresh=%u port=%d "
			"queue=%d)", (unsigned int)tx_free_thresh,
			(int)dev->data->port_id, (int)qidx);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed */
	if (dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
		PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
			   nicvf_netdev_qidx(nic, qidx));
		nicvf_dev_tx_queue_release(
			dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)]);
		dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
	}

	/* Allocating tx queue data structure */
	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nicvf_txq),
				 RTE_CACHE_LINE_SIZE, nic->node);
	if (txq == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate txq=%d",
			     nicvf_netdev_qidx(nic, qidx));
		return -ENOMEM;
	}

	txq->nic = nic;
	txq->queue_id = qidx;
	txq->tx_free_thresh = tx_free_thresh;
	txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
	txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
	txq->offloads = conf_offloads;

	is_single_pool = !!(conf_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);

	/* Choose optimum free threshold value for multipool case */
	if (!is_single_pool) {
		txq->tx_free_thresh = (uint16_t)
			(tx_conf->tx_free_thresh == NICVF_DEFAULT_TX_FREE_THRESH ?
			 NICVF_TX_FREE_MPOOL_THRESH :
			 tx_conf->tx_free_thresh);
		txq->pool_free = nicvf_multi_pool_free_xmited_buffers;
	} else {
		txq->pool_free = nicvf_single_pool_free_xmited_buffers;
	}

	/* Allocate software ring */
	txq->txbuffs = rte_zmalloc_socket("txq->txbuffs",
					  nb_desc * sizeof(struct rte_mbuf *),
					  RTE_CACHE_LINE_SIZE, nic->node);

	if (txq->txbuffs == NULL) {
		nicvf_dev_tx_queue_release(txq);
		return -ENOMEM;
	}

	if (nicvf_qset_sq_alloc(dev, nic, txq, qidx, nb_desc)) {
		PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx);
		nicvf_dev_tx_queue_release(txq);
		return -ENOMEM;
	}

	nicvf_tx_queue_reset(txq);

	PMD_INIT_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p"
		     " phys=0x%" PRIx64 " offloads=0x%" PRIx64,
		     nicvf_netdev_qidx(nic, qidx), txq, nb_desc, txq->desc,
		     txq->phys, txq->offloads);

	dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq;
	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

static inline void
nicvf_rx_queue_release_mbufs(struct rte_eth_dev *dev, struct nicvf_rxq *rxq)
{
	uint32_t rxq_cnt;
	uint32_t nb_pkts, released_pkts = 0;
	uint32_t refill_cnt = 0;
	struct rte_mbuf *rx_pkts[NICVF_MAX_RX_FREE_THRESH];

	if (dev->rx_pkt_burst == NULL)
		return;

	while ((rxq_cnt = nicvf_dev_rx_queue_count(dev,
				nicvf_netdev_qidx(rxq->nic, rxq->queue_id)))) {
		nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts,
					    NICVF_MAX_RX_FREE_THRESH);
		PMD_DRV_LOG(INFO, "nb_pkts=%d rxq_cnt=%d", nb_pkts, rxq_cnt);
		while (nb_pkts) {
			rte_pktmbuf_free_seg(rx_pkts[--nb_pkts]);
			released_pkts++;
		}
	}

	refill_cnt += nicvf_dev_rbdr_refill(dev,
			nicvf_netdev_qidx(rxq->nic, rxq->queue_id));

	PMD_DRV_LOG(INFO, "free_cnt=%d refill_cnt=%d",
		    released_pkts, refill_cnt);
}

static void
nicvf_rx_queue_reset(struct nicvf_rxq *rxq)
{
	rxq->head = 0;
	rxq->available_space = 0;
	rxq->recv_buffers = 0;
}

static inline int
nicvf_vf_start_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
			uint16_t qidx)
{
	struct nicvf_rxq *rxq;
	int ret;

	assert(qidx < MAX_RCV_QUEUES_PER_QS);

	if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
		RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	/* Update rbdr pointer to all rxq */
	rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
	rxq->shared_rbdr = nic->rbdr;

	ret = nicvf_qset_rq_config(nic, qidx, rxq);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure rq VF%d %d %d",
			     nic->vf_id, qidx, ret);
		goto config_rq_error;
	}
	ret = nicvf_qset_cq_config(nic, qidx, rxq);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure cq VF%d %d %d",
			     nic->vf_id, qidx, ret);
		goto config_cq_error;
	}

	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STARTED;
	return 0;

config_cq_error:
	nicvf_qset_cq_reclaim(nic, qidx);
config_rq_error:
	nicvf_qset_rq_reclaim(nic, qidx);
	return ret;
}

static inline int
nicvf_vf_stop_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
		       uint16_t qidx)
{
	struct nicvf_rxq *rxq;
	int ret, other_error;

	if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
		RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;

	ret = nicvf_qset_rq_reclaim(nic, qidx);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim rq VF%d %d %d",
			     nic->vf_id, qidx, ret);

	other_error = ret;
	rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
	nicvf_rx_queue_release_mbufs(dev, rxq);
	nicvf_rx_queue_reset(rxq);

	ret = nicvf_qset_cq_reclaim(nic, qidx);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim cq VF%d %d %d",
			     nic->vf_id, qidx, ret);

	other_error |= ret;
	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STOPPED;
	return other_error;
}

static void
nicvf_dev_rx_queue_release(void *rx_queue)
{
	PMD_INIT_FUNC_TRACE();

	rte_free(rx_queue);
}

static int
nicvf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	int ret;

	if (qidx >= MAX_RCV_QUEUES_PER_QS)
		nic = nic->snicvf[(qidx / MAX_RCV_QUEUES_PER_QS - 1)];

	qidx = qidx % MAX_RCV_QUEUES_PER_QS;

	ret = nicvf_vf_start_rx_queue(dev, nic, qidx);
	if (ret)
		return ret;

	ret = nicvf_configure_cpi(dev);
	if (ret)
		return ret;

	return nicvf_configure_rss_reta(dev);
}

static int
nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	int ret;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (qidx >= MAX_SND_QUEUES_PER_QS)
		nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];

	qidx = qidx % MAX_RCV_QUEUES_PER_QS;

	ret = nicvf_vf_stop_rx_queue(dev, nic, qidx);
	ret |= nicvf_configure_cpi(dev);
	ret |= nicvf_configure_rss_reta(dev);
	return ret;
}

static int
nicvf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (qidx >= MAX_SND_QUEUES_PER_QS)
		nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];

	qidx = qidx % MAX_SND_QUEUES_PER_QS;

	return nicvf_vf_start_tx_queue(dev, nic, qidx);
}

static int
nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);

	if (qidx >= MAX_SND_QUEUES_PER_QS)
		nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];

	qidx = qidx % MAX_SND_QUEUES_PER_QS;

	return nicvf_vf_stop_tx_queue(dev, nic, qidx);
}

static inline void
nicvf_rxq_mbuf_setup(struct nicvf_rxq *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def;

	RTE_BUILD_BUG_ON(sizeof(union mbuf_initializer) != 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
				offsetof(struct rte_mbuf, data_off) != 2);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
				offsetof(struct rte_mbuf, data_off) != 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
				offsetof(struct rte_mbuf, data_off) != 6);
	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* Prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer.value = *(uint64_t *)p;
}

static int
nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
			 uint16_t nb_desc, unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	uint16_t rx_free_thresh;
	struct nicvf_rxq *rxq;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint64_t conf_offloads, offload_capa, unsupported_offloads;

	PMD_INIT_FUNC_TRACE();

	if (qidx >= MAX_RCV_QUEUES_PER_QS)
		nic = nic->snicvf[qidx / MAX_RCV_QUEUES_PER_QS - 1];

	qidx = qidx % MAX_RCV_QUEUES_PER_QS;

	/* Socket id check */
	if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
			    socket_id, nic->node);

	conf_offloads = rx_conf->offloads;

	if (conf_offloads & DEV_RX_OFFLOAD_CHECKSUM) {
		PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
		conf_offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
	}

	offload_capa = NICVF_RX_OFFLOAD_CAPA;
	unsupported_offloads = conf_offloads & ~offload_capa;

	if (unsupported_offloads) {
		PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
			"Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
			unsupported_offloads, conf_offloads, offload_capa);
		return -ENOTSUP;
	}

	/* Mempool memory must be contiguous, so must be one memory segment */
	if (mp->nb_mem_chunks != 1) {
		PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
		return -EINVAL;
	}

	/* Mempool memory must be physically contiguous */
	if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG) {
		PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous");
		return -EINVAL;
	}

	/* Rx deferred start is not supported */
	if (rx_conf->rx_deferred_start) {
		PMD_INIT_LOG(ERR, "Rx deferred start not supported");
		return -EINVAL;
	}

	/* Roundup nb_desc to available qsize and validate max number of desc */
	nb_desc = nicvf_qsize_cq_roundup(nb_desc);
	if (nb_desc == 0) {
		PMD_INIT_LOG(ERR, "Value nb_desc beyond available hw cq qsize");
		return -EINVAL;
	}

	/* Check rx_free_thresh upper bound */
	rx_free_thresh = (uint16_t)((rx_conf->rx_free_thresh) ?
				    rx_conf->rx_free_thresh :
				    NICVF_DEFAULT_RX_FREE_THRESH);
	if (rx_free_thresh > NICVF_MAX_RX_FREE_THRESH ||
		rx_free_thresh >= nb_desc * .75) {
		PMD_INIT_LOG(ERR, "rx_free_thresh greater than expected %d",
			     rx_free_thresh);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed */
	if (dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
		PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
			   nicvf_netdev_qidx(nic, qidx));
		nicvf_dev_rx_queue_release(
			dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)]);
		dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
	}

	/* Allocate rxq memory */
	rxq = rte_zmalloc_socket("ethdev rx queue", sizeof(struct nicvf_rxq),
				 RTE_CACHE_LINE_SIZE, nic->node);
	if (rxq == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate rxq=%d",
			     nicvf_netdev_qidx(nic, qidx));
		return -ENOMEM;
	}

	rxq->nic = nic;
	rxq->pool = mp;
	rxq->queue_id = qidx;
	rxq->port_id = dev->data->port_id;
	rxq->rx_free_thresh = rx_free_thresh;
	rxq->rx_drop_en = rx_conf->rx_drop_en;
	rxq->cq_status = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_STATUS;
	rxq->cq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_DOOR;
	rxq->precharge_cnt = 0;

	if (nicvf_hw_cap(nic) & NICVF_CAP_CQE_RX2)
		rxq->rbptr_offset = NICVF_CQE_RX2_RBPTR_WORD;
	else
		rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;

	nicvf_rxq_mbuf_setup(rxq);

	/* Alloc completion queue */
	if (nicvf_qset_cq_alloc(dev, nic, rxq, rxq->queue_id, nb_desc)) {
		PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id);
		nicvf_dev_rx_queue_release(rxq);
		return -ENOMEM;
	}

	nicvf_rx_queue_reset(rxq);

	PMD_INIT_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d)"
		     " phy=0x%" PRIx64 " offloads=0x%" PRIx64,
		     nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
		     rte_mempool_avail_count(mp), rxq->phys, conf_offloads);

	dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
		RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

static void
nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct nicvf *nic = nicvf_pmd_priv(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	PMD_INIT_FUNC_TRACE();

	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	/* Autonegotiation may be disabled */
	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
	dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
	if (nicvf_hw_version(nic) != PCI_SUB_DEVICE_ID_CN81XX_NICVF)
		dev_info->speed_capa |= ETH_LINK_SPEED_40G;

	dev_info->min_rx_bufsize = ETHER_MIN_MTU;
	dev_info->max_rx_pktlen = NIC_HW_MAX_FRS;
	dev_info->max_rx_queues =
			(uint16_t)MAX_RCV_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
	dev_info->max_tx_queues =
			(uint16_t)MAX_SND_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
	dev_info->max_mac_addrs = 1;
	dev_info->max_vfs = pci_dev->max_vfs;

	dev_info->rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;
	dev_info->tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;
	dev_info->rx_queue_offload_capa = NICVF_RX_OFFLOAD_CAPA;
	dev_info->tx_queue_offload_capa = NICVF_TX_OFFLOAD_CAPA;

	dev_info->reta_size = nic->rss_info.rss_size;
	dev_info->hash_key_size = RSS_HASH_KEY_BYTE_SIZE;
	dev_info->flow_type_rss_offloads = NICVF_RSS_OFFLOAD_PASS1;
	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING)
		dev_info->flow_type_rss_offloads |= NICVF_RSS_OFFLOAD_TUNNEL;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
		.txq_flags =
			ETH_TXQ_FLAGS_NOMULTSEGS |
			ETH_TXQ_FLAGS_NOREFCOUNT |
			ETH_TXQ_FLAGS_NOMULTMEMP |
			ETH_TXQ_FLAGS_NOVLANOFFL |
			ETH_TXQ_FLAGS_NOXSUMSCTP,
		.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE |
			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
			DEV_TX_OFFLOAD_UDP_CKSUM |
			DEV_TX_OFFLOAD_TCP_CKSUM,
	};
}

static nicvf_iova_addr_t
rbdr_rte_mempool_get(void *dev, void *opaque)
{
	uint16_t qidx;
	uintptr_t mbuf;
	struct nicvf_rxq *rxq;
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)dev;
	struct nicvf *nic = (struct nicvf *)opaque;
	uint16_t rx_start, rx_end;

	/* Get queue ranges for this VF */
	nicvf_rx_range(eth_dev, nic, &rx_start, &rx_end);

	for (qidx = rx_start; qidx <= rx_end; qidx++) {
		rxq = eth_dev->data->rx_queues[qidx];
		/* Maintain equal buffer count across all pools */
		if (rxq->precharge_cnt >= rxq->qlen_mask)
			continue;
		rxq->precharge_cnt++;
		mbuf = (uintptr_t)rte_pktmbuf_alloc(rxq->pool);
		if (mbuf)
			return nicvf_mbuff_virt2phy(mbuf, rxq->mbuf_phys_off);
	}
	return 0;
}

static int
nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
{
	int ret;
	uint16_t qidx, data_off;
	uint32_t total_rxq_desc, nb_rbdr_desc, exp_buffs;
	uint64_t mbuf_phys_off = 0;
	struct nicvf_rxq *rxq;
	struct rte_mbuf *mbuf;
	uint16_t rx_start, rx_end;
	uint16_t tx_start, tx_end;
	bool vlan_strip;

	PMD_INIT_FUNC_TRACE();

	/* Userspace process exited without proper shutdown in last run */
	if (nicvf_qset_rbdr_active(nic, 0))
		nicvf_vf_stop(dev, nic, false);

	/* Get queue ranges for this VF */
	nicvf_rx_range(dev, nic, &rx_start, &rx_end);

	/*
	 * Thunderx nicvf PMD can support more than one pool per port only when
	 * 1) Data payload size is same across all the pools in given port
	 * AND
	 * 2) All mbufs in the pools are from the same hugepage
	 * AND
	 * 3) Mbuf metadata size is same across all the pools in given port
	 *
	 * This is to support existing application that uses multiple pool/port.
	 * But, the purpose of using multipool for QoS will not be addressed.
	 *
	 */

	/* Validate mempool attributes */
	for (qidx = rx_start; qidx <= rx_end; qidx++) {
		rxq = dev->data->rx_queues[qidx];
		rxq->mbuf_phys_off = nicvf_mempool_phy_offset(rxq->pool);
		mbuf = rte_pktmbuf_alloc(rxq->pool);
		if (mbuf == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate mbuf VF%d qid=%d "
				     "pool=%s",
				     nic->vf_id, qidx, rxq->pool->name);
			return -ENOMEM;
		}
		data_off = nicvf_mbuff_meta_length(mbuf);
		data_off += RTE_PKTMBUF_HEADROOM;
		rte_pktmbuf_free(mbuf);

		if (data_off % RTE_CACHE_LINE_SIZE) {
			PMD_INIT_LOG(ERR, "%s: unaligned data_off=%d delta=%d",
				rxq->pool->name, data_off,
				data_off % RTE_CACHE_LINE_SIZE);
			return -EINVAL;
		}
		rxq->mbuf_phys_off -= data_off;

		if (mbuf_phys_off == 0)
			mbuf_phys_off = rxq->mbuf_phys_off;
		if (mbuf_phys_off != rxq->mbuf_phys_off) {
			PMD_INIT_LOG(ERR, "pool params not same,%s VF%d %"
				     PRIx64, rxq->pool->name, nic->vf_id,
				     mbuf_phys_off);
			return -EINVAL;
		}
	}

	/* Check the level of buffers in the pool */
	total_rxq_desc = 0;
	for (qidx = rx_start; qidx <= rx_end; qidx++) {
		rxq = dev->data->rx_queues[qidx];
		/* Count total numbers of rxq descs */
		total_rxq_desc += rxq->qlen_mask + 1;
		exp_buffs = RTE_MEMPOOL_CACHE_MAX_SIZE + rxq->rx_free_thresh;
		exp_buffs *= dev->data->nb_rx_queues;
		if (rte_mempool_avail_count(rxq->pool) < exp_buffs) {
			PMD_INIT_LOG(ERR, "Buff shortage in pool=%s (%d/%d)",
				     rxq->pool->name,
				     rte_mempool_avail_count(rxq->pool),
				     exp_buffs);
			return -ENOENT;
		}
	}

	/* Check RBDR desc overflow */
	ret = nicvf_qsize_rbdr_roundup(total_rxq_desc);
	if (ret == 0) {
		PMD_INIT_LOG(ERR, "Reached RBDR desc limit, reduce nr desc "
			     "VF%d", nic->vf_id);
		return -ENOMEM;
	}

	/* Enable qset */
	ret = nicvf_qset_config(nic);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to enable qset %d VF%d", ret,
			     nic->vf_id);
		return ret;
	}

	/* Allocate RBDR and RBDR ring desc */
	nb_rbdr_desc = nicvf_qsize_rbdr_roundup(total_rxq_desc);
	ret = nicvf_qset_rbdr_alloc(dev, nic, nb_rbdr_desc, rbdrsz);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory for rbdr alloc "
			     "VF%d", nic->vf_id);
		goto qset_reclaim;
	}

	/* Enable and configure RBDR registers */
	ret = nicvf_qset_rbdr_config(nic, 0);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure rbdr %d VF%d", ret,
			     nic->vf_id);
		goto qset_rbdr_free;
	}

	/* Fill rte_mempool buffers in RBDR pool and precharge it */
	ret = nicvf_qset_rbdr_precharge(dev, nic, 0, rbdr_rte_mempool_get,
					total_rxq_desc);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to fill rbdr %d VF%d", ret,
			     nic->vf_id);
		goto qset_rbdr_reclaim;
	}

	PMD_DRV_LOG(INFO, "Filled %d out of %d entries in RBDR VF%d",
		    nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);

	/* Configure VLAN Strip */
	vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_VLAN_STRIP);
	nicvf_vlan_hw_strip(nic, vlan_strip);

	/* Based on the packet type(IPv4 or IPv6), the nicvf HW aligns L3 data
	 * to the 64bit memory address.
	 * The alignment creates a hole in mbuf(between the end of headroom and
	 * packet data start).
	 * The new revision of the HW provides an option to disable the L3
	 * alignment feature and make the mbuf layout look more like other
	 * NICs. For better application compatibility, the L3 alignment is
	 * disabled on the hardware revisions that support this option.
	 */
	nicvf_apad_config(nic, false);

	/* Get queue ranges for this VF */
	nicvf_tx_range(dev, nic, &tx_start, &tx_end);

	/* Configure TX queues */
	for (qidx = tx_start; qidx <= tx_end; qidx++) {
		ret = nicvf_vf_start_tx_queue(dev, nic,
			qidx % MAX_SND_QUEUES_PER_QS);
		if (ret)
			goto start_txq_error;
	}

	/* Configure RX queues */
	for (qidx = rx_start; qidx <= rx_end; qidx++) {
		ret = nicvf_vf_start_rx_queue(dev, nic,
			qidx % MAX_RCV_QUEUES_PER_QS);
		if (ret)
			goto start_rxq_error;
	}

	if (!nic->sqs_mode) {
		/* Configure CPI algorithm */
		ret = nicvf_configure_cpi(dev);
		if (ret)
			goto start_txq_error;

		ret = nicvf_mbox_get_rss_size(nic);
		if (ret) {
			PMD_INIT_LOG(ERR, "Failed to get rss table size");
			goto qset_rss_error;
		}

		/* Configure RSS */
		ret = nicvf_configure_rss(dev);
		if (ret)
			goto qset_rss_error;
	}

	/* Done; Let PF make the BGX's RX and TX switches to ON position */
	nicvf_mbox_cfg_done(nic);
	return 0;

qset_rss_error:
	nicvf_rss_term(nic);
start_rxq_error:
	for (qidx = rx_start; qidx <= rx_end; qidx++)
		nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);
start_txq_error:
	for (qidx = tx_start; qidx <= tx_end; qidx++)
		nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);
qset_rbdr_reclaim:
	nicvf_qset_rbdr_reclaim(nic, 0);
	nicvf_rbdr_release_mbufs(dev, nic);
qset_rbdr_free:
	if (nic->rbdr) {
		rte_free(nic->rbdr);
		nic->rbdr = NULL;
	}
qset_reclaim:
	nicvf_qset_reclaim(nic);
	return ret;
}

static int
nicvf_dev_start(struct rte_eth_dev *dev)
{
	uint16_t qidx;
	int ret;
	size_t i;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
	uint16_t mtu;
	uint32_t buffsz = 0, rbdrsz = 0;
	struct rte_pktmbuf_pool_private *mbp_priv;
	struct nicvf_rxq *rxq;

	PMD_INIT_FUNC_TRACE();

	/* This function must be called for a primary device */
	assert_primary(nic);

	/* Validate RBDR buff size */
	for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
		rxq = dev->data->rx_queues[qidx];
		mbp_priv = rte_mempool_get_priv(rxq->pool);
		buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
		if (buffsz % 128) {
			PMD_INIT_LOG(ERR, "rxbuf size must be a multiple of 128");
			return -EINVAL;
		}
		if (rbdrsz == 0)
			rbdrsz = buffsz;
		if (rbdrsz != buffsz) {
			PMD_INIT_LOG(ERR, "buffsz not same, qidx=%d (%d/%d)",
				     qidx, rbdrsz, buffsz);
			return -EINVAL;
		}
	}

	/* Configure loopback */
	ret = nicvf_loopback_config(nic, dev->data->dev_conf.lpbk_mode);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to configure loopback %d", ret);
		return ret;
	}

	/* Reset all statistics counters attached to this port */
	ret = nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, 0xFFFF, 0xFFFF);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to reset stat counters %d", ret);
		return ret;
	}

	/* Setup scatter mode if needed by jumbo */
	if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
			2 * VLAN_TAG_SIZE > buffsz)
		dev->data->scattered_rx = 1;
	if ((rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) != 0)
		dev->data->scattered_rx = 1;

	/* Setup MTU based on max_rx_pkt_len or default */
	mtu = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ?
		dev->data->dev_conf.rxmode.max_rx_pkt_len
			- ETHER_HDR_LEN - ETHER_CRC_LEN
		: ETHER_MTU;

	if (nicvf_dev_set_mtu(dev, mtu)) {
		PMD_INIT_LOG(ERR, "Failed to set default mtu size");
		return -EBUSY;
	}

	ret = nicvf_vf_start(dev, nic, rbdrsz);
	if (ret != 0)
		return ret;

	for (i = 0; i < nic->sqs_count; i++) {
		assert(nic->snicvf[i]);

		ret = nicvf_vf_start(dev, nic->snicvf[i], rbdrsz);
		if (ret != 0)
			return ret;
	}

	/* Configure callbacks based on scatter mode */
	nicvf_set_tx_function(dev);
	nicvf_set_rx_function(dev);

	return 0;
}

static void
nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup)
{
	size_t i;
	int ret;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	/* Teardown secondary vf first */
	for (i = 0; i < nic->sqs_count; i++) {
		if (!nic->snicvf[i])
			continue;

		nicvf_vf_stop(dev, nic->snicvf[i], cleanup);
	}

	/* Stop the primary VF now */
	nicvf_vf_stop(dev, nic, cleanup);

	/* Disable loopback */
	ret = nicvf_loopback_config(nic, 0);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to disable loopback %d", ret);

	/* Reclaim CPI configuration */
	ret = nicvf_mbox_config_cpi(nic, 0);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim CPI config %d", ret);
}

static void
nicvf_dev_stop(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	nicvf_dev_stop_cleanup(dev, false);
}

static void
nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic, bool cleanup)
{
	int ret;
	uint16_t qidx;
	uint16_t tx_start, tx_end;
	uint16_t rx_start, rx_end;

	PMD_INIT_FUNC_TRACE();

	if (cleanup) {
		/* Let PF make the BGX's RX and TX switches to OFF position */
		nicvf_mbox_shutdown(nic);
	}

	/* Disable VLAN Strip */
	nicvf_vlan_hw_strip(nic, 0);

	/* Get queue ranges for this VF */
	nicvf_tx_range(dev, nic, &tx_start, &tx_end);

	for (qidx = tx_start; qidx <= tx_end; qidx++)
		nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);

	/* Get queue ranges for this VF */
	nicvf_rx_range(dev, nic, &rx_start, &rx_end);

	/* Reclaim rq */
	for (qidx = rx_start; qidx <= rx_end; qidx++)
		nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);

	/* Reclaim RBDR */
	ret = nicvf_qset_rbdr_reclaim(nic, 0);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to reclaim RBDR %d", ret);

	/* Move all charged buffers in RBDR back to pool */
	if (nic->rbdr != NULL)
		nicvf_rbdr_release_mbufs(dev, nic);

	/* Disable qset */
	ret = nicvf_qset_reclaim(nic);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to disable qset %d", ret);

	/* Disable all interrupts */
	nicvf_disable_all_interrupts(nic);

	/* Free RBDR SW structure */
	if (nic->rbdr) {
		rte_free(nic->rbdr);
		nic->rbdr = NULL;
	}
}

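/*
 * Teardown ordering: secondary (SQS) VFs are stopped before the primary VF,
 * and only dev_close() cancels the polling alarms armed at init time, so a
 * plain stop/start cycle keeps the mailbox polling alive.
 */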
static void
nicvf_dev_close(struct rte_eth_dev *dev)
{
	size_t i;
	struct nicvf *nic = nicvf_pmd_priv(dev);

	PMD_INIT_FUNC_TRACE();

	nicvf_dev_stop_cleanup(dev, true);
	nicvf_periodic_alarm_stop(nicvf_interrupt, dev);

	for (i = 0; i < nic->sqs_count; i++) {
		if (!nic->snicvf[i])
			continue;

		nicvf_periodic_alarm_stop(nicvf_vf_interrupt, nic->snicvf[i]);
	}
}

static int
nicvf_request_sqs(struct nicvf *nic)
{
	size_t i;

	assert_primary(nic);
	assert(nic->sqs_count > 0);
	assert(nic->sqs_count <= MAX_SQS_PER_VF);

	/* Set no of Rx/Tx queues in each of the SQsets */
	for (i = 0; i < nic->sqs_count; i++) {
		if (nicvf_svf_empty())
			rte_panic("Cannot assign sufficient number of "
				  "secondary queues to primary VF%" PRIu8 "\n",
				  nic->vf_id);

		nic->snicvf[i] = nicvf_svf_pop();
		nic->snicvf[i]->sqs_id = i;
	}

	return nicvf_mbox_request_sqs(nic);
}

static int
nicvf_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_conf *conf = &data->dev_conf;
	struct rte_eth_rxmode *rxmode = &conf->rxmode;
	struct rte_eth_txmode *txmode = &conf->txmode;
	struct nicvf *nic = nicvf_pmd_priv(dev);
	uint8_t cqcount;
	uint64_t conf_rx_offloads, rx_offload_capa;
	uint64_t conf_tx_offloads, tx_offload_capa;

	PMD_INIT_FUNC_TRACE();

	if (!rte_eal_has_hugepages()) {
		PMD_INIT_LOG(INFO, "Huge page is not configured");
		return -EINVAL;
	}

	conf_tx_offloads = dev->data->dev_conf.txmode.offloads;
	tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;

	if ((conf_tx_offloads & tx_offload_capa) != conf_tx_offloads) {
		PMD_INIT_LOG(ERR, "Some Tx offloads are not supported "
			"requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
			conf_tx_offloads, tx_offload_capa);
		return -ENOTSUP;
	}

	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) {
		PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
		rxmode->offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
	}

	conf_rx_offloads = rxmode->offloads;
	rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;

	if ((conf_rx_offloads & rx_offload_capa) != conf_rx_offloads) {
		PMD_INIT_LOG(ERR, "Some Rx offloads are not supported "
			"requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
			conf_rx_offloads, rx_offload_capa);
		return -ENOTSUP;
	}

	if ((conf_rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0) {
		PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
		rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
	}

	if (txmode->mq_mode) {
		PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported");
		return -EINVAL;
	}

	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
		rxmode->mq_mode != ETH_MQ_RX_RSS) {
		PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
		return -EINVAL;
	}

	if (rxmode->split_hdr_size) {
		PMD_INIT_LOG(INFO, "Rxmode does not support split header");
		return -EINVAL;
	}

	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
		PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported");
		return -EINVAL;
	}

	if (conf->dcb_capability_en) {
		PMD_INIT_LOG(INFO, "DCB enable not supported");
		return -EINVAL;
	}

	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		PMD_INIT_LOG(INFO, "Flow director not supported");
		return -EINVAL;
	}

	assert_primary(nic);
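	/*
	 * Each queue set (QS) holds at most MAX_RCV_QUEUES_PER_QS queues;
	 * anything beyond that is mapped onto additional secondary VFs (SQS),
	 * hence the rounding below to work out how many secondary queue sets
	 * must be requested from the PF.
	 */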
	NICVF_STATIC_ASSERT(MAX_RCV_QUEUES_PER_QS == MAX_SND_QUEUES_PER_QS);
	cqcount = RTE_MAX(data->nb_tx_queues, data->nb_rx_queues);
	if (cqcount > MAX_RCV_QUEUES_PER_QS) {
		nic->sqs_count = RTE_ALIGN_CEIL(cqcount, MAX_RCV_QUEUES_PER_QS);
		nic->sqs_count = (nic->sqs_count / MAX_RCV_QUEUES_PER_QS) - 1;
	} else {
		nic->sqs_count = 0;
	}

	assert(nic->sqs_count <= MAX_SQS_PER_VF);

	if (nic->sqs_count > 0) {
		if (nicvf_request_sqs(nic)) {
			rte_panic("Cannot assign sufficient number of "
				  "secondary queues to PORT%d VF%" PRIu8 "\n",
				  dev->data->port_id, nic->vf_id);
		}
	}

	PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
		     dev->data->port_id, nicvf_hw_cap(nic));

	return 0;
}

/* Ethdev callbacks registered with the DPDK ethdev layer */
static const struct eth_dev_ops nicvf_eth_dev_ops = {
	.dev_configure = nicvf_dev_configure,
	.dev_start = nicvf_dev_start,
	.dev_stop = nicvf_dev_stop,
	.link_update = nicvf_dev_link_update,
	.dev_close = nicvf_dev_close,
	.stats_get = nicvf_dev_stats_get,
	.stats_reset = nicvf_dev_stats_reset,
	.promiscuous_enable = nicvf_dev_promisc_enable,
	.dev_infos_get = nicvf_dev_info_get,
	.dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
	.mtu_set = nicvf_dev_set_mtu,
	.reta_update = nicvf_dev_reta_update,
	.reta_query = nicvf_dev_reta_query,
	.rss_hash_update = nicvf_dev_rss_hash_update,
	.rss_hash_conf_get = nicvf_dev_rss_hash_conf_get,
	.rx_queue_start = nicvf_dev_rx_queue_start,
	.rx_queue_stop = nicvf_dev_rx_queue_stop,
	.tx_queue_start = nicvf_dev_tx_queue_start,
	.tx_queue_stop = nicvf_dev_tx_queue_stop,
	.rx_queue_setup = nicvf_dev_rx_queue_setup,
	.rx_queue_release = nicvf_dev_rx_queue_release,
	.rx_queue_count = nicvf_dev_rx_queue_count,
	.tx_queue_setup = nicvf_dev_tx_queue_setup,
	.tx_queue_release = nicvf_dev_tx_queue_release,
	.get_reg = nicvf_dev_get_regs,
};
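/*
 * Illustrative only (not part of the driver): a minimal application-side
 * sequence that exercises the callbacks above through the generic ethdev
 * API, staying within the constraints checked in nicvf_dev_configure()
 * (mq_mode none/RSS only, CRC strip always on, no fixed link speed).
 * The port id, queue counts, descriptor count and mempool are placeholders.
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = {
 *			.mq_mode = ETH_MQ_RX_RSS,
 *			.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
 *		},
 *	};
 *	ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *	if (ret == 0)
 *		ret = rte_eth_rx_queue_setup(port_id, 0, 512,
 *					     rte_socket_id(), NULL, mb_pool);
 *	if (ret == 0)
 *		ret = rte_eth_tx_queue_setup(port_id, 0, 512,
 *					     rte_socket_id(), NULL);
 *	if (ret == 0)
 *		ret = rte_eth_dev_start(port_id);
 */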
static int
nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	int ret;
	struct rte_pci_device *pci_dev;
	struct nicvf *nic = nicvf_pmd_priv(eth_dev);

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &nicvf_eth_dev_ops;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (nic) {
			/* Set up callbacks for the secondary process */
			nicvf_set_tx_function(eth_dev);
			nicvf_set_rx_function(eth_dev);
			return 0;
		} else {
			/* If nic == NULL then this is a secondary VF, so the
			 * ethdev needs to be released by the caller */
			return ENOTSUP;
		}
	}

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	nic->device_id = pci_dev->id.device_id;
	nic->vendor_id = pci_dev->id.vendor_id;
	nic->subsystem_device_id = pci_dev->id.subsystem_device_id;
	nic->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	PMD_INIT_LOG(DEBUG, "nicvf: device (%x:%x) %u:%u:%u:%u",
		     pci_dev->id.vendor_id, pci_dev->id.device_id,
		     pci_dev->addr.domain, pci_dev->addr.bus,
		     pci_dev->addr.devid, pci_dev->addr.function);

	nic->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
	if (!nic->reg_base) {
		PMD_INIT_LOG(ERR, "Failed to map BAR0");
		ret = -ENODEV;
		goto fail;
	}

	nicvf_disable_all_interrupts(nic);

	ret = nicvf_periodic_alarm_start(nicvf_interrupt, eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to start periodic alarm");
		goto fail;
	}

	ret = nicvf_mbox_check_pf_ready(nic);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to get ready message from PF");
		goto alarm_fail;
	} else {
		PMD_INIT_LOG(INFO,
			"node=%d vf=%d mode=%s sqs=%s loopback_supported=%s",
			nic->node, nic->vf_id,
			nic->tns_mode == NIC_TNS_MODE ? "tns" : "tns-bypass",
			nic->sqs_mode ? "true" : "false",
			nic->loopback_supported ? "true" : "false");
	}

	ret = nicvf_base_init(nic);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to execute nicvf_base_init");
		goto malloc_fail;
	}

	if (nic->sqs_mode) {
		/* Push nic onto the stack of secondary VFs */
		nicvf_svf_push(nic);

		/* Steal the nic pointer from the device for further reuse */
		eth_dev->data->dev_private = NULL;

		nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
		ret = nicvf_periodic_alarm_start(nicvf_vf_interrupt, nic);
		if (ret) {
			PMD_INIT_LOG(ERR, "Failed to start periodic alarm");
			goto fail;
		}

		/* Detach the port by returning a positive error number */
		return ENOTSUP;
	}

	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory for mac addr");
		ret = -ENOMEM;
		goto alarm_fail;
	}
	if (is_zero_ether_addr((struct ether_addr *)nic->mac_addr))
		eth_random_addr(&nic->mac_addr[0]);

	ether_addr_copy((struct ether_addr *)nic->mac_addr,
			&eth_dev->data->mac_addrs[0]);

	ret = nicvf_mbox_set_mac_addr(nic, nic->mac_addr);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to set mac addr");
		goto malloc_fail;
	}

	PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x",
		     eth_dev->data->port_id, nic->vendor_id, nic->device_id,
		     nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2],
		     nic->mac_addr[3], nic->mac_addr[4], nic->mac_addr[5]);

	return 0;

malloc_fail:
	rte_free(eth_dev->data->mac_addrs);
alarm_fail:
	nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
fail:
	return ret;
}

static const struct rte_pci_id pci_id_nicvf_map[] = {
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_CN88XX_PASS1_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS1_NICVF,
	},
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF,
	},
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN81XX_NICVF,
	},
	{
		.class_id = RTE_CLASS_ANY_ID,
		.vendor_id = PCI_VENDOR_ID_CAVIUM,
		.device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN83XX_NICVF,
	},
	{
		.vendor_id = 0,
	},
};
static int nicvf_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			       struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct nicvf),
					     nicvf_eth_dev_init);
}

static int nicvf_eth_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
}

static struct rte_pci_driver rte_nicvf_pmd = {
	.id_table = pci_id_nicvf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_KEEP_MAPPED_RES |
			RTE_PCI_DRV_INTR_LSC,
	.probe = nicvf_eth_pci_probe,
	.remove = nicvf_eth_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_thunderx, rte_nicvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_thunderx, pci_id_nicvf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_thunderx, "* igb_uio | uio_pci_generic | vfio-pci");
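/*
 * Usage note (illustrative, not part of the driver): the KMOD dependency
 * above means the ThunderX VF must be bound to igb_uio, uio_pci_generic or
 * vfio-pci before EAL can probe it. A typical flow with the stock DPDK
 * tooling might look like the following, where 0002:01:00.1 is a
 * placeholder PCI address:
 *
 *	dpdk-devbind.py --bind=vfio-pci 0002:01:00.1
 *	testpmd -l 0-3 -- --rxq=2 --txq=2
 */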