/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2016-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_dev.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_ether.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_kvargs.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tx.h"
#include "sfc_flow.h"
#include "sfc_dp.h"
#include "sfc_dp_rx.h"

uint32_t sfc_logtype_driver;

static struct sfc_dp_list sfc_dp_head =
	TAILQ_HEAD_INITIALIZER(sfc_dp_head);


static void sfc_eth_dev_clear_ops(struct rte_eth_dev *dev);


static int
sfc_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	efx_nic_fw_info_t enfi;
	int ret;
	int rc;

	/*
	 * Return value of the callback is likely supposed to be
	 * equal to or greater than 0, nevertheless, if an error
	 * occurs, it will be desirable to pass it to the caller
	 */
	if ((fw_version == NULL) || (fw_size == 0))
		return -EINVAL;

	rc = efx_nic_get_fw_version(sa->nic, &enfi);
	if (rc != 0)
		return -rc;

	ret = snprintf(fw_version, fw_size,
		       "%" PRIu16 ".%" PRIu16 ".%" PRIu16 ".%" PRIu16,
		       enfi.enfi_mc_fw_version[0], enfi.enfi_mc_fw_version[1],
		       enfi.enfi_mc_fw_version[2], enfi.enfi_mc_fw_version[3]);
	if (ret < 0)
		return ret;

	if (enfi.enfi_dpcpu_fw_ids_valid) {
		size_t dpcpu_fw_ids_offset = MIN(fw_size - 1, (size_t)ret);
		int ret_extra;

		ret_extra = snprintf(fw_version + dpcpu_fw_ids_offset,
				     fw_size - dpcpu_fw_ids_offset,
				     " rx%" PRIx16 " tx%" PRIx16,
				     enfi.enfi_rx_dpcpu_fw_id,
				     enfi.enfi_tx_dpcpu_fw_id);
		if (ret_extra < 0)
			return ret_extra;

		ret += ret_extra;
	}

	if (fw_size < (size_t)(++ret))
		return ret;
	else
		return 0;
}

static int
sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_rss *rss = &sas->rss;
	uint64_t txq_offloads_def = 0;

	sfc_log_init(sa, "entry");

	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	dev_info->max_mtu = EFX_MAC_SDU_MAX;

	dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;

	/* Autonegotiation may be disabled */
	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_1000FDX))
		dev_info->speed_capa |= ETH_LINK_SPEED_1G;
	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_10000FDX))
		dev_info->speed_capa |= ETH_LINK_SPEED_10G;
	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_25000FDX))
		dev_info->speed_capa |= ETH_LINK_SPEED_25G;
	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_40000FDX))
		dev_info->speed_capa |= ETH_LINK_SPEED_40G;
	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_50000FDX))
		dev_info->speed_capa |= ETH_LINK_SPEED_50G;
	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_100000FDX))
		dev_info->speed_capa |= ETH_LINK_SPEED_100G;
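	/*
	 * Queue count limits reported below are adapter-wide software
	 * limits (presumably established earlier at probe/attach time),
	 * not fixed hardware constants.
	 */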
	dev_info->max_rx_queues = sa->rxq_max;
	dev_info->max_tx_queues = sa->txq_max;

	/* By default packets are dropped if no descriptors are available */
	dev_info->default_rxconf.rx_drop_en = 1;

	dev_info->rx_queue_offload_capa = sfc_rx_get_queue_offload_caps(sa);

	/*
	 * rx_offload_capa includes both device and queue offloads since
	 * the latter may be requested on a per-device basis, which makes
	 * sense when some offloads need to be set on all queues.
	 */
	dev_info->rx_offload_capa = sfc_rx_get_dev_offload_caps(sa) |
				    dev_info->rx_queue_offload_capa;

	dev_info->tx_queue_offload_capa = sfc_tx_get_queue_offload_caps(sa);

	/*
	 * tx_offload_capa includes both device and queue offloads since
	 * the latter may be requested on a per-device basis, which makes
	 * sense when some offloads need to be set on all queues.
	 */
	dev_info->tx_offload_capa = sfc_tx_get_dev_offload_caps(sa) |
				    dev_info->tx_queue_offload_capa;

	if (dev_info->tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		txq_offloads_def |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	dev_info->default_txconf.offloads |= txq_offloads_def;

	if (rss->context_type != EFX_RX_SCALE_UNAVAILABLE) {
		uint64_t rte_hf = 0;
		unsigned int i;

		for (i = 0; i < rss->hf_map_nb_entries; ++i)
			rte_hf |= rss->hf_map[i].rte;

		dev_info->reta_size = EFX_RSS_TBL_SIZE;
		dev_info->hash_key_size = EFX_RSS_KEY_SIZE;
		dev_info->flow_type_rss_offloads = rte_hf;
	}

	/* Initialize to hardware limits */
	dev_info->rx_desc_lim.nb_max = sa->rxq_max_entries;
	dev_info->rx_desc_lim.nb_min = sa->rxq_min_entries;
	/*
	 * The RXQ hardware requires that the descriptor count is a power
	 * of 2, but rx_desc_lim cannot properly describe that constraint.
	 */
	dev_info->rx_desc_lim.nb_align = sa->rxq_min_entries;

	/* Initialize to hardware limits */
	dev_info->tx_desc_lim.nb_max = sa->txq_max_entries;
	dev_info->tx_desc_lim.nb_min = sa->txq_min_entries;
	/*
	 * The TXQ hardware requires that the descriptor count is a power
	 * of 2, but tx_desc_lim cannot properly describe that constraint.
	 */
	dev_info->tx_desc_lim.nb_align = sa->txq_min_entries;

	if (sap->dp_rx->get_dev_info != NULL)
		sap->dp_rx->get_dev_info(dev_info);
	if (sap->dp_tx->get_dev_info != NULL)
		sap->dp_tx->get_dev_info(dev_info);

	dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
			     RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;

	return 0;
}

static const uint32_t *
sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);

	return sap->dp_rx->supported_ptypes_get(sap->shared->tunnel_encaps);
}

static int
sfc_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *dev_data = dev->data;
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	int rc;

	sfc_log_init(sa, "entry n_rxq=%u n_txq=%u",
		     dev_data->nb_rx_queues, dev_data->nb_tx_queues);

	sfc_adapter_lock(sa);
	switch (sa->state) {
	case SFC_ADAPTER_CONFIGURED:
		/* FALLTHROUGH */
	case SFC_ADAPTER_INITIALIZED:
		rc = sfc_configure(sa);
		break;
	default:
		sfc_err(sa, "unexpected adapter state %u to configure",
			sa->state);
		rc = EINVAL;
		break;
	}
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done %d", rc);
	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_start(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	int rc;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	rc = sfc_start(sa);
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done %d", rc);
	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct rte_eth_link current_link;
	int ret;

	sfc_log_init(sa, "entry");

	if (sa->state != SFC_ADAPTER_STARTED) {
		sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &current_link);
	} else if (wait_to_complete) {
		efx_link_mode_t link_mode;

		if (efx_port_poll(sa->nic, &link_mode) != 0)
			link_mode = EFX_LINK_UNKNOWN;
		sfc_port_link_mode_to_info(link_mode, &current_link);

	} else {
		sfc_ev_mgmt_qpoll(sa);
		rte_eth_linkstatus_get(dev, &current_link);
	}

	ret = rte_eth_linkstatus_set(dev, &current_link);
	if (ret == 0)
		sfc_notice(sa, "Link status is %s",
			   current_link.link_status ? "UP" : "DOWN");

	return ret;
}

static void
sfc_dev_stop(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	sfc_stop(sa);
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
}

static int
sfc_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	int rc;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	rc = sfc_start(sa);
	sfc_adapter_unlock(sa);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	sfc_stop(sa);
	sfc_adapter_unlock(sa);

	return 0;
}

static void
sfc_dev_close(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	switch (sa->state) {
	case SFC_ADAPTER_STARTED:
		sfc_stop(sa);
		SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
		/* FALLTHROUGH */
	case SFC_ADAPTER_CONFIGURED:
		sfc_close(sa);
		SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED);
		/* FALLTHROUGH */
	case SFC_ADAPTER_INITIALIZED:
		break;
	default:
		sfc_err(sa, "unexpected adapter state %u on close", sa->state);
		break;
	}

	/*
	 * Cleanup all resources in accordance with RTE_ETH_DEV_CLOSE_REMOVE.
	 * Rollback primary process sfc_eth_dev_init() below.
	 */

	sfc_eth_dev_clear_ops(dev);

	sfc_detach(sa);
	sfc_unprobe(sa);

	sfc_kvargs_cleanup(sa);

	sfc_adapter_unlock(sa);
	sfc_adapter_lock_fini(sa);

	sfc_log_init(sa, "done");

	/* Required for logging, so cleanup last */
	sa->eth_dev = NULL;

	dev->process_private = NULL;
	free(sa);
}

static int
sfc_dev_filter_set(struct rte_eth_dev *dev, enum sfc_dev_filter_mode mode,
		   boolean_t enabled)
{
	struct sfc_port *port;
	boolean_t *toggle;
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	boolean_t allmulti = (mode == SFC_DEV_FILTER_MODE_ALLMULTI);
	const char *desc = (allmulti) ? "all-multi" : "promiscuous";
	int rc = 0;

	sfc_adapter_lock(sa);

	port = &sa->port;
	toggle = (allmulti) ? (&port->allmulti) : (&port->promisc);

	if (*toggle != enabled) {
		*toggle = enabled;

		if (sfc_sa2shared(sa)->isolated) {
			sfc_warn(sa, "isolated mode is active on the port");
			sfc_warn(sa, "the change is to be applied on the next "
				     "start provided that isolated mode is "
				     "disabled prior to the next start");
		} else if ((sa->state == SFC_ADAPTER_STARTED) &&
			   ((rc = sfc_set_rx_mode(sa)) != 0)) {
			*toggle = !(enabled);
			sfc_warn(sa, "Failed to %s %s mode, rc = %d",
				 ((enabled) ? "enable" : "disable"), desc, rc);

			/*
			 * For promiscuous and all-multicast filters a
			 * permission failure should be reported as an
			 * unsupported filter.
			 */
			if (rc == EPERM)
				rc = ENOTSUP;
		}
	}

	sfc_adapter_unlock(sa);
	return rc;
}

static int
sfc_dev_promisc_enable(struct rte_eth_dev *dev)
{
	int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_TRUE);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_promisc_disable(struct rte_eth_dev *dev)
{
	int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_FALSE);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_allmulti_enable(struct rte_eth_dev *dev)
{
	int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_TRUE);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_allmulti_disable(struct rte_eth_dev *dev)
{
	int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_FALSE);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		   uint16_t nb_rx_desc, unsigned int socket_id,
		   const struct rte_eth_rxconf *rx_conf,
		   struct rte_mempool *mb_pool)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	int rc;

	sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
		     rx_queue_id, nb_rx_desc, socket_id);

	sfc_adapter_lock(sa);

	rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
			  rx_conf, mb_pool);
	if (rc != 0)
		goto fail_rx_qinit;

	dev->data->rx_queues[rx_queue_id] = sas->rxq_info[rx_queue_id].dp;

	sfc_adapter_unlock(sa);

	return 0;

fail_rx_qinit:
	sfc_adapter_unlock(sa);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static void
sfc_rx_queue_release(void *queue)
{
	struct sfc_dp_rxq *dp_rxq = queue;
	struct sfc_rxq *rxq;
	struct sfc_adapter *sa;
	unsigned int sw_index;

	if (dp_rxq == NULL)
		return;

	rxq = sfc_rxq_by_dp_rxq(dp_rxq);
	sa = rxq->evq->sa;
	sfc_adapter_lock(sa);

	sw_index = dp_rxq->dpq.queue_id;

	sfc_log_init(sa, "RxQ=%u", sw_index);

	sfc_rx_qfini(sa, sw_index);

	sfc_adapter_unlock(sa);
}

static int
sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		   uint16_t nb_tx_desc, unsigned int socket_id,
		   const struct rte_eth_txconf *tx_conf)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	int rc;

	sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u",
		     tx_queue_id, nb_tx_desc, socket_id);

	sfc_adapter_lock(sa);

	rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf);
	if (rc != 0)
		goto fail_tx_qinit;

	dev->data->tx_queues[tx_queue_id] = sas->txq_info[tx_queue_id].dp;

	sfc_adapter_unlock(sa);
	return 0;

fail_tx_qinit:
	sfc_adapter_unlock(sa);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static void
sfc_tx_queue_release(void *queue)
{
	struct sfc_dp_txq *dp_txq = queue;
	struct sfc_txq *txq;
	unsigned int sw_index;
	struct sfc_adapter *sa;

	if (dp_txq == NULL)
		return;

	txq = sfc_txq_by_dp_txq(dp_txq);
	sw_index = dp_txq->dpq.queue_id;

	SFC_ASSERT(txq->evq != NULL);
	sa = txq->evq->sa;

	sfc_log_init(sa, "TxQ = %u", sw_index);

	sfc_adapter_lock(sa);

	sfc_tx_qfini(sa, sw_index);
	sfc_adapter_unlock(sa);
}

/*
 * Some statistics are computed as A - B where A and B each increase
 * monotonically with some hardware counter(s) and the counters are read
 * asynchronously.
 *
 * If packet X is counted in A, but not counted in B yet, computed value is
 * greater than real.
 *
 * If packet X is not counted in A at the moment of reading the counter,
 * but counted in B at the moment of reading the counter, computed value
 * is less than real.
 *
 * However, a counter which grows backwards is a worse evil than a slightly
 * inaccurate value, so try to guarantee that it never happens except,
 * possibly, when the MAC stats are zeroed as a result of a NIC reset.
 */
static void
sfc_update_diff_stat(uint64_t *stat, uint64_t newval)
{
	if ((int64_t)(newval - *stat) > 0 || newval == 0)
		*stat = newval;
}

static int
sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
	uint64_t *mac_stats;
	int ret;

	rte_spinlock_lock(&port->mac_stats_lock);

	ret = sfc_port_update_mac_stats(sa);
	if (ret != 0)
		goto unlock;

	mac_stats = port->mac_stats_buf;

	if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask,
				   EFX_MAC_VADAPTER_RX_UNICAST_PACKETS)) {
		stats->ipackets =
			mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS];
		stats->opackets =
			mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS];
		stats->ibytes =
			mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES];
		stats->obytes =
			mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES];
		stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS];
		stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS];
	} else {
		stats->opackets = mac_stats[EFX_MAC_TX_PKTS];
		stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS];
		stats->obytes = mac_stats[EFX_MAC_TX_OCTETS];
		/*
		 * Take into account stats which may or may not be supported
		 * on EF10. If some stat is not supported by the current
		 * firmware variant or HW revision, it is guaranteed
		 * to be zero in mac_stats.
		 */
		stats->imissed =
			mac_stats[EFX_MAC_RX_NODESC_DROP_CNT] +
			mac_stats[EFX_MAC_PM_TRUNC_BB_OVERFLOW] +
			mac_stats[EFX_MAC_PM_DISCARD_BB_OVERFLOW] +
			mac_stats[EFX_MAC_PM_TRUNC_VFIFO_FULL] +
			mac_stats[EFX_MAC_PM_DISCARD_VFIFO_FULL] +
			mac_stats[EFX_MAC_PM_TRUNC_QBB] +
			mac_stats[EFX_MAC_PM_DISCARD_QBB] +
			mac_stats[EFX_MAC_PM_DISCARD_MAPPING] +
			mac_stats[EFX_MAC_RXDP_Q_DISABLED_PKTS] +
			mac_stats[EFX_MAC_RXDP_DI_DROPPED_PKTS];
		stats->ierrors =
			mac_stats[EFX_MAC_RX_FCS_ERRORS] +
			mac_stats[EFX_MAC_RX_ALIGN_ERRORS] +
			mac_stats[EFX_MAC_RX_JABBER_PKTS];
		/* no oerrors counters supported on EF10 */

		/* Exclude missed, errors and pauses from Rx packets */
		sfc_update_diff_stat(&port->ipackets,
			mac_stats[EFX_MAC_RX_PKTS] -
			mac_stats[EFX_MAC_RX_PAUSE_PKTS] -
			stats->imissed - stats->ierrors);
		stats->ipackets = port->ipackets;
	}

unlock:
	rte_spinlock_unlock(&port->mac_stats_lock);
	SFC_ASSERT(ret >= 0);
	return -ret;
}

static int
sfc_stats_reset(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
	int rc;

	if (sa->state != SFC_ADAPTER_STARTED) {
		/*
		 * The operation cannot be done if port is not started; it
		 * will be scheduled to be done during the next port start
		 */
		port->mac_stats_reset_pending = B_TRUE;
		return 0;
	}

	rc = sfc_port_reset_mac_stats(sa);
	if (rc != 0)
		sfc_err(sa, "failed to reset statistics (rc = %d)", rc);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
	       unsigned int xstats_count)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
	uint64_t *mac_stats;
	int rc;
	unsigned int i;
	int nstats = 0;

	rte_spinlock_lock(&port->mac_stats_lock);

	rc = sfc_port_update_mac_stats(sa);
	if (rc != 0) {
		SFC_ASSERT(rc > 0);
		nstats = -rc;
		goto unlock;
	}

	mac_stats = port->mac_stats_buf;

	for (i = 0; i < EFX_MAC_NSTATS; ++i) {
		if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
			if (xstats != NULL && nstats < (int)xstats_count) {
				xstats[nstats].id = nstats;
				xstats[nstats].value = mac_stats[i];
			}
			nstats++;
		}
	}

unlock:
	rte_spinlock_unlock(&port->mac_stats_lock);

	return nstats;
}

static int
sfc_xstats_get_names(struct rte_eth_dev *dev,
		     struct rte_eth_xstat_name *xstats_names,
		     unsigned int xstats_count)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
	unsigned int i;
	unsigned int nstats = 0;

	for (i = 0; i < EFX_MAC_NSTATS; ++i) {
		if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
			if (xstats_names != NULL && nstats < xstats_count)
				strlcpy(xstats_names[nstats].name,
					efx_mac_stat_name(sa->nic, i),
					sizeof(xstats_names[0].name));
			nstats++;
		}
	}

	return nstats;
}

static int
sfc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		     uint64_t *values, unsigned int n)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
	uint64_t *mac_stats;
	unsigned int nb_supported = 0;
	unsigned int nb_written = 0;
	unsigned int i;
	int ret;
	int rc;

	if (unlikely(values == NULL) ||
	    unlikely((ids == NULL) && (n < port->mac_stats_nb_supported)))
		return port->mac_stats_nb_supported;

	rte_spinlock_lock(&port->mac_stats_lock);

	rc = sfc_port_update_mac_stats(sa);
	if (rc != 0) {
		SFC_ASSERT(rc > 0);
		ret = -rc;
		goto unlock;
	}

	mac_stats = port->mac_stats_buf;

	for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < n); ++i) {
		if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i))
			continue;

		if ((ids == NULL) || (ids[nb_written] == nb_supported))
			values[nb_written++] = mac_stats[i];

		++nb_supported;
	}

	ret = nb_written;

unlock:
	rte_spinlock_unlock(&port->mac_stats_lock);

	return ret;
}

static int
sfc_xstats_get_names_by_id(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *xstats_names,
			   const uint64_t *ids, unsigned int size)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
	unsigned int nb_supported = 0;
	unsigned int nb_written = 0;
	unsigned int i;

	if (unlikely(xstats_names == NULL) ||
	    unlikely((ids == NULL) && (size < port->mac_stats_nb_supported)))
		return port->mac_stats_nb_supported;

	for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < size); ++i) {
		if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i))
			continue;

		if ((ids == NULL) || (ids[nb_written] == nb_supported)) {
			char *name = xstats_names[nb_written++].name;

			strlcpy(name, efx_mac_stat_name(sa->nic, i),
				sizeof(xstats_names[0].name));
		}

		++nb_supported;
	}

	return nb_written;
}

static int
sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	unsigned int wanted_fc, link_fc;

	memset(fc_conf, 0, sizeof(*fc_conf));

	sfc_adapter_lock(sa);

	if (sa->state == SFC_ADAPTER_STARTED)
		efx_mac_fcntl_get(sa->nic, &wanted_fc, &link_fc);
	else
		link_fc = sa->port.flow_ctrl;

	switch (link_fc) {
	case 0:
		fc_conf->mode = RTE_FC_NONE;
		break;
	case EFX_FCNTL_RESPOND:
		fc_conf->mode = RTE_FC_RX_PAUSE;
		break;
	case EFX_FCNTL_GENERATE:
		fc_conf->mode = RTE_FC_TX_PAUSE;
		break;
	case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE):
		fc_conf->mode = RTE_FC_FULL;
		break;
	default:
		sfc_err(sa, "%s: unexpected flow control value %#x",
			__func__, link_fc);
	}

	fc_conf->autoneg = sa->port.flow_ctrl_autoneg;

	sfc_adapter_unlock(sa);

	return 0;
}

static int
sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
	unsigned int fcntl;
	int rc;

	if (fc_conf->high_water != 0 || fc_conf->low_water != 0 ||
	    fc_conf->pause_time != 0 || fc_conf->send_xon != 0 ||
	    fc_conf->mac_ctrl_frame_fwd != 0) {
		sfc_err(sa, "unsupported flow control settings specified");
		rc = EINVAL;
		goto fail_inval;
	}

	switch (fc_conf->mode) {
	case RTE_FC_NONE:
		fcntl = 0;
		break;
	case RTE_FC_RX_PAUSE:
		fcntl = EFX_FCNTL_RESPOND;
		break;
	case RTE_FC_TX_PAUSE:
		fcntl = EFX_FCNTL_GENERATE;
		break;
	case RTE_FC_FULL:
		fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
		break;
	default:
		rc = EINVAL;
		goto fail_inval;
	}

	sfc_adapter_lock(sa);

	if (sa->state == SFC_ADAPTER_STARTED) {
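		/*
		 * Push the new setting to the MAC immediately only while the
		 * adapter is started; the values stored below are assumed to
		 * be re-applied on the next adapter start.
		 */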
		rc = efx_mac_fcntl_set(sa->nic, fcntl, fc_conf->autoneg);
		if (rc != 0)
			goto fail_mac_fcntl_set;
	}

	port->flow_ctrl = fcntl;
	port->flow_ctrl_autoneg = fc_conf->autoneg;

	sfc_adapter_unlock(sa);

	return 0;

fail_mac_fcntl_set:
	sfc_adapter_unlock(sa);
fail_inval:
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_check_scatter_on_all_rx_queues(struct sfc_adapter *sa, size_t pdu)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	boolean_t scatter_enabled;
	const char *error;
	unsigned int i;

	for (i = 0; i < sas->rxq_count; i++) {
		if ((sas->rxq_info[i].state & SFC_RXQ_INITIALIZED) == 0)
			continue;

		scatter_enabled = (sas->rxq_info[i].type_flags &
				   EFX_RXQ_FLAG_SCATTER);

		if (!sfc_rx_check_scatter(pdu, sa->rxq_ctrl[i].buf_size,
					  encp->enc_rx_prefix_size,
					  scatter_enabled, &error)) {
			sfc_err(sa, "MTU check for RxQ %u failed: %s", i,
				error);
			return EINVAL;
		}
	}

	return 0;
}

static int
sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	size_t pdu = EFX_MAC_PDU(mtu);
	size_t old_pdu;
	int rc;

	sfc_log_init(sa, "mtu=%u", mtu);

	rc = EINVAL;
	if (pdu < EFX_MAC_PDU_MIN) {
		sfc_err(sa, "too small MTU %u (PDU size %u less than min %u)",
			(unsigned int)mtu, (unsigned int)pdu,
			EFX_MAC_PDU_MIN);
		goto fail_inval;
	}
	if (pdu > EFX_MAC_PDU_MAX) {
		sfc_err(sa, "too big MTU %u (PDU size %u greater than max %u)",
			(unsigned int)mtu, (unsigned int)pdu,
			(unsigned int)EFX_MAC_PDU_MAX);
		goto fail_inval;
	}

	sfc_adapter_lock(sa);

	rc = sfc_check_scatter_on_all_rx_queues(sa, pdu);
	if (rc != 0)
		goto fail_check_scatter;

	if (pdu != sa->port.pdu) {
		if (sa->state == SFC_ADAPTER_STARTED) {
			sfc_stop(sa);

			old_pdu = sa->port.pdu;
			sa->port.pdu = pdu;
			rc = sfc_start(sa);
			if (rc != 0)
				goto fail_start;
		} else {
			sa->port.pdu = pdu;
		}
	}

	/*
	 * The driver does not use it, but other PMDs update jumbo frame
	 * flag and max_rx_pkt_len when MTU is set.
	 */
	if (mtu > RTE_ETHER_MAX_LEN) {
		struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
		rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	}

	dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu;

	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
	return 0;

fail_start:
	sa->port.pdu = old_pdu;
	if (sfc_start(sa) != 0)
		sfc_err(sa, "cannot start with neither new (%u) nor old (%u) "
			"PDU max size - port is stopped",
			(unsigned int)pdu, (unsigned int)old_pdu);

fail_check_scatter:
	sfc_adapter_unlock(sa);

fail_inval:
	sfc_log_init(sa, "failed %d", rc);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	struct sfc_port *port = &sa->port;
	struct rte_ether_addr *old_addr = &dev->data->mac_addrs[0];
	int rc = 0;

	sfc_adapter_lock(sa);

	if (rte_is_same_ether_addr(mac_addr, &port->default_mac_addr))
		goto unlock;

	/*
	 * Copy the address to the device private data so that
	 * it could be recalled in the case of adapter restart.
	 */
	rte_ether_addr_copy(mac_addr, &port->default_mac_addr);

	/*
	 * Neither of the two following checks can return
	 * an error. The new MAC address is preserved in
	 * the device private data and can be activated
	 * on the next port start if the user prevents
	 * isolated mode from being enabled.
	 */
	if (sfc_sa2shared(sa)->isolated) {
		sfc_warn(sa, "isolated mode is active on the port");
		sfc_warn(sa, "will not set MAC address");
		goto unlock;
	}

	if (sa->state != SFC_ADAPTER_STARTED) {
		sfc_notice(sa, "the port is not started");
		sfc_notice(sa, "the new MAC address will be set on port start");

		goto unlock;
	}

	if (encp->enc_allow_set_mac_with_installed_filters) {
		rc = efx_mac_addr_set(sa->nic, mac_addr->addr_bytes);
		if (rc != 0) {
			sfc_err(sa, "cannot set MAC address (rc = %u)", rc);
			goto unlock;
		}

		/*
		 * Changing the MAC address by means of MCDI request
		 * has no effect on received traffic, therefore
		 * we also need to update unicast filters
		 */
		rc = sfc_set_rx_mode_unchecked(sa);
		if (rc != 0) {
			sfc_err(sa, "cannot set filter (rc = %u)", rc);
			/* Rollback the old address */
			(void)efx_mac_addr_set(sa->nic, old_addr->addr_bytes);
			(void)sfc_set_rx_mode_unchecked(sa);
		}
	} else {
		sfc_warn(sa, "cannot set MAC address with filters installed");
		sfc_warn(sa, "adapter will be restarted to pick the new MAC");
		sfc_warn(sa, "(some traffic may be dropped)");

		/*
		 * Since setting MAC address with filters installed is not
		 * allowed on the adapter, the new MAC address will be set
		 * by means of adapter restart. sfc_start() shall retrieve
		 * the new address from the device private data and set it.
		 */
		sfc_stop(sa);
		rc = sfc_start(sa);
		if (rc != 0)
			sfc_err(sa, "cannot restart adapter (rc = %u)", rc);
	}

unlock:
	if (rc != 0)
		rte_ether_addr_copy(old_addr, &port->default_mac_addr);

	sfc_adapter_unlock(sa);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_set_mc_addr_list(struct rte_eth_dev *dev,
		     struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
	uint8_t *mc_addrs = port->mcast_addrs;
	int rc;
	unsigned int i;

	if (sfc_sa2shared(sa)->isolated) {
		sfc_err(sa, "isolated mode is active on the port");
		sfc_err(sa, "will not set multicast address list");
		return -ENOTSUP;
	}

	if (mc_addrs == NULL)
		return -ENOBUFS;

	if (nb_mc_addr > port->max_mcast_addrs) {
		sfc_err(sa, "too many multicast addresses: %u > %u",
			nb_mc_addr, port->max_mcast_addrs);
		return -EINVAL;
	}

	for (i = 0; i < nb_mc_addr; ++i) {
		rte_memcpy(mc_addrs, mc_addr_set[i].addr_bytes,
			   EFX_MAC_ADDR_LEN);
		mc_addrs += EFX_MAC_ADDR_LEN;
	}

	port->nb_mcast_addrs = nb_mc_addr;

	if (sa->state != SFC_ADAPTER_STARTED)
		return 0;

	rc = efx_mac_multicast_list_set(sa->nic, port->mcast_addrs,
					port->nb_mcast_addrs);
	if (rc != 0)
		sfc_err(sa, "cannot set multicast address list (rc = %u)", rc);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static void
sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		      struct rte_eth_rxq_info *qinfo)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_rxq_info *rxq_info;

	SFC_ASSERT(rx_queue_id < sas->rxq_count);

	rxq_info = &sas->rxq_info[rx_queue_id];

	qinfo->mp = rxq_info->refill_mb_pool;
	qinfo->conf.rx_free_thresh = rxq_info->refill_threshold;
	qinfo->conf.rx_drop_en = 1;
	qinfo->conf.rx_deferred_start = rxq_info->deferred_start;
	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
	if (rxq_info->type_flags & EFX_RXQ_FLAG_SCATTER) {
		qinfo->conf.offloads |= DEV_RX_OFFLOAD_SCATTER;
		qinfo->scattered_rx = 1;
	}
	qinfo->nb_desc = rxq_info->entries;
}

/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static void
sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		      struct rte_eth_txq_info *qinfo)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_txq_info *txq_info;

	SFC_ASSERT(tx_queue_id < sas->txq_count);

	txq_info = &sas->txq_info[tx_queue_id];

	memset(qinfo, 0, sizeof(*qinfo));

	qinfo->conf.offloads = txq_info->offloads;
	qinfo->conf.tx_free_thresh = txq_info->free_thresh;
	qinfo->conf.tx_deferred_start = txq_info->deferred_start;
	qinfo->nb_desc = txq_info->entries;
}

/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static uint32_t
sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_rxq_info *rxq_info;

	SFC_ASSERT(rx_queue_id < sas->rxq_count);
	rxq_info = &sas->rxq_info[rx_queue_id];

	if ((rxq_info->state & SFC_RXQ_STARTED) == 0)
		return 0;

	return sap->dp_rx->qdesc_npending(rxq_info->dp);
}

/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static int
sfc_rx_descriptor_done(void *queue, uint16_t offset)
{
	struct sfc_dp_rxq *dp_rxq = queue;
	const struct sfc_dp_rx *dp_rx;

	dp_rx = sfc_dp_rx_by_dp_rxq(dp_rxq);

	return offset < dp_rx->qdesc_npending(dp_rxq);
}

/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static int
sfc_rx_descriptor_status(void *queue, uint16_t offset)
{
	struct sfc_dp_rxq *dp_rxq = queue;
	const struct sfc_dp_rx *dp_rx;

	dp_rx = sfc_dp_rx_by_dp_rxq(dp_rxq);

	return dp_rx->qdesc_status(dp_rxq, offset);
}

/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static int
sfc_tx_descriptor_status(void *queue, uint16_t offset)
{
	struct sfc_dp_txq *dp_txq = queue;
	const struct sfc_dp_tx *dp_tx;

	dp_tx = sfc_dp_tx_by_dp_txq(dp_txq);

	return dp_tx->qdesc_status(dp_txq, offset);
}

static int
sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	int rc;

	sfc_log_init(sa, "RxQ=%u", rx_queue_id);

	sfc_adapter_lock(sa);

	rc = EINVAL;
	if (sa->state != SFC_ADAPTER_STARTED)
		goto fail_not_started;

	if (sas->rxq_info[rx_queue_id].state != SFC_RXQ_INITIALIZED)
		goto fail_not_setup;

	rc = sfc_rx_qstart(sa, rx_queue_id);
	if (rc != 0)
		goto fail_rx_qstart;

	sas->rxq_info[rx_queue_id].deferred_started = B_TRUE;

	sfc_adapter_unlock(sa);

	return 0;

fail_rx_qstart:
fail_not_setup:
fail_not_started:
	sfc_adapter_unlock(sa);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);

	sfc_log_init(sa, "RxQ=%u", rx_queue_id);

	sfc_adapter_lock(sa);
	sfc_rx_qstop(sa, rx_queue_id);

	sas->rxq_info[rx_queue_id].deferred_started = B_FALSE;

	sfc_adapter_unlock(sa);

	return 0;
}

static int
sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	int rc;

	sfc_log_init(sa, "TxQ = %u", tx_queue_id);

	sfc_adapter_lock(sa);

	rc = EINVAL;
	if (sa->state != SFC_ADAPTER_STARTED)
		goto fail_not_started;

	if (sas->txq_info[tx_queue_id].state != SFC_TXQ_INITIALIZED)
		goto fail_not_setup;

	rc = sfc_tx_qstart(sa, tx_queue_id);
	if (rc != 0)
		goto fail_tx_qstart;

	sas->txq_info[tx_queue_id].deferred_started = B_TRUE;

	sfc_adapter_unlock(sa);
	return 0;

fail_tx_qstart:

fail_not_setup:
fail_not_started:
	sfc_adapter_unlock(sa);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);

	sfc_log_init(sa, "TxQ = %u", tx_queue_id);

	sfc_adapter_lock(sa);

	sfc_tx_qstop(sa, tx_queue_id);

	sas->txq_info[tx_queue_id].deferred_started = B_FALSE;

	sfc_adapter_unlock(sa);
	return 0;
}

static efx_tunnel_protocol_t
sfc_tunnel_rte_type_to_efx_udp_proto(enum rte_eth_tunnel_type rte_type)
{
	switch (rte_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		return EFX_TUNNEL_PROTOCOL_VXLAN;
	case RTE_TUNNEL_TYPE_GENEVE:
		return EFX_TUNNEL_PROTOCOL_GENEVE;
	default:
		return EFX_TUNNEL_NPROTOS;
	}
}

enum sfc_udp_tunnel_op_e {
	SFC_UDP_TUNNEL_ADD_PORT,
	SFC_UDP_TUNNEL_DEL_PORT,
};

static int
sfc_dev_udp_tunnel_op(struct rte_eth_dev *dev,
		      struct rte_eth_udp_tunnel *tunnel_udp,
		      enum sfc_udp_tunnel_op_e op)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	efx_tunnel_protocol_t tunnel_proto;
	int rc;

	sfc_log_init(sa, "%s udp_port=%u prot_type=%u",
		     (op == SFC_UDP_TUNNEL_ADD_PORT) ? "add" :
		     (op == SFC_UDP_TUNNEL_DEL_PORT) ? "delete" : "unknown",
		     tunnel_udp->udp_port, tunnel_udp->prot_type);

	tunnel_proto =
		sfc_tunnel_rte_type_to_efx_udp_proto(tunnel_udp->prot_type);
	if (tunnel_proto >= EFX_TUNNEL_NPROTOS) {
		rc = ENOTSUP;
		goto fail_bad_proto;
	}

	sfc_adapter_lock(sa);

	switch (op) {
	case SFC_UDP_TUNNEL_ADD_PORT:
		rc = efx_tunnel_config_udp_add(sa->nic,
					       tunnel_udp->udp_port,
					       tunnel_proto);
		break;
	case SFC_UDP_TUNNEL_DEL_PORT:
		rc = efx_tunnel_config_udp_remove(sa->nic,
						  tunnel_udp->udp_port,
						  tunnel_proto);
		break;
	default:
		rc = EINVAL;
		goto fail_bad_op;
	}

	if (rc != 0)
		goto fail_op;

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = efx_tunnel_reconfigure(sa->nic);
		if (rc == EAGAIN) {
			/*
			 * Configuration is accepted by FW and MC reboot
			 * is initiated to apply the changes. MC reboot
			 * will be handled in a usual way (MC reboot
			 * event on management event queue and adapter
			 * restart).
			 */
			rc = 0;
		} else if (rc != 0) {
			goto fail_reconfigure;
		}
	}

	sfc_adapter_unlock(sa);
	return 0;

fail_reconfigure:
	/* Remove/restore the entry since the change caused the trouble */
	switch (op) {
	case SFC_UDP_TUNNEL_ADD_PORT:
		(void)efx_tunnel_config_udp_remove(sa->nic,
						   tunnel_udp->udp_port,
						   tunnel_proto);
		break;
	case SFC_UDP_TUNNEL_DEL_PORT:
		(void)efx_tunnel_config_udp_add(sa->nic,
						tunnel_udp->udp_port,
						tunnel_proto);
		break;
	}

fail_op:
fail_bad_op:
	sfc_adapter_unlock(sa);

fail_bad_proto:
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
			    struct rte_eth_udp_tunnel *tunnel_udp)
{
	return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_ADD_PORT);
}

static int
sfc_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
			    struct rte_eth_udp_tunnel *tunnel_udp)
{
	return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_DEL_PORT);
}

/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static int
sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_rss *rss = &sas->rss;

	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE)
		return -ENOTSUP;

	/*
	 * Mapping of hash configuration between RTE and EFX is not
	 * one-to-one; hence, conversion is done here to derive a correct
	 * set of ETH_RSS flags which corresponds to the active EFX
	 * configuration stored locally in 'sfc_adapter' and kept up-to-date.
	 */
	rss_conf->rss_hf = sfc_rx_hf_efx_to_rte(rss, rss->hash_types);
	rss_conf->rss_key_len = EFX_RSS_KEY_SIZE;
	if (rss_conf->rss_key != NULL)
		rte_memcpy(rss_conf->rss_key, rss->key, EFX_RSS_KEY_SIZE);

	return 0;
}

static int
sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	unsigned int efx_hash_types;
	int rc = 0;

	if (sfc_sa2shared(sa)->isolated)
		return -ENOTSUP;

	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
		sfc_err(sa, "RSS is not available");
		return -ENOTSUP;
	}

	if (rss->channels == 0) {
		sfc_err(sa, "RSS is not configured");
		return -EINVAL;
	}

	if ((rss_conf->rss_key != NULL) &&
	    (rss_conf->rss_key_len != sizeof(rss->key))) {
		sfc_err(sa, "RSS key size is wrong (should be %zu)",
			sizeof(rss->key));
		return -EINVAL;
	}

	sfc_adapter_lock(sa);

	rc = sfc_rx_hf_rte_to_efx(sa, rss_conf->rss_hf, &efx_hash_types);
	if (rc != 0)
		goto fail_rx_hf_rte_to_efx;

	rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
				   rss->hash_alg, efx_hash_types, B_TRUE);
	if (rc != 0)
		goto fail_scale_mode_set;

	if (rss_conf->rss_key != NULL) {
		if (sa->state == SFC_ADAPTER_STARTED) {
			rc = efx_rx_scale_key_set(sa->nic,
						  EFX_RSS_CONTEXT_DEFAULT,
						  rss_conf->rss_key,
						  sizeof(rss->key));
			if (rc != 0)
				goto fail_scale_key_set;
		}
		rte_memcpy(rss->key, rss_conf->rss_key, sizeof(rss->key));
	}

	rss->hash_types = efx_hash_types;

	sfc_adapter_unlock(sa);

	return 0;

fail_scale_key_set:
	if (efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
				  EFX_RX_HASHALG_TOEPLITZ,
				  rss->hash_types, B_TRUE) != 0)
		sfc_err(sa, "failed to restore RSS mode");

fail_scale_mode_set:
fail_rx_hf_rte_to_efx:
	sfc_adapter_unlock(sa);
	return -rc;
}

/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static int
sfc_dev_rss_reta_query(struct rte_eth_dev *dev,
		       struct rte_eth_rss_reta_entry64 *reta_conf,
		       uint16_t reta_size)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_rss *rss = &sas->rss;
	int entry;

	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE || sas->isolated)
		return -ENOTSUP;

	if (rss->channels == 0)
		return -EINVAL;

	if (reta_size != EFX_RSS_TBL_SIZE)
		return -EINVAL;

	for (entry = 0; entry < reta_size; entry++) {
		int grp = entry / RTE_RETA_GROUP_SIZE;
		int grp_idx = entry % RTE_RETA_GROUP_SIZE;

		if ((reta_conf[grp].mask >> grp_idx) & 1)
			reta_conf[grp].reta[grp_idx] = rss->tbl[entry];
	}

	return 0;
}

static int
sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	unsigned int *rss_tbl_new;
	uint16_t entry;
	int rc = 0;

	if (sfc_sa2shared(sa)->isolated)
		return -ENOTSUP;

	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
		sfc_err(sa, "RSS is not available");
		return -ENOTSUP;
	}

	if (rss->channels == 0) {
		sfc_err(sa, "RSS is not configured");
		return -EINVAL;
	}

	if (reta_size != EFX_RSS_TBL_SIZE) {
		sfc_err(sa, "RETA size is wrong (should be %u)",
			EFX_RSS_TBL_SIZE);
		return -EINVAL;
	}

	rss_tbl_new = rte_zmalloc("rss_tbl_new", sizeof(rss->tbl), 0);
	if (rss_tbl_new == NULL)
		return -ENOMEM;

	sfc_adapter_lock(sa);

	rte_memcpy(rss_tbl_new, rss->tbl, sizeof(rss->tbl));

	for (entry = 0; entry < reta_size; entry++) {
		int grp_idx = entry % RTE_RETA_GROUP_SIZE;
		struct rte_eth_rss_reta_entry64 *grp;

		grp = &reta_conf[entry / RTE_RETA_GROUP_SIZE];

		if (grp->mask & (1ull << grp_idx)) {
			if (grp->reta[grp_idx] >= rss->channels) {
				rc = EINVAL;
				goto bad_reta_entry;
			}
			rss_tbl_new[entry] = grp->reta[grp_idx];
		}
	}

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
					  rss_tbl_new, EFX_RSS_TBL_SIZE);
		if (rc != 0)
			goto fail_scale_tbl_set;
	}

	rte_memcpy(rss->tbl, rss_tbl_new, sizeof(rss->tbl));

fail_scale_tbl_set:
bad_reta_entry:
	sfc_adapter_unlock(sa);

	rte_free(rss_tbl_new);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
		    enum rte_filter_op filter_op,
		    void *arg)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	int rc = ENOTSUP;

	sfc_log_init(sa, "entry");

	switch (filter_type) {
	case RTE_ETH_FILTER_NONE:
		sfc_err(sa, "Global filters configuration not supported");
		break;
	case RTE_ETH_FILTER_MACVLAN:
		sfc_err(sa, "MACVLAN filters not supported");
		break;
	case RTE_ETH_FILTER_ETHERTYPE:
		sfc_err(sa, "EtherType filters not supported");
		break;
	case RTE_ETH_FILTER_FLEXIBLE:
		sfc_err(sa, "Flexible filters not supported");
		break;
	case RTE_ETH_FILTER_SYN:
		sfc_err(sa, "SYN filters not supported");
		break;
	case RTE_ETH_FILTER_NTUPLE:
		sfc_err(sa, "NTUPLE filters not supported");
		break;
	case RTE_ETH_FILTER_TUNNEL:
		sfc_err(sa, "Tunnel filters not supported");
		break;
	case RTE_ETH_FILTER_FDIR:
		sfc_err(sa, "Flow Director filters not supported");
		break;
	case RTE_ETH_FILTER_HASH:
		sfc_err(sa, "Hash filters not supported");
		break;
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET) {
			rc = EINVAL;
		} else {
			*(const void **)arg = &sfc_flow_ops;
			rc = 0;
		}
		break;
	default:
		sfc_err(sa, "Unknown filter type %u", filter_type);
		break;
	}

	sfc_log_init(sa, "exit: %d", -rc);
	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_pool_ops_supported(struct rte_eth_dev *dev, const char *pool)
{
	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);

	/*
	 * If Rx datapath does not provide callback to check mempool,
	 * all pools are supported.
	 */
	if (sap->dp_rx->pool_ops_supported == NULL)
		return 1;

	return sap->dp_rx->pool_ops_supported(pool);
}

static int
sfc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_rxq_info *rxq_info;

	SFC_ASSERT(queue_id < sas->rxq_count);
	rxq_info = &sas->rxq_info[queue_id];

	return sap->dp_rx->intr_enable(rxq_info->dp);
}

static int
sfc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_rxq_info *rxq_info;

	SFC_ASSERT(queue_id < sas->rxq_count);
	rxq_info = &sas->rxq_info[queue_id];

	return sap->dp_rx->intr_disable(rxq_info->dp);
}

static const struct eth_dev_ops sfc_eth_dev_ops = {
	.dev_configure = sfc_dev_configure,
	.dev_start = sfc_dev_start,
	.dev_stop = sfc_dev_stop,
	.dev_set_link_up = sfc_dev_set_link_up,
	.dev_set_link_down = sfc_dev_set_link_down,
	.dev_close = sfc_dev_close,
	.promiscuous_enable = sfc_dev_promisc_enable,
	.promiscuous_disable = sfc_dev_promisc_disable,
	.allmulticast_enable = sfc_dev_allmulti_enable,
	.allmulticast_disable = sfc_dev_allmulti_disable,
	.link_update = sfc_dev_link_update,
	.stats_get = sfc_stats_get,
	.stats_reset = sfc_stats_reset,
	.xstats_get = sfc_xstats_get,
	.xstats_reset = sfc_stats_reset,
	.xstats_get_names = sfc_xstats_get_names,
	.dev_infos_get = sfc_dev_infos_get,
	.dev_supported_ptypes_get = sfc_dev_supported_ptypes_get,
	.mtu_set = sfc_dev_set_mtu,
	.rx_queue_start = sfc_rx_queue_start,
	.rx_queue_stop = sfc_rx_queue_stop,
sfc_tx_queue_start, 1819 .tx_queue_stop = sfc_tx_queue_stop, 1820 .rx_queue_setup = sfc_rx_queue_setup, 1821 .rx_queue_release = sfc_rx_queue_release, 1822 .rx_queue_intr_enable = sfc_rx_queue_intr_enable, 1823 .rx_queue_intr_disable = sfc_rx_queue_intr_disable, 1824 .tx_queue_setup = sfc_tx_queue_setup, 1825 .tx_queue_release = sfc_tx_queue_release, 1826 .flow_ctrl_get = sfc_flow_ctrl_get, 1827 .flow_ctrl_set = sfc_flow_ctrl_set, 1828 .mac_addr_set = sfc_mac_addr_set, 1829 .udp_tunnel_port_add = sfc_dev_udp_tunnel_port_add, 1830 .udp_tunnel_port_del = sfc_dev_udp_tunnel_port_del, 1831 .reta_update = sfc_dev_rss_reta_update, 1832 .reta_query = sfc_dev_rss_reta_query, 1833 .rss_hash_update = sfc_dev_rss_hash_update, 1834 .rss_hash_conf_get = sfc_dev_rss_hash_conf_get, 1835 .filter_ctrl = sfc_dev_filter_ctrl, 1836 .set_mc_addr_list = sfc_set_mc_addr_list, 1837 .rxq_info_get = sfc_rx_queue_info_get, 1838 .txq_info_get = sfc_tx_queue_info_get, 1839 .fw_version_get = sfc_fw_version_get, 1840 .xstats_get_by_id = sfc_xstats_get_by_id, 1841 .xstats_get_names_by_id = sfc_xstats_get_names_by_id, 1842 .pool_ops_supported = sfc_pool_ops_supported, 1843 }; 1844 1845 /** 1846 * Duplicate a string in potentially shared memory required for 1847 * multi-process support. 1848 * 1849 * strdup() allocates from process-local heap/memory. 1850 */ 1851 static char * 1852 sfc_strdup(const char *str) 1853 { 1854 size_t size; 1855 char *copy; 1856 1857 if (str == NULL) 1858 return NULL; 1859 1860 size = strlen(str) + 1; 1861 copy = rte_malloc(__func__, size, 0); 1862 if (copy != NULL) 1863 rte_memcpy(copy, str, size); 1864 1865 return copy; 1866 } 1867 1868 static int 1869 sfc_eth_dev_set_ops(struct rte_eth_dev *dev) 1870 { 1871 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); 1872 struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev); 1873 const struct sfc_dp_rx *dp_rx; 1874 const struct sfc_dp_tx *dp_tx; 1875 const efx_nic_cfg_t *encp; 1876 unsigned int avail_caps = 0; 1877 const char *rx_name = NULL; 1878 const char *tx_name = NULL; 1879 int rc; 1880 1881 switch (sa->family) { 1882 case EFX_FAMILY_HUNTINGTON: 1883 case EFX_FAMILY_MEDFORD: 1884 case EFX_FAMILY_MEDFORD2: 1885 avail_caps |= SFC_DP_HW_FW_CAP_EF10; 1886 break; 1887 default: 1888 break; 1889 } 1890 1891 encp = efx_nic_cfg_get(sa->nic); 1892 if (encp->enc_rx_es_super_buffer_supported) 1893 avail_caps |= SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER; 1894 1895 rc = sfc_kvargs_process(sa, SFC_KVARG_RX_DATAPATH, 1896 sfc_kvarg_string_handler, &rx_name); 1897 if (rc != 0) 1898 goto fail_kvarg_rx_datapath; 1899 1900 if (rx_name != NULL) { 1901 dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, rx_name); 1902 if (dp_rx == NULL) { 1903 sfc_err(sa, "Rx datapath %s not found", rx_name); 1904 rc = ENOENT; 1905 goto fail_dp_rx; 1906 } 1907 if (!sfc_dp_match_hw_fw_caps(&dp_rx->dp, avail_caps)) { 1908 sfc_err(sa, 1909 "Insufficient Hw/FW capabilities to use Rx datapath %s", 1910 rx_name); 1911 rc = EINVAL; 1912 goto fail_dp_rx_caps; 1913 } 1914 } else { 1915 dp_rx = sfc_dp_find_rx_by_caps(&sfc_dp_head, avail_caps); 1916 if (dp_rx == NULL) { 1917 sfc_err(sa, "Rx datapath by caps %#x not found", 1918 avail_caps); 1919 rc = ENOENT; 1920 goto fail_dp_rx; 1921 } 1922 } 1923 1924 sas->dp_rx_name = sfc_strdup(dp_rx->dp.name); 1925 if (sas->dp_rx_name == NULL) { 1926 rc = ENOMEM; 1927 goto fail_dp_rx_name; 1928 } 1929 1930 sfc_notice(sa, "use %s Rx datapath", sas->dp_rx_name); 1931 1932 rc = sfc_kvargs_process(sa, SFC_KVARG_TX_DATAPATH, 1933 sfc_kvarg_string_handler, 
&tx_name); 1934 if (rc != 0) 1935 goto fail_kvarg_tx_datapath; 1936 1937 if (tx_name != NULL) { 1938 dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, tx_name); 1939 if (dp_tx == NULL) { 1940 sfc_err(sa, "Tx datapath %s not found", tx_name); 1941 rc = ENOENT; 1942 goto fail_dp_tx; 1943 } 1944 if (!sfc_dp_match_hw_fw_caps(&dp_tx->dp, avail_caps)) { 1945 sfc_err(sa, 1946 "Insufficient Hw/FW capabilities to use Tx datapath %s", 1947 tx_name); 1948 rc = EINVAL; 1949 goto fail_dp_tx_caps; 1950 } 1951 } else { 1952 dp_tx = sfc_dp_find_tx_by_caps(&sfc_dp_head, avail_caps); 1953 if (dp_tx == NULL) { 1954 sfc_err(sa, "Tx datapath by caps %#x not found", 1955 avail_caps); 1956 rc = ENOENT; 1957 goto fail_dp_tx; 1958 } 1959 } 1960 1961 sas->dp_tx_name = sfc_strdup(dp_tx->dp.name); 1962 if (sas->dp_tx_name == NULL) { 1963 rc = ENOMEM; 1964 goto fail_dp_tx_name; 1965 } 1966 1967 sfc_notice(sa, "use %s Tx datapath", sas->dp_tx_name); 1968 1969 sa->priv.dp_rx = dp_rx; 1970 sa->priv.dp_tx = dp_tx; 1971 1972 dev->rx_pkt_burst = dp_rx->pkt_burst; 1973 dev->tx_pkt_prepare = dp_tx->pkt_prepare; 1974 dev->tx_pkt_burst = dp_tx->pkt_burst; 1975 1976 dev->rx_queue_count = sfc_rx_queue_count; 1977 dev->rx_descriptor_done = sfc_rx_descriptor_done; 1978 dev->rx_descriptor_status = sfc_rx_descriptor_status; 1979 dev->tx_descriptor_status = sfc_tx_descriptor_status; 1980 dev->dev_ops = &sfc_eth_dev_ops; 1981 1982 return 0; 1983 1984 fail_dp_tx_name: 1985 fail_dp_tx_caps: 1986 fail_dp_tx: 1987 fail_kvarg_tx_datapath: 1988 rte_free(sas->dp_rx_name); 1989 sas->dp_rx_name = NULL; 1990 1991 fail_dp_rx_name: 1992 fail_dp_rx_caps: 1993 fail_dp_rx: 1994 fail_kvarg_rx_datapath: 1995 return rc; 1996 } 1997 1998 static void 1999 sfc_eth_dev_clear_ops(struct rte_eth_dev *dev) 2000 { 2001 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); 2002 struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev); 2003 2004 dev->dev_ops = NULL; 2005 dev->tx_pkt_prepare = NULL; 2006 dev->rx_pkt_burst = NULL; 2007 dev->tx_pkt_burst = NULL; 2008 2009 rte_free(sas->dp_tx_name); 2010 sas->dp_tx_name = NULL; 2011 sa->priv.dp_tx = NULL; 2012 2013 rte_free(sas->dp_rx_name); 2014 sas->dp_rx_name = NULL; 2015 sa->priv.dp_rx = NULL; 2016 } 2017 2018 static const struct eth_dev_ops sfc_eth_dev_secondary_ops = { 2019 .dev_supported_ptypes_get = sfc_dev_supported_ptypes_get, 2020 .reta_query = sfc_dev_rss_reta_query, 2021 .rss_hash_conf_get = sfc_dev_rss_hash_conf_get, 2022 .rxq_info_get = sfc_rx_queue_info_get, 2023 .txq_info_get = sfc_tx_queue_info_get, 2024 }; 2025 2026 static int 2027 sfc_eth_dev_secondary_init(struct rte_eth_dev *dev, uint32_t logtype_main) 2028 { 2029 struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev); 2030 struct sfc_adapter_priv *sap; 2031 const struct sfc_dp_rx *dp_rx; 2032 const struct sfc_dp_tx *dp_tx; 2033 int rc; 2034 2035 /* 2036 * Allocate process private data from heap, since it should not 2037 * be located in shared memory allocated using rte_malloc() API. 
	 */
	sap = calloc(1, sizeof(*sap));
	if (sap == NULL) {
		rc = ENOMEM;
		goto fail_alloc_priv;
	}

	sap->logtype_main = logtype_main;

	dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, sas->dp_rx_name);
	if (dp_rx == NULL) {
		SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
			"cannot find %s Rx datapath", sas->dp_rx_name);
		rc = ENOENT;
		goto fail_dp_rx;
	}
	if (~dp_rx->features & SFC_DP_RX_FEAT_MULTI_PROCESS) {
		SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
			"%s Rx datapath does not support multi-process",
			sas->dp_rx_name);
		rc = EINVAL;
		goto fail_dp_rx_multi_process;
	}

	dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, sas->dp_tx_name);
	if (dp_tx == NULL) {
		SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
			"cannot find %s Tx datapath", sas->dp_tx_name);
		rc = ENOENT;
		goto fail_dp_tx;
	}
	if (~dp_tx->features & SFC_DP_TX_FEAT_MULTI_PROCESS) {
		SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
			"%s Tx datapath does not support multi-process",
			sas->dp_tx_name);
		rc = EINVAL;
		goto fail_dp_tx_multi_process;
	}

	sap->dp_rx = dp_rx;
	sap->dp_tx = dp_tx;

	dev->process_private = sap;
	dev->rx_pkt_burst = dp_rx->pkt_burst;
	dev->tx_pkt_prepare = dp_tx->pkt_prepare;
	dev->tx_pkt_burst = dp_tx->pkt_burst;
	dev->rx_queue_count = sfc_rx_queue_count;
	dev->rx_descriptor_done = sfc_rx_descriptor_done;
	dev->rx_descriptor_status = sfc_rx_descriptor_status;
	dev->tx_descriptor_status = sfc_tx_descriptor_status;
	dev->dev_ops = &sfc_eth_dev_secondary_ops;

	return 0;

fail_dp_tx_multi_process:
fail_dp_tx:
fail_dp_rx_multi_process:
fail_dp_rx:
	free(sap);

fail_alloc_priv:
	return rc;
}

static void
sfc_eth_dev_secondary_clear_ops(struct rte_eth_dev *dev)
{
	free(dev->process_private);
	dev->process_private = NULL;
	dev->dev_ops = NULL;
	dev->tx_pkt_prepare = NULL;
	dev->tx_pkt_burst = NULL;
	dev->rx_pkt_burst = NULL;
}

static void
sfc_register_dp(void)
{
	/* Register once */
	if (TAILQ_EMPTY(&sfc_dp_head)) {
		/* Prefer EF10 datapath */
		sfc_dp_register(&sfc_dp_head, &sfc_ef10_essb_rx.dp);
		sfc_dp_register(&sfc_dp_head, &sfc_ef10_rx.dp);
		sfc_dp_register(&sfc_dp_head, &sfc_efx_rx.dp);

		sfc_dp_register(&sfc_dp_head, &sfc_ef10_tx.dp);
		sfc_dp_register(&sfc_dp_head, &sfc_efx_tx.dp);
		sfc_dp_register(&sfc_dp_head, &sfc_ef10_simple_tx.dp);
	}
}

static int
sfc_eth_dev_init(struct rte_eth_dev *dev)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	uint32_t logtype_main;
	struct sfc_adapter *sa;
	int rc;
	const efx_nic_cfg_t *encp;
	const struct rte_ether_addr *from;

	sfc_register_dp();

	logtype_main = sfc_register_logtype(&pci_dev->addr,
					    SFC_LOGTYPE_MAIN_STR,
					    RTE_LOG_NOTICE);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -sfc_eth_dev_secondary_init(dev, logtype_main);

	/* Required for logging */
	sas->pci_addr = pci_dev->addr;
	sas->port_id = dev->data->port_id;

	/*
	 * Allocate process private data from heap, since it should not
	 * be located in shared memory allocated using rte_malloc() API.
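	 *
	 * Here in the primary process the per-process state is the full
	 * sfc_adapter; its embedded priv member plays the role that the
	 * standalone sfc_adapter_priv plays in secondary processes.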
2156 */ 2157 sa = calloc(1, sizeof(*sa)); 2158 if (sa == NULL) { 2159 rc = ENOMEM; 2160 goto fail_alloc_sa; 2161 } 2162 2163 dev->process_private = sa; 2164 2165 /* Required for logging */ 2166 sa->priv.shared = sas; 2167 sa->priv.logtype_main = logtype_main; 2168 2169 sa->eth_dev = dev; 2170 2171 /* Copy PCI device info to the dev->data */ 2172 rte_eth_copy_pci_info(dev, pci_dev); 2173 2174 rc = sfc_kvargs_parse(sa); 2175 if (rc != 0) 2176 goto fail_kvargs_parse; 2177 2178 sfc_log_init(sa, "entry"); 2179 2180 dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; 2181 2182 dev->data->mac_addrs = rte_zmalloc("sfc", RTE_ETHER_ADDR_LEN, 0); 2183 if (dev->data->mac_addrs == NULL) { 2184 rc = ENOMEM; 2185 goto fail_mac_addrs; 2186 } 2187 2188 sfc_adapter_lock_init(sa); 2189 sfc_adapter_lock(sa); 2190 2191 sfc_log_init(sa, "probing"); 2192 rc = sfc_probe(sa); 2193 if (rc != 0) 2194 goto fail_probe; 2195 2196 sfc_log_init(sa, "set device ops"); 2197 rc = sfc_eth_dev_set_ops(dev); 2198 if (rc != 0) 2199 goto fail_set_ops; 2200 2201 sfc_log_init(sa, "attaching"); 2202 rc = sfc_attach(sa); 2203 if (rc != 0) 2204 goto fail_attach; 2205 2206 encp = efx_nic_cfg_get(sa->nic); 2207 2208 /* 2209 * The arguments are really reverse order in comparison to 2210 * Linux kernel. Copy from NIC config to Ethernet device data. 2211 */ 2212 from = (const struct rte_ether_addr *)(encp->enc_mac_addr); 2213 rte_ether_addr_copy(from, &dev->data->mac_addrs[0]); 2214 2215 sfc_adapter_unlock(sa); 2216 2217 sfc_log_init(sa, "done"); 2218 return 0; 2219 2220 fail_attach: 2221 sfc_eth_dev_clear_ops(dev); 2222 2223 fail_set_ops: 2224 sfc_unprobe(sa); 2225 2226 fail_probe: 2227 sfc_adapter_unlock(sa); 2228 sfc_adapter_lock_fini(sa); 2229 rte_free(dev->data->mac_addrs); 2230 dev->data->mac_addrs = NULL; 2231 2232 fail_mac_addrs: 2233 sfc_kvargs_cleanup(sa); 2234 2235 fail_kvargs_parse: 2236 sfc_log_init(sa, "failed %d", rc); 2237 dev->process_private = NULL; 2238 free(sa); 2239 2240 fail_alloc_sa: 2241 SFC_ASSERT(rc > 0); 2242 return -rc; 2243 } 2244 2245 static int 2246 sfc_eth_dev_uninit(struct rte_eth_dev *dev) 2247 { 2248 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 2249 sfc_eth_dev_secondary_clear_ops(dev); 2250 return 0; 2251 } 2252 2253 sfc_dev_close(dev); 2254 2255 return 0; 2256 } 2257 2258 static const struct rte_pci_id pci_id_sfc_efx_map[] = { 2259 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE) }, 2260 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE_VF) }, 2261 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT) }, 2262 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT_VF) }, 2263 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) }, 2264 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD_VF) }, 2265 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2) }, 2266 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2_VF) }, 2267 { .vendor_id = 0 /* sentinel */ } 2268 }; 2269 2270 static int sfc_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 2271 struct rte_pci_device *pci_dev) 2272 { 2273 return rte_eth_dev_pci_generic_probe(pci_dev, 2274 sizeof(struct sfc_adapter_shared), sfc_eth_dev_init); 2275 } 2276 2277 static int sfc_eth_dev_pci_remove(struct rte_pci_device *pci_dev) 2278 { 2279 return rte_eth_dev_pci_generic_remove(pci_dev, sfc_eth_dev_uninit); 2280 } 2281 2282 static struct rte_pci_driver sfc_efx_pmd = { 2283 .id_table = pci_id_sfc_efx_map, 2284 .drv_flags = 2285 RTE_PCI_DRV_INTR_LSC | 2286 RTE_PCI_DRV_NEED_MAPPING, 2287 
	.probe = sfc_eth_dev_pci_probe,
	.remove = sfc_eth_dev_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_sfc_efx, sfc_efx_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map);
RTE_PMD_REGISTER_KMOD_DEP(net_sfc_efx, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx,
	SFC_KVARG_RX_DATAPATH "=" SFC_KVARG_VALUES_RX_DATAPATH " "
	SFC_KVARG_TX_DATAPATH "=" SFC_KVARG_VALUES_TX_DATAPATH " "
	SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " "
	SFC_KVARG_FW_VARIANT "=" SFC_KVARG_VALUES_FW_VARIANT " "
	SFC_KVARG_RXD_WAIT_TIMEOUT_NS "=<long> "
	SFC_KVARG_STATS_UPDATE_PERIOD_MS "=<long>");

RTE_INIT(sfc_driver_register_logtype)
{
	int ret;

	ret = rte_log_register_type_and_pick_level(SFC_LOGTYPE_PREFIX "driver",
						   RTE_LOG_NOTICE);
	sfc_logtype_driver = (ret < 0) ? RTE_LOGTYPE_PMD : ret;
}
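
/*
 * The parameters registered above are passed as PCI device arguments;
 * for example (the device address and chosen values are illustrative
 * only):
 *
 *   0000:02:00.0,rx_datapath=ef10,tx_datapath=ef10_simple
 *
 * The datapath values correspond to the names registered in
 * sfc_register_dp() ("efx", "ef10", "ef10_essb" for Rx; "efx", "ef10",
 * "ef10_simple" for Tx).
 */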