/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2016-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_dev.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_ether.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_kvargs.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tx.h"
#include "sfc_flow.h"
#include "sfc_dp.h"
#include "sfc_dp_rx.h"

uint32_t sfc_logtype_driver;

static struct sfc_dp_list sfc_dp_head =
	TAILQ_HEAD_INITIALIZER(sfc_dp_head);


static void sfc_eth_dev_clear_ops(struct rte_eth_dev *dev);


static int
sfc_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	efx_nic_fw_info_t enfi;
	int ret;
	int rc;

	/*
	 * The return value of the callback is supposed to be greater than
	 * or equal to 0 on success; nevertheless, if an error occurs, it
	 * is desirable to pass it to the caller.
	 */
	if ((fw_version == NULL) || (fw_size == 0))
		return -EINVAL;

	rc = efx_nic_get_fw_version(sa->nic, &enfi);
	if (rc != 0)
		return -rc;

	ret = snprintf(fw_version, fw_size,
		       "%" PRIu16 ".%" PRIu16 ".%" PRIu16 ".%" PRIu16,
		       enfi.enfi_mc_fw_version[0], enfi.enfi_mc_fw_version[1],
		       enfi.enfi_mc_fw_version[2], enfi.enfi_mc_fw_version[3]);
	if (ret < 0)
		return ret;

	if (enfi.enfi_dpcpu_fw_ids_valid) {
		size_t dpcpu_fw_ids_offset = MIN(fw_size - 1, (size_t)ret);
		int ret_extra;

		ret_extra = snprintf(fw_version + dpcpu_fw_ids_offset,
				     fw_size - dpcpu_fw_ids_offset,
				     " rx%" PRIx16 " tx%" PRIx16,
				     enfi.enfi_rx_dpcpu_fw_id,
				     enfi.enfi_tx_dpcpu_fw_id);
		if (ret_extra < 0)
			return ret_extra;

		ret += ret_extra;
	}

	if (fw_size < (size_t)(++ret))
		return ret;
	else
		return 0;
}
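/*
 * Illustrative note (not part of the original sources): assuming an MC
 * firmware version of 6.2.0.1000 and DPCPU firmware ids 0x1/0x1, the
 * string produced above would look like
 *
 *	"6.2.0.1000 rx1 tx1"
 *
 * and, following snprintf() semantics, a truncated prefix is written
 * together with a positive return value when fw_size is too small.
 */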
static int
sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_rss *rss = &sas->rss;
	uint64_t txq_offloads_def = 0;

	sfc_log_init(sa, "entry");

	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	dev_info->max_mtu = EFX_MAC_SDU_MAX;

	dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;

	/* Autonegotiation may be disabled */
	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_1000FDX))
		dev_info->speed_capa |= ETH_LINK_SPEED_1G;
	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_10000FDX))
		dev_info->speed_capa |= ETH_LINK_SPEED_10G;
	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_25000FDX))
		dev_info->speed_capa |= ETH_LINK_SPEED_25G;
	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_40000FDX))
		dev_info->speed_capa |= ETH_LINK_SPEED_40G;
	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_50000FDX))
		dev_info->speed_capa |= ETH_LINK_SPEED_50G;
	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_100000FDX))
		dev_info->speed_capa |= ETH_LINK_SPEED_100G;

	dev_info->max_rx_queues = sa->rxq_max;
	dev_info->max_tx_queues = sa->txq_max;

	/* By default packets are dropped if no descriptors are available */
	dev_info->default_rxconf.rx_drop_en = 1;

	dev_info->rx_queue_offload_capa = sfc_rx_get_queue_offload_caps(sa);

	/*
	 * rx_offload_capa includes both device and queue offloads since
	 * the latter may be requested on a per-device basis, which makes
	 * sense when some offloads need to be set on all queues.
	 */
	dev_info->rx_offload_capa = sfc_rx_get_dev_offload_caps(sa) |
				    dev_info->rx_queue_offload_capa;

	dev_info->tx_queue_offload_capa = sfc_tx_get_queue_offload_caps(sa);

	/*
	 * tx_offload_capa includes both device and queue offloads since
	 * the latter may be requested on a per-device basis, which makes
	 * sense when some offloads need to be set on all queues.
	 */
	dev_info->tx_offload_capa = sfc_tx_get_dev_offload_caps(sa) |
				    dev_info->tx_queue_offload_capa;

	if (dev_info->tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		txq_offloads_def |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	dev_info->default_txconf.offloads |= txq_offloads_def;

	if (rss->context_type != EFX_RX_SCALE_UNAVAILABLE) {
		uint64_t rte_hf = 0;
		unsigned int i;

		for (i = 0; i < rss->hf_map_nb_entries; ++i)
			rte_hf |= rss->hf_map[i].rte;

		dev_info->reta_size = EFX_RSS_TBL_SIZE;
		dev_info->hash_key_size = EFX_RSS_KEY_SIZE;
		dev_info->flow_type_rss_offloads = rte_hf;
	}

	/* Initialize to hardware limits */
	dev_info->rx_desc_lim.nb_max = sa->rxq_max_entries;
	dev_info->rx_desc_lim.nb_min = sa->rxq_min_entries;
	/*
	 * The RXQ hardware requires that the descriptor count is a power
	 * of 2, but rx_desc_lim cannot properly describe that constraint.
	 */
	dev_info->rx_desc_lim.nb_align = sa->rxq_min_entries;

	/* Initialize to hardware limits */
	dev_info->tx_desc_lim.nb_max = sa->txq_max_entries;
	dev_info->tx_desc_lim.nb_min = sa->txq_min_entries;
	/*
	 * The TXQ hardware requires that the descriptor count is a power
	 * of 2, but tx_desc_lim cannot properly describe that constraint.
	 */
	dev_info->tx_desc_lim.nb_align = sa->txq_min_entries;

	if (sap->dp_rx->get_dev_info != NULL)
		sap->dp_rx->get_dev_info(dev_info);
	if (sap->dp_tx->get_dev_info != NULL)
		sap->dp_tx->get_dev_info(dev_info);

	dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
			     RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;

	return 0;
}
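/*
 * Illustrative note (not part of the original sources): per the ethdev
 * convention, the [rt]x_offload_capa reported above is a superset of the
 * corresponding per-queue capabilities, so an application may check a
 * queue-level offload against the device-level mask, e.g.:
 *
 *	struct rte_eth_dev_info di;
 *
 *	rte_eth_dev_info_get(port_id, &di);
 *	if (di.rx_offload_capa & DEV_RX_OFFLOAD_CHECKSUM)
 *		rxconf.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
 */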
static const uint32_t *
sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);

	return sap->dp_rx->supported_ptypes_get(sap->shared->tunnel_encaps);
}

static int
sfc_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *dev_data = dev->data;
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	int rc;

	sfc_log_init(sa, "entry n_rxq=%u n_txq=%u",
		     dev_data->nb_rx_queues, dev_data->nb_tx_queues);

	sfc_adapter_lock(sa);
	switch (sa->state) {
	case SFC_ADAPTER_CONFIGURED:
		/* FALLTHROUGH */
	case SFC_ADAPTER_INITIALIZED:
		rc = sfc_configure(sa);
		break;
	default:
		sfc_err(sa, "unexpected adapter state %u to configure",
			sa->state);
		rc = EINVAL;
		break;
	}
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done %d", rc);
	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_start(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	int rc;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	rc = sfc_start(sa);
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done %d", rc);
	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct rte_eth_link current_link;
	int ret;

	sfc_log_init(sa, "entry");

	if (sa->state != SFC_ADAPTER_STARTED) {
		sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &current_link);
	} else if (wait_to_complete) {
		efx_link_mode_t link_mode;

		if (efx_port_poll(sa->nic, &link_mode) != 0)
			link_mode = EFX_LINK_UNKNOWN;
		sfc_port_link_mode_to_info(link_mode, &current_link);
	} else {
		sfc_ev_mgmt_qpoll(sa);
		rte_eth_linkstatus_get(dev, &current_link);
	}

	ret = rte_eth_linkstatus_set(dev, &current_link);
	if (ret == 0)
		sfc_notice(sa, "Link status is %s",
			   current_link.link_status ? "UP" : "DOWN");

	return ret;
}

static void
sfc_dev_stop(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	sfc_stop(sa);
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
}

static int
sfc_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	int rc;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	rc = sfc_start(sa);
	sfc_adapter_unlock(sa);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	sfc_stop(sa);
	sfc_adapter_unlock(sa);

	return 0;
}

static void
sfc_dev_close(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	switch (sa->state) {
	case SFC_ADAPTER_STARTED:
		sfc_stop(sa);
		SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
		/* FALLTHROUGH */
	case SFC_ADAPTER_CONFIGURED:
		sfc_close(sa);
		SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED);
		/* FALLTHROUGH */
	case SFC_ADAPTER_INITIALIZED:
		break;
	default:
		sfc_err(sa, "unexpected adapter state %u on close", sa->state);
		break;
	}

	/*
	 * Cleanup all resources in accordance with RTE_ETH_DEV_CLOSE_REMOVE.
	 * Roll back primary process sfc_eth_dev_init() below.
	 */

	sfc_eth_dev_clear_ops(dev);

	sfc_detach(sa);
	sfc_unprobe(sa);

	sfc_kvargs_cleanup(sa);

	sfc_adapter_unlock(sa);
	sfc_adapter_lock_fini(sa);

	sfc_log_init(sa, "done");

	/* Required for logging, so cleanup last */
	sa->eth_dev = NULL;

	dev->process_private = NULL;
	free(sa);
}
static int
sfc_dev_filter_set(struct rte_eth_dev *dev, enum sfc_dev_filter_mode mode,
		   boolean_t enabled)
{
	struct sfc_port *port;
	boolean_t *toggle;
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	boolean_t allmulti = (mode == SFC_DEV_FILTER_MODE_ALLMULTI);
	const char *desc = (allmulti) ? "all-multi" : "promiscuous";
	int rc = 0;

	sfc_adapter_lock(sa);

	port = &sa->port;
	toggle = (allmulti) ? (&port->allmulti) : (&port->promisc);

	if (*toggle != enabled) {
		*toggle = enabled;

		if (sfc_sa2shared(sa)->isolated) {
			sfc_warn(sa, "isolated mode is active on the port");
			sfc_warn(sa, "the change is to be applied on the next "
				     "start provided that isolated mode is "
				     "disabled prior to the next start");
		} else if ((sa->state == SFC_ADAPTER_STARTED) &&
			   ((rc = sfc_set_rx_mode(sa)) != 0)) {
			*toggle = !(enabled);
			sfc_warn(sa, "Failed to %s %s mode, rc = %d",
				 ((enabled) ? "enable" : "disable"), desc, rc);

			/*
			 * For promiscuous and all-multicast filters a
			 * permission failure should be reported as an
			 * unsupported filter.
			 */
			if (rc == EPERM)
				rc = ENOTSUP;
		}
	}

	sfc_adapter_unlock(sa);
	return rc;
}

static int
sfc_dev_promisc_enable(struct rte_eth_dev *dev)
{
	int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_TRUE);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_promisc_disable(struct rte_eth_dev *dev)
{
	int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_FALSE);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_allmulti_enable(struct rte_eth_dev *dev)
{
	int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_TRUE);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_allmulti_disable(struct rte_eth_dev *dev)
{
	int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_FALSE);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		   uint16_t nb_rx_desc, unsigned int socket_id,
		   const struct rte_eth_rxconf *rx_conf,
		   struct rte_mempool *mb_pool)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	int rc;

	sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
		     rx_queue_id, nb_rx_desc, socket_id);

	sfc_adapter_lock(sa);

	rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
			  rx_conf, mb_pool);
	if (rc != 0)
		goto fail_rx_qinit;

	dev->data->rx_queues[rx_queue_id] = sas->rxq_info[rx_queue_id].dp;

	sfc_adapter_unlock(sa);

	return 0;

fail_rx_qinit:
	sfc_adapter_unlock(sa);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static void
sfc_rx_queue_release(void *queue)
{
	struct sfc_dp_rxq *dp_rxq = queue;
	struct sfc_rxq *rxq;
	struct sfc_adapter *sa;
	unsigned int sw_index;

	if (dp_rxq == NULL)
		return;

	rxq = sfc_rxq_by_dp_rxq(dp_rxq);
	sa = rxq->evq->sa;
	sfc_adapter_lock(sa);

	sw_index = dp_rxq->dpq.queue_id;

	sfc_log_init(sa, "RxQ=%u", sw_index);

	sfc_rx_qfini(sa, sw_index);

	sfc_adapter_unlock(sa);
}

static int
sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		   uint16_t nb_tx_desc, unsigned int socket_id,
		   const struct rte_eth_txconf *tx_conf)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	int rc;

	sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u",
		     tx_queue_id, nb_tx_desc, socket_id);

	sfc_adapter_lock(sa);

	rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf);
	if (rc != 0)
		goto fail_tx_qinit;

	dev->data->tx_queues[tx_queue_id] = sas->txq_info[tx_queue_id].dp;

	sfc_adapter_unlock(sa);
	return 0;

fail_tx_qinit:
	sfc_adapter_unlock(sa);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static void
sfc_tx_queue_release(void *queue)
{
	struct sfc_dp_txq *dp_txq = queue;
	struct sfc_txq *txq;
	unsigned int sw_index;
	struct sfc_adapter *sa;

	if (dp_txq == NULL)
		return;

	txq = sfc_txq_by_dp_txq(dp_txq);
	sw_index = dp_txq->dpq.queue_id;

	SFC_ASSERT(txq->evq != NULL);
	sa = txq->evq->sa;

	sfc_log_init(sa, "TxQ = %u", sw_index);

	sfc_adapter_lock(sa);

	sfc_tx_qfini(sa, sw_index);

	sfc_adapter_unlock(sa);
}
/*
 * Some statistics are computed as A - B where A and B each increase
 * monotonically with some hardware counter(s) and the counters are read
 * asynchronously.
 *
 * If packet X is counted in A, but not counted in B yet, the computed
 * value is greater than the real one.
 *
 * If packet X is not counted in A at the moment of reading the counter,
 * but counted in B at the moment of reading the counter, the computed
 * value is less than the real one.
 *
 * However, a counter which goes backward is a worse evil than a slightly
 * wrong value, so let's try to guarantee that it never happens except,
 * perhaps, when the MAC stats are zeroed as a result of a NIC reset.
 */
static void
sfc_update_diff_stat(uint64_t *stat, uint64_t newval)
{
	if ((int64_t)(newval - *stat) > 0 || newval == 0)
		*stat = newval;
}
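/*
 * Illustrative note (not part of the original sources): the signed
 * difference makes the check wraparound-safe. E.g. with *stat == 10 and
 * newval == 8 (a transiently too-small A - B), (int64_t)(8 - 10) < 0 and
 * the stored value keeps its previous maximum of 10; once newval reaches
 * 11, the difference is positive and the statistic moves forward again.
 */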
static int
sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
	uint64_t *mac_stats;
	int ret;

	rte_spinlock_lock(&port->mac_stats_lock);

	ret = sfc_port_update_mac_stats(sa);
	if (ret != 0)
		goto unlock;

	mac_stats = port->mac_stats_buf;

	if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask,
				   EFX_MAC_VADAPTER_RX_UNICAST_PACKETS)) {
		stats->ipackets =
			mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS];
		stats->opackets =
			mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS];
		stats->ibytes =
			mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES];
		stats->obytes =
			mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES];
		stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS];
		stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS];
	} else {
		stats->opackets = mac_stats[EFX_MAC_TX_PKTS];
		stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS];
		stats->obytes = mac_stats[EFX_MAC_TX_OCTETS];
		/*
		 * Take into account stats which may be supported on EF10.
		 * If some stat is not supported by the current firmware
		 * variant or HW revision, it is guaranteed to be zero in
		 * mac_stats.
		 */
		stats->imissed =
			mac_stats[EFX_MAC_RX_NODESC_DROP_CNT] +
			mac_stats[EFX_MAC_PM_TRUNC_BB_OVERFLOW] +
			mac_stats[EFX_MAC_PM_DISCARD_BB_OVERFLOW] +
			mac_stats[EFX_MAC_PM_TRUNC_VFIFO_FULL] +
			mac_stats[EFX_MAC_PM_DISCARD_VFIFO_FULL] +
			mac_stats[EFX_MAC_PM_TRUNC_QBB] +
			mac_stats[EFX_MAC_PM_DISCARD_QBB] +
			mac_stats[EFX_MAC_PM_DISCARD_MAPPING] +
			mac_stats[EFX_MAC_RXDP_Q_DISABLED_PKTS] +
			mac_stats[EFX_MAC_RXDP_DI_DROPPED_PKTS];
		stats->ierrors =
			mac_stats[EFX_MAC_RX_FCS_ERRORS] +
			mac_stats[EFX_MAC_RX_ALIGN_ERRORS] +
			mac_stats[EFX_MAC_RX_JABBER_PKTS];
		/* no oerrors counters supported on EF10 */

		/* Exclude missed, errors and pauses from Rx packets */
		sfc_update_diff_stat(&port->ipackets,
			mac_stats[EFX_MAC_RX_PKTS] -
			mac_stats[EFX_MAC_RX_PAUSE_PKTS] -
			stats->imissed - stats->ierrors);
		stats->ipackets = port->ipackets;
	}

unlock:
	rte_spinlock_unlock(&port->mac_stats_lock);
	SFC_ASSERT(ret >= 0);
	return -ret;
}

static int
sfc_stats_reset(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
	int rc;

	if (sa->state != SFC_ADAPTER_STARTED) {
		/*
		 * The operation cannot be done if the port is not started;
		 * it will be scheduled to be done during the next port start.
		 */
		port->mac_stats_reset_pending = B_TRUE;
		return 0;
	}

	rc = sfc_port_reset_mac_stats(sa);
	if (rc != 0)
		sfc_err(sa, "failed to reset statistics (rc = %d)", rc);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
	       unsigned int xstats_count)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
	uint64_t *mac_stats;
	int rc;
	unsigned int i;
	int nstats = 0;

	rte_spinlock_lock(&port->mac_stats_lock);

	rc = sfc_port_update_mac_stats(sa);
	if (rc != 0) {
		SFC_ASSERT(rc > 0);
		nstats = -rc;
		goto unlock;
	}

	mac_stats = port->mac_stats_buf;

	for (i = 0; i < EFX_MAC_NSTATS; ++i) {
		if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
			if (xstats != NULL && nstats < (int)xstats_count) {
				xstats[nstats].id = nstats;
				xstats[nstats].value = mac_stats[i];
			}
			nstats++;
		}
	}

unlock:
	rte_spinlock_unlock(&port->mac_stats_lock);

	return nstats;
}

static int
sfc_xstats_get_names(struct rte_eth_dev *dev,
		     struct rte_eth_xstat_name *xstats_names,
		     unsigned int xstats_count)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
	unsigned int i;
	unsigned int nstats = 0;

	for (i = 0; i < EFX_MAC_NSTATS; ++i) {
		if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
			if (xstats_names != NULL && nstats < xstats_count)
				strlcpy(xstats_names[nstats].name,
					efx_mac_stat_name(sa->nic, i),
					sizeof(xstats_names[0].name));
			nstats++;
		}
	}

	return nstats;
}
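/*
 * Illustrative note (not part of the original sources): xstat ids are
 * assigned densely, in EFX_MAC_NSTATS order, over the statistics present
 * in port->mac_stats_mask, so an id stays stable for as long as the mask
 * is unchanged and the by-id lookups below can index supported stats
 * without a separate id translation table.
 */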
static int
sfc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		     uint64_t *values, unsigned int n)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
	uint64_t *mac_stats;
	unsigned int nb_supported = 0;
	unsigned int nb_written = 0;
	unsigned int i;
	int ret;
	int rc;

	if (unlikely(values == NULL) ||
	    unlikely((ids == NULL) && (n < port->mac_stats_nb_supported)))
		return port->mac_stats_nb_supported;

	rte_spinlock_lock(&port->mac_stats_lock);

	rc = sfc_port_update_mac_stats(sa);
	if (rc != 0) {
		SFC_ASSERT(rc > 0);
		ret = -rc;
		goto unlock;
	}

	mac_stats = port->mac_stats_buf;

	for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < n); ++i) {
		if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i))
			continue;

		if ((ids == NULL) || (ids[nb_written] == nb_supported))
			values[nb_written++] = mac_stats[i];

		++nb_supported;
	}

	ret = nb_written;

unlock:
	rte_spinlock_unlock(&port->mac_stats_lock);

	return ret;
}

static int
sfc_xstats_get_names_by_id(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *xstats_names,
			   const uint64_t *ids, unsigned int size)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
	unsigned int nb_supported = 0;
	unsigned int nb_written = 0;
	unsigned int i;

	if (unlikely(xstats_names == NULL) ||
	    unlikely((ids == NULL) && (size < port->mac_stats_nb_supported)))
		return port->mac_stats_nb_supported;

	for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < size); ++i) {
		if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i))
			continue;

		if ((ids == NULL) || (ids[nb_written] == nb_supported)) {
			char *name = xstats_names[nb_written++].name;

			strlcpy(name, efx_mac_stat_name(sa->nic, i),
				sizeof(xstats_names[0].name));
		}

		++nb_supported;
	}

	return nb_written;
}
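/*
 * Illustrative note (not part of the original sources): in both by-id
 * callbacks above, ids == NULL follows the ethdev convention of "return
 * everything"; the caller must then provide room for all supported
 * statistics, otherwise only the required array size is reported. With a
 * non-NULL ids array, the ids are expected in ascending order, matching
 * the single pass over the supported-statistics mask.
 */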
static int
sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	unsigned int wanted_fc, link_fc;

	memset(fc_conf, 0, sizeof(*fc_conf));

	sfc_adapter_lock(sa);

	if (sa->state == SFC_ADAPTER_STARTED)
		efx_mac_fcntl_get(sa->nic, &wanted_fc, &link_fc);
	else
		link_fc = sa->port.flow_ctrl;

	switch (link_fc) {
	case 0:
		fc_conf->mode = RTE_FC_NONE;
		break;
	case EFX_FCNTL_RESPOND:
		fc_conf->mode = RTE_FC_RX_PAUSE;
		break;
	case EFX_FCNTL_GENERATE:
		fc_conf->mode = RTE_FC_TX_PAUSE;
		break;
	case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE):
		fc_conf->mode = RTE_FC_FULL;
		break;
	default:
		sfc_err(sa, "%s: unexpected flow control value %#x",
			__func__, link_fc);
	}

	fc_conf->autoneg = sa->port.flow_ctrl_autoneg;

	sfc_adapter_unlock(sa);

	return 0;
}

static int
sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
	unsigned int fcntl;
	int rc;

	if (fc_conf->high_water != 0 || fc_conf->low_water != 0 ||
	    fc_conf->pause_time != 0 || fc_conf->send_xon != 0 ||
	    fc_conf->mac_ctrl_frame_fwd != 0) {
		sfc_err(sa, "unsupported flow control settings specified");
		rc = EINVAL;
		goto fail_inval;
	}

	switch (fc_conf->mode) {
	case RTE_FC_NONE:
		fcntl = 0;
		break;
	case RTE_FC_RX_PAUSE:
		fcntl = EFX_FCNTL_RESPOND;
		break;
	case RTE_FC_TX_PAUSE:
		fcntl = EFX_FCNTL_GENERATE;
		break;
	case RTE_FC_FULL:
		fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
		break;
	default:
		rc = EINVAL;
		goto fail_inval;
	}

	sfc_adapter_lock(sa);

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = efx_mac_fcntl_set(sa->nic, fcntl, fc_conf->autoneg);
		if (rc != 0)
			goto fail_mac_fcntl_set;
	}

	port->flow_ctrl = fcntl;
	port->flow_ctrl_autoneg = fc_conf->autoneg;

	sfc_adapter_unlock(sa);

	return 0;

fail_mac_fcntl_set:
	sfc_adapter_unlock(sa);
fail_inval:
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_check_scatter_on_all_rx_queues(struct sfc_adapter *sa, size_t pdu)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	boolean_t scatter_enabled;
	const char *error;
	unsigned int i;

	for (i = 0; i < sas->rxq_count; i++) {
		if ((sas->rxq_info[i].state & SFC_RXQ_INITIALIZED) == 0)
			continue;

		scatter_enabled = (sas->rxq_info[i].type_flags &
				   EFX_RXQ_FLAG_SCATTER);

		if (!sfc_rx_check_scatter(pdu, sa->rxq_ctrl[i].buf_size,
					  encp->enc_rx_prefix_size,
					  scatter_enabled, &error)) {
			sfc_err(sa, "MTU check for RxQ %u failed: %s", i,
				error);
			return EINVAL;
		}
	}

	return 0;
}

static int
sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	size_t pdu = EFX_MAC_PDU(mtu);
	size_t old_pdu;
	int rc;

	sfc_log_init(sa, "mtu=%u", mtu);

	rc = EINVAL;
	if (pdu < EFX_MAC_PDU_MIN) {
		sfc_err(sa, "too small MTU %u (PDU size %u less than min %u)",
			(unsigned int)mtu, (unsigned int)pdu,
			EFX_MAC_PDU_MIN);
		goto fail_inval;
	}
	if (pdu > EFX_MAC_PDU_MAX) {
		sfc_err(sa, "too big MTU %u (PDU size %u greater than max %u)",
			(unsigned int)mtu, (unsigned int)pdu,
			(unsigned int)EFX_MAC_PDU_MAX);
		goto fail_inval;
	}

	sfc_adapter_lock(sa);

	rc = sfc_check_scatter_on_all_rx_queues(sa, pdu);
	if (rc != 0)
		goto fail_check_scatter;

	if (pdu != sa->port.pdu) {
		if (sa->state == SFC_ADAPTER_STARTED) {
			sfc_stop(sa);

			old_pdu = sa->port.pdu;
			sa->port.pdu = pdu;
			rc = sfc_start(sa);
			if (rc != 0)
				goto fail_start;
		} else {
			sa->port.pdu = pdu;
		}
	}

	/*
	 * The driver does not use it, but other PMDs update jumbo frame
	 * flag and max_rx_pkt_len when MTU is set.
	 */
	if (mtu > RTE_ETHER_MAX_LEN) {
		struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
		rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	}

	dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu;

	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
	return 0;

fail_start:
	sa->port.pdu = old_pdu;
	if (sfc_start(sa) != 0)
		sfc_err(sa, "cannot start with neither new (%u) nor old (%u) "
			"PDU max size - port is stopped",
			(unsigned int)pdu, (unsigned int)old_pdu);

fail_check_scatter:
	sfc_adapter_unlock(sa);

fail_inval:
	sfc_log_init(sa, "failed %d", rc);
	SFC_ASSERT(rc > 0);
	return -rc;
}
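/*
 * Illustrative note (not part of the original sources): EFX_MAC_PDU()
 * converts an MTU (the maximum L3 payload) into the maximum on-wire
 * frame size by adding the Ethernet header and related overhead, so the
 * bounds checked above are frame-size bounds. E.g. the default MTU of
 * 1500 is assumed to yield a PDU somewhat larger than 1514 bytes once
 * VLAN tag space and libefx rounding are accounted for.
 */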
static int
sfc_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	struct sfc_port *port = &sa->port;
	struct rte_ether_addr *old_addr = &dev->data->mac_addrs[0];
	int rc = 0;

	sfc_adapter_lock(sa);

	/*
	 * Copy the address to the device private data so that
	 * it could be recalled in the case of adapter restart.
	 */
	rte_ether_addr_copy(mac_addr, &port->default_mac_addr);

	/*
	 * Neither of the two following checks can return
	 * an error. The new MAC address is preserved in
	 * the device private data and can be activated
	 * on the next port start if the user prevents
	 * isolated mode from being enabled.
	 */
	if (sfc_sa2shared(sa)->isolated) {
		sfc_warn(sa, "isolated mode is active on the port");
		sfc_warn(sa, "will not set MAC address");
		goto unlock;
	}

	if (sa->state != SFC_ADAPTER_STARTED) {
		sfc_notice(sa, "the port is not started");
		sfc_notice(sa, "the new MAC address will be set on port start");

		goto unlock;
	}

	if (encp->enc_allow_set_mac_with_installed_filters) {
		rc = efx_mac_addr_set(sa->nic, mac_addr->addr_bytes);
		if (rc != 0) {
			sfc_err(sa, "cannot set MAC address (rc = %u)", rc);
			goto unlock;
		}

		/*
		 * Changing the MAC address by means of MCDI request
		 * has no effect on received traffic, therefore
		 * we also need to update unicast filters.
		 */
		rc = sfc_set_rx_mode_unchecked(sa);
		if (rc != 0) {
			sfc_err(sa, "cannot set filter (rc = %u)", rc);
			/* Rollback the old address */
			(void)efx_mac_addr_set(sa->nic, old_addr->addr_bytes);
			(void)sfc_set_rx_mode_unchecked(sa);
		}
	} else {
		sfc_warn(sa, "cannot set MAC address with filters installed");
		sfc_warn(sa, "adapter will be restarted to pick the new MAC");
		sfc_warn(sa, "(some traffic may be dropped)");

		/*
		 * Since setting MAC address with filters installed is not
		 * allowed on the adapter, the new MAC address will be set
		 * by means of adapter restart. sfc_start() shall retrieve
		 * the new address from the device private data and set it.
		 */
		sfc_stop(sa);
		rc = sfc_start(sa);
		if (rc != 0)
			sfc_err(sa, "cannot restart adapter (rc = %u)", rc);
	}

unlock:
	if (rc != 0)
		rte_ether_addr_copy(old_addr, &port->default_mac_addr);

	sfc_adapter_unlock(sa);

	SFC_ASSERT(rc >= 0);
	return -rc;
}


static int
sfc_set_mc_addr_list(struct rte_eth_dev *dev,
		     struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
	uint8_t *mc_addrs = port->mcast_addrs;
	int rc;
	unsigned int i;

	if (sfc_sa2shared(sa)->isolated) {
		sfc_err(sa, "isolated mode is active on the port");
		sfc_err(sa, "will not set multicast address list");
		return -ENOTSUP;
	}

	if (mc_addrs == NULL)
		return -ENOBUFS;

	if (nb_mc_addr > port->max_mcast_addrs) {
		sfc_err(sa, "too many multicast addresses: %u > %u",
			nb_mc_addr, port->max_mcast_addrs);
		return -EINVAL;
	}

	for (i = 0; i < nb_mc_addr; ++i) {
		rte_memcpy(mc_addrs, mc_addr_set[i].addr_bytes,
			   EFX_MAC_ADDR_LEN);
		mc_addrs += EFX_MAC_ADDR_LEN;
	}

	port->nb_mcast_addrs = nb_mc_addr;

	if (sa->state != SFC_ADAPTER_STARTED)
		return 0;

	rc = efx_mac_multicast_list_set(sa->nic, port->mcast_addrs,
					port->nb_mcast_addrs);
	if (rc != 0)
		sfc_err(sa, "cannot set multicast address list (rc = %u)", rc);

	SFC_ASSERT(rc >= 0);
	return -rc;
}
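/*
 * Illustrative usage sketch (not part of the original sources): an
 * application replaces the whole multicast list in one call, e.g.
 *
 *	struct rte_ether_addr mc[2] = { ... };
 *
 *	rte_eth_dev_set_mc_addr_list(port_id, mc, RTE_DIM(mc));
 *
 * and passing nb_mc_addr == 0 clears the list; the callback above stores
 * the list in the port data so that it survives an adapter restart.
 */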
/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static void
sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		      struct rte_eth_rxq_info *qinfo)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_rxq_info *rxq_info;

	SFC_ASSERT(rx_queue_id < sas->rxq_count);

	rxq_info = &sas->rxq_info[rx_queue_id];

	qinfo->mp = rxq_info->refill_mb_pool;
	qinfo->conf.rx_free_thresh = rxq_info->refill_threshold;
	qinfo->conf.rx_drop_en = 1;
	qinfo->conf.rx_deferred_start = rxq_info->deferred_start;
	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
	if (rxq_info->type_flags & EFX_RXQ_FLAG_SCATTER) {
		qinfo->conf.offloads |= DEV_RX_OFFLOAD_SCATTER;
		qinfo->scattered_rx = 1;
	}
	qinfo->nb_desc = rxq_info->entries;
}

/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static void
sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		      struct rte_eth_txq_info *qinfo)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_txq_info *txq_info;

	SFC_ASSERT(tx_queue_id < sas->txq_count);

	txq_info = &sas->txq_info[tx_queue_id];

	memset(qinfo, 0, sizeof(*qinfo));

	qinfo->conf.offloads = txq_info->offloads;
	qinfo->conf.tx_free_thresh = txq_info->free_thresh;
	qinfo->conf.tx_deferred_start = txq_info->deferred_start;
	qinfo->nb_desc = txq_info->entries;
}

/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static uint32_t
sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_rxq_info *rxq_info;

	SFC_ASSERT(rx_queue_id < sas->rxq_count);
	rxq_info = &sas->rxq_info[rx_queue_id];

	if ((rxq_info->state & SFC_RXQ_STARTED) == 0)
		return 0;

	return sap->dp_rx->qdesc_npending(rxq_info->dp);
}

/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static int
sfc_rx_descriptor_done(void *queue, uint16_t offset)
{
	struct sfc_dp_rxq *dp_rxq = queue;
	const struct sfc_dp_rx *dp_rx;

	dp_rx = sfc_dp_rx_by_dp_rxq(dp_rxq);

	return offset < dp_rx->qdesc_npending(dp_rxq);
}

/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static int
sfc_rx_descriptor_status(void *queue, uint16_t offset)
{
	struct sfc_dp_rxq *dp_rxq = queue;
	const struct sfc_dp_rx *dp_rx;

	dp_rx = sfc_dp_rx_by_dp_rxq(dp_rxq);

	return dp_rx->qdesc_status(dp_rxq, offset);
}
/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static int
sfc_tx_descriptor_status(void *queue, uint16_t offset)
{
	struct sfc_dp_txq *dp_txq = queue;
	const struct sfc_dp_tx *dp_tx;

	dp_tx = sfc_dp_tx_by_dp_txq(dp_txq);

	return dp_tx->qdesc_status(dp_txq, offset);
}

static int
sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	int rc;

	sfc_log_init(sa, "RxQ=%u", rx_queue_id);

	sfc_adapter_lock(sa);

	rc = EINVAL;
	if (sa->state != SFC_ADAPTER_STARTED)
		goto fail_not_started;

	if (sas->rxq_info[rx_queue_id].state != SFC_RXQ_INITIALIZED)
		goto fail_not_setup;

	rc = sfc_rx_qstart(sa, rx_queue_id);
	if (rc != 0)
		goto fail_rx_qstart;

	sas->rxq_info[rx_queue_id].deferred_started = B_TRUE;

	sfc_adapter_unlock(sa);

	return 0;

fail_rx_qstart:
fail_not_setup:
fail_not_started:
	sfc_adapter_unlock(sa);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);

	sfc_log_init(sa, "RxQ=%u", rx_queue_id);

	sfc_adapter_lock(sa);
	sfc_rx_qstop(sa, rx_queue_id);

	sas->rxq_info[rx_queue_id].deferred_started = B_FALSE;

	sfc_adapter_unlock(sa);

	return 0;
}

static int
sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	int rc;

	sfc_log_init(sa, "TxQ = %u", tx_queue_id);

	sfc_adapter_lock(sa);

	rc = EINVAL;
	if (sa->state != SFC_ADAPTER_STARTED)
		goto fail_not_started;

	if (sas->txq_info[tx_queue_id].state != SFC_TXQ_INITIALIZED)
		goto fail_not_setup;

	rc = sfc_tx_qstart(sa, tx_queue_id);
	if (rc != 0)
		goto fail_tx_qstart;

	sas->txq_info[tx_queue_id].deferred_started = B_TRUE;

	sfc_adapter_unlock(sa);
	return 0;

fail_tx_qstart:
fail_not_setup:
fail_not_started:
	sfc_adapter_unlock(sa);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);

	sfc_log_init(sa, "TxQ = %u", tx_queue_id);

	sfc_adapter_lock(sa);

	sfc_tx_qstop(sa, tx_queue_id);

	sas->txq_info[tx_queue_id].deferred_started = B_FALSE;

	sfc_adapter_unlock(sa);
	return 0;
}
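/*
 * Illustrative usage sketch (not part of the original sources): the
 * queue start/stop callbacks above back the deferred-start flow, e.g.
 *
 *	rxconf.rx_deferred_start = 1;
 *	rte_eth_rx_queue_setup(port_id, q, nb_desc, socket, &rxconf, mp);
 *	rte_eth_dev_start(port_id);		 // queue q stays stopped
 *	rte_eth_dev_rx_queue_start(port_id, q);	 // started on demand
 */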
static efx_tunnel_protocol_t
sfc_tunnel_rte_type_to_efx_udp_proto(enum rte_eth_tunnel_type rte_type)
{
	switch (rte_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		return EFX_TUNNEL_PROTOCOL_VXLAN;
	case RTE_TUNNEL_TYPE_GENEVE:
		return EFX_TUNNEL_PROTOCOL_GENEVE;
	default:
		return EFX_TUNNEL_NPROTOS;
	}
}

enum sfc_udp_tunnel_op_e {
	SFC_UDP_TUNNEL_ADD_PORT,
	SFC_UDP_TUNNEL_DEL_PORT,
};

static int
sfc_dev_udp_tunnel_op(struct rte_eth_dev *dev,
		      struct rte_eth_udp_tunnel *tunnel_udp,
		      enum sfc_udp_tunnel_op_e op)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	efx_tunnel_protocol_t tunnel_proto;
	int rc;

	sfc_log_init(sa, "%s udp_port=%u prot_type=%u",
		     (op == SFC_UDP_TUNNEL_ADD_PORT) ? "add" :
		     (op == SFC_UDP_TUNNEL_DEL_PORT) ? "delete" : "unknown",
		     tunnel_udp->udp_port, tunnel_udp->prot_type);

	tunnel_proto =
		sfc_tunnel_rte_type_to_efx_udp_proto(tunnel_udp->prot_type);
	if (tunnel_proto >= EFX_TUNNEL_NPROTOS) {
		rc = ENOTSUP;
		goto fail_bad_proto;
	}

	sfc_adapter_lock(sa);

	switch (op) {
	case SFC_UDP_TUNNEL_ADD_PORT:
		rc = efx_tunnel_config_udp_add(sa->nic,
					       tunnel_udp->udp_port,
					       tunnel_proto);
		break;
	case SFC_UDP_TUNNEL_DEL_PORT:
		rc = efx_tunnel_config_udp_remove(sa->nic,
						  tunnel_udp->udp_port,
						  tunnel_proto);
		break;
	default:
		rc = EINVAL;
		goto fail_bad_op;
	}

	if (rc != 0)
		goto fail_op;

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = efx_tunnel_reconfigure(sa->nic);
		if (rc == EAGAIN) {
			/*
			 * Configuration is accepted by FW and MC reboot
			 * is initiated to apply the changes. MC reboot
			 * will be handled in a usual way (MC reboot
			 * event on management event queue and adapter
			 * restart).
			 */
			rc = 0;
		} else if (rc != 0) {
			goto fail_reconfigure;
		}
	}

	sfc_adapter_unlock(sa);
	return 0;

fail_reconfigure:
	/* Remove/restore the entry since the change caused the trouble */
	switch (op) {
	case SFC_UDP_TUNNEL_ADD_PORT:
		(void)efx_tunnel_config_udp_remove(sa->nic,
						   tunnel_udp->udp_port,
						   tunnel_proto);
		break;
	case SFC_UDP_TUNNEL_DEL_PORT:
		(void)efx_tunnel_config_udp_add(sa->nic,
						tunnel_udp->udp_port,
						tunnel_proto);
		break;
	}

fail_op:
fail_bad_op:
	sfc_adapter_unlock(sa);

fail_bad_proto:
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
			    struct rte_eth_udp_tunnel *tunnel_udp)
{
	return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_ADD_PORT);
}

static int
sfc_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
			    struct rte_eth_udp_tunnel *tunnel_udp)
{
	return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_DEL_PORT);
}
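/*
 * Illustrative usage sketch (not part of the original sources): an
 * application registers the IANA-assigned VXLAN port as follows.
 *
 *	struct rte_eth_udp_tunnel tunnel = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *	};
 *
 *	rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 *
 * Note that applying the change may trigger an MC reboot and, hence, a
 * transparent adapter restart, as described above.
 */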
/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static int
sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_rss *rss = &sas->rss;

	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE)
		return -ENOTSUP;

	/*
	 * Mapping of hash configuration between RTE and EFX is not one-to-one,
	 * hence, conversion is done here to derive a correct set of ETH_RSS
	 * flags which corresponds to the active EFX configuration stored
	 * locally in 'sfc_adapter' and kept up-to-date.
	 */
	rss_conf->rss_hf = sfc_rx_hf_efx_to_rte(rss, rss->hash_types);
	rss_conf->rss_key_len = EFX_RSS_KEY_SIZE;
	if (rss_conf->rss_key != NULL)
		rte_memcpy(rss_conf->rss_key, rss->key, EFX_RSS_KEY_SIZE);

	return 0;
}

static int
sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	unsigned int efx_hash_types;
	int rc = 0;

	if (sfc_sa2shared(sa)->isolated)
		return -ENOTSUP;

	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
		sfc_err(sa, "RSS is not available");
		return -ENOTSUP;
	}

	if (rss->channels == 0) {
		sfc_err(sa, "RSS is not configured");
		return -EINVAL;
	}

	if ((rss_conf->rss_key != NULL) &&
	    (rss_conf->rss_key_len != sizeof(rss->key))) {
		sfc_err(sa, "RSS key size is wrong (should be %zu)",
			sizeof(rss->key));
		return -EINVAL;
	}

	sfc_adapter_lock(sa);

	rc = sfc_rx_hf_rte_to_efx(sa, rss_conf->rss_hf, &efx_hash_types);
	if (rc != 0)
		goto fail_rx_hf_rte_to_efx;

	rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
				   rss->hash_alg, efx_hash_types, B_TRUE);
	if (rc != 0)
		goto fail_scale_mode_set;

	if (rss_conf->rss_key != NULL) {
		if (sa->state == SFC_ADAPTER_STARTED) {
			rc = efx_rx_scale_key_set(sa->nic,
						  EFX_RSS_CONTEXT_DEFAULT,
						  rss_conf->rss_key,
						  sizeof(rss->key));
			if (rc != 0)
				goto fail_scale_key_set;
		}

		rte_memcpy(rss->key, rss_conf->rss_key, sizeof(rss->key));
	}

	rss->hash_types = efx_hash_types;

	sfc_adapter_unlock(sa);

	return 0;

fail_scale_key_set:
	if (efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
				  EFX_RX_HASHALG_TOEPLITZ,
				  rss->hash_types, B_TRUE) != 0)
		sfc_err(sa, "failed to restore RSS mode");

fail_scale_mode_set:
fail_rx_hf_rte_to_efx:
	sfc_adapter_unlock(sa);
	return -rc;
}
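/*
 * Illustrative usage sketch (not part of the original sources): an
 * update with a key must supply exactly sizeof(rss->key) ==
 * EFX_RSS_KEY_SIZE bytes (a standard Toeplitz hash key length), e.g.
 *
 *	struct rte_eth_rss_conf conf = {
 *		.rss_key = key_bytes,
 *		.rss_key_len = EFX_RSS_KEY_SIZE,
 *		.rss_hf = ETH_RSS_IP,
 *	};
 *
 *	rte_eth_dev_rss_hash_update(port_id, &conf);
 */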
/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static int
sfc_dev_rss_reta_query(struct rte_eth_dev *dev,
		       struct rte_eth_rss_reta_entry64 *reta_conf,
		       uint16_t reta_size)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_rss *rss = &sas->rss;
	int entry;

	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE || sas->isolated)
		return -ENOTSUP;

	if (rss->channels == 0)
		return -EINVAL;

	if (reta_size != EFX_RSS_TBL_SIZE)
		return -EINVAL;

	for (entry = 0; entry < reta_size; entry++) {
		int grp = entry / RTE_RETA_GROUP_SIZE;
		int grp_idx = entry % RTE_RETA_GROUP_SIZE;

		if ((reta_conf[grp].mask >> grp_idx) & 1)
			reta_conf[grp].reta[grp_idx] = rss->tbl[entry];
	}

	return 0;
}

static int
sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	unsigned int *rss_tbl_new;
	uint16_t entry;
	int rc = 0;

	if (sfc_sa2shared(sa)->isolated)
		return -ENOTSUP;

	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
		sfc_err(sa, "RSS is not available");
		return -ENOTSUP;
	}

	if (rss->channels == 0) {
		sfc_err(sa, "RSS is not configured");
		return -EINVAL;
	}

	if (reta_size != EFX_RSS_TBL_SIZE) {
		sfc_err(sa, "RETA size is wrong (should be %u)",
			EFX_RSS_TBL_SIZE);
		return -EINVAL;
	}

	rss_tbl_new = rte_zmalloc("rss_tbl_new", sizeof(rss->tbl), 0);
	if (rss_tbl_new == NULL)
		return -ENOMEM;

	sfc_adapter_lock(sa);

	rte_memcpy(rss_tbl_new, rss->tbl, sizeof(rss->tbl));

	for (entry = 0; entry < reta_size; entry++) {
		int grp_idx = entry % RTE_RETA_GROUP_SIZE;
		struct rte_eth_rss_reta_entry64 *grp;

		grp = &reta_conf[entry / RTE_RETA_GROUP_SIZE];

		if (grp->mask & (1ull << grp_idx)) {
			if (grp->reta[grp_idx] >= rss->channels) {
				rc = EINVAL;
				goto bad_reta_entry;
			}
			rss_tbl_new[entry] = grp->reta[grp_idx];
		}
	}

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
					  rss_tbl_new, EFX_RSS_TBL_SIZE);
		if (rc != 0)
			goto fail_scale_tbl_set;
	}

	rte_memcpy(rss->tbl, rss_tbl_new, sizeof(rss->tbl));

fail_scale_tbl_set:
bad_reta_entry:
	sfc_adapter_unlock(sa);

	rte_free(rss_tbl_new);

	SFC_ASSERT(rc >= 0);
	return -rc;
}
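/*
 * Illustrative note (not part of the original sources): the RETA is
 * exchanged in groups of RTE_RETA_GROUP_SIZE (64) entries, so table
 * entry 70 lives in reta_conf[1].reta[6] and is only touched when bit 6
 * of reta_conf[1].mask is set. Both callbacks above rely on exactly this
 * entry/group/mask indexing.
 */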
static int
sfc_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
		    enum rte_filter_op filter_op,
		    void *arg)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	int rc = ENOTSUP;

	sfc_log_init(sa, "entry");

	switch (filter_type) {
	case RTE_ETH_FILTER_NONE:
		sfc_err(sa, "Global filters configuration not supported");
		break;
	case RTE_ETH_FILTER_MACVLAN:
		sfc_err(sa, "MACVLAN filters not supported");
		break;
	case RTE_ETH_FILTER_ETHERTYPE:
		sfc_err(sa, "EtherType filters not supported");
		break;
	case RTE_ETH_FILTER_FLEXIBLE:
		sfc_err(sa, "Flexible filters not supported");
		break;
	case RTE_ETH_FILTER_SYN:
		sfc_err(sa, "SYN filters not supported");
		break;
	case RTE_ETH_FILTER_NTUPLE:
		sfc_err(sa, "NTUPLE filters not supported");
		break;
	case RTE_ETH_FILTER_TUNNEL:
		sfc_err(sa, "Tunnel filters not supported");
		break;
	case RTE_ETH_FILTER_FDIR:
		sfc_err(sa, "Flow Director filters not supported");
		break;
	case RTE_ETH_FILTER_HASH:
		sfc_err(sa, "Hash filters not supported");
		break;
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET) {
			rc = EINVAL;
		} else {
			*(const void **)arg = &sfc_flow_ops;
			rc = 0;
		}
		break;
	default:
		sfc_err(sa, "Unknown filter type %u", filter_type);
		break;
	}

	sfc_log_init(sa, "exit: %d", -rc);
	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_pool_ops_supported(struct rte_eth_dev *dev, const char *pool)
{
	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);

	/*
	 * If Rx datapath does not provide callback to check mempool,
	 * all pools are supported.
	 */
	if (sap->dp_rx->pool_ops_supported == NULL)
		return 1;

	return sap->dp_rx->pool_ops_supported(pool);
}

static int
sfc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_rxq_info *rxq_info;

	SFC_ASSERT(queue_id < sas->rxq_count);
	rxq_info = &sas->rxq_info[queue_id];

	return sap->dp_rx->intr_enable(rxq_info->dp);
}

static int
sfc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_rxq_info *rxq_info;

	SFC_ASSERT(queue_id < sas->rxq_count);
	rxq_info = &sas->rxq_info[queue_id];

	return sap->dp_rx->intr_disable(rxq_info->dp);
}
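/*
 * Illustrative note (not part of the original sources): the only filter
 * type served above is RTE_ETH_FILTER_GENERIC, through which the ethdev
 * layer of this DPDK generation obtains the rte_flow ops; returning
 * &sfc_flow_ops is what routes rte_flow_create()/rte_flow_destroy()
 * calls on this port to the driver's flow engine in sfc_flow.c.
 */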
static const struct eth_dev_ops sfc_eth_dev_ops = {
	.dev_configure = sfc_dev_configure,
	.dev_start = sfc_dev_start,
	.dev_stop = sfc_dev_stop,
	.dev_set_link_up = sfc_dev_set_link_up,
	.dev_set_link_down = sfc_dev_set_link_down,
	.dev_close = sfc_dev_close,
	.promiscuous_enable = sfc_dev_promisc_enable,
	.promiscuous_disable = sfc_dev_promisc_disable,
	.allmulticast_enable = sfc_dev_allmulti_enable,
	.allmulticast_disable = sfc_dev_allmulti_disable,
	.link_update = sfc_dev_link_update,
	.stats_get = sfc_stats_get,
	.stats_reset = sfc_stats_reset,
	.xstats_get = sfc_xstats_get,
	.xstats_reset = sfc_stats_reset,
	.xstats_get_names = sfc_xstats_get_names,
	.dev_infos_get = sfc_dev_infos_get,
	.dev_supported_ptypes_get = sfc_dev_supported_ptypes_get,
	.mtu_set = sfc_dev_set_mtu,
	.rx_queue_start = sfc_rx_queue_start,
	.rx_queue_stop = sfc_rx_queue_stop,
	.tx_queue_start = sfc_tx_queue_start,
	.tx_queue_stop = sfc_tx_queue_stop,
	.rx_queue_setup = sfc_rx_queue_setup,
	.rx_queue_release = sfc_rx_queue_release,
	.rx_queue_count = sfc_rx_queue_count,
	.rx_descriptor_done = sfc_rx_descriptor_done,
	.rx_descriptor_status = sfc_rx_descriptor_status,
	.tx_descriptor_status = sfc_tx_descriptor_status,
	.rx_queue_intr_enable = sfc_rx_queue_intr_enable,
	.rx_queue_intr_disable = sfc_rx_queue_intr_disable,
	.tx_queue_setup = sfc_tx_queue_setup,
	.tx_queue_release = sfc_tx_queue_release,
	.flow_ctrl_get = sfc_flow_ctrl_get,
	.flow_ctrl_set = sfc_flow_ctrl_set,
	.mac_addr_set = sfc_mac_addr_set,
	.udp_tunnel_port_add = sfc_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del = sfc_dev_udp_tunnel_port_del,
	.reta_update = sfc_dev_rss_reta_update,
	.reta_query = sfc_dev_rss_reta_query,
	.rss_hash_update = sfc_dev_rss_hash_update,
	.rss_hash_conf_get = sfc_dev_rss_hash_conf_get,
	.filter_ctrl = sfc_dev_filter_ctrl,
	.set_mc_addr_list = sfc_set_mc_addr_list,
	.rxq_info_get = sfc_rx_queue_info_get,
	.txq_info_get = sfc_tx_queue_info_get,
	.fw_version_get = sfc_fw_version_get,
	.xstats_get_by_id = sfc_xstats_get_by_id,
	.xstats_get_names_by_id = sfc_xstats_get_names_by_id,
	.pool_ops_supported = sfc_pool_ops_supported,
};

/**
 * Duplicate a string in potentially shared memory required for
 * multi-process support.
 *
 * strdup() allocates from process-local heap/memory.
 */
static char *
sfc_strdup(const char *str)
{
	size_t size;
	char *copy;

	if (str == NULL)
		return NULL;

	size = strlen(str) + 1;
	copy = rte_malloc(__func__, size, 0);
	if (copy != NULL)
		rte_memcpy(copy, str, size);

	return copy;
}
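/*
 * Illustrative note (not part of the original sources): sfc_strdup()
 * exists because datapath names are read from sfc_adapter_shared by
 * secondary processes. A plain strdup() would return a pointer into the
 * primary's private heap, invalid in other processes, while rte_malloc()
 * returns memory from the DPDK shared area, so e.g.
 *
 *	sas->dp_rx_name = sfc_strdup(dp_rx->dp.name);
 *
 * below yields a string that sfc_eth_dev_secondary_init() can read.
 */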
static int
sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	const struct sfc_dp_rx *dp_rx;
	const struct sfc_dp_tx *dp_tx;
	const efx_nic_cfg_t *encp;
	unsigned int avail_caps = 0;
	const char *rx_name = NULL;
	const char *tx_name = NULL;
	int rc;

	switch (sa->family) {
	case EFX_FAMILY_HUNTINGTON:
	case EFX_FAMILY_MEDFORD:
	case EFX_FAMILY_MEDFORD2:
		avail_caps |= SFC_DP_HW_FW_CAP_EF10;
		break;
	default:
		break;
	}

	encp = efx_nic_cfg_get(sa->nic);
	if (encp->enc_rx_es_super_buffer_supported)
		avail_caps |= SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER;

	rc = sfc_kvargs_process(sa, SFC_KVARG_RX_DATAPATH,
				sfc_kvarg_string_handler, &rx_name);
	if (rc != 0)
		goto fail_kvarg_rx_datapath;

	if (rx_name != NULL) {
		dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, rx_name);
		if (dp_rx == NULL) {
			sfc_err(sa, "Rx datapath %s not found", rx_name);
			rc = ENOENT;
			goto fail_dp_rx;
		}
		if (!sfc_dp_match_hw_fw_caps(&dp_rx->dp, avail_caps)) {
			sfc_err(sa,
				"Insufficient Hw/FW capabilities to use Rx datapath %s",
				rx_name);
			rc = EINVAL;
			goto fail_dp_rx_caps;
		}
	} else {
		dp_rx = sfc_dp_find_rx_by_caps(&sfc_dp_head, avail_caps);
		if (dp_rx == NULL) {
			sfc_err(sa, "Rx datapath by caps %#x not found",
				avail_caps);
			rc = ENOENT;
			goto fail_dp_rx;
		}
	}

	sas->dp_rx_name = sfc_strdup(dp_rx->dp.name);
	if (sas->dp_rx_name == NULL) {
		rc = ENOMEM;
		goto fail_dp_rx_name;
	}

	sfc_notice(sa, "use %s Rx datapath", sas->dp_rx_name);

	rc = sfc_kvargs_process(sa, SFC_KVARG_TX_DATAPATH,
				sfc_kvarg_string_handler, &tx_name);
	if (rc != 0)
		goto fail_kvarg_tx_datapath;

	if (tx_name != NULL) {
		dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, tx_name);
		if (dp_tx == NULL) {
			sfc_err(sa, "Tx datapath %s not found", tx_name);
			rc = ENOENT;
			goto fail_dp_tx;
		}
		if (!sfc_dp_match_hw_fw_caps(&dp_tx->dp, avail_caps)) {
			sfc_err(sa,
				"Insufficient Hw/FW capabilities to use Tx datapath %s",
				tx_name);
			rc = EINVAL;
			goto fail_dp_tx_caps;
		}
	} else {
		dp_tx = sfc_dp_find_tx_by_caps(&sfc_dp_head, avail_caps);
		if (dp_tx == NULL) {
			sfc_err(sa, "Tx datapath by caps %#x not found",
				avail_caps);
			rc = ENOENT;
			goto fail_dp_tx;
		}
	}

	sas->dp_tx_name = sfc_strdup(dp_tx->dp.name);
	if (sas->dp_tx_name == NULL) {
		rc = ENOMEM;
		goto fail_dp_tx_name;
	}

	sfc_notice(sa, "use %s Tx datapath", sas->dp_tx_name);

	sa->priv.dp_rx = dp_rx;
	sa->priv.dp_tx = dp_tx;

	dev->rx_pkt_burst = dp_rx->pkt_burst;
	dev->tx_pkt_prepare = dp_tx->pkt_prepare;
	dev->tx_pkt_burst = dp_tx->pkt_burst;

	dev->dev_ops = &sfc_eth_dev_ops;

	return 0;

fail_dp_tx_name:
fail_dp_tx_caps:
fail_dp_tx:
fail_kvarg_tx_datapath:
	rte_free(sas->dp_rx_name);
	sas->dp_rx_name = NULL;

fail_dp_rx_name:
fail_dp_rx_caps:
fail_dp_rx:
fail_kvarg_rx_datapath:
	return rc;
}

static void
sfc_eth_dev_clear_ops(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);

	dev->dev_ops = NULL;
	dev->tx_pkt_prepare = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	rte_free(sas->dp_tx_name);
	sas->dp_tx_name = NULL;
	sa->priv.dp_tx = NULL;

	rte_free(sas->dp_rx_name);
	sas->dp_rx_name = NULL;
	sa->priv.dp_rx = NULL;
}

static const struct eth_dev_ops sfc_eth_dev_secondary_ops = {
	.dev_supported_ptypes_get = sfc_dev_supported_ptypes_get,
	.rx_queue_count = sfc_rx_queue_count,
	.rx_descriptor_done = sfc_rx_descriptor_done,
	.rx_descriptor_status = sfc_rx_descriptor_status,
	.tx_descriptor_status = sfc_tx_descriptor_status,
	.reta_query = sfc_dev_rss_reta_query,
	.rss_hash_conf_get = sfc_dev_rss_hash_conf_get,
	.rxq_info_get = sfc_rx_queue_info_get,
	.txq_info_get = sfc_tx_queue_info_get,
};
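/*
 * Illustrative note (not part of the original sources): the secondary
 * ops table above is deliberately a small subset of sfc_eth_dev_ops.
 * Every callback in it is one of the functions marked earlier as safe
 * for secondary processes, i.e. it reads only sfc_adapter_shared and the
 * per-process sfc_adapter_priv; control-path operations that need the
 * primary's sfc_adapter remain primary-only.
 */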
static void
sfc_eth_dev_clear_ops(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);

	dev->dev_ops = NULL;
	dev->tx_pkt_prepare = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	rte_free(sas->dp_tx_name);
	sas->dp_tx_name = NULL;
	sa->priv.dp_tx = NULL;

	rte_free(sas->dp_rx_name);
	sas->dp_rx_name = NULL;
	sa->priv.dp_rx = NULL;
}

static const struct eth_dev_ops sfc_eth_dev_secondary_ops = {
	.dev_supported_ptypes_get = sfc_dev_supported_ptypes_get,
	.rx_queue_count = sfc_rx_queue_count,
	.rx_descriptor_done = sfc_rx_descriptor_done,
	.rx_descriptor_status = sfc_rx_descriptor_status,
	.tx_descriptor_status = sfc_tx_descriptor_status,
	.reta_query = sfc_dev_rss_reta_query,
	.rss_hash_conf_get = sfc_dev_rss_hash_conf_get,
	.rxq_info_get = sfc_rx_queue_info_get,
	.txq_info_get = sfc_tx_queue_info_get,
};

static int
sfc_eth_dev_secondary_init(struct rte_eth_dev *dev, uint32_t logtype_main)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_adapter_priv *sap;
	const struct sfc_dp_rx *dp_rx;
	const struct sfc_dp_tx *dp_tx;
	int rc;

	/*
	 * Allocate process-private data from the regular heap, since it
	 * must not be located in shared memory allocated using the
	 * rte_malloc() API.
	 */
	sap = calloc(1, sizeof(*sap));
	if (sap == NULL) {
		rc = ENOMEM;
		goto fail_alloc_priv;
	}

	sap->logtype_main = logtype_main;

	dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, sas->dp_rx_name);
	if (dp_rx == NULL) {
		SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
			"cannot find %s Rx datapath", sas->dp_rx_name);
		rc = ENOENT;
		goto fail_dp_rx;
	}
	if (~dp_rx->features & SFC_DP_RX_FEAT_MULTI_PROCESS) {
		SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
			"%s Rx datapath does not support multi-process",
			sas->dp_rx_name);
		rc = EINVAL;
		goto fail_dp_rx_multi_process;
	}

	dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, sas->dp_tx_name);
	if (dp_tx == NULL) {
		SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
			"cannot find %s Tx datapath", sas->dp_tx_name);
		rc = ENOENT;
		goto fail_dp_tx;
	}
	if (~dp_tx->features & SFC_DP_TX_FEAT_MULTI_PROCESS) {
		SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
			"%s Tx datapath does not support multi-process",
			sas->dp_tx_name);
		rc = EINVAL;
		goto fail_dp_tx_multi_process;
	}

	sap->dp_rx = dp_rx;
	sap->dp_tx = dp_tx;

	dev->process_private = sap;
	dev->rx_pkt_burst = dp_rx->pkt_burst;
	dev->tx_pkt_prepare = dp_tx->pkt_prepare;
	dev->tx_pkt_burst = dp_tx->pkt_burst;
	dev->dev_ops = &sfc_eth_dev_secondary_ops;

	return 0;

fail_dp_tx_multi_process:
fail_dp_tx:
fail_dp_rx_multi_process:
fail_dp_rx:
	free(sap);

fail_alloc_priv:
	return rc;
}

static void
sfc_eth_dev_secondary_clear_ops(struct rte_eth_dev *dev)
{
	free(dev->process_private);
	dev->process_private = NULL;
	dev->dev_ops = NULL;
	dev->tx_pkt_prepare = NULL;
	dev->tx_pkt_burst = NULL;
	dev->rx_pkt_burst = NULL;
}

static void
sfc_register_dp(void)
{
	/* Register once */
	if (TAILQ_EMPTY(&sfc_dp_head)) {
		/* Prefer EF10 datapath */
		sfc_dp_register(&sfc_dp_head, &sfc_ef10_essb_rx.dp);
		sfc_dp_register(&sfc_dp_head, &sfc_ef10_rx.dp);
		sfc_dp_register(&sfc_dp_head, &sfc_efx_rx.dp);

		sfc_dp_register(&sfc_dp_head, &sfc_ef10_tx.dp);
		sfc_dp_register(&sfc_dp_head, &sfc_efx_tx.dp);
		sfc_dp_register(&sfc_dp_head, &sfc_ef10_simple_tx.dp);
	}
}
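/*
 * Note, not part of the driver: sfc_dp_find_rx_by_caps() and
 * sfc_dp_find_tx_by_caps() are assumed to return the first registered
 * entry whose HW/FW capability requirements are met, so the registration
 * order above doubles as the preference order: the EF10 equal-stride
 * super-buffer Rx datapath first, then EF10, then the generic libefx one.
 */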
static int
sfc_eth_dev_init(struct rte_eth_dev *dev)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	uint32_t logtype_main;
	struct sfc_adapter *sa;
	int rc;
	const efx_nic_cfg_t *encp;
	const struct rte_ether_addr *from;

	sfc_register_dp();

	logtype_main = sfc_register_logtype(&pci_dev->addr,
					    SFC_LOGTYPE_MAIN_STR,
					    RTE_LOG_NOTICE);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -sfc_eth_dev_secondary_init(dev, logtype_main);

	/* Required for logging */
	sas->pci_addr = pci_dev->addr;
	sas->port_id = dev->data->port_id;

	/*
	 * Allocate process-private data from the regular heap, since it
	 * must not be located in shared memory allocated using the
	 * rte_malloc() API.
	 */
	sa = calloc(1, sizeof(*sa));
	if (sa == NULL) {
		rc = ENOMEM;
		goto fail_alloc_sa;
	}

	dev->process_private = sa;

	/* Required for logging */
	sa->priv.shared = sas;
	sa->priv.logtype_main = logtype_main;

	sa->eth_dev = dev;

	/* Copy PCI device info to the dev->data */
	rte_eth_copy_pci_info(dev, pci_dev);

	rc = sfc_kvargs_parse(sa);
	if (rc != 0)
		goto fail_kvargs_parse;

	sfc_log_init(sa, "entry");

	dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	dev->data->mac_addrs = rte_zmalloc("sfc", RTE_ETHER_ADDR_LEN, 0);
	if (dev->data->mac_addrs == NULL) {
		rc = ENOMEM;
		goto fail_mac_addrs;
	}

	sfc_adapter_lock_init(sa);
	sfc_adapter_lock(sa);

	sfc_log_init(sa, "probing");
	rc = sfc_probe(sa);
	if (rc != 0)
		goto fail_probe;

	sfc_log_init(sa, "set device ops");
	rc = sfc_eth_dev_set_ops(dev);
	if (rc != 0)
		goto fail_set_ops;

	sfc_log_init(sa, "attaching");
	rc = sfc_attach(sa);
	if (rc != 0)
		goto fail_attach;

	encp = efx_nic_cfg_get(sa->nic);

	/*
	 * The argument order of rte_ether_addr_copy() is (from, to),
	 * i.e. the reverse of the Linux kernel convention. Copy the MAC
	 * address from the NIC config to the Ethernet device data.
	 */
	from = (const struct rte_ether_addr *)(encp->enc_mac_addr);
	rte_ether_addr_copy(from, &dev->data->mac_addrs[0]);

	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
	return 0;

fail_attach:
	sfc_eth_dev_clear_ops(dev);

fail_set_ops:
	sfc_unprobe(sa);

fail_probe:
	sfc_adapter_unlock(sa);
	sfc_adapter_lock_fini(sa);
	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

fail_mac_addrs:
	sfc_kvargs_cleanup(sa);

fail_kvargs_parse:
	sfc_log_init(sa, "failed %d", rc);
	dev->process_private = NULL;
	free(sa);

fail_alloc_sa:
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_eth_dev_uninit(struct rte_eth_dev *dev)
{
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		sfc_eth_dev_secondary_clear_ops(dev);
		return 0;
	}

	sfc_dev_close(dev);

	return 0;
}
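/*
 * Note, not part of the driver: since RTE_ETH_DEV_CLOSE_REMOVE is set in
 * sfc_eth_dev_init(), rte_eth_dev_close() releases all port resources,
 * which is why the primary-process uninit path above can delegate the
 * whole teardown to sfc_dev_close(). The fail_* labels unwind strictly
 * in reverse order of the corresponding setup steps.
 */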
static const struct rte_pci_id pci_id_sfc_efx_map[] = {
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE_VF) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT_VF) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD_VF) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2_VF) },
	{ .vendor_id = 0 /* sentinel */ }
};

static int sfc_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct sfc_adapter_shared), sfc_eth_dev_init);
}

static int sfc_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, sfc_eth_dev_uninit);
}

static struct rte_pci_driver sfc_efx_pmd = {
	.id_table = pci_id_sfc_efx_map,
	.drv_flags =
		RTE_PCI_DRV_INTR_LSC |
		RTE_PCI_DRV_NEED_MAPPING,
	.probe = sfc_eth_dev_pci_probe,
	.remove = sfc_eth_dev_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_sfc_efx, sfc_efx_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map);
RTE_PMD_REGISTER_KMOD_DEP(net_sfc_efx, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx,
	SFC_KVARG_RX_DATAPATH "=" SFC_KVARG_VALUES_RX_DATAPATH " "
	SFC_KVARG_TX_DATAPATH "=" SFC_KVARG_VALUES_TX_DATAPATH " "
	SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " "
	SFC_KVARG_FW_VARIANT "=" SFC_KVARG_VALUES_FW_VARIANT " "
	SFC_KVARG_RXD_WAIT_TIMEOUT_NS "=<long> "
	SFC_KVARG_STATS_UPDATE_PERIOD_MS "=<long>");

RTE_INIT(sfc_driver_register_logtype)
{
	int ret;

	ret = rte_log_register_type_and_pick_level(SFC_LOGTYPE_PREFIX "driver",
						   RTE_LOG_NOTICE);
	sfc_logtype_driver = (ret < 0) ? RTE_LOGTYPE_PMD : ret;
}
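/*
 * Usage sketch, not part of the driver: assuming SFC_LOGTYPE_PREFIX
 * expands to "pmd.net.sfc.", the driver log level registered above can
 * be raised from the command line via the EAL option, e.g.:
 *
 *   testpmd --log-level=pmd.net.sfc.driver:debug -- -i
 */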