1 /* SPDX-License-Identifier: BSD-3-Clause 2 * 3 * Copyright (c) 2016-2018 Solarflare Communications Inc. 4 * All rights reserved. 5 * 6 * This software was jointly developed between OKTET Labs (under contract 7 * for Solarflare) and Solarflare Communications, Inc. 8 */ 9 10 #include <rte_dev.h> 11 #include <rte_ethdev_driver.h> 12 #include <rte_ethdev_pci.h> 13 #include <rte_pci.h> 14 #include <rte_bus_pci.h> 15 #include <rte_errno.h> 16 17 #include "efx.h" 18 19 #include "sfc.h" 20 #include "sfc_debug.h" 21 #include "sfc_log.h" 22 #include "sfc_kvargs.h" 23 #include "sfc_ev.h" 24 #include "sfc_rx.h" 25 #include "sfc_tx.h" 26 #include "sfc_flow.h" 27 #include "sfc_dp.h" 28 #include "sfc_dp_rx.h" 29 30 uint32_t sfc_logtype_driver; 31 32 static struct sfc_dp_list sfc_dp_head = 33 TAILQ_HEAD_INITIALIZER(sfc_dp_head); 34 35 static int 36 sfc_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) 37 { 38 struct sfc_adapter *sa = dev->data->dev_private; 39 efx_nic_fw_info_t enfi; 40 int ret; 41 int rc; 42 43 /* 44 * Return value of the callback is likely supposed to be 45 * equal to or greater than 0, nevertheless, if an error 46 * occurs, it will be desirable to pass it to the caller 47 */ 48 if ((fw_version == NULL) || (fw_size == 0)) 49 return -EINVAL; 50 51 rc = efx_nic_get_fw_version(sa->nic, &enfi); 52 if (rc != 0) 53 return -rc; 54 55 ret = snprintf(fw_version, fw_size, 56 "%" PRIu16 ".%" PRIu16 ".%" PRIu16 ".%" PRIu16, 57 enfi.enfi_mc_fw_version[0], enfi.enfi_mc_fw_version[1], 58 enfi.enfi_mc_fw_version[2], enfi.enfi_mc_fw_version[3]); 59 if (ret < 0) 60 return ret; 61 62 if (enfi.enfi_dpcpu_fw_ids_valid) { 63 size_t dpcpu_fw_ids_offset = MIN(fw_size - 1, (size_t)ret); 64 int ret_extra; 65 66 ret_extra = snprintf(fw_version + dpcpu_fw_ids_offset, 67 fw_size - dpcpu_fw_ids_offset, 68 " rx%" PRIx16 " tx%" PRIx16, 69 enfi.enfi_rx_dpcpu_fw_id, 70 enfi.enfi_tx_dpcpu_fw_id); 71 if (ret_extra < 0) 72 return ret_extra; 73 74 ret += ret_extra; 75 } 76 77 if (fw_size < (size_t)(++ret)) 78 return ret; 79 else 80 return 0; 81 } 82 83 static void 84 sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 85 { 86 struct sfc_adapter *sa = dev->data->dev_private; 87 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); 88 struct sfc_rss *rss = &sa->rss; 89 uint64_t txq_offloads_def = 0; 90 91 sfc_log_init(sa, "entry"); 92 93 dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX; 94 95 /* Autonegotiation may be disabled */ 96 dev_info->speed_capa = ETH_LINK_SPEED_FIXED; 97 if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_1000FDX) 98 dev_info->speed_capa |= ETH_LINK_SPEED_1G; 99 if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_10000FDX) 100 dev_info->speed_capa |= ETH_LINK_SPEED_10G; 101 if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_25000FDX) 102 dev_info->speed_capa |= ETH_LINK_SPEED_25G; 103 if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_40000FDX) 104 dev_info->speed_capa |= ETH_LINK_SPEED_40G; 105 if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_50000FDX) 106 dev_info->speed_capa |= ETH_LINK_SPEED_50G; 107 if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_100000FDX) 108 dev_info->speed_capa |= ETH_LINK_SPEED_100G; 109 110 dev_info->max_rx_queues = sa->rxq_max; 111 dev_info->max_tx_queues = sa->txq_max; 112 113 /* By default packets are dropped if no descriptors are available */ 114 dev_info->default_rxconf.rx_drop_en = 1; 115 116 dev_info->rx_queue_offload_capa = sfc_rx_get_queue_offload_caps(sa); 117 118 /* 119 * rx_offload_capa includes both device and queue offloads since 120 * the latter may be requested on a per device basis which makes 121 * sense when some offloads are needed to be set on all queues. 122 */ 123 dev_info->rx_offload_capa = sfc_rx_get_dev_offload_caps(sa) | 124 dev_info->rx_queue_offload_capa; 125 126 dev_info->tx_queue_offload_capa = sfc_tx_get_queue_offload_caps(sa); 127 128 /* 129 * tx_offload_capa includes both device and queue offloads since 130 * the latter may be requested on a per device basis which makes 131 * sense when some offloads are needed to be set on all queues. 132 */ 133 dev_info->tx_offload_capa = sfc_tx_get_dev_offload_caps(sa) | 134 dev_info->tx_queue_offload_capa; 135 136 if (dev_info->tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE) 137 txq_offloads_def |= DEV_TX_OFFLOAD_MBUF_FAST_FREE; 138 139 dev_info->default_txconf.offloads |= txq_offloads_def; 140 141 dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOXSUMSCTP; 142 if ((~sa->dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) || 143 !encp->enc_hw_tx_insert_vlan_enabled) 144 dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOVLANOFFL; 145 146 if (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG) 147 dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS; 148 149 if (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_POOL) 150 dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOMULTMEMP; 151 152 if (~sa->dp_tx->features & SFC_DP_TX_FEAT_REFCNT) 153 dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOREFCOUNT; 154 155 if (rss->context_type != EFX_RX_SCALE_UNAVAILABLE) { 156 uint64_t rte_hf = 0; 157 unsigned int i; 158 159 for (i = 0; i < rss->hf_map_nb_entries; ++i) 160 rte_hf |= rss->hf_map[i].rte; 161 162 dev_info->reta_size = EFX_RSS_TBL_SIZE; 163 dev_info->hash_key_size = EFX_RSS_KEY_SIZE; 164 dev_info->flow_type_rss_offloads = rte_hf; 165 } 166 167 /* Initialize to hardware limits */ 168 dev_info->rx_desc_lim.nb_max = EFX_RXQ_MAXNDESCS; 169 dev_info->rx_desc_lim.nb_min = EFX_RXQ_MINNDESCS; 170 /* The RXQ hardware requires that the descriptor count is a power 171 * of 2, but rx_desc_lim cannot properly describe that constraint. 172 */ 173 dev_info->rx_desc_lim.nb_align = EFX_RXQ_MINNDESCS; 174 175 /* Initialize to hardware limits */ 176 dev_info->tx_desc_lim.nb_max = sa->txq_max_entries; 177 dev_info->tx_desc_lim.nb_min = EFX_TXQ_MINNDESCS; 178 /* 179 * The TXQ hardware requires that the descriptor count is a power 180 * of 2, but tx_desc_lim cannot properly describe that constraint 181 */ 182 dev_info->tx_desc_lim.nb_align = EFX_TXQ_MINNDESCS; 183 184 if (sa->dp_rx->get_dev_info != NULL) 185 sa->dp_rx->get_dev_info(dev_info); 186 if (sa->dp_tx->get_dev_info != NULL) 187 sa->dp_tx->get_dev_info(dev_info); 188 } 189 190 static const uint32_t * 191 sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev) 192 { 193 struct sfc_adapter *sa = dev->data->dev_private; 194 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); 195 uint32_t tunnel_encaps = encp->enc_tunnel_encapsulations_supported; 196 197 return sa->dp_rx->supported_ptypes_get(tunnel_encaps); 198 } 199 200 static int 201 sfc_dev_configure(struct rte_eth_dev *dev) 202 { 203 struct rte_eth_dev_data *dev_data = dev->data; 204 struct sfc_adapter *sa = dev_data->dev_private; 205 int rc; 206 207 sfc_log_init(sa, "entry n_rxq=%u n_txq=%u", 208 dev_data->nb_rx_queues, dev_data->nb_tx_queues); 209 210 sfc_adapter_lock(sa); 211 switch (sa->state) { 212 case SFC_ADAPTER_CONFIGURED: 213 /* FALLTHROUGH */ 214 case SFC_ADAPTER_INITIALIZED: 215 rc = sfc_configure(sa); 216 break; 217 default: 218 sfc_err(sa, "unexpected adapter state %u to configure", 219 sa->state); 220 rc = EINVAL; 221 break; 222 } 223 sfc_adapter_unlock(sa); 224 225 sfc_log_init(sa, "done %d", rc); 226 SFC_ASSERT(rc >= 0); 227 return -rc; 228 } 229 230 static int 231 sfc_dev_start(struct rte_eth_dev *dev) 232 { 233 struct sfc_adapter *sa = dev->data->dev_private; 234 int rc; 235 236 sfc_log_init(sa, "entry"); 237 238 sfc_adapter_lock(sa); 239 rc = sfc_start(sa); 240 sfc_adapter_unlock(sa); 241 242 sfc_log_init(sa, "done %d", rc); 243 SFC_ASSERT(rc >= 0); 244 return -rc; 245 } 246 247 static int 248 sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) 249 { 250 struct sfc_adapter *sa = dev->data->dev_private; 251 struct rte_eth_link current_link; 252 int ret; 253 254 sfc_log_init(sa, "entry"); 255 256 if (sa->state != SFC_ADAPTER_STARTED) { 257 sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, ¤t_link); 258 } else if (wait_to_complete) { 259 efx_link_mode_t link_mode; 260 261 if (efx_port_poll(sa->nic, &link_mode) != 0) 262 link_mode = EFX_LINK_UNKNOWN; 263 sfc_port_link_mode_to_info(link_mode, ¤t_link); 264 265 } else { 266 sfc_ev_mgmt_qpoll(sa); 267 rte_eth_linkstatus_get(dev, ¤t_link); 268 } 269 270 ret = rte_eth_linkstatus_set(dev, ¤t_link); 271 if (ret == 0) 272 sfc_notice(sa, "Link status is %s", 273 current_link.link_status ? "UP" : "DOWN"); 274 275 return ret; 276 } 277 278 static void 279 sfc_dev_stop(struct rte_eth_dev *dev) 280 { 281 struct sfc_adapter *sa = dev->data->dev_private; 282 283 sfc_log_init(sa, "entry"); 284 285 sfc_adapter_lock(sa); 286 sfc_stop(sa); 287 sfc_adapter_unlock(sa); 288 289 sfc_log_init(sa, "done"); 290 } 291 292 static int 293 sfc_dev_set_link_up(struct rte_eth_dev *dev) 294 { 295 struct sfc_adapter *sa = dev->data->dev_private; 296 int rc; 297 298 sfc_log_init(sa, "entry"); 299 300 sfc_adapter_lock(sa); 301 rc = sfc_start(sa); 302 sfc_adapter_unlock(sa); 303 304 SFC_ASSERT(rc >= 0); 305 return -rc; 306 } 307 308 static int 309 sfc_dev_set_link_down(struct rte_eth_dev *dev) 310 { 311 struct sfc_adapter *sa = dev->data->dev_private; 312 313 sfc_log_init(sa, "entry"); 314 315 sfc_adapter_lock(sa); 316 sfc_stop(sa); 317 sfc_adapter_unlock(sa); 318 319 return 0; 320 } 321 322 static void 323 sfc_dev_close(struct rte_eth_dev *dev) 324 { 325 struct sfc_adapter *sa = dev->data->dev_private; 326 327 sfc_log_init(sa, "entry"); 328 329 sfc_adapter_lock(sa); 330 switch (sa->state) { 331 case SFC_ADAPTER_STARTED: 332 sfc_stop(sa); 333 SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED); 334 /* FALLTHROUGH */ 335 case SFC_ADAPTER_CONFIGURED: 336 sfc_close(sa); 337 SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED); 338 /* FALLTHROUGH */ 339 case SFC_ADAPTER_INITIALIZED: 340 break; 341 default: 342 sfc_err(sa, "unexpected adapter state %u on close", sa->state); 343 break; 344 } 345 sfc_adapter_unlock(sa); 346 347 sfc_log_init(sa, "done"); 348 } 349 350 static void 351 sfc_dev_filter_set(struct rte_eth_dev *dev, enum sfc_dev_filter_mode mode, 352 boolean_t enabled) 353 { 354 struct sfc_port *port; 355 boolean_t *toggle; 356 struct sfc_adapter *sa = dev->data->dev_private; 357 boolean_t allmulti = (mode == SFC_DEV_FILTER_MODE_ALLMULTI); 358 const char *desc = (allmulti) ? "all-multi" : "promiscuous"; 359 360 sfc_adapter_lock(sa); 361 362 port = &sa->port; 363 toggle = (allmulti) ? (&port->allmulti) : (&port->promisc); 364 365 if (*toggle != enabled) { 366 *toggle = enabled; 367 368 if (port->isolated) { 369 sfc_warn(sa, "isolated mode is active on the port"); 370 sfc_warn(sa, "the change is to be applied on the next " 371 "start provided that isolated mode is " 372 "disabled prior the next start"); 373 } else if ((sa->state == SFC_ADAPTER_STARTED) && 374 (sfc_set_rx_mode(sa) != 0)) { 375 *toggle = !(enabled); 376 sfc_warn(sa, "Failed to %s %s mode", 377 ((enabled) ? "enable" : "disable"), desc); 378 } 379 } 380 381 sfc_adapter_unlock(sa); 382 } 383 384 static void 385 sfc_dev_promisc_enable(struct rte_eth_dev *dev) 386 { 387 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_TRUE); 388 } 389 390 static void 391 sfc_dev_promisc_disable(struct rte_eth_dev *dev) 392 { 393 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_FALSE); 394 } 395 396 static void 397 sfc_dev_allmulti_enable(struct rte_eth_dev *dev) 398 { 399 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_TRUE); 400 } 401 402 static void 403 sfc_dev_allmulti_disable(struct rte_eth_dev *dev) 404 { 405 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_FALSE); 406 } 407 408 static int 409 sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, 410 uint16_t nb_rx_desc, unsigned int socket_id, 411 const struct rte_eth_rxconf *rx_conf, 412 struct rte_mempool *mb_pool) 413 { 414 struct sfc_adapter *sa = dev->data->dev_private; 415 int rc; 416 417 sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u", 418 rx_queue_id, nb_rx_desc, socket_id); 419 420 sfc_adapter_lock(sa); 421 422 rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id, 423 rx_conf, mb_pool); 424 if (rc != 0) 425 goto fail_rx_qinit; 426 427 dev->data->rx_queues[rx_queue_id] = sa->rxq_info[rx_queue_id].rxq->dp; 428 429 sfc_adapter_unlock(sa); 430 431 return 0; 432 433 fail_rx_qinit: 434 sfc_adapter_unlock(sa); 435 SFC_ASSERT(rc > 0); 436 return -rc; 437 } 438 439 static void 440 sfc_rx_queue_release(void *queue) 441 { 442 struct sfc_dp_rxq *dp_rxq = queue; 443 struct sfc_rxq *rxq; 444 struct sfc_adapter *sa; 445 unsigned int sw_index; 446 447 if (dp_rxq == NULL) 448 return; 449 450 rxq = sfc_rxq_by_dp_rxq(dp_rxq); 451 sa = rxq->evq->sa; 452 sfc_adapter_lock(sa); 453 454 sw_index = sfc_rxq_sw_index(rxq); 455 456 sfc_log_init(sa, "RxQ=%u", sw_index); 457 458 sa->eth_dev->data->rx_queues[sw_index] = NULL; 459 460 sfc_rx_qfini(sa, sw_index); 461 462 sfc_adapter_unlock(sa); 463 } 464 465 static int 466 sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, 467 uint16_t nb_tx_desc, unsigned int socket_id, 468 const struct rte_eth_txconf *tx_conf) 469 { 470 struct sfc_adapter *sa = dev->data->dev_private; 471 int rc; 472 473 sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u", 474 tx_queue_id, nb_tx_desc, socket_id); 475 476 sfc_adapter_lock(sa); 477 478 rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf); 479 if (rc != 0) 480 goto fail_tx_qinit; 481 482 dev->data->tx_queues[tx_queue_id] = sa->txq_info[tx_queue_id].txq->dp; 483 484 sfc_adapter_unlock(sa); 485 return 0; 486 487 fail_tx_qinit: 488 sfc_adapter_unlock(sa); 489 SFC_ASSERT(rc > 0); 490 return -rc; 491 } 492 493 static void 494 sfc_tx_queue_release(void *queue) 495 { 496 struct sfc_dp_txq *dp_txq = queue; 497 struct sfc_txq *txq; 498 unsigned int sw_index; 499 struct sfc_adapter *sa; 500 501 if (dp_txq == NULL) 502 return; 503 504 txq = sfc_txq_by_dp_txq(dp_txq); 505 sw_index = sfc_txq_sw_index(txq); 506 507 SFC_ASSERT(txq->evq != NULL); 508 sa = txq->evq->sa; 509 510 sfc_log_init(sa, "TxQ = %u", sw_index); 511 512 sfc_adapter_lock(sa); 513 514 SFC_ASSERT(sw_index < sa->eth_dev->data->nb_tx_queues); 515 sa->eth_dev->data->tx_queues[sw_index] = NULL; 516 517 sfc_tx_qfini(sa, sw_index); 518 519 sfc_adapter_unlock(sa); 520 } 521 522 static int 523 sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 524 { 525 struct sfc_adapter *sa = dev->data->dev_private; 526 struct sfc_port *port = &sa->port; 527 uint64_t *mac_stats; 528 int ret; 529 530 rte_spinlock_lock(&port->mac_stats_lock); 531 532 ret = sfc_port_update_mac_stats(sa); 533 if (ret != 0) 534 goto unlock; 535 536 mac_stats = port->mac_stats_buf; 537 538 if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, 539 EFX_MAC_VADAPTER_RX_UNICAST_PACKETS)) { 540 stats->ipackets = 541 mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS] + 542 mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS] + 543 mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS]; 544 stats->opackets = 545 mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS] + 546 mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS] + 547 mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS]; 548 stats->ibytes = 549 mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_BYTES] + 550 mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES] + 551 mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES]; 552 stats->obytes = 553 mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_BYTES] + 554 mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES] + 555 mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES]; 556 stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_OVERFLOW]; 557 stats->ierrors = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS]; 558 stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS]; 559 } else { 560 stats->ipackets = mac_stats[EFX_MAC_RX_PKTS]; 561 stats->opackets = mac_stats[EFX_MAC_TX_PKTS]; 562 stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS]; 563 stats->obytes = mac_stats[EFX_MAC_TX_OCTETS]; 564 /* 565 * Take into account stats which are whenever supported 566 * on EF10. If some stat is not supported by current 567 * firmware variant or HW revision, it is guaranteed 568 * to be zero in mac_stats. 569 */ 570 stats->imissed = 571 mac_stats[EFX_MAC_RX_NODESC_DROP_CNT] + 572 mac_stats[EFX_MAC_PM_TRUNC_BB_OVERFLOW] + 573 mac_stats[EFX_MAC_PM_DISCARD_BB_OVERFLOW] + 574 mac_stats[EFX_MAC_PM_TRUNC_VFIFO_FULL] + 575 mac_stats[EFX_MAC_PM_DISCARD_VFIFO_FULL] + 576 mac_stats[EFX_MAC_PM_TRUNC_QBB] + 577 mac_stats[EFX_MAC_PM_DISCARD_QBB] + 578 mac_stats[EFX_MAC_PM_DISCARD_MAPPING] + 579 mac_stats[EFX_MAC_RXDP_Q_DISABLED_PKTS] + 580 mac_stats[EFX_MAC_RXDP_DI_DROPPED_PKTS]; 581 stats->ierrors = 582 mac_stats[EFX_MAC_RX_FCS_ERRORS] + 583 mac_stats[EFX_MAC_RX_ALIGN_ERRORS] + 584 mac_stats[EFX_MAC_RX_JABBER_PKTS]; 585 /* no oerrors counters supported on EF10 */ 586 } 587 588 unlock: 589 rte_spinlock_unlock(&port->mac_stats_lock); 590 SFC_ASSERT(ret >= 0); 591 return -ret; 592 } 593 594 static void 595 sfc_stats_reset(struct rte_eth_dev *dev) 596 { 597 struct sfc_adapter *sa = dev->data->dev_private; 598 struct sfc_port *port = &sa->port; 599 int rc; 600 601 if (sa->state != SFC_ADAPTER_STARTED) { 602 /* 603 * The operation cannot be done if port is not started; it 604 * will be scheduled to be done during the next port start 605 */ 606 port->mac_stats_reset_pending = B_TRUE; 607 return; 608 } 609 610 rc = sfc_port_reset_mac_stats(sa); 611 if (rc != 0) 612 sfc_err(sa, "failed to reset statistics (rc = %d)", rc); 613 } 614 615 static int 616 sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 617 unsigned int xstats_count) 618 { 619 struct sfc_adapter *sa = dev->data->dev_private; 620 struct sfc_port *port = &sa->port; 621 uint64_t *mac_stats; 622 int rc; 623 unsigned int i; 624 int nstats = 0; 625 626 rte_spinlock_lock(&port->mac_stats_lock); 627 628 rc = sfc_port_update_mac_stats(sa); 629 if (rc != 0) { 630 SFC_ASSERT(rc > 0); 631 nstats = -rc; 632 goto unlock; 633 } 634 635 mac_stats = port->mac_stats_buf; 636 637 for (i = 0; i < EFX_MAC_NSTATS; ++i) { 638 if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) { 639 if (xstats != NULL && nstats < (int)xstats_count) { 640 xstats[nstats].id = nstats; 641 xstats[nstats].value = mac_stats[i]; 642 } 643 nstats++; 644 } 645 } 646 647 unlock: 648 rte_spinlock_unlock(&port->mac_stats_lock); 649 650 return nstats; 651 } 652 653 static int 654 sfc_xstats_get_names(struct rte_eth_dev *dev, 655 struct rte_eth_xstat_name *xstats_names, 656 unsigned int xstats_count) 657 { 658 struct sfc_adapter *sa = dev->data->dev_private; 659 struct sfc_port *port = &sa->port; 660 unsigned int i; 661 unsigned int nstats = 0; 662 663 for (i = 0; i < EFX_MAC_NSTATS; ++i) { 664 if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) { 665 if (xstats_names != NULL && nstats < xstats_count) 666 strncpy(xstats_names[nstats].name, 667 efx_mac_stat_name(sa->nic, i), 668 sizeof(xstats_names[0].name)); 669 nstats++; 670 } 671 } 672 673 return nstats; 674 } 675 676 static int 677 sfc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 678 uint64_t *values, unsigned int n) 679 { 680 struct sfc_adapter *sa = dev->data->dev_private; 681 struct sfc_port *port = &sa->port; 682 uint64_t *mac_stats; 683 unsigned int nb_supported = 0; 684 unsigned int nb_written = 0; 685 unsigned int i; 686 int ret; 687 int rc; 688 689 if (unlikely(values == NULL) || 690 unlikely((ids == NULL) && (n < port->mac_stats_nb_supported))) 691 return port->mac_stats_nb_supported; 692 693 rte_spinlock_lock(&port->mac_stats_lock); 694 695 rc = sfc_port_update_mac_stats(sa); 696 if (rc != 0) { 697 SFC_ASSERT(rc > 0); 698 ret = -rc; 699 goto unlock; 700 } 701 702 mac_stats = port->mac_stats_buf; 703 704 for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < n); ++i) { 705 if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) 706 continue; 707 708 if ((ids == NULL) || (ids[nb_written] == nb_supported)) 709 values[nb_written++] = mac_stats[i]; 710 711 ++nb_supported; 712 } 713 714 ret = nb_written; 715 716 unlock: 717 rte_spinlock_unlock(&port->mac_stats_lock); 718 719 return ret; 720 } 721 722 static int 723 sfc_xstats_get_names_by_id(struct rte_eth_dev *dev, 724 struct rte_eth_xstat_name *xstats_names, 725 const uint64_t *ids, unsigned int size) 726 { 727 struct sfc_adapter *sa = dev->data->dev_private; 728 struct sfc_port *port = &sa->port; 729 unsigned int nb_supported = 0; 730 unsigned int nb_written = 0; 731 unsigned int i; 732 733 if (unlikely(xstats_names == NULL) || 734 unlikely((ids == NULL) && (size < port->mac_stats_nb_supported))) 735 return port->mac_stats_nb_supported; 736 737 for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < size); ++i) { 738 if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) 739 continue; 740 741 if ((ids == NULL) || (ids[nb_written] == nb_supported)) { 742 char *name = xstats_names[nb_written++].name; 743 744 strncpy(name, efx_mac_stat_name(sa->nic, i), 745 sizeof(xstats_names[0].name)); 746 name[sizeof(xstats_names[0].name) - 1] = '\0'; 747 } 748 749 ++nb_supported; 750 } 751 752 return nb_written; 753 } 754 755 static int 756 sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 757 { 758 struct sfc_adapter *sa = dev->data->dev_private; 759 unsigned int wanted_fc, link_fc; 760 761 memset(fc_conf, 0, sizeof(*fc_conf)); 762 763 sfc_adapter_lock(sa); 764 765 if (sa->state == SFC_ADAPTER_STARTED) 766 efx_mac_fcntl_get(sa->nic, &wanted_fc, &link_fc); 767 else 768 link_fc = sa->port.flow_ctrl; 769 770 switch (link_fc) { 771 case 0: 772 fc_conf->mode = RTE_FC_NONE; 773 break; 774 case EFX_FCNTL_RESPOND: 775 fc_conf->mode = RTE_FC_RX_PAUSE; 776 break; 777 case EFX_FCNTL_GENERATE: 778 fc_conf->mode = RTE_FC_TX_PAUSE; 779 break; 780 case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE): 781 fc_conf->mode = RTE_FC_FULL; 782 break; 783 default: 784 sfc_err(sa, "%s: unexpected flow control value %#x", 785 __func__, link_fc); 786 } 787 788 fc_conf->autoneg = sa->port.flow_ctrl_autoneg; 789 790 sfc_adapter_unlock(sa); 791 792 return 0; 793 } 794 795 static int 796 sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 797 { 798 struct sfc_adapter *sa = dev->data->dev_private; 799 struct sfc_port *port = &sa->port; 800 unsigned int fcntl; 801 int rc; 802 803 if (fc_conf->high_water != 0 || fc_conf->low_water != 0 || 804 fc_conf->pause_time != 0 || fc_conf->send_xon != 0 || 805 fc_conf->mac_ctrl_frame_fwd != 0) { 806 sfc_err(sa, "unsupported flow control settings specified"); 807 rc = EINVAL; 808 goto fail_inval; 809 } 810 811 switch (fc_conf->mode) { 812 case RTE_FC_NONE: 813 fcntl = 0; 814 break; 815 case RTE_FC_RX_PAUSE: 816 fcntl = EFX_FCNTL_RESPOND; 817 break; 818 case RTE_FC_TX_PAUSE: 819 fcntl = EFX_FCNTL_GENERATE; 820 break; 821 case RTE_FC_FULL: 822 fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE; 823 break; 824 default: 825 rc = EINVAL; 826 goto fail_inval; 827 } 828 829 sfc_adapter_lock(sa); 830 831 if (sa->state == SFC_ADAPTER_STARTED) { 832 rc = efx_mac_fcntl_set(sa->nic, fcntl, fc_conf->autoneg); 833 if (rc != 0) 834 goto fail_mac_fcntl_set; 835 } 836 837 port->flow_ctrl = fcntl; 838 port->flow_ctrl_autoneg = fc_conf->autoneg; 839 840 sfc_adapter_unlock(sa); 841 842 return 0; 843 844 fail_mac_fcntl_set: 845 sfc_adapter_unlock(sa); 846 fail_inval: 847 SFC_ASSERT(rc > 0); 848 return -rc; 849 } 850 851 static int 852 sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) 853 { 854 struct sfc_adapter *sa = dev->data->dev_private; 855 size_t pdu = EFX_MAC_PDU(mtu); 856 size_t old_pdu; 857 int rc; 858 859 sfc_log_init(sa, "mtu=%u", mtu); 860 861 rc = EINVAL; 862 if (pdu < EFX_MAC_PDU_MIN) { 863 sfc_err(sa, "too small MTU %u (PDU size %u less than min %u)", 864 (unsigned int)mtu, (unsigned int)pdu, 865 EFX_MAC_PDU_MIN); 866 goto fail_inval; 867 } 868 if (pdu > EFX_MAC_PDU_MAX) { 869 sfc_err(sa, "too big MTU %u (PDU size %u greater than max %u)", 870 (unsigned int)mtu, (unsigned int)pdu, 871 EFX_MAC_PDU_MAX); 872 goto fail_inval; 873 } 874 875 sfc_adapter_lock(sa); 876 877 if (pdu != sa->port.pdu) { 878 if (sa->state == SFC_ADAPTER_STARTED) { 879 sfc_stop(sa); 880 881 old_pdu = sa->port.pdu; 882 sa->port.pdu = pdu; 883 rc = sfc_start(sa); 884 if (rc != 0) 885 goto fail_start; 886 } else { 887 sa->port.pdu = pdu; 888 } 889 } 890 891 /* 892 * The driver does not use it, but other PMDs update jumbo_frame 893 * flag and max_rx_pkt_len when MTU is set. 894 */ 895 if (mtu > ETHER_MAX_LEN) { 896 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; 897 898 rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; 899 rxmode->jumbo_frame = 1; 900 } 901 902 dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu; 903 904 sfc_adapter_unlock(sa); 905 906 sfc_log_init(sa, "done"); 907 return 0; 908 909 fail_start: 910 sa->port.pdu = old_pdu; 911 if (sfc_start(sa) != 0) 912 sfc_err(sa, "cannot start with neither new (%u) nor old (%u) " 913 "PDU max size - port is stopped", 914 (unsigned int)pdu, (unsigned int)old_pdu); 915 sfc_adapter_unlock(sa); 916 917 fail_inval: 918 sfc_log_init(sa, "failed %d", rc); 919 SFC_ASSERT(rc > 0); 920 return -rc; 921 } 922 static int 923 sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) 924 { 925 struct sfc_adapter *sa = dev->data->dev_private; 926 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); 927 struct sfc_port *port = &sa->port; 928 struct ether_addr *old_addr = &dev->data->mac_addrs[0]; 929 int rc = 0; 930 931 sfc_adapter_lock(sa); 932 933 /* 934 * Copy the address to the device private data so that 935 * it could be recalled in the case of adapter restart. 936 */ 937 ether_addr_copy(mac_addr, &port->default_mac_addr); 938 939 /* 940 * Neither of the two following checks can return 941 * an error. The new MAC address is preserved in 942 * the device private data and can be activated 943 * on the next port start if the user prevents 944 * isolated mode from being enabled. 945 */ 946 if (port->isolated) { 947 sfc_warn(sa, "isolated mode is active on the port"); 948 sfc_warn(sa, "will not set MAC address"); 949 goto unlock; 950 } 951 952 if (sa->state != SFC_ADAPTER_STARTED) { 953 sfc_notice(sa, "the port is not started"); 954 sfc_notice(sa, "the new MAC address will be set on port start"); 955 956 goto unlock; 957 } 958 959 if (encp->enc_allow_set_mac_with_installed_filters) { 960 rc = efx_mac_addr_set(sa->nic, mac_addr->addr_bytes); 961 if (rc != 0) { 962 sfc_err(sa, "cannot set MAC address (rc = %u)", rc); 963 goto unlock; 964 } 965 966 /* 967 * Changing the MAC address by means of MCDI request 968 * has no effect on received traffic, therefore 969 * we also need to update unicast filters 970 */ 971 rc = sfc_set_rx_mode(sa); 972 if (rc != 0) { 973 sfc_err(sa, "cannot set filter (rc = %u)", rc); 974 /* Rollback the old address */ 975 (void)efx_mac_addr_set(sa->nic, old_addr->addr_bytes); 976 (void)sfc_set_rx_mode(sa); 977 } 978 } else { 979 sfc_warn(sa, "cannot set MAC address with filters installed"); 980 sfc_warn(sa, "adapter will be restarted to pick the new MAC"); 981 sfc_warn(sa, "(some traffic may be dropped)"); 982 983 /* 984 * Since setting MAC address with filters installed is not 985 * allowed on the adapter, the new MAC address will be set 986 * by means of adapter restart. sfc_start() shall retrieve 987 * the new address from the device private data and set it. 988 */ 989 sfc_stop(sa); 990 rc = sfc_start(sa); 991 if (rc != 0) 992 sfc_err(sa, "cannot restart adapter (rc = %u)", rc); 993 } 994 995 unlock: 996 if (rc != 0) 997 ether_addr_copy(old_addr, &port->default_mac_addr); 998 999 sfc_adapter_unlock(sa); 1000 1001 SFC_ASSERT(rc >= 0); 1002 return -rc; 1003 } 1004 1005 1006 static int 1007 sfc_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set, 1008 uint32_t nb_mc_addr) 1009 { 1010 struct sfc_adapter *sa = dev->data->dev_private; 1011 struct sfc_port *port = &sa->port; 1012 uint8_t *mc_addrs = port->mcast_addrs; 1013 int rc; 1014 unsigned int i; 1015 1016 if (port->isolated) { 1017 sfc_err(sa, "isolated mode is active on the port"); 1018 sfc_err(sa, "will not set multicast address list"); 1019 return -ENOTSUP; 1020 } 1021 1022 if (mc_addrs == NULL) 1023 return -ENOBUFS; 1024 1025 if (nb_mc_addr > port->max_mcast_addrs) { 1026 sfc_err(sa, "too many multicast addresses: %u > %u", 1027 nb_mc_addr, port->max_mcast_addrs); 1028 return -EINVAL; 1029 } 1030 1031 for (i = 0; i < nb_mc_addr; ++i) { 1032 rte_memcpy(mc_addrs, mc_addr_set[i].addr_bytes, 1033 EFX_MAC_ADDR_LEN); 1034 mc_addrs += EFX_MAC_ADDR_LEN; 1035 } 1036 1037 port->nb_mcast_addrs = nb_mc_addr; 1038 1039 if (sa->state != SFC_ADAPTER_STARTED) 1040 return 0; 1041 1042 rc = efx_mac_multicast_list_set(sa->nic, port->mcast_addrs, 1043 port->nb_mcast_addrs); 1044 if (rc != 0) 1045 sfc_err(sa, "cannot set multicast address list (rc = %u)", rc); 1046 1047 SFC_ASSERT(rc > 0); 1048 return -rc; 1049 } 1050 1051 /* 1052 * The function is used by the secondary process as well. It must not 1053 * use any process-local pointers from the adapter data. 1054 */ 1055 static void 1056 sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id, 1057 struct rte_eth_rxq_info *qinfo) 1058 { 1059 struct sfc_adapter *sa = dev->data->dev_private; 1060 struct sfc_rxq_info *rxq_info; 1061 struct sfc_rxq *rxq; 1062 1063 sfc_adapter_lock(sa); 1064 1065 SFC_ASSERT(rx_queue_id < sa->rxq_count); 1066 1067 rxq_info = &sa->rxq_info[rx_queue_id]; 1068 rxq = rxq_info->rxq; 1069 SFC_ASSERT(rxq != NULL); 1070 1071 qinfo->mp = rxq->refill_mb_pool; 1072 qinfo->conf.rx_free_thresh = rxq->refill_threshold; 1073 qinfo->conf.rx_drop_en = 1; 1074 qinfo->conf.rx_deferred_start = rxq_info->deferred_start; 1075 qinfo->conf.offloads = DEV_RX_OFFLOAD_IPV4_CKSUM | 1076 DEV_RX_OFFLOAD_UDP_CKSUM | 1077 DEV_RX_OFFLOAD_TCP_CKSUM; 1078 if (rxq_info->type_flags & EFX_RXQ_FLAG_SCATTER) { 1079 qinfo->conf.offloads |= DEV_RX_OFFLOAD_SCATTER; 1080 qinfo->scattered_rx = 1; 1081 } 1082 qinfo->nb_desc = rxq_info->entries; 1083 1084 sfc_adapter_unlock(sa); 1085 } 1086 1087 /* 1088 * The function is used by the secondary process as well. It must not 1089 * use any process-local pointers from the adapter data. 1090 */ 1091 static void 1092 sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id, 1093 struct rte_eth_txq_info *qinfo) 1094 { 1095 struct sfc_adapter *sa = dev->data->dev_private; 1096 struct sfc_txq_info *txq_info; 1097 1098 sfc_adapter_lock(sa); 1099 1100 SFC_ASSERT(tx_queue_id < sa->txq_count); 1101 1102 txq_info = &sa->txq_info[tx_queue_id]; 1103 SFC_ASSERT(txq_info->txq != NULL); 1104 1105 memset(qinfo, 0, sizeof(*qinfo)); 1106 1107 qinfo->conf.txq_flags = txq_info->txq->flags; 1108 qinfo->conf.offloads = txq_info->txq->offloads; 1109 qinfo->conf.tx_free_thresh = txq_info->txq->free_thresh; 1110 qinfo->conf.tx_deferred_start = txq_info->deferred_start; 1111 qinfo->nb_desc = txq_info->entries; 1112 1113 sfc_adapter_unlock(sa); 1114 } 1115 1116 static uint32_t 1117 sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) 1118 { 1119 struct sfc_adapter *sa = dev->data->dev_private; 1120 1121 sfc_log_init(sa, "RxQ=%u", rx_queue_id); 1122 1123 return sfc_rx_qdesc_npending(sa, rx_queue_id); 1124 } 1125 1126 static int 1127 sfc_rx_descriptor_done(void *queue, uint16_t offset) 1128 { 1129 struct sfc_dp_rxq *dp_rxq = queue; 1130 1131 return sfc_rx_qdesc_done(dp_rxq, offset); 1132 } 1133 1134 static int 1135 sfc_rx_descriptor_status(void *queue, uint16_t offset) 1136 { 1137 struct sfc_dp_rxq *dp_rxq = queue; 1138 struct sfc_rxq *rxq = sfc_rxq_by_dp_rxq(dp_rxq); 1139 1140 return rxq->evq->sa->dp_rx->qdesc_status(dp_rxq, offset); 1141 } 1142 1143 static int 1144 sfc_tx_descriptor_status(void *queue, uint16_t offset) 1145 { 1146 struct sfc_dp_txq *dp_txq = queue; 1147 struct sfc_txq *txq = sfc_txq_by_dp_txq(dp_txq); 1148 1149 return txq->evq->sa->dp_tx->qdesc_status(dp_txq, offset); 1150 } 1151 1152 static int 1153 sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) 1154 { 1155 struct sfc_adapter *sa = dev->data->dev_private; 1156 int rc; 1157 1158 sfc_log_init(sa, "RxQ=%u", rx_queue_id); 1159 1160 sfc_adapter_lock(sa); 1161 1162 rc = EINVAL; 1163 if (sa->state != SFC_ADAPTER_STARTED) 1164 goto fail_not_started; 1165 1166 rc = sfc_rx_qstart(sa, rx_queue_id); 1167 if (rc != 0) 1168 goto fail_rx_qstart; 1169 1170 sa->rxq_info[rx_queue_id].deferred_started = B_TRUE; 1171 1172 sfc_adapter_unlock(sa); 1173 1174 return 0; 1175 1176 fail_rx_qstart: 1177 fail_not_started: 1178 sfc_adapter_unlock(sa); 1179 SFC_ASSERT(rc > 0); 1180 return -rc; 1181 } 1182 1183 static int 1184 sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) 1185 { 1186 struct sfc_adapter *sa = dev->data->dev_private; 1187 1188 sfc_log_init(sa, "RxQ=%u", rx_queue_id); 1189 1190 sfc_adapter_lock(sa); 1191 sfc_rx_qstop(sa, rx_queue_id); 1192 1193 sa->rxq_info[rx_queue_id].deferred_started = B_FALSE; 1194 1195 sfc_adapter_unlock(sa); 1196 1197 return 0; 1198 } 1199 1200 static int 1201 sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) 1202 { 1203 struct sfc_adapter *sa = dev->data->dev_private; 1204 int rc; 1205 1206 sfc_log_init(sa, "TxQ = %u", tx_queue_id); 1207 1208 sfc_adapter_lock(sa); 1209 1210 rc = EINVAL; 1211 if (sa->state != SFC_ADAPTER_STARTED) 1212 goto fail_not_started; 1213 1214 rc = sfc_tx_qstart(sa, tx_queue_id); 1215 if (rc != 0) 1216 goto fail_tx_qstart; 1217 1218 sa->txq_info[tx_queue_id].deferred_started = B_TRUE; 1219 1220 sfc_adapter_unlock(sa); 1221 return 0; 1222 1223 fail_tx_qstart: 1224 1225 fail_not_started: 1226 sfc_adapter_unlock(sa); 1227 SFC_ASSERT(rc > 0); 1228 return -rc; 1229 } 1230 1231 static int 1232 sfc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) 1233 { 1234 struct sfc_adapter *sa = dev->data->dev_private; 1235 1236 sfc_log_init(sa, "TxQ = %u", tx_queue_id); 1237 1238 sfc_adapter_lock(sa); 1239 1240 sfc_tx_qstop(sa, tx_queue_id); 1241 1242 sa->txq_info[tx_queue_id].deferred_started = B_FALSE; 1243 1244 sfc_adapter_unlock(sa); 1245 return 0; 1246 } 1247 1248 static efx_tunnel_protocol_t 1249 sfc_tunnel_rte_type_to_efx_udp_proto(enum rte_eth_tunnel_type rte_type) 1250 { 1251 switch (rte_type) { 1252 case RTE_TUNNEL_TYPE_VXLAN: 1253 return EFX_TUNNEL_PROTOCOL_VXLAN; 1254 case RTE_TUNNEL_TYPE_GENEVE: 1255 return EFX_TUNNEL_PROTOCOL_GENEVE; 1256 default: 1257 return EFX_TUNNEL_NPROTOS; 1258 } 1259 } 1260 1261 enum sfc_udp_tunnel_op_e { 1262 SFC_UDP_TUNNEL_ADD_PORT, 1263 SFC_UDP_TUNNEL_DEL_PORT, 1264 }; 1265 1266 static int 1267 sfc_dev_udp_tunnel_op(struct rte_eth_dev *dev, 1268 struct rte_eth_udp_tunnel *tunnel_udp, 1269 enum sfc_udp_tunnel_op_e op) 1270 { 1271 struct sfc_adapter *sa = dev->data->dev_private; 1272 efx_tunnel_protocol_t tunnel_proto; 1273 int rc; 1274 1275 sfc_log_init(sa, "%s udp_port=%u prot_type=%u", 1276 (op == SFC_UDP_TUNNEL_ADD_PORT) ? "add" : 1277 (op == SFC_UDP_TUNNEL_DEL_PORT) ? "delete" : "unknown", 1278 tunnel_udp->udp_port, tunnel_udp->prot_type); 1279 1280 tunnel_proto = 1281 sfc_tunnel_rte_type_to_efx_udp_proto(tunnel_udp->prot_type); 1282 if (tunnel_proto >= EFX_TUNNEL_NPROTOS) { 1283 rc = ENOTSUP; 1284 goto fail_bad_proto; 1285 } 1286 1287 sfc_adapter_lock(sa); 1288 1289 switch (op) { 1290 case SFC_UDP_TUNNEL_ADD_PORT: 1291 rc = efx_tunnel_config_udp_add(sa->nic, 1292 tunnel_udp->udp_port, 1293 tunnel_proto); 1294 break; 1295 case SFC_UDP_TUNNEL_DEL_PORT: 1296 rc = efx_tunnel_config_udp_remove(sa->nic, 1297 tunnel_udp->udp_port, 1298 tunnel_proto); 1299 break; 1300 default: 1301 rc = EINVAL; 1302 goto fail_bad_op; 1303 } 1304 1305 if (rc != 0) 1306 goto fail_op; 1307 1308 if (sa->state == SFC_ADAPTER_STARTED) { 1309 rc = efx_tunnel_reconfigure(sa->nic); 1310 if (rc == EAGAIN) { 1311 /* 1312 * Configuration is accepted by FW and MC reboot 1313 * is initiated to apply the changes. MC reboot 1314 * will be handled in a usual way (MC reboot 1315 * event on management event queue and adapter 1316 * restart). 1317 */ 1318 rc = 0; 1319 } else if (rc != 0) { 1320 goto fail_reconfigure; 1321 } 1322 } 1323 1324 sfc_adapter_unlock(sa); 1325 return 0; 1326 1327 fail_reconfigure: 1328 /* Remove/restore entry since the change makes the trouble */ 1329 switch (op) { 1330 case SFC_UDP_TUNNEL_ADD_PORT: 1331 (void)efx_tunnel_config_udp_remove(sa->nic, 1332 tunnel_udp->udp_port, 1333 tunnel_proto); 1334 break; 1335 case SFC_UDP_TUNNEL_DEL_PORT: 1336 (void)efx_tunnel_config_udp_add(sa->nic, 1337 tunnel_udp->udp_port, 1338 tunnel_proto); 1339 break; 1340 } 1341 1342 fail_op: 1343 fail_bad_op: 1344 sfc_adapter_unlock(sa); 1345 1346 fail_bad_proto: 1347 SFC_ASSERT(rc > 0); 1348 return -rc; 1349 } 1350 1351 static int 1352 sfc_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, 1353 struct rte_eth_udp_tunnel *tunnel_udp) 1354 { 1355 return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_ADD_PORT); 1356 } 1357 1358 static int 1359 sfc_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, 1360 struct rte_eth_udp_tunnel *tunnel_udp) 1361 { 1362 return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_DEL_PORT); 1363 } 1364 1365 static int 1366 sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev, 1367 struct rte_eth_rss_conf *rss_conf) 1368 { 1369 struct sfc_adapter *sa = dev->data->dev_private; 1370 struct sfc_rss *rss = &sa->rss; 1371 struct sfc_port *port = &sa->port; 1372 1373 if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE || port->isolated) 1374 return -ENOTSUP; 1375 1376 if (rss->channels == 0) 1377 return -EINVAL; 1378 1379 sfc_adapter_lock(sa); 1380 1381 /* 1382 * Mapping of hash configuration between RTE and EFX is not one-to-one, 1383 * hence, conversion is done here to derive a correct set of ETH_RSS 1384 * flags which corresponds to the active EFX configuration stored 1385 * locally in 'sfc_adapter' and kept up-to-date 1386 */ 1387 rss_conf->rss_hf = sfc_rx_hf_efx_to_rte(sa, rss->hash_types); 1388 rss_conf->rss_key_len = EFX_RSS_KEY_SIZE; 1389 if (rss_conf->rss_key != NULL) 1390 rte_memcpy(rss_conf->rss_key, rss->key, EFX_RSS_KEY_SIZE); 1391 1392 sfc_adapter_unlock(sa); 1393 1394 return 0; 1395 } 1396 1397 static int 1398 sfc_dev_rss_hash_update(struct rte_eth_dev *dev, 1399 struct rte_eth_rss_conf *rss_conf) 1400 { 1401 struct sfc_adapter *sa = dev->data->dev_private; 1402 struct sfc_rss *rss = &sa->rss; 1403 struct sfc_port *port = &sa->port; 1404 unsigned int efx_hash_types; 1405 int rc = 0; 1406 1407 if (port->isolated) 1408 return -ENOTSUP; 1409 1410 if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) { 1411 sfc_err(sa, "RSS is not available"); 1412 return -ENOTSUP; 1413 } 1414 1415 if (rss->channels == 0) { 1416 sfc_err(sa, "RSS is not configured"); 1417 return -EINVAL; 1418 } 1419 1420 if ((rss_conf->rss_key != NULL) && 1421 (rss_conf->rss_key_len != sizeof(rss->key))) { 1422 sfc_err(sa, "RSS key size is wrong (should be %lu)", 1423 sizeof(rss->key)); 1424 return -EINVAL; 1425 } 1426 1427 sfc_adapter_lock(sa); 1428 1429 rc = sfc_rx_hf_rte_to_efx(sa, rss_conf->rss_hf, &efx_hash_types); 1430 if (rc != 0) 1431 goto fail_rx_hf_rte_to_efx; 1432 1433 rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT, 1434 rss->hash_alg, efx_hash_types, B_TRUE); 1435 if (rc != 0) 1436 goto fail_scale_mode_set; 1437 1438 if (rss_conf->rss_key != NULL) { 1439 if (sa->state == SFC_ADAPTER_STARTED) { 1440 rc = efx_rx_scale_key_set(sa->nic, 1441 EFX_RSS_CONTEXT_DEFAULT, 1442 rss_conf->rss_key, 1443 sizeof(rss->key)); 1444 if (rc != 0) 1445 goto fail_scale_key_set; 1446 } 1447 1448 rte_memcpy(rss->key, rss_conf->rss_key, sizeof(rss->key)); 1449 } 1450 1451 rss->hash_types = efx_hash_types; 1452 1453 sfc_adapter_unlock(sa); 1454 1455 return 0; 1456 1457 fail_scale_key_set: 1458 if (efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT, 1459 EFX_RX_HASHALG_TOEPLITZ, 1460 rss->hash_types, B_TRUE) != 0) 1461 sfc_err(sa, "failed to restore RSS mode"); 1462 1463 fail_scale_mode_set: 1464 fail_rx_hf_rte_to_efx: 1465 sfc_adapter_unlock(sa); 1466 return -rc; 1467 } 1468 1469 static int 1470 sfc_dev_rss_reta_query(struct rte_eth_dev *dev, 1471 struct rte_eth_rss_reta_entry64 *reta_conf, 1472 uint16_t reta_size) 1473 { 1474 struct sfc_adapter *sa = dev->data->dev_private; 1475 struct sfc_rss *rss = &sa->rss; 1476 struct sfc_port *port = &sa->port; 1477 int entry; 1478 1479 if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE || port->isolated) 1480 return -ENOTSUP; 1481 1482 if (rss->channels == 0) 1483 return -EINVAL; 1484 1485 if (reta_size != EFX_RSS_TBL_SIZE) 1486 return -EINVAL; 1487 1488 sfc_adapter_lock(sa); 1489 1490 for (entry = 0; entry < reta_size; entry++) { 1491 int grp = entry / RTE_RETA_GROUP_SIZE; 1492 int grp_idx = entry % RTE_RETA_GROUP_SIZE; 1493 1494 if ((reta_conf[grp].mask >> grp_idx) & 1) 1495 reta_conf[grp].reta[grp_idx] = rss->tbl[entry]; 1496 } 1497 1498 sfc_adapter_unlock(sa); 1499 1500 return 0; 1501 } 1502 1503 static int 1504 sfc_dev_rss_reta_update(struct rte_eth_dev *dev, 1505 struct rte_eth_rss_reta_entry64 *reta_conf, 1506 uint16_t reta_size) 1507 { 1508 struct sfc_adapter *sa = dev->data->dev_private; 1509 struct sfc_rss *rss = &sa->rss; 1510 struct sfc_port *port = &sa->port; 1511 unsigned int *rss_tbl_new; 1512 uint16_t entry; 1513 int rc = 0; 1514 1515 1516 if (port->isolated) 1517 return -ENOTSUP; 1518 1519 if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) { 1520 sfc_err(sa, "RSS is not available"); 1521 return -ENOTSUP; 1522 } 1523 1524 if (rss->channels == 0) { 1525 sfc_err(sa, "RSS is not configured"); 1526 return -EINVAL; 1527 } 1528 1529 if (reta_size != EFX_RSS_TBL_SIZE) { 1530 sfc_err(sa, "RETA size is wrong (should be %u)", 1531 EFX_RSS_TBL_SIZE); 1532 return -EINVAL; 1533 } 1534 1535 rss_tbl_new = rte_zmalloc("rss_tbl_new", sizeof(rss->tbl), 0); 1536 if (rss_tbl_new == NULL) 1537 return -ENOMEM; 1538 1539 sfc_adapter_lock(sa); 1540 1541 rte_memcpy(rss_tbl_new, rss->tbl, sizeof(rss->tbl)); 1542 1543 for (entry = 0; entry < reta_size; entry++) { 1544 int grp_idx = entry % RTE_RETA_GROUP_SIZE; 1545 struct rte_eth_rss_reta_entry64 *grp; 1546 1547 grp = &reta_conf[entry / RTE_RETA_GROUP_SIZE]; 1548 1549 if (grp->mask & (1ull << grp_idx)) { 1550 if (grp->reta[grp_idx] >= rss->channels) { 1551 rc = EINVAL; 1552 goto bad_reta_entry; 1553 } 1554 rss_tbl_new[entry] = grp->reta[grp_idx]; 1555 } 1556 } 1557 1558 if (sa->state == SFC_ADAPTER_STARTED) { 1559 rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT, 1560 rss_tbl_new, EFX_RSS_TBL_SIZE); 1561 if (rc != 0) 1562 goto fail_scale_tbl_set; 1563 } 1564 1565 rte_memcpy(rss->tbl, rss_tbl_new, sizeof(rss->tbl)); 1566 1567 fail_scale_tbl_set: 1568 bad_reta_entry: 1569 sfc_adapter_unlock(sa); 1570 1571 rte_free(rss_tbl_new); 1572 1573 SFC_ASSERT(rc >= 0); 1574 return -rc; 1575 } 1576 1577 static int 1578 sfc_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type, 1579 enum rte_filter_op filter_op, 1580 void *arg) 1581 { 1582 struct sfc_adapter *sa = dev->data->dev_private; 1583 int rc = ENOTSUP; 1584 1585 sfc_log_init(sa, "entry"); 1586 1587 switch (filter_type) { 1588 case RTE_ETH_FILTER_NONE: 1589 sfc_err(sa, "Global filters configuration not supported"); 1590 break; 1591 case RTE_ETH_FILTER_MACVLAN: 1592 sfc_err(sa, "MACVLAN filters not supported"); 1593 break; 1594 case RTE_ETH_FILTER_ETHERTYPE: 1595 sfc_err(sa, "EtherType filters not supported"); 1596 break; 1597 case RTE_ETH_FILTER_FLEXIBLE: 1598 sfc_err(sa, "Flexible filters not supported"); 1599 break; 1600 case RTE_ETH_FILTER_SYN: 1601 sfc_err(sa, "SYN filters not supported"); 1602 break; 1603 case RTE_ETH_FILTER_NTUPLE: 1604 sfc_err(sa, "NTUPLE filters not supported"); 1605 break; 1606 case RTE_ETH_FILTER_TUNNEL: 1607 sfc_err(sa, "Tunnel filters not supported"); 1608 break; 1609 case RTE_ETH_FILTER_FDIR: 1610 sfc_err(sa, "Flow Director filters not supported"); 1611 break; 1612 case RTE_ETH_FILTER_HASH: 1613 sfc_err(sa, "Hash filters not supported"); 1614 break; 1615 case RTE_ETH_FILTER_GENERIC: 1616 if (filter_op != RTE_ETH_FILTER_GET) { 1617 rc = EINVAL; 1618 } else { 1619 *(const void **)arg = &sfc_flow_ops; 1620 rc = 0; 1621 } 1622 break; 1623 default: 1624 sfc_err(sa, "Unknown filter type %u", filter_type); 1625 break; 1626 } 1627 1628 sfc_log_init(sa, "exit: %d", -rc); 1629 SFC_ASSERT(rc >= 0); 1630 return -rc; 1631 } 1632 1633 static const struct eth_dev_ops sfc_eth_dev_ops = { 1634 .dev_configure = sfc_dev_configure, 1635 .dev_start = sfc_dev_start, 1636 .dev_stop = sfc_dev_stop, 1637 .dev_set_link_up = sfc_dev_set_link_up, 1638 .dev_set_link_down = sfc_dev_set_link_down, 1639 .dev_close = sfc_dev_close, 1640 .promiscuous_enable = sfc_dev_promisc_enable, 1641 .promiscuous_disable = sfc_dev_promisc_disable, 1642 .allmulticast_enable = sfc_dev_allmulti_enable, 1643 .allmulticast_disable = sfc_dev_allmulti_disable, 1644 .link_update = sfc_dev_link_update, 1645 .stats_get = sfc_stats_get, 1646 .stats_reset = sfc_stats_reset, 1647 .xstats_get = sfc_xstats_get, 1648 .xstats_reset = sfc_stats_reset, 1649 .xstats_get_names = sfc_xstats_get_names, 1650 .dev_infos_get = sfc_dev_infos_get, 1651 .dev_supported_ptypes_get = sfc_dev_supported_ptypes_get, 1652 .mtu_set = sfc_dev_set_mtu, 1653 .rx_queue_start = sfc_rx_queue_start, 1654 .rx_queue_stop = sfc_rx_queue_stop, 1655 .tx_queue_start = sfc_tx_queue_start, 1656 .tx_queue_stop = sfc_tx_queue_stop, 1657 .rx_queue_setup = sfc_rx_queue_setup, 1658 .rx_queue_release = sfc_rx_queue_release, 1659 .rx_queue_count = sfc_rx_queue_count, 1660 .rx_descriptor_done = sfc_rx_descriptor_done, 1661 .rx_descriptor_status = sfc_rx_descriptor_status, 1662 .tx_descriptor_status = sfc_tx_descriptor_status, 1663 .tx_queue_setup = sfc_tx_queue_setup, 1664 .tx_queue_release = sfc_tx_queue_release, 1665 .flow_ctrl_get = sfc_flow_ctrl_get, 1666 .flow_ctrl_set = sfc_flow_ctrl_set, 1667 .mac_addr_set = sfc_mac_addr_set, 1668 .udp_tunnel_port_add = sfc_dev_udp_tunnel_port_add, 1669 .udp_tunnel_port_del = sfc_dev_udp_tunnel_port_del, 1670 .reta_update = sfc_dev_rss_reta_update, 1671 .reta_query = sfc_dev_rss_reta_query, 1672 .rss_hash_update = sfc_dev_rss_hash_update, 1673 .rss_hash_conf_get = sfc_dev_rss_hash_conf_get, 1674 .filter_ctrl = sfc_dev_filter_ctrl, 1675 .set_mc_addr_list = sfc_set_mc_addr_list, 1676 .rxq_info_get = sfc_rx_queue_info_get, 1677 .txq_info_get = sfc_tx_queue_info_get, 1678 .fw_version_get = sfc_fw_version_get, 1679 .xstats_get_by_id = sfc_xstats_get_by_id, 1680 .xstats_get_names_by_id = sfc_xstats_get_names_by_id, 1681 }; 1682 1683 /** 1684 * Duplicate a string in potentially shared memory required for 1685 * multi-process support. 1686 * 1687 * strdup() allocates from process-local heap/memory. 1688 */ 1689 static char * 1690 sfc_strdup(const char *str) 1691 { 1692 size_t size; 1693 char *copy; 1694 1695 if (str == NULL) 1696 return NULL; 1697 1698 size = strlen(str) + 1; 1699 copy = rte_malloc(__func__, size, 0); 1700 if (copy != NULL) 1701 rte_memcpy(copy, str, size); 1702 1703 return copy; 1704 } 1705 1706 static int 1707 sfc_eth_dev_set_ops(struct rte_eth_dev *dev) 1708 { 1709 struct sfc_adapter *sa = dev->data->dev_private; 1710 unsigned int avail_caps = 0; 1711 const char *rx_name = NULL; 1712 const char *tx_name = NULL; 1713 int rc; 1714 1715 switch (sa->family) { 1716 case EFX_FAMILY_HUNTINGTON: 1717 case EFX_FAMILY_MEDFORD: 1718 case EFX_FAMILY_MEDFORD2: 1719 avail_caps |= SFC_DP_HW_FW_CAP_EF10; 1720 break; 1721 default: 1722 break; 1723 } 1724 1725 rc = sfc_kvargs_process(sa, SFC_KVARG_RX_DATAPATH, 1726 sfc_kvarg_string_handler, &rx_name); 1727 if (rc != 0) 1728 goto fail_kvarg_rx_datapath; 1729 1730 if (rx_name != NULL) { 1731 sa->dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, rx_name); 1732 if (sa->dp_rx == NULL) { 1733 sfc_err(sa, "Rx datapath %s not found", rx_name); 1734 rc = ENOENT; 1735 goto fail_dp_rx; 1736 } 1737 if (!sfc_dp_match_hw_fw_caps(&sa->dp_rx->dp, avail_caps)) { 1738 sfc_err(sa, 1739 "Insufficient Hw/FW capabilities to use Rx datapath %s", 1740 rx_name); 1741 rc = EINVAL; 1742 goto fail_dp_rx_caps; 1743 } 1744 } else { 1745 sa->dp_rx = sfc_dp_find_rx_by_caps(&sfc_dp_head, avail_caps); 1746 if (sa->dp_rx == NULL) { 1747 sfc_err(sa, "Rx datapath by caps %#x not found", 1748 avail_caps); 1749 rc = ENOENT; 1750 goto fail_dp_rx; 1751 } 1752 } 1753 1754 sa->dp_rx_name = sfc_strdup(sa->dp_rx->dp.name); 1755 if (sa->dp_rx_name == NULL) { 1756 rc = ENOMEM; 1757 goto fail_dp_rx_name; 1758 } 1759 1760 sfc_notice(sa, "use %s Rx datapath", sa->dp_rx_name); 1761 1762 dev->rx_pkt_burst = sa->dp_rx->pkt_burst; 1763 1764 rc = sfc_kvargs_process(sa, SFC_KVARG_TX_DATAPATH, 1765 sfc_kvarg_string_handler, &tx_name); 1766 if (rc != 0) 1767 goto fail_kvarg_tx_datapath; 1768 1769 if (tx_name != NULL) { 1770 sa->dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, tx_name); 1771 if (sa->dp_tx == NULL) { 1772 sfc_err(sa, "Tx datapath %s not found", tx_name); 1773 rc = ENOENT; 1774 goto fail_dp_tx; 1775 } 1776 if (!sfc_dp_match_hw_fw_caps(&sa->dp_tx->dp, avail_caps)) { 1777 sfc_err(sa, 1778 "Insufficient Hw/FW capabilities to use Tx datapath %s", 1779 tx_name); 1780 rc = EINVAL; 1781 goto fail_dp_tx_caps; 1782 } 1783 } else { 1784 sa->dp_tx = sfc_dp_find_tx_by_caps(&sfc_dp_head, avail_caps); 1785 if (sa->dp_tx == NULL) { 1786 sfc_err(sa, "Tx datapath by caps %#x not found", 1787 avail_caps); 1788 rc = ENOENT; 1789 goto fail_dp_tx; 1790 } 1791 } 1792 1793 sa->dp_tx_name = sfc_strdup(sa->dp_tx->dp.name); 1794 if (sa->dp_tx_name == NULL) { 1795 rc = ENOMEM; 1796 goto fail_dp_tx_name; 1797 } 1798 1799 sfc_notice(sa, "use %s Tx datapath", sa->dp_tx_name); 1800 1801 dev->tx_pkt_burst = sa->dp_tx->pkt_burst; 1802 1803 dev->dev_ops = &sfc_eth_dev_ops; 1804 1805 return 0; 1806 1807 fail_dp_tx_name: 1808 fail_dp_tx_caps: 1809 sa->dp_tx = NULL; 1810 1811 fail_dp_tx: 1812 fail_kvarg_tx_datapath: 1813 rte_free(sa->dp_rx_name); 1814 sa->dp_rx_name = NULL; 1815 1816 fail_dp_rx_name: 1817 fail_dp_rx_caps: 1818 sa->dp_rx = NULL; 1819 1820 fail_dp_rx: 1821 fail_kvarg_rx_datapath: 1822 return rc; 1823 } 1824 1825 static void 1826 sfc_eth_dev_clear_ops(struct rte_eth_dev *dev) 1827 { 1828 struct sfc_adapter *sa = dev->data->dev_private; 1829 1830 dev->dev_ops = NULL; 1831 dev->rx_pkt_burst = NULL; 1832 dev->tx_pkt_burst = NULL; 1833 1834 rte_free(sa->dp_tx_name); 1835 sa->dp_tx_name = NULL; 1836 sa->dp_tx = NULL; 1837 1838 rte_free(sa->dp_rx_name); 1839 sa->dp_rx_name = NULL; 1840 sa->dp_rx = NULL; 1841 } 1842 1843 static const struct eth_dev_ops sfc_eth_dev_secondary_ops = { 1844 .rxq_info_get = sfc_rx_queue_info_get, 1845 .txq_info_get = sfc_tx_queue_info_get, 1846 }; 1847 1848 static int 1849 sfc_eth_dev_secondary_set_ops(struct rte_eth_dev *dev) 1850 { 1851 /* 1852 * Device private data has really many process-local pointers. 1853 * Below code should be extremely careful to use data located 1854 * in shared memory only. 1855 */ 1856 struct sfc_adapter *sa = dev->data->dev_private; 1857 const struct sfc_dp_rx *dp_rx; 1858 const struct sfc_dp_tx *dp_tx; 1859 int rc; 1860 1861 dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, sa->dp_rx_name); 1862 if (dp_rx == NULL) { 1863 sfc_err(sa, "cannot find %s Rx datapath", sa->dp_tx_name); 1864 rc = ENOENT; 1865 goto fail_dp_rx; 1866 } 1867 if (~dp_rx->features & SFC_DP_RX_FEAT_MULTI_PROCESS) { 1868 sfc_err(sa, "%s Rx datapath does not support multi-process", 1869 sa->dp_tx_name); 1870 rc = EINVAL; 1871 goto fail_dp_rx_multi_process; 1872 } 1873 1874 dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, sa->dp_tx_name); 1875 if (dp_tx == NULL) { 1876 sfc_err(sa, "cannot find %s Tx datapath", sa->dp_tx_name); 1877 rc = ENOENT; 1878 goto fail_dp_tx; 1879 } 1880 if (~dp_tx->features & SFC_DP_TX_FEAT_MULTI_PROCESS) { 1881 sfc_err(sa, "%s Tx datapath does not support multi-process", 1882 sa->dp_tx_name); 1883 rc = EINVAL; 1884 goto fail_dp_tx_multi_process; 1885 } 1886 1887 dev->rx_pkt_burst = dp_rx->pkt_burst; 1888 dev->tx_pkt_burst = dp_tx->pkt_burst; 1889 dev->dev_ops = &sfc_eth_dev_secondary_ops; 1890 1891 return 0; 1892 1893 fail_dp_tx_multi_process: 1894 fail_dp_tx: 1895 fail_dp_rx_multi_process: 1896 fail_dp_rx: 1897 return rc; 1898 } 1899 1900 static void 1901 sfc_eth_dev_secondary_clear_ops(struct rte_eth_dev *dev) 1902 { 1903 dev->dev_ops = NULL; 1904 dev->tx_pkt_burst = NULL; 1905 dev->rx_pkt_burst = NULL; 1906 } 1907 1908 static void 1909 sfc_register_dp(void) 1910 { 1911 /* Register once */ 1912 if (TAILQ_EMPTY(&sfc_dp_head)) { 1913 /* Prefer EF10 datapath */ 1914 sfc_dp_register(&sfc_dp_head, &sfc_ef10_rx.dp); 1915 sfc_dp_register(&sfc_dp_head, &sfc_efx_rx.dp); 1916 1917 sfc_dp_register(&sfc_dp_head, &sfc_ef10_tx.dp); 1918 sfc_dp_register(&sfc_dp_head, &sfc_efx_tx.dp); 1919 sfc_dp_register(&sfc_dp_head, &sfc_ef10_simple_tx.dp); 1920 } 1921 } 1922 1923 static int 1924 sfc_eth_dev_init(struct rte_eth_dev *dev) 1925 { 1926 struct sfc_adapter *sa = dev->data->dev_private; 1927 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 1928 int rc; 1929 const efx_nic_cfg_t *encp; 1930 const struct ether_addr *from; 1931 1932 sfc_register_dp(); 1933 1934 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1935 return -sfc_eth_dev_secondary_set_ops(dev); 1936 1937 /* Required for logging */ 1938 sa->pci_addr = pci_dev->addr; 1939 sa->port_id = dev->data->port_id; 1940 1941 sa->eth_dev = dev; 1942 1943 /* Copy PCI device info to the dev->data */ 1944 rte_eth_copy_pci_info(dev, pci_dev); 1945 1946 sa->logtype_main = sfc_register_logtype(sa, SFC_LOGTYPE_MAIN_STR, 1947 RTE_LOG_NOTICE); 1948 1949 rc = sfc_kvargs_parse(sa); 1950 if (rc != 0) 1951 goto fail_kvargs_parse; 1952 1953 sfc_log_init(sa, "entry"); 1954 1955 dev->data->mac_addrs = rte_zmalloc("sfc", ETHER_ADDR_LEN, 0); 1956 if (dev->data->mac_addrs == NULL) { 1957 rc = ENOMEM; 1958 goto fail_mac_addrs; 1959 } 1960 1961 sfc_adapter_lock_init(sa); 1962 sfc_adapter_lock(sa); 1963 1964 sfc_log_init(sa, "probing"); 1965 rc = sfc_probe(sa); 1966 if (rc != 0) 1967 goto fail_probe; 1968 1969 sfc_log_init(sa, "set device ops"); 1970 rc = sfc_eth_dev_set_ops(dev); 1971 if (rc != 0) 1972 goto fail_set_ops; 1973 1974 sfc_log_init(sa, "attaching"); 1975 rc = sfc_attach(sa); 1976 if (rc != 0) 1977 goto fail_attach; 1978 1979 encp = efx_nic_cfg_get(sa->nic); 1980 1981 /* 1982 * The arguments are really reverse order in comparison to 1983 * Linux kernel. Copy from NIC config to Ethernet device data. 1984 */ 1985 from = (const struct ether_addr *)(encp->enc_mac_addr); 1986 ether_addr_copy(from, &dev->data->mac_addrs[0]); 1987 1988 sfc_adapter_unlock(sa); 1989 1990 sfc_log_init(sa, "done"); 1991 return 0; 1992 1993 fail_attach: 1994 sfc_eth_dev_clear_ops(dev); 1995 1996 fail_set_ops: 1997 sfc_unprobe(sa); 1998 1999 fail_probe: 2000 sfc_adapter_unlock(sa); 2001 sfc_adapter_lock_fini(sa); 2002 rte_free(dev->data->mac_addrs); 2003 dev->data->mac_addrs = NULL; 2004 2005 fail_mac_addrs: 2006 sfc_kvargs_cleanup(sa); 2007 2008 fail_kvargs_parse: 2009 sfc_log_init(sa, "failed %d", rc); 2010 SFC_ASSERT(rc > 0); 2011 return -rc; 2012 } 2013 2014 static int 2015 sfc_eth_dev_uninit(struct rte_eth_dev *dev) 2016 { 2017 struct sfc_adapter *sa; 2018 2019 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 2020 sfc_eth_dev_secondary_clear_ops(dev); 2021 return 0; 2022 } 2023 2024 sa = dev->data->dev_private; 2025 sfc_log_init(sa, "entry"); 2026 2027 sfc_adapter_lock(sa); 2028 2029 sfc_eth_dev_clear_ops(dev); 2030 2031 sfc_detach(sa); 2032 sfc_unprobe(sa); 2033 2034 rte_free(dev->data->mac_addrs); 2035 dev->data->mac_addrs = NULL; 2036 2037 sfc_kvargs_cleanup(sa); 2038 2039 sfc_adapter_unlock(sa); 2040 sfc_adapter_lock_fini(sa); 2041 2042 sfc_log_init(sa, "done"); 2043 2044 /* Required for logging, so cleanup last */ 2045 sa->eth_dev = NULL; 2046 return 0; 2047 } 2048 2049 static const struct rte_pci_id pci_id_sfc_efx_map[] = { 2050 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE) }, 2051 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE_VF) }, 2052 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT) }, 2053 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT_VF) }, 2054 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) }, 2055 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD_VF) }, 2056 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2) }, 2057 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2_VF) }, 2058 { .vendor_id = 0 /* sentinel */ } 2059 }; 2060 2061 static int sfc_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 2062 struct rte_pci_device *pci_dev) 2063 { 2064 return rte_eth_dev_pci_generic_probe(pci_dev, 2065 sizeof(struct sfc_adapter), sfc_eth_dev_init); 2066 } 2067 2068 static int sfc_eth_dev_pci_remove(struct rte_pci_device *pci_dev) 2069 { 2070 return rte_eth_dev_pci_generic_remove(pci_dev, sfc_eth_dev_uninit); 2071 } 2072 2073 static struct rte_pci_driver sfc_efx_pmd = { 2074 .id_table = pci_id_sfc_efx_map, 2075 .drv_flags = 2076 RTE_PCI_DRV_INTR_LSC | 2077 RTE_PCI_DRV_NEED_MAPPING, 2078 .probe = sfc_eth_dev_pci_probe, 2079 .remove = sfc_eth_dev_pci_remove, 2080 }; 2081 2082 RTE_PMD_REGISTER_PCI(net_sfc_efx, sfc_efx_pmd); 2083 RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map); 2084 RTE_PMD_REGISTER_KMOD_DEP(net_sfc_efx, "* igb_uio | uio_pci_generic | vfio-pci"); 2085 RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx, 2086 SFC_KVARG_RX_DATAPATH "=" SFC_KVARG_VALUES_RX_DATAPATH " " 2087 SFC_KVARG_TX_DATAPATH "=" SFC_KVARG_VALUES_TX_DATAPATH " " 2088 SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " " 2089 SFC_KVARG_FW_VARIANT "=" SFC_KVARG_VALUES_FW_VARIANT " " 2090 SFC_KVARG_STATS_UPDATE_PERIOD_MS "=<long>"); 2091 2092 RTE_INIT(sfc_driver_register_logtype); 2093 static void 2094 sfc_driver_register_logtype(void) 2095 { 2096 int ret; 2097 2098 ret = rte_log_register_type_and_pick_level(SFC_LOGTYPE_PREFIX "driver", 2099 RTE_LOG_NOTICE); 2100 sfc_logtype_driver = (ret < 0) ? RTE_LOGTYPE_PMD : ret; 2101 } 2102