1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) 2016-2017 Solarflare Communications Inc. 5 * All rights reserved. 6 * 7 * This software was jointly developed between OKTET Labs (under contract 8 * for Solarflare) and Solarflare Communications, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions are met: 12 * 13 * 1. Redistributions of source code must retain the above copyright notice, 14 * this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright notice, 16 * this list of conditions and the following disclaimer in the documentation 17 * and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 21 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR 23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 24 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 26 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 27 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 28 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, 29 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 #include <rte_dev.h> 33 #include <rte_ethdev.h> 34 #include <rte_ethdev_pci.h> 35 #include <rte_pci.h> 36 #include <rte_bus_pci.h> 37 #include <rte_errno.h> 38 39 #include "efx.h" 40 41 #include "sfc.h" 42 #include "sfc_debug.h" 43 #include "sfc_log.h" 44 #include "sfc_kvargs.h" 45 #include "sfc_ev.h" 46 #include "sfc_rx.h" 47 #include "sfc_tx.h" 48 #include "sfc_flow.h" 49 #include "sfc_dp.h" 50 #include "sfc_dp_rx.h" 51 52 static struct sfc_dp_list sfc_dp_head = 53 TAILQ_HEAD_INITIALIZER(sfc_dp_head); 54 55 static int 56 sfc_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) 57 { 58 struct sfc_adapter *sa = dev->data->dev_private; 59 efx_nic_fw_info_t enfi; 60 int ret; 61 int rc; 62 63 /* 64 * Return value of the callback is likely supposed to be 65 * equal to or greater than 0, nevertheless, if an error 66 * occurs, it will be desirable to pass it to the caller 67 */ 68 if ((fw_version == NULL) || (fw_size == 0)) 69 return -EINVAL; 70 71 rc = efx_nic_get_fw_version(sa->nic, &enfi); 72 if (rc != 0) 73 return -rc; 74 75 ret = snprintf(fw_version, fw_size, 76 "%" PRIu16 ".%" PRIu16 ".%" PRIu16 ".%" PRIu16, 77 enfi.enfi_mc_fw_version[0], enfi.enfi_mc_fw_version[1], 78 enfi.enfi_mc_fw_version[2], enfi.enfi_mc_fw_version[3]); 79 if (ret < 0) 80 return ret; 81 82 if (enfi.enfi_dpcpu_fw_ids_valid) { 83 size_t dpcpu_fw_ids_offset = MIN(fw_size - 1, (size_t)ret); 84 int ret_extra; 85 86 ret_extra = snprintf(fw_version + dpcpu_fw_ids_offset, 87 fw_size - dpcpu_fw_ids_offset, 88 " rx%" PRIx16 " tx%" PRIx16, 89 enfi.enfi_rx_dpcpu_fw_id, 90 enfi.enfi_tx_dpcpu_fw_id); 91 if (ret_extra < 0) 92 return ret_extra; 93 94 ret += ret_extra; 95 } 96 97 if (fw_size < (size_t)(++ret)) 98 return ret; 99 else 100 return 0; 101 } 102 103 static void 104 sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 105 { 106 struct sfc_adapter *sa = dev->data->dev_private; 107 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); 108 109 sfc_log_init(sa, "entry"); 110 111 dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); 112 dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX; 113 114 /* Autonegotiation may be disabled */ 115 dev_info->speed_capa = ETH_LINK_SPEED_FIXED; 116 if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_1000FDX) 117 dev_info->speed_capa |= ETH_LINK_SPEED_1G; 118 if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_10000FDX) 119 dev_info->speed_capa |= ETH_LINK_SPEED_10G; 120 if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_40000FDX) 121 dev_info->speed_capa |= ETH_LINK_SPEED_40G; 122 123 dev_info->max_rx_queues = sa->rxq_max; 124 dev_info->max_tx_queues = sa->txq_max; 125 126 /* By default packets are dropped if no descriptors are available */ 127 dev_info->default_rxconf.rx_drop_en = 1; 128 129 dev_info->rx_offload_capa = 130 DEV_RX_OFFLOAD_IPV4_CKSUM | 131 DEV_RX_OFFLOAD_UDP_CKSUM | 132 DEV_RX_OFFLOAD_TCP_CKSUM; 133 134 dev_info->tx_offload_capa = 135 DEV_TX_OFFLOAD_IPV4_CKSUM | 136 DEV_TX_OFFLOAD_UDP_CKSUM | 137 DEV_TX_OFFLOAD_TCP_CKSUM; 138 139 dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOXSUMSCTP; 140 if ((~sa->dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) || 141 !encp->enc_hw_tx_insert_vlan_enabled) 142 dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOVLANOFFL; 143 else 144 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_VLAN_INSERT; 145 146 if (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG) 147 dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS; 148 149 if (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_POOL) 150 dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOMULTMEMP; 151 152 if (~sa->dp_tx->features & SFC_DP_TX_FEAT_REFCNT) 153 dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOREFCOUNT; 154 155 #if EFSYS_OPT_RX_SCALE 156 if (sa->rss_support != EFX_RX_SCALE_UNAVAILABLE) { 157 dev_info->reta_size = EFX_RSS_TBL_SIZE; 158 dev_info->hash_key_size = EFX_RSS_KEY_SIZE; 159 dev_info->flow_type_rss_offloads = SFC_RSS_OFFLOADS; 160 } 161 #endif 162 163 if (sa->tso) 164 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO; 165 166 dev_info->rx_desc_lim.nb_max = EFX_RXQ_MAXNDESCS; 167 dev_info->rx_desc_lim.nb_min = EFX_RXQ_MINNDESCS; 168 /* The RXQ hardware requires that the descriptor count is a power 169 * of 2, but rx_desc_lim cannot properly describe that constraint. 170 */ 171 dev_info->rx_desc_lim.nb_align = EFX_RXQ_MINNDESCS; 172 173 dev_info->tx_desc_lim.nb_max = sa->txq_max_entries; 174 dev_info->tx_desc_lim.nb_min = EFX_TXQ_MINNDESCS; 175 /* 176 * The TXQ hardware requires that the descriptor count is a power 177 * of 2, but tx_desc_lim cannot properly describe that constraint 178 */ 179 dev_info->tx_desc_lim.nb_align = EFX_TXQ_MINNDESCS; 180 } 181 182 static const uint32_t * 183 sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev) 184 { 185 struct sfc_adapter *sa = dev->data->dev_private; 186 187 return sa->dp_rx->supported_ptypes_get(); 188 } 189 190 static int 191 sfc_dev_configure(struct rte_eth_dev *dev) 192 { 193 struct rte_eth_dev_data *dev_data = dev->data; 194 struct sfc_adapter *sa = dev_data->dev_private; 195 int rc; 196 197 sfc_log_init(sa, "entry n_rxq=%u n_txq=%u", 198 dev_data->nb_rx_queues, dev_data->nb_tx_queues); 199 200 sfc_adapter_lock(sa); 201 switch (sa->state) { 202 case SFC_ADAPTER_CONFIGURED: 203 /* FALLTHROUGH */ 204 case SFC_ADAPTER_INITIALIZED: 205 rc = sfc_configure(sa); 206 break; 207 default: 208 sfc_err(sa, "unexpected adapter state %u to configure", 209 sa->state); 210 rc = EINVAL; 211 break; 212 } 213 sfc_adapter_unlock(sa); 214 215 sfc_log_init(sa, "done %d", rc); 216 SFC_ASSERT(rc >= 0); 217 return -rc; 218 } 219 220 static int 221 sfc_dev_start(struct rte_eth_dev *dev) 222 { 223 struct sfc_adapter *sa = dev->data->dev_private; 224 int rc; 225 226 sfc_log_init(sa, "entry"); 227 228 sfc_adapter_lock(sa); 229 rc = sfc_start(sa); 230 sfc_adapter_unlock(sa); 231 232 sfc_log_init(sa, "done %d", rc); 233 SFC_ASSERT(rc >= 0); 234 return -rc; 235 } 236 237 static int 238 sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) 239 { 240 struct sfc_adapter *sa = dev->data->dev_private; 241 struct rte_eth_link *dev_link = &dev->data->dev_link; 242 struct rte_eth_link old_link; 243 struct rte_eth_link current_link; 244 245 sfc_log_init(sa, "entry"); 246 247 retry: 248 EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t)); 249 *(int64_t *)&old_link = rte_atomic64_read((rte_atomic64_t *)dev_link); 250 251 if (sa->state != SFC_ADAPTER_STARTED) { 252 sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, ¤t_link); 253 if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link, 254 *(uint64_t *)&old_link, 255 *(uint64_t *)¤t_link)) 256 goto retry; 257 } else if (wait_to_complete) { 258 efx_link_mode_t link_mode; 259 260 if (efx_port_poll(sa->nic, &link_mode) != 0) 261 link_mode = EFX_LINK_UNKNOWN; 262 sfc_port_link_mode_to_info(link_mode, ¤t_link); 263 264 if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link, 265 *(uint64_t *)&old_link, 266 *(uint64_t *)¤t_link)) 267 goto retry; 268 } else { 269 sfc_ev_mgmt_qpoll(sa); 270 *(int64_t *)¤t_link = 271 rte_atomic64_read((rte_atomic64_t *)dev_link); 272 } 273 274 if (old_link.link_status != current_link.link_status) 275 sfc_info(sa, "Link status is %s", 276 current_link.link_status ? "UP" : "DOWN"); 277 278 return old_link.link_status == current_link.link_status ? 0 : -1; 279 } 280 281 static void 282 sfc_dev_stop(struct rte_eth_dev *dev) 283 { 284 struct sfc_adapter *sa = dev->data->dev_private; 285 286 sfc_log_init(sa, "entry"); 287 288 sfc_adapter_lock(sa); 289 sfc_stop(sa); 290 sfc_adapter_unlock(sa); 291 292 sfc_log_init(sa, "done"); 293 } 294 295 static int 296 sfc_dev_set_link_up(struct rte_eth_dev *dev) 297 { 298 struct sfc_adapter *sa = dev->data->dev_private; 299 int rc; 300 301 sfc_log_init(sa, "entry"); 302 303 sfc_adapter_lock(sa); 304 rc = sfc_start(sa); 305 sfc_adapter_unlock(sa); 306 307 SFC_ASSERT(rc >= 0); 308 return -rc; 309 } 310 311 static int 312 sfc_dev_set_link_down(struct rte_eth_dev *dev) 313 { 314 struct sfc_adapter *sa = dev->data->dev_private; 315 316 sfc_log_init(sa, "entry"); 317 318 sfc_adapter_lock(sa); 319 sfc_stop(sa); 320 sfc_adapter_unlock(sa); 321 322 return 0; 323 } 324 325 static void 326 sfc_dev_close(struct rte_eth_dev *dev) 327 { 328 struct sfc_adapter *sa = dev->data->dev_private; 329 330 sfc_log_init(sa, "entry"); 331 332 sfc_adapter_lock(sa); 333 switch (sa->state) { 334 case SFC_ADAPTER_STARTED: 335 sfc_stop(sa); 336 SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED); 337 /* FALLTHROUGH */ 338 case SFC_ADAPTER_CONFIGURED: 339 sfc_close(sa); 340 SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED); 341 /* FALLTHROUGH */ 342 case SFC_ADAPTER_INITIALIZED: 343 break; 344 default: 345 sfc_err(sa, "unexpected adapter state %u on close", sa->state); 346 break; 347 } 348 sfc_adapter_unlock(sa); 349 350 sfc_log_init(sa, "done"); 351 } 352 353 static void 354 sfc_dev_filter_set(struct rte_eth_dev *dev, enum sfc_dev_filter_mode mode, 355 boolean_t enabled) 356 { 357 struct sfc_port *port; 358 boolean_t *toggle; 359 struct sfc_adapter *sa = dev->data->dev_private; 360 boolean_t allmulti = (mode == SFC_DEV_FILTER_MODE_ALLMULTI); 361 const char *desc = (allmulti) ? "all-multi" : "promiscuous"; 362 363 sfc_adapter_lock(sa); 364 365 port = &sa->port; 366 toggle = (allmulti) ? (&port->allmulti) : (&port->promisc); 367 368 if (*toggle != enabled) { 369 *toggle = enabled; 370 371 if (port->isolated) { 372 sfc_warn(sa, "isolated mode is active on the port"); 373 sfc_warn(sa, "the change is to be applied on the next " 374 "start provided that isolated mode is " 375 "disabled prior the next start"); 376 } else if ((sa->state == SFC_ADAPTER_STARTED) && 377 (sfc_set_rx_mode(sa) != 0)) { 378 *toggle = !(enabled); 379 sfc_warn(sa, "Failed to %s %s mode", 380 ((enabled) ? "enable" : "disable"), desc); 381 } 382 } 383 384 sfc_adapter_unlock(sa); 385 } 386 387 static void 388 sfc_dev_promisc_enable(struct rte_eth_dev *dev) 389 { 390 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_TRUE); 391 } 392 393 static void 394 sfc_dev_promisc_disable(struct rte_eth_dev *dev) 395 { 396 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_FALSE); 397 } 398 399 static void 400 sfc_dev_allmulti_enable(struct rte_eth_dev *dev) 401 { 402 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_TRUE); 403 } 404 405 static void 406 sfc_dev_allmulti_disable(struct rte_eth_dev *dev) 407 { 408 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_FALSE); 409 } 410 411 static int 412 sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, 413 uint16_t nb_rx_desc, unsigned int socket_id, 414 const struct rte_eth_rxconf *rx_conf, 415 struct rte_mempool *mb_pool) 416 { 417 struct sfc_adapter *sa = dev->data->dev_private; 418 int rc; 419 420 sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u", 421 rx_queue_id, nb_rx_desc, socket_id); 422 423 sfc_adapter_lock(sa); 424 425 rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id, 426 rx_conf, mb_pool); 427 if (rc != 0) 428 goto fail_rx_qinit; 429 430 dev->data->rx_queues[rx_queue_id] = sa->rxq_info[rx_queue_id].rxq->dp; 431 432 sfc_adapter_unlock(sa); 433 434 return 0; 435 436 fail_rx_qinit: 437 sfc_adapter_unlock(sa); 438 SFC_ASSERT(rc > 0); 439 return -rc; 440 } 441 442 static void 443 sfc_rx_queue_release(void *queue) 444 { 445 struct sfc_dp_rxq *dp_rxq = queue; 446 struct sfc_rxq *rxq; 447 struct sfc_adapter *sa; 448 unsigned int sw_index; 449 450 if (dp_rxq == NULL) 451 return; 452 453 rxq = sfc_rxq_by_dp_rxq(dp_rxq); 454 sa = rxq->evq->sa; 455 sfc_adapter_lock(sa); 456 457 sw_index = sfc_rxq_sw_index(rxq); 458 459 sfc_log_init(sa, "RxQ=%u", sw_index); 460 461 sa->eth_dev->data->rx_queues[sw_index] = NULL; 462 463 sfc_rx_qfini(sa, sw_index); 464 465 sfc_adapter_unlock(sa); 466 } 467 468 static int 469 sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, 470 uint16_t nb_tx_desc, unsigned int socket_id, 471 const struct rte_eth_txconf *tx_conf) 472 { 473 struct sfc_adapter *sa = dev->data->dev_private; 474 int rc; 475 476 sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u", 477 tx_queue_id, nb_tx_desc, socket_id); 478 479 sfc_adapter_lock(sa); 480 481 rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf); 482 if (rc != 0) 483 goto fail_tx_qinit; 484 485 dev->data->tx_queues[tx_queue_id] = sa->txq_info[tx_queue_id].txq->dp; 486 487 sfc_adapter_unlock(sa); 488 return 0; 489 490 fail_tx_qinit: 491 sfc_adapter_unlock(sa); 492 SFC_ASSERT(rc > 0); 493 return -rc; 494 } 495 496 static void 497 sfc_tx_queue_release(void *queue) 498 { 499 struct sfc_dp_txq *dp_txq = queue; 500 struct sfc_txq *txq; 501 unsigned int sw_index; 502 struct sfc_adapter *sa; 503 504 if (dp_txq == NULL) 505 return; 506 507 txq = sfc_txq_by_dp_txq(dp_txq); 508 sw_index = sfc_txq_sw_index(txq); 509 510 SFC_ASSERT(txq->evq != NULL); 511 sa = txq->evq->sa; 512 513 sfc_log_init(sa, "TxQ = %u", sw_index); 514 515 sfc_adapter_lock(sa); 516 517 SFC_ASSERT(sw_index < sa->eth_dev->data->nb_tx_queues); 518 sa->eth_dev->data->tx_queues[sw_index] = NULL; 519 520 sfc_tx_qfini(sa, sw_index); 521 522 sfc_adapter_unlock(sa); 523 } 524 525 static int 526 sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 527 { 528 struct sfc_adapter *sa = dev->data->dev_private; 529 struct sfc_port *port = &sa->port; 530 uint64_t *mac_stats; 531 int ret; 532 533 rte_spinlock_lock(&port->mac_stats_lock); 534 535 ret = sfc_port_update_mac_stats(sa); 536 if (ret != 0) 537 goto unlock; 538 539 mac_stats = port->mac_stats_buf; 540 541 if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, 542 EFX_MAC_VADAPTER_RX_UNICAST_PACKETS)) { 543 stats->ipackets = 544 mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS] + 545 mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS] + 546 mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS]; 547 stats->opackets = 548 mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS] + 549 mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS] + 550 mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS]; 551 stats->ibytes = 552 mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_BYTES] + 553 mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES] + 554 mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES]; 555 stats->obytes = 556 mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_BYTES] + 557 mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES] + 558 mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES]; 559 stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_OVERFLOW]; 560 stats->ierrors = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS]; 561 stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS]; 562 } else { 563 stats->ipackets = mac_stats[EFX_MAC_RX_PKTS]; 564 stats->opackets = mac_stats[EFX_MAC_TX_PKTS]; 565 stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS]; 566 stats->obytes = mac_stats[EFX_MAC_TX_OCTETS]; 567 /* 568 * Take into account stats which are whenever supported 569 * on EF10. If some stat is not supported by current 570 * firmware variant or HW revision, it is guaranteed 571 * to be zero in mac_stats. 572 */ 573 stats->imissed = 574 mac_stats[EFX_MAC_RX_NODESC_DROP_CNT] + 575 mac_stats[EFX_MAC_PM_TRUNC_BB_OVERFLOW] + 576 mac_stats[EFX_MAC_PM_DISCARD_BB_OVERFLOW] + 577 mac_stats[EFX_MAC_PM_TRUNC_VFIFO_FULL] + 578 mac_stats[EFX_MAC_PM_DISCARD_VFIFO_FULL] + 579 mac_stats[EFX_MAC_PM_TRUNC_QBB] + 580 mac_stats[EFX_MAC_PM_DISCARD_QBB] + 581 mac_stats[EFX_MAC_PM_DISCARD_MAPPING] + 582 mac_stats[EFX_MAC_RXDP_Q_DISABLED_PKTS] + 583 mac_stats[EFX_MAC_RXDP_DI_DROPPED_PKTS]; 584 stats->ierrors = 585 mac_stats[EFX_MAC_RX_FCS_ERRORS] + 586 mac_stats[EFX_MAC_RX_ALIGN_ERRORS] + 587 mac_stats[EFX_MAC_RX_JABBER_PKTS]; 588 /* no oerrors counters supported on EF10 */ 589 } 590 591 unlock: 592 rte_spinlock_unlock(&port->mac_stats_lock); 593 SFC_ASSERT(ret >= 0); 594 return -ret; 595 } 596 597 static void 598 sfc_stats_reset(struct rte_eth_dev *dev) 599 { 600 struct sfc_adapter *sa = dev->data->dev_private; 601 struct sfc_port *port = &sa->port; 602 int rc; 603 604 if (sa->state != SFC_ADAPTER_STARTED) { 605 /* 606 * The operation cannot be done if port is not started; it 607 * will be scheduled to be done during the next port start 608 */ 609 port->mac_stats_reset_pending = B_TRUE; 610 return; 611 } 612 613 rc = sfc_port_reset_mac_stats(sa); 614 if (rc != 0) 615 sfc_err(sa, "failed to reset statistics (rc = %d)", rc); 616 } 617 618 static int 619 sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 620 unsigned int xstats_count) 621 { 622 struct sfc_adapter *sa = dev->data->dev_private; 623 struct sfc_port *port = &sa->port; 624 uint64_t *mac_stats; 625 int rc; 626 unsigned int i; 627 int nstats = 0; 628 629 rte_spinlock_lock(&port->mac_stats_lock); 630 631 rc = sfc_port_update_mac_stats(sa); 632 if (rc != 0) { 633 SFC_ASSERT(rc > 0); 634 nstats = -rc; 635 goto unlock; 636 } 637 638 mac_stats = port->mac_stats_buf; 639 640 for (i = 0; i < EFX_MAC_NSTATS; ++i) { 641 if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) { 642 if (xstats != NULL && nstats < (int)xstats_count) { 643 xstats[nstats].id = nstats; 644 xstats[nstats].value = mac_stats[i]; 645 } 646 nstats++; 647 } 648 } 649 650 unlock: 651 rte_spinlock_unlock(&port->mac_stats_lock); 652 653 return nstats; 654 } 655 656 static int 657 sfc_xstats_get_names(struct rte_eth_dev *dev, 658 struct rte_eth_xstat_name *xstats_names, 659 unsigned int xstats_count) 660 { 661 struct sfc_adapter *sa = dev->data->dev_private; 662 struct sfc_port *port = &sa->port; 663 unsigned int i; 664 unsigned int nstats = 0; 665 666 for (i = 0; i < EFX_MAC_NSTATS; ++i) { 667 if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) { 668 if (xstats_names != NULL && nstats < xstats_count) 669 strncpy(xstats_names[nstats].name, 670 efx_mac_stat_name(sa->nic, i), 671 sizeof(xstats_names[0].name)); 672 nstats++; 673 } 674 } 675 676 return nstats; 677 } 678 679 static int 680 sfc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 681 uint64_t *values, unsigned int n) 682 { 683 struct sfc_adapter *sa = dev->data->dev_private; 684 struct sfc_port *port = &sa->port; 685 uint64_t *mac_stats; 686 unsigned int nb_supported = 0; 687 unsigned int nb_written = 0; 688 unsigned int i; 689 int ret; 690 int rc; 691 692 if (unlikely(values == NULL) || 693 unlikely((ids == NULL) && (n < port->mac_stats_nb_supported))) 694 return port->mac_stats_nb_supported; 695 696 rte_spinlock_lock(&port->mac_stats_lock); 697 698 rc = sfc_port_update_mac_stats(sa); 699 if (rc != 0) { 700 SFC_ASSERT(rc > 0); 701 ret = -rc; 702 goto unlock; 703 } 704 705 mac_stats = port->mac_stats_buf; 706 707 for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < n); ++i) { 708 if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) 709 continue; 710 711 if ((ids == NULL) || (ids[nb_written] == nb_supported)) 712 values[nb_written++] = mac_stats[i]; 713 714 ++nb_supported; 715 } 716 717 ret = nb_written; 718 719 unlock: 720 rte_spinlock_unlock(&port->mac_stats_lock); 721 722 return ret; 723 } 724 725 static int 726 sfc_xstats_get_names_by_id(struct rte_eth_dev *dev, 727 struct rte_eth_xstat_name *xstats_names, 728 const uint64_t *ids, unsigned int size) 729 { 730 struct sfc_adapter *sa = dev->data->dev_private; 731 struct sfc_port *port = &sa->port; 732 unsigned int nb_supported = 0; 733 unsigned int nb_written = 0; 734 unsigned int i; 735 736 if (unlikely(xstats_names == NULL) || 737 unlikely((ids == NULL) && (size < port->mac_stats_nb_supported))) 738 return port->mac_stats_nb_supported; 739 740 for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < size); ++i) { 741 if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) 742 continue; 743 744 if ((ids == NULL) || (ids[nb_written] == nb_supported)) { 745 char *name = xstats_names[nb_written++].name; 746 747 strncpy(name, efx_mac_stat_name(sa->nic, i), 748 sizeof(xstats_names[0].name)); 749 name[sizeof(xstats_names[0].name) - 1] = '\0'; 750 } 751 752 ++nb_supported; 753 } 754 755 return nb_written; 756 } 757 758 static int 759 sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 760 { 761 struct sfc_adapter *sa = dev->data->dev_private; 762 unsigned int wanted_fc, link_fc; 763 764 memset(fc_conf, 0, sizeof(*fc_conf)); 765 766 sfc_adapter_lock(sa); 767 768 if (sa->state == SFC_ADAPTER_STARTED) 769 efx_mac_fcntl_get(sa->nic, &wanted_fc, &link_fc); 770 else 771 link_fc = sa->port.flow_ctrl; 772 773 switch (link_fc) { 774 case 0: 775 fc_conf->mode = RTE_FC_NONE; 776 break; 777 case EFX_FCNTL_RESPOND: 778 fc_conf->mode = RTE_FC_RX_PAUSE; 779 break; 780 case EFX_FCNTL_GENERATE: 781 fc_conf->mode = RTE_FC_TX_PAUSE; 782 break; 783 case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE): 784 fc_conf->mode = RTE_FC_FULL; 785 break; 786 default: 787 sfc_err(sa, "%s: unexpected flow control value %#x", 788 __func__, link_fc); 789 } 790 791 fc_conf->autoneg = sa->port.flow_ctrl_autoneg; 792 793 sfc_adapter_unlock(sa); 794 795 return 0; 796 } 797 798 static int 799 sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 800 { 801 struct sfc_adapter *sa = dev->data->dev_private; 802 struct sfc_port *port = &sa->port; 803 unsigned int fcntl; 804 int rc; 805 806 if (fc_conf->high_water != 0 || fc_conf->low_water != 0 || 807 fc_conf->pause_time != 0 || fc_conf->send_xon != 0 || 808 fc_conf->mac_ctrl_frame_fwd != 0) { 809 sfc_err(sa, "unsupported flow control settings specified"); 810 rc = EINVAL; 811 goto fail_inval; 812 } 813 814 switch (fc_conf->mode) { 815 case RTE_FC_NONE: 816 fcntl = 0; 817 break; 818 case RTE_FC_RX_PAUSE: 819 fcntl = EFX_FCNTL_RESPOND; 820 break; 821 case RTE_FC_TX_PAUSE: 822 fcntl = EFX_FCNTL_GENERATE; 823 break; 824 case RTE_FC_FULL: 825 fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE; 826 break; 827 default: 828 rc = EINVAL; 829 goto fail_inval; 830 } 831 832 sfc_adapter_lock(sa); 833 834 if (sa->state == SFC_ADAPTER_STARTED) { 835 rc = efx_mac_fcntl_set(sa->nic, fcntl, fc_conf->autoneg); 836 if (rc != 0) 837 goto fail_mac_fcntl_set; 838 } 839 840 port->flow_ctrl = fcntl; 841 port->flow_ctrl_autoneg = fc_conf->autoneg; 842 843 sfc_adapter_unlock(sa); 844 845 return 0; 846 847 fail_mac_fcntl_set: 848 sfc_adapter_unlock(sa); 849 fail_inval: 850 SFC_ASSERT(rc > 0); 851 return -rc; 852 } 853 854 static int 855 sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) 856 { 857 struct sfc_adapter *sa = dev->data->dev_private; 858 size_t pdu = EFX_MAC_PDU(mtu); 859 size_t old_pdu; 860 int rc; 861 862 sfc_log_init(sa, "mtu=%u", mtu); 863 864 rc = EINVAL; 865 if (pdu < EFX_MAC_PDU_MIN) { 866 sfc_err(sa, "too small MTU %u (PDU size %u less than min %u)", 867 (unsigned int)mtu, (unsigned int)pdu, 868 EFX_MAC_PDU_MIN); 869 goto fail_inval; 870 } 871 if (pdu > EFX_MAC_PDU_MAX) { 872 sfc_err(sa, "too big MTU %u (PDU size %u greater than max %u)", 873 (unsigned int)mtu, (unsigned int)pdu, 874 EFX_MAC_PDU_MAX); 875 goto fail_inval; 876 } 877 878 sfc_adapter_lock(sa); 879 880 if (pdu != sa->port.pdu) { 881 if (sa->state == SFC_ADAPTER_STARTED) { 882 sfc_stop(sa); 883 884 old_pdu = sa->port.pdu; 885 sa->port.pdu = pdu; 886 rc = sfc_start(sa); 887 if (rc != 0) 888 goto fail_start; 889 } else { 890 sa->port.pdu = pdu; 891 } 892 } 893 894 /* 895 * The driver does not use it, but other PMDs update jumbo_frame 896 * flag and max_rx_pkt_len when MTU is set. 897 */ 898 dev->data->dev_conf.rxmode.jumbo_frame = (mtu > ETHER_MAX_LEN); 899 dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu; 900 901 sfc_adapter_unlock(sa); 902 903 sfc_log_init(sa, "done"); 904 return 0; 905 906 fail_start: 907 sa->port.pdu = old_pdu; 908 if (sfc_start(sa) != 0) 909 sfc_err(sa, "cannot start with neither new (%u) nor old (%u) " 910 "PDU max size - port is stopped", 911 (unsigned int)pdu, (unsigned int)old_pdu); 912 sfc_adapter_unlock(sa); 913 914 fail_inval: 915 sfc_log_init(sa, "failed %d", rc); 916 SFC_ASSERT(rc > 0); 917 return -rc; 918 } 919 static void 920 sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) 921 { 922 struct sfc_adapter *sa = dev->data->dev_private; 923 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); 924 struct sfc_port *port = &sa->port; 925 int rc; 926 927 sfc_adapter_lock(sa); 928 929 /* 930 * Copy the address to the device private data so that 931 * it could be recalled in the case of adapter restart. 932 */ 933 ether_addr_copy(mac_addr, &port->default_mac_addr); 934 935 if (port->isolated) { 936 sfc_err(sa, "isolated mode is active on the port"); 937 sfc_err(sa, "will not set MAC address"); 938 goto unlock; 939 } 940 941 if (sa->state != SFC_ADAPTER_STARTED) { 942 sfc_info(sa, "the port is not started"); 943 sfc_info(sa, "the new MAC address will be set on port start"); 944 945 goto unlock; 946 } 947 948 if (encp->enc_allow_set_mac_with_installed_filters) { 949 rc = efx_mac_addr_set(sa->nic, mac_addr->addr_bytes); 950 if (rc != 0) { 951 sfc_err(sa, "cannot set MAC address (rc = %u)", rc); 952 goto unlock; 953 } 954 955 /* 956 * Changing the MAC address by means of MCDI request 957 * has no effect on received traffic, therefore 958 * we also need to update unicast filters 959 */ 960 rc = sfc_set_rx_mode(sa); 961 if (rc != 0) 962 sfc_err(sa, "cannot set filter (rc = %u)", rc); 963 } else { 964 sfc_warn(sa, "cannot set MAC address with filters installed"); 965 sfc_warn(sa, "adapter will be restarted to pick the new MAC"); 966 sfc_warn(sa, "(some traffic may be dropped)"); 967 968 /* 969 * Since setting MAC address with filters installed is not 970 * allowed on the adapter, the new MAC address will be set 971 * by means of adapter restart. sfc_start() shall retrieve 972 * the new address from the device private data and set it. 973 */ 974 sfc_stop(sa); 975 rc = sfc_start(sa); 976 if (rc != 0) 977 sfc_err(sa, "cannot restart adapter (rc = %u)", rc); 978 } 979 980 unlock: 981 /* 982 * In the case of failure sa->port->default_mac_addr does not 983 * need rollback since no error code is returned, and the upper 984 * API will anyway update the external MAC address storage. 985 * To be consistent with that new value it is better to keep 986 * the device private value the same. 987 */ 988 sfc_adapter_unlock(sa); 989 } 990 991 992 static int 993 sfc_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set, 994 uint32_t nb_mc_addr) 995 { 996 struct sfc_adapter *sa = dev->data->dev_private; 997 struct sfc_port *port = &sa->port; 998 uint8_t *mc_addrs = port->mcast_addrs; 999 int rc; 1000 unsigned int i; 1001 1002 if (port->isolated) { 1003 sfc_err(sa, "isolated mode is active on the port"); 1004 sfc_err(sa, "will not set multicast address list"); 1005 return -ENOTSUP; 1006 } 1007 1008 if (mc_addrs == NULL) 1009 return -ENOBUFS; 1010 1011 if (nb_mc_addr > port->max_mcast_addrs) { 1012 sfc_err(sa, "too many multicast addresses: %u > %u", 1013 nb_mc_addr, port->max_mcast_addrs); 1014 return -EINVAL; 1015 } 1016 1017 for (i = 0; i < nb_mc_addr; ++i) { 1018 rte_memcpy(mc_addrs, mc_addr_set[i].addr_bytes, 1019 EFX_MAC_ADDR_LEN); 1020 mc_addrs += EFX_MAC_ADDR_LEN; 1021 } 1022 1023 port->nb_mcast_addrs = nb_mc_addr; 1024 1025 if (sa->state != SFC_ADAPTER_STARTED) 1026 return 0; 1027 1028 rc = efx_mac_multicast_list_set(sa->nic, port->mcast_addrs, 1029 port->nb_mcast_addrs); 1030 if (rc != 0) 1031 sfc_err(sa, "cannot set multicast address list (rc = %u)", rc); 1032 1033 SFC_ASSERT(rc > 0); 1034 return -rc; 1035 } 1036 1037 /* 1038 * The function is used by the secondary process as well. It must not 1039 * use any process-local pointers from the adapter data. 1040 */ 1041 static void 1042 sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id, 1043 struct rte_eth_rxq_info *qinfo) 1044 { 1045 struct sfc_adapter *sa = dev->data->dev_private; 1046 struct sfc_rxq_info *rxq_info; 1047 struct sfc_rxq *rxq; 1048 1049 sfc_adapter_lock(sa); 1050 1051 SFC_ASSERT(rx_queue_id < sa->rxq_count); 1052 1053 rxq_info = &sa->rxq_info[rx_queue_id]; 1054 rxq = rxq_info->rxq; 1055 SFC_ASSERT(rxq != NULL); 1056 1057 qinfo->mp = rxq->refill_mb_pool; 1058 qinfo->conf.rx_free_thresh = rxq->refill_threshold; 1059 qinfo->conf.rx_drop_en = 1; 1060 qinfo->conf.rx_deferred_start = rxq_info->deferred_start; 1061 qinfo->scattered_rx = 1062 ((rxq_info->type_flags & EFX_RXQ_FLAG_SCATTER) != 0); 1063 qinfo->nb_desc = rxq_info->entries; 1064 1065 sfc_adapter_unlock(sa); 1066 } 1067 1068 /* 1069 * The function is used by the secondary process as well. It must not 1070 * use any process-local pointers from the adapter data. 1071 */ 1072 static void 1073 sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id, 1074 struct rte_eth_txq_info *qinfo) 1075 { 1076 struct sfc_adapter *sa = dev->data->dev_private; 1077 struct sfc_txq_info *txq_info; 1078 1079 sfc_adapter_lock(sa); 1080 1081 SFC_ASSERT(tx_queue_id < sa->txq_count); 1082 1083 txq_info = &sa->txq_info[tx_queue_id]; 1084 SFC_ASSERT(txq_info->txq != NULL); 1085 1086 memset(qinfo, 0, sizeof(*qinfo)); 1087 1088 qinfo->conf.txq_flags = txq_info->txq->flags; 1089 qinfo->conf.tx_free_thresh = txq_info->txq->free_thresh; 1090 qinfo->conf.tx_deferred_start = txq_info->deferred_start; 1091 qinfo->nb_desc = txq_info->entries; 1092 1093 sfc_adapter_unlock(sa); 1094 } 1095 1096 static uint32_t 1097 sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) 1098 { 1099 struct sfc_adapter *sa = dev->data->dev_private; 1100 1101 sfc_log_init(sa, "RxQ=%u", rx_queue_id); 1102 1103 return sfc_rx_qdesc_npending(sa, rx_queue_id); 1104 } 1105 1106 static int 1107 sfc_rx_descriptor_done(void *queue, uint16_t offset) 1108 { 1109 struct sfc_dp_rxq *dp_rxq = queue; 1110 1111 return sfc_rx_qdesc_done(dp_rxq, offset); 1112 } 1113 1114 static int 1115 sfc_rx_descriptor_status(void *queue, uint16_t offset) 1116 { 1117 struct sfc_dp_rxq *dp_rxq = queue; 1118 struct sfc_rxq *rxq = sfc_rxq_by_dp_rxq(dp_rxq); 1119 1120 return rxq->evq->sa->dp_rx->qdesc_status(dp_rxq, offset); 1121 } 1122 1123 static int 1124 sfc_tx_descriptor_status(void *queue, uint16_t offset) 1125 { 1126 struct sfc_dp_txq *dp_txq = queue; 1127 struct sfc_txq *txq = sfc_txq_by_dp_txq(dp_txq); 1128 1129 return txq->evq->sa->dp_tx->qdesc_status(dp_txq, offset); 1130 } 1131 1132 static int 1133 sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) 1134 { 1135 struct sfc_adapter *sa = dev->data->dev_private; 1136 int rc; 1137 1138 sfc_log_init(sa, "RxQ=%u", rx_queue_id); 1139 1140 sfc_adapter_lock(sa); 1141 1142 rc = EINVAL; 1143 if (sa->state != SFC_ADAPTER_STARTED) 1144 goto fail_not_started; 1145 1146 rc = sfc_rx_qstart(sa, rx_queue_id); 1147 if (rc != 0) 1148 goto fail_rx_qstart; 1149 1150 sa->rxq_info[rx_queue_id].deferred_started = B_TRUE; 1151 1152 sfc_adapter_unlock(sa); 1153 1154 return 0; 1155 1156 fail_rx_qstart: 1157 fail_not_started: 1158 sfc_adapter_unlock(sa); 1159 SFC_ASSERT(rc > 0); 1160 return -rc; 1161 } 1162 1163 static int 1164 sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) 1165 { 1166 struct sfc_adapter *sa = dev->data->dev_private; 1167 1168 sfc_log_init(sa, "RxQ=%u", rx_queue_id); 1169 1170 sfc_adapter_lock(sa); 1171 sfc_rx_qstop(sa, rx_queue_id); 1172 1173 sa->rxq_info[rx_queue_id].deferred_started = B_FALSE; 1174 1175 sfc_adapter_unlock(sa); 1176 1177 return 0; 1178 } 1179 1180 static int 1181 sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) 1182 { 1183 struct sfc_adapter *sa = dev->data->dev_private; 1184 int rc; 1185 1186 sfc_log_init(sa, "TxQ = %u", tx_queue_id); 1187 1188 sfc_adapter_lock(sa); 1189 1190 rc = EINVAL; 1191 if (sa->state != SFC_ADAPTER_STARTED) 1192 goto fail_not_started; 1193 1194 rc = sfc_tx_qstart(sa, tx_queue_id); 1195 if (rc != 0) 1196 goto fail_tx_qstart; 1197 1198 sa->txq_info[tx_queue_id].deferred_started = B_TRUE; 1199 1200 sfc_adapter_unlock(sa); 1201 return 0; 1202 1203 fail_tx_qstart: 1204 1205 fail_not_started: 1206 sfc_adapter_unlock(sa); 1207 SFC_ASSERT(rc > 0); 1208 return -rc; 1209 } 1210 1211 static int 1212 sfc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) 1213 { 1214 struct sfc_adapter *sa = dev->data->dev_private; 1215 1216 sfc_log_init(sa, "TxQ = %u", tx_queue_id); 1217 1218 sfc_adapter_lock(sa); 1219 1220 sfc_tx_qstop(sa, tx_queue_id); 1221 1222 sa->txq_info[tx_queue_id].deferred_started = B_FALSE; 1223 1224 sfc_adapter_unlock(sa); 1225 return 0; 1226 } 1227 1228 static efx_tunnel_protocol_t 1229 sfc_tunnel_rte_type_to_efx_udp_proto(enum rte_eth_tunnel_type rte_type) 1230 { 1231 switch (rte_type) { 1232 case RTE_TUNNEL_TYPE_VXLAN: 1233 return EFX_TUNNEL_PROTOCOL_VXLAN; 1234 case RTE_TUNNEL_TYPE_GENEVE: 1235 return EFX_TUNNEL_PROTOCOL_GENEVE; 1236 default: 1237 return EFX_TUNNEL_NPROTOS; 1238 } 1239 } 1240 1241 enum sfc_udp_tunnel_op_e { 1242 SFC_UDP_TUNNEL_ADD_PORT, 1243 SFC_UDP_TUNNEL_DEL_PORT, 1244 }; 1245 1246 static int 1247 sfc_dev_udp_tunnel_op(struct rte_eth_dev *dev, 1248 struct rte_eth_udp_tunnel *tunnel_udp, 1249 enum sfc_udp_tunnel_op_e op) 1250 { 1251 struct sfc_adapter *sa = dev->data->dev_private; 1252 efx_tunnel_protocol_t tunnel_proto; 1253 int rc; 1254 1255 sfc_log_init(sa, "%s udp_port=%u prot_type=%u", 1256 (op == SFC_UDP_TUNNEL_ADD_PORT) ? "add" : 1257 (op == SFC_UDP_TUNNEL_DEL_PORT) ? "delete" : "unknown", 1258 tunnel_udp->udp_port, tunnel_udp->prot_type); 1259 1260 tunnel_proto = 1261 sfc_tunnel_rte_type_to_efx_udp_proto(tunnel_udp->prot_type); 1262 if (tunnel_proto >= EFX_TUNNEL_NPROTOS) { 1263 rc = ENOTSUP; 1264 goto fail_bad_proto; 1265 } 1266 1267 sfc_adapter_lock(sa); 1268 1269 switch (op) { 1270 case SFC_UDP_TUNNEL_ADD_PORT: 1271 rc = efx_tunnel_config_udp_add(sa->nic, 1272 tunnel_udp->udp_port, 1273 tunnel_proto); 1274 break; 1275 case SFC_UDP_TUNNEL_DEL_PORT: 1276 rc = efx_tunnel_config_udp_remove(sa->nic, 1277 tunnel_udp->udp_port, 1278 tunnel_proto); 1279 break; 1280 default: 1281 rc = EINVAL; 1282 goto fail_bad_op; 1283 } 1284 1285 if (rc != 0) 1286 goto fail_op; 1287 1288 if (sa->state == SFC_ADAPTER_STARTED) { 1289 rc = efx_tunnel_reconfigure(sa->nic); 1290 if (rc == EAGAIN) { 1291 /* 1292 * Configuration is accepted by FW and MC reboot 1293 * is initiated to apply the changes. MC reboot 1294 * will be handled in a usual way (MC reboot 1295 * event on management event queue and adapter 1296 * restart). 1297 */ 1298 rc = 0; 1299 } else if (rc != 0) { 1300 goto fail_reconfigure; 1301 } 1302 } 1303 1304 sfc_adapter_unlock(sa); 1305 return 0; 1306 1307 fail_reconfigure: 1308 /* Remove/restore entry since the change makes the trouble */ 1309 switch (op) { 1310 case SFC_UDP_TUNNEL_ADD_PORT: 1311 (void)efx_tunnel_config_udp_remove(sa->nic, 1312 tunnel_udp->udp_port, 1313 tunnel_proto); 1314 break; 1315 case SFC_UDP_TUNNEL_DEL_PORT: 1316 (void)efx_tunnel_config_udp_add(sa->nic, 1317 tunnel_udp->udp_port, 1318 tunnel_proto); 1319 break; 1320 } 1321 1322 fail_op: 1323 fail_bad_op: 1324 sfc_adapter_unlock(sa); 1325 1326 fail_bad_proto: 1327 SFC_ASSERT(rc > 0); 1328 return -rc; 1329 } 1330 1331 static int 1332 sfc_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, 1333 struct rte_eth_udp_tunnel *tunnel_udp) 1334 { 1335 return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_ADD_PORT); 1336 } 1337 1338 static int 1339 sfc_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, 1340 struct rte_eth_udp_tunnel *tunnel_udp) 1341 { 1342 return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_DEL_PORT); 1343 } 1344 1345 #if EFSYS_OPT_RX_SCALE 1346 static int 1347 sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev, 1348 struct rte_eth_rss_conf *rss_conf) 1349 { 1350 struct sfc_adapter *sa = dev->data->dev_private; 1351 struct sfc_port *port = &sa->port; 1352 1353 if ((sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) || port->isolated) 1354 return -ENOTSUP; 1355 1356 if (sa->rss_channels == 0) 1357 return -EINVAL; 1358 1359 sfc_adapter_lock(sa); 1360 1361 /* 1362 * Mapping of hash configuration between RTE and EFX is not one-to-one, 1363 * hence, conversion is done here to derive a correct set of ETH_RSS 1364 * flags which corresponds to the active EFX configuration stored 1365 * locally in 'sfc_adapter' and kept up-to-date 1366 */ 1367 rss_conf->rss_hf = sfc_efx_to_rte_hash_type(sa->rss_hash_types); 1368 rss_conf->rss_key_len = EFX_RSS_KEY_SIZE; 1369 if (rss_conf->rss_key != NULL) 1370 rte_memcpy(rss_conf->rss_key, sa->rss_key, EFX_RSS_KEY_SIZE); 1371 1372 sfc_adapter_unlock(sa); 1373 1374 return 0; 1375 } 1376 1377 static int 1378 sfc_dev_rss_hash_update(struct rte_eth_dev *dev, 1379 struct rte_eth_rss_conf *rss_conf) 1380 { 1381 struct sfc_adapter *sa = dev->data->dev_private; 1382 struct sfc_port *port = &sa->port; 1383 unsigned int efx_hash_types; 1384 int rc = 0; 1385 1386 if (port->isolated) 1387 return -ENOTSUP; 1388 1389 if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) { 1390 sfc_err(sa, "RSS is not available"); 1391 return -ENOTSUP; 1392 } 1393 1394 if (sa->rss_channels == 0) { 1395 sfc_err(sa, "RSS is not configured"); 1396 return -EINVAL; 1397 } 1398 1399 if ((rss_conf->rss_key != NULL) && 1400 (rss_conf->rss_key_len != sizeof(sa->rss_key))) { 1401 sfc_err(sa, "RSS key size is wrong (should be %lu)", 1402 sizeof(sa->rss_key)); 1403 return -EINVAL; 1404 } 1405 1406 if ((rss_conf->rss_hf & ~SFC_RSS_OFFLOADS) != 0) { 1407 sfc_err(sa, "unsupported hash functions requested"); 1408 return -EINVAL; 1409 } 1410 1411 sfc_adapter_lock(sa); 1412 1413 efx_hash_types = sfc_rte_to_efx_hash_type(rss_conf->rss_hf); 1414 1415 rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT, 1416 EFX_RX_HASHALG_TOEPLITZ, 1417 efx_hash_types, B_TRUE); 1418 if (rc != 0) 1419 goto fail_scale_mode_set; 1420 1421 if (rss_conf->rss_key != NULL) { 1422 if (sa->state == SFC_ADAPTER_STARTED) { 1423 rc = efx_rx_scale_key_set(sa->nic, 1424 EFX_RSS_CONTEXT_DEFAULT, 1425 rss_conf->rss_key, 1426 sizeof(sa->rss_key)); 1427 if (rc != 0) 1428 goto fail_scale_key_set; 1429 } 1430 1431 rte_memcpy(sa->rss_key, rss_conf->rss_key, sizeof(sa->rss_key)); 1432 } 1433 1434 sa->rss_hash_types = efx_hash_types; 1435 1436 sfc_adapter_unlock(sa); 1437 1438 return 0; 1439 1440 fail_scale_key_set: 1441 if (efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT, 1442 EFX_RX_HASHALG_TOEPLITZ, 1443 sa->rss_hash_types, B_TRUE) != 0) 1444 sfc_err(sa, "failed to restore RSS mode"); 1445 1446 fail_scale_mode_set: 1447 sfc_adapter_unlock(sa); 1448 return -rc; 1449 } 1450 1451 static int 1452 sfc_dev_rss_reta_query(struct rte_eth_dev *dev, 1453 struct rte_eth_rss_reta_entry64 *reta_conf, 1454 uint16_t reta_size) 1455 { 1456 struct sfc_adapter *sa = dev->data->dev_private; 1457 struct sfc_port *port = &sa->port; 1458 int entry; 1459 1460 if ((sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) || port->isolated) 1461 return -ENOTSUP; 1462 1463 if (sa->rss_channels == 0) 1464 return -EINVAL; 1465 1466 if (reta_size != EFX_RSS_TBL_SIZE) 1467 return -EINVAL; 1468 1469 sfc_adapter_lock(sa); 1470 1471 for (entry = 0; entry < reta_size; entry++) { 1472 int grp = entry / RTE_RETA_GROUP_SIZE; 1473 int grp_idx = entry % RTE_RETA_GROUP_SIZE; 1474 1475 if ((reta_conf[grp].mask >> grp_idx) & 1) 1476 reta_conf[grp].reta[grp_idx] = sa->rss_tbl[entry]; 1477 } 1478 1479 sfc_adapter_unlock(sa); 1480 1481 return 0; 1482 } 1483 1484 static int 1485 sfc_dev_rss_reta_update(struct rte_eth_dev *dev, 1486 struct rte_eth_rss_reta_entry64 *reta_conf, 1487 uint16_t reta_size) 1488 { 1489 struct sfc_adapter *sa = dev->data->dev_private; 1490 struct sfc_port *port = &sa->port; 1491 unsigned int *rss_tbl_new; 1492 uint16_t entry; 1493 int rc = 0; 1494 1495 1496 if (port->isolated) 1497 return -ENOTSUP; 1498 1499 if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) { 1500 sfc_err(sa, "RSS is not available"); 1501 return -ENOTSUP; 1502 } 1503 1504 if (sa->rss_channels == 0) { 1505 sfc_err(sa, "RSS is not configured"); 1506 return -EINVAL; 1507 } 1508 1509 if (reta_size != EFX_RSS_TBL_SIZE) { 1510 sfc_err(sa, "RETA size is wrong (should be %u)", 1511 EFX_RSS_TBL_SIZE); 1512 return -EINVAL; 1513 } 1514 1515 rss_tbl_new = rte_zmalloc("rss_tbl_new", sizeof(sa->rss_tbl), 0); 1516 if (rss_tbl_new == NULL) 1517 return -ENOMEM; 1518 1519 sfc_adapter_lock(sa); 1520 1521 rte_memcpy(rss_tbl_new, sa->rss_tbl, sizeof(sa->rss_tbl)); 1522 1523 for (entry = 0; entry < reta_size; entry++) { 1524 int grp_idx = entry % RTE_RETA_GROUP_SIZE; 1525 struct rte_eth_rss_reta_entry64 *grp; 1526 1527 grp = &reta_conf[entry / RTE_RETA_GROUP_SIZE]; 1528 1529 if (grp->mask & (1ull << grp_idx)) { 1530 if (grp->reta[grp_idx] >= sa->rss_channels) { 1531 rc = EINVAL; 1532 goto bad_reta_entry; 1533 } 1534 rss_tbl_new[entry] = grp->reta[grp_idx]; 1535 } 1536 } 1537 1538 if (sa->state == SFC_ADAPTER_STARTED) { 1539 rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT, 1540 rss_tbl_new, EFX_RSS_TBL_SIZE); 1541 if (rc != 0) 1542 goto fail_scale_tbl_set; 1543 } 1544 1545 rte_memcpy(sa->rss_tbl, rss_tbl_new, sizeof(sa->rss_tbl)); 1546 1547 fail_scale_tbl_set: 1548 bad_reta_entry: 1549 sfc_adapter_unlock(sa); 1550 1551 rte_free(rss_tbl_new); 1552 1553 SFC_ASSERT(rc >= 0); 1554 return -rc; 1555 } 1556 #endif 1557 1558 static int 1559 sfc_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type, 1560 enum rte_filter_op filter_op, 1561 void *arg) 1562 { 1563 struct sfc_adapter *sa = dev->data->dev_private; 1564 int rc = ENOTSUP; 1565 1566 sfc_log_init(sa, "entry"); 1567 1568 switch (filter_type) { 1569 case RTE_ETH_FILTER_NONE: 1570 sfc_err(sa, "Global filters configuration not supported"); 1571 break; 1572 case RTE_ETH_FILTER_MACVLAN: 1573 sfc_err(sa, "MACVLAN filters not supported"); 1574 break; 1575 case RTE_ETH_FILTER_ETHERTYPE: 1576 sfc_err(sa, "EtherType filters not supported"); 1577 break; 1578 case RTE_ETH_FILTER_FLEXIBLE: 1579 sfc_err(sa, "Flexible filters not supported"); 1580 break; 1581 case RTE_ETH_FILTER_SYN: 1582 sfc_err(sa, "SYN filters not supported"); 1583 break; 1584 case RTE_ETH_FILTER_NTUPLE: 1585 sfc_err(sa, "NTUPLE filters not supported"); 1586 break; 1587 case RTE_ETH_FILTER_TUNNEL: 1588 sfc_err(sa, "Tunnel filters not supported"); 1589 break; 1590 case RTE_ETH_FILTER_FDIR: 1591 sfc_err(sa, "Flow Director filters not supported"); 1592 break; 1593 case RTE_ETH_FILTER_HASH: 1594 sfc_err(sa, "Hash filters not supported"); 1595 break; 1596 case RTE_ETH_FILTER_GENERIC: 1597 if (filter_op != RTE_ETH_FILTER_GET) { 1598 rc = EINVAL; 1599 } else { 1600 *(const void **)arg = &sfc_flow_ops; 1601 rc = 0; 1602 } 1603 break; 1604 default: 1605 sfc_err(sa, "Unknown filter type %u", filter_type); 1606 break; 1607 } 1608 1609 sfc_log_init(sa, "exit: %d", -rc); 1610 SFC_ASSERT(rc >= 0); 1611 return -rc; 1612 } 1613 1614 static const struct eth_dev_ops sfc_eth_dev_ops = { 1615 .dev_configure = sfc_dev_configure, 1616 .dev_start = sfc_dev_start, 1617 .dev_stop = sfc_dev_stop, 1618 .dev_set_link_up = sfc_dev_set_link_up, 1619 .dev_set_link_down = sfc_dev_set_link_down, 1620 .dev_close = sfc_dev_close, 1621 .promiscuous_enable = sfc_dev_promisc_enable, 1622 .promiscuous_disable = sfc_dev_promisc_disable, 1623 .allmulticast_enable = sfc_dev_allmulti_enable, 1624 .allmulticast_disable = sfc_dev_allmulti_disable, 1625 .link_update = sfc_dev_link_update, 1626 .stats_get = sfc_stats_get, 1627 .stats_reset = sfc_stats_reset, 1628 .xstats_get = sfc_xstats_get, 1629 .xstats_reset = sfc_stats_reset, 1630 .xstats_get_names = sfc_xstats_get_names, 1631 .dev_infos_get = sfc_dev_infos_get, 1632 .dev_supported_ptypes_get = sfc_dev_supported_ptypes_get, 1633 .mtu_set = sfc_dev_set_mtu, 1634 .rx_queue_start = sfc_rx_queue_start, 1635 .rx_queue_stop = sfc_rx_queue_stop, 1636 .tx_queue_start = sfc_tx_queue_start, 1637 .tx_queue_stop = sfc_tx_queue_stop, 1638 .rx_queue_setup = sfc_rx_queue_setup, 1639 .rx_queue_release = sfc_rx_queue_release, 1640 .rx_queue_count = sfc_rx_queue_count, 1641 .rx_descriptor_done = sfc_rx_descriptor_done, 1642 .rx_descriptor_status = sfc_rx_descriptor_status, 1643 .tx_descriptor_status = sfc_tx_descriptor_status, 1644 .tx_queue_setup = sfc_tx_queue_setup, 1645 .tx_queue_release = sfc_tx_queue_release, 1646 .flow_ctrl_get = sfc_flow_ctrl_get, 1647 .flow_ctrl_set = sfc_flow_ctrl_set, 1648 .mac_addr_set = sfc_mac_addr_set, 1649 .udp_tunnel_port_add = sfc_dev_udp_tunnel_port_add, 1650 .udp_tunnel_port_del = sfc_dev_udp_tunnel_port_del, 1651 #if EFSYS_OPT_RX_SCALE 1652 .reta_update = sfc_dev_rss_reta_update, 1653 .reta_query = sfc_dev_rss_reta_query, 1654 .rss_hash_update = sfc_dev_rss_hash_update, 1655 .rss_hash_conf_get = sfc_dev_rss_hash_conf_get, 1656 #endif 1657 .filter_ctrl = sfc_dev_filter_ctrl, 1658 .set_mc_addr_list = sfc_set_mc_addr_list, 1659 .rxq_info_get = sfc_rx_queue_info_get, 1660 .txq_info_get = sfc_tx_queue_info_get, 1661 .fw_version_get = sfc_fw_version_get, 1662 .xstats_get_by_id = sfc_xstats_get_by_id, 1663 .xstats_get_names_by_id = sfc_xstats_get_names_by_id, 1664 }; 1665 1666 /** 1667 * Duplicate a string in potentially shared memory required for 1668 * multi-process support. 1669 * 1670 * strdup() allocates from process-local heap/memory. 1671 */ 1672 static char * 1673 sfc_strdup(const char *str) 1674 { 1675 size_t size; 1676 char *copy; 1677 1678 if (str == NULL) 1679 return NULL; 1680 1681 size = strlen(str) + 1; 1682 copy = rte_malloc(__func__, size, 0); 1683 if (copy != NULL) 1684 rte_memcpy(copy, str, size); 1685 1686 return copy; 1687 } 1688 1689 static int 1690 sfc_eth_dev_set_ops(struct rte_eth_dev *dev) 1691 { 1692 struct sfc_adapter *sa = dev->data->dev_private; 1693 unsigned int avail_caps = 0; 1694 const char *rx_name = NULL; 1695 const char *tx_name = NULL; 1696 int rc; 1697 1698 switch (sa->family) { 1699 case EFX_FAMILY_HUNTINGTON: 1700 case EFX_FAMILY_MEDFORD: 1701 avail_caps |= SFC_DP_HW_FW_CAP_EF10; 1702 break; 1703 default: 1704 break; 1705 } 1706 1707 rc = sfc_kvargs_process(sa, SFC_KVARG_RX_DATAPATH, 1708 sfc_kvarg_string_handler, &rx_name); 1709 if (rc != 0) 1710 goto fail_kvarg_rx_datapath; 1711 1712 if (rx_name != NULL) { 1713 sa->dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, rx_name); 1714 if (sa->dp_rx == NULL) { 1715 sfc_err(sa, "Rx datapath %s not found", rx_name); 1716 rc = ENOENT; 1717 goto fail_dp_rx; 1718 } 1719 if (!sfc_dp_match_hw_fw_caps(&sa->dp_rx->dp, avail_caps)) { 1720 sfc_err(sa, 1721 "Insufficient Hw/FW capabilities to use Rx datapath %s", 1722 rx_name); 1723 rc = EINVAL; 1724 goto fail_dp_rx_caps; 1725 } 1726 } else { 1727 sa->dp_rx = sfc_dp_find_rx_by_caps(&sfc_dp_head, avail_caps); 1728 if (sa->dp_rx == NULL) { 1729 sfc_err(sa, "Rx datapath by caps %#x not found", 1730 avail_caps); 1731 rc = ENOENT; 1732 goto fail_dp_rx; 1733 } 1734 } 1735 1736 sa->dp_rx_name = sfc_strdup(sa->dp_rx->dp.name); 1737 if (sa->dp_rx_name == NULL) { 1738 rc = ENOMEM; 1739 goto fail_dp_rx_name; 1740 } 1741 1742 sfc_info(sa, "use %s Rx datapath", sa->dp_rx_name); 1743 1744 dev->rx_pkt_burst = sa->dp_rx->pkt_burst; 1745 1746 rc = sfc_kvargs_process(sa, SFC_KVARG_TX_DATAPATH, 1747 sfc_kvarg_string_handler, &tx_name); 1748 if (rc != 0) 1749 goto fail_kvarg_tx_datapath; 1750 1751 if (tx_name != NULL) { 1752 sa->dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, tx_name); 1753 if (sa->dp_tx == NULL) { 1754 sfc_err(sa, "Tx datapath %s not found", tx_name); 1755 rc = ENOENT; 1756 goto fail_dp_tx; 1757 } 1758 if (!sfc_dp_match_hw_fw_caps(&sa->dp_tx->dp, avail_caps)) { 1759 sfc_err(sa, 1760 "Insufficient Hw/FW capabilities to use Tx datapath %s", 1761 tx_name); 1762 rc = EINVAL; 1763 goto fail_dp_tx_caps; 1764 } 1765 } else { 1766 sa->dp_tx = sfc_dp_find_tx_by_caps(&sfc_dp_head, avail_caps); 1767 if (sa->dp_tx == NULL) { 1768 sfc_err(sa, "Tx datapath by caps %#x not found", 1769 avail_caps); 1770 rc = ENOENT; 1771 goto fail_dp_tx; 1772 } 1773 } 1774 1775 sa->dp_tx_name = sfc_strdup(sa->dp_tx->dp.name); 1776 if (sa->dp_tx_name == NULL) { 1777 rc = ENOMEM; 1778 goto fail_dp_tx_name; 1779 } 1780 1781 sfc_info(sa, "use %s Tx datapath", sa->dp_tx_name); 1782 1783 dev->tx_pkt_burst = sa->dp_tx->pkt_burst; 1784 1785 dev->dev_ops = &sfc_eth_dev_ops; 1786 1787 return 0; 1788 1789 fail_dp_tx_name: 1790 fail_dp_tx_caps: 1791 sa->dp_tx = NULL; 1792 1793 fail_dp_tx: 1794 fail_kvarg_tx_datapath: 1795 rte_free(sa->dp_rx_name); 1796 sa->dp_rx_name = NULL; 1797 1798 fail_dp_rx_name: 1799 fail_dp_rx_caps: 1800 sa->dp_rx = NULL; 1801 1802 fail_dp_rx: 1803 fail_kvarg_rx_datapath: 1804 return rc; 1805 } 1806 1807 static void 1808 sfc_eth_dev_clear_ops(struct rte_eth_dev *dev) 1809 { 1810 struct sfc_adapter *sa = dev->data->dev_private; 1811 1812 dev->dev_ops = NULL; 1813 dev->rx_pkt_burst = NULL; 1814 dev->tx_pkt_burst = NULL; 1815 1816 rte_free(sa->dp_tx_name); 1817 sa->dp_tx_name = NULL; 1818 sa->dp_tx = NULL; 1819 1820 rte_free(sa->dp_rx_name); 1821 sa->dp_rx_name = NULL; 1822 sa->dp_rx = NULL; 1823 } 1824 1825 static const struct eth_dev_ops sfc_eth_dev_secondary_ops = { 1826 .rxq_info_get = sfc_rx_queue_info_get, 1827 .txq_info_get = sfc_tx_queue_info_get, 1828 }; 1829 1830 static int 1831 sfc_eth_dev_secondary_set_ops(struct rte_eth_dev *dev) 1832 { 1833 /* 1834 * Device private data has really many process-local pointers. 1835 * Below code should be extremely careful to use data located 1836 * in shared memory only. 1837 */ 1838 struct sfc_adapter *sa = dev->data->dev_private; 1839 const struct sfc_dp_rx *dp_rx; 1840 const struct sfc_dp_tx *dp_tx; 1841 int rc; 1842 1843 dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, sa->dp_rx_name); 1844 if (dp_rx == NULL) { 1845 sfc_err(sa, "cannot find %s Rx datapath", sa->dp_tx_name); 1846 rc = ENOENT; 1847 goto fail_dp_rx; 1848 } 1849 if (~dp_rx->features & SFC_DP_RX_FEAT_MULTI_PROCESS) { 1850 sfc_err(sa, "%s Rx datapath does not support multi-process", 1851 sa->dp_tx_name); 1852 rc = EINVAL; 1853 goto fail_dp_rx_multi_process; 1854 } 1855 1856 dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, sa->dp_tx_name); 1857 if (dp_tx == NULL) { 1858 sfc_err(sa, "cannot find %s Tx datapath", sa->dp_tx_name); 1859 rc = ENOENT; 1860 goto fail_dp_tx; 1861 } 1862 if (~dp_tx->features & SFC_DP_TX_FEAT_MULTI_PROCESS) { 1863 sfc_err(sa, "%s Tx datapath does not support multi-process", 1864 sa->dp_tx_name); 1865 rc = EINVAL; 1866 goto fail_dp_tx_multi_process; 1867 } 1868 1869 dev->rx_pkt_burst = dp_rx->pkt_burst; 1870 dev->tx_pkt_burst = dp_tx->pkt_burst; 1871 dev->dev_ops = &sfc_eth_dev_secondary_ops; 1872 1873 return 0; 1874 1875 fail_dp_tx_multi_process: 1876 fail_dp_tx: 1877 fail_dp_rx_multi_process: 1878 fail_dp_rx: 1879 return rc; 1880 } 1881 1882 static void 1883 sfc_eth_dev_secondary_clear_ops(struct rte_eth_dev *dev) 1884 { 1885 dev->dev_ops = NULL; 1886 dev->tx_pkt_burst = NULL; 1887 dev->rx_pkt_burst = NULL; 1888 } 1889 1890 static void 1891 sfc_register_dp(void) 1892 { 1893 /* Register once */ 1894 if (TAILQ_EMPTY(&sfc_dp_head)) { 1895 /* Prefer EF10 datapath */ 1896 sfc_dp_register(&sfc_dp_head, &sfc_ef10_rx.dp); 1897 sfc_dp_register(&sfc_dp_head, &sfc_efx_rx.dp); 1898 1899 sfc_dp_register(&sfc_dp_head, &sfc_ef10_tx.dp); 1900 sfc_dp_register(&sfc_dp_head, &sfc_efx_tx.dp); 1901 sfc_dp_register(&sfc_dp_head, &sfc_ef10_simple_tx.dp); 1902 } 1903 } 1904 1905 static int 1906 sfc_eth_dev_init(struct rte_eth_dev *dev) 1907 { 1908 struct sfc_adapter *sa = dev->data->dev_private; 1909 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 1910 int rc; 1911 const efx_nic_cfg_t *encp; 1912 const struct ether_addr *from; 1913 1914 sfc_register_dp(); 1915 1916 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1917 return -sfc_eth_dev_secondary_set_ops(dev); 1918 1919 /* Required for logging */ 1920 sa->pci_addr = pci_dev->addr; 1921 sa->port_id = dev->data->port_id; 1922 1923 sa->eth_dev = dev; 1924 1925 /* Copy PCI device info to the dev->data */ 1926 rte_eth_copy_pci_info(dev, pci_dev); 1927 1928 rc = sfc_kvargs_parse(sa); 1929 if (rc != 0) 1930 goto fail_kvargs_parse; 1931 1932 rc = sfc_kvargs_process(sa, SFC_KVARG_DEBUG_INIT, 1933 sfc_kvarg_bool_handler, &sa->debug_init); 1934 if (rc != 0) 1935 goto fail_kvarg_debug_init; 1936 1937 sfc_log_init(sa, "entry"); 1938 1939 dev->data->mac_addrs = rte_zmalloc("sfc", ETHER_ADDR_LEN, 0); 1940 if (dev->data->mac_addrs == NULL) { 1941 rc = ENOMEM; 1942 goto fail_mac_addrs; 1943 } 1944 1945 sfc_adapter_lock_init(sa); 1946 sfc_adapter_lock(sa); 1947 1948 sfc_log_init(sa, "probing"); 1949 rc = sfc_probe(sa); 1950 if (rc != 0) 1951 goto fail_probe; 1952 1953 sfc_log_init(sa, "set device ops"); 1954 rc = sfc_eth_dev_set_ops(dev); 1955 if (rc != 0) 1956 goto fail_set_ops; 1957 1958 sfc_log_init(sa, "attaching"); 1959 rc = sfc_attach(sa); 1960 if (rc != 0) 1961 goto fail_attach; 1962 1963 encp = efx_nic_cfg_get(sa->nic); 1964 1965 /* 1966 * The arguments are really reverse order in comparison to 1967 * Linux kernel. Copy from NIC config to Ethernet device data. 1968 */ 1969 from = (const struct ether_addr *)(encp->enc_mac_addr); 1970 ether_addr_copy(from, &dev->data->mac_addrs[0]); 1971 1972 sfc_adapter_unlock(sa); 1973 1974 sfc_log_init(sa, "done"); 1975 return 0; 1976 1977 fail_attach: 1978 sfc_eth_dev_clear_ops(dev); 1979 1980 fail_set_ops: 1981 sfc_unprobe(sa); 1982 1983 fail_probe: 1984 sfc_adapter_unlock(sa); 1985 sfc_adapter_lock_fini(sa); 1986 rte_free(dev->data->mac_addrs); 1987 dev->data->mac_addrs = NULL; 1988 1989 fail_mac_addrs: 1990 fail_kvarg_debug_init: 1991 sfc_kvargs_cleanup(sa); 1992 1993 fail_kvargs_parse: 1994 sfc_log_init(sa, "failed %d", rc); 1995 SFC_ASSERT(rc > 0); 1996 return -rc; 1997 } 1998 1999 static int 2000 sfc_eth_dev_uninit(struct rte_eth_dev *dev) 2001 { 2002 struct sfc_adapter *sa; 2003 2004 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 2005 sfc_eth_dev_secondary_clear_ops(dev); 2006 return 0; 2007 } 2008 2009 sa = dev->data->dev_private; 2010 sfc_log_init(sa, "entry"); 2011 2012 sfc_adapter_lock(sa); 2013 2014 sfc_eth_dev_clear_ops(dev); 2015 2016 sfc_detach(sa); 2017 sfc_unprobe(sa); 2018 2019 rte_free(dev->data->mac_addrs); 2020 dev->data->mac_addrs = NULL; 2021 2022 sfc_kvargs_cleanup(sa); 2023 2024 sfc_adapter_unlock(sa); 2025 sfc_adapter_lock_fini(sa); 2026 2027 sfc_log_init(sa, "done"); 2028 2029 /* Required for logging, so cleanup last */ 2030 sa->eth_dev = NULL; 2031 return 0; 2032 } 2033 2034 static const struct rte_pci_id pci_id_sfc_efx_map[] = { 2035 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE) }, 2036 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE_VF) }, 2037 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT) }, 2038 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT_VF) }, 2039 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) }, 2040 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD_VF) }, 2041 { .vendor_id = 0 /* sentinel */ } 2042 }; 2043 2044 static int sfc_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 2045 struct rte_pci_device *pci_dev) 2046 { 2047 return rte_eth_dev_pci_generic_probe(pci_dev, 2048 sizeof(struct sfc_adapter), sfc_eth_dev_init); 2049 } 2050 2051 static int sfc_eth_dev_pci_remove(struct rte_pci_device *pci_dev) 2052 { 2053 return rte_eth_dev_pci_generic_remove(pci_dev, sfc_eth_dev_uninit); 2054 } 2055 2056 static struct rte_pci_driver sfc_efx_pmd = { 2057 .id_table = pci_id_sfc_efx_map, 2058 .drv_flags = 2059 RTE_PCI_DRV_INTR_LSC | 2060 RTE_PCI_DRV_NEED_MAPPING, 2061 .probe = sfc_eth_dev_pci_probe, 2062 .remove = sfc_eth_dev_pci_remove, 2063 }; 2064 2065 RTE_PMD_REGISTER_PCI(net_sfc_efx, sfc_efx_pmd); 2066 RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map); 2067 RTE_PMD_REGISTER_KMOD_DEP(net_sfc_efx, "* igb_uio | uio_pci_generic | vfio-pci"); 2068 RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx, 2069 SFC_KVARG_RX_DATAPATH "=" SFC_KVARG_VALUES_RX_DATAPATH " " 2070 SFC_KVARG_TX_DATAPATH "=" SFC_KVARG_VALUES_TX_DATAPATH " " 2071 SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " " 2072 SFC_KVARG_STATS_UPDATE_PERIOD_MS "=<long> " 2073 SFC_KVARG_MCDI_LOGGING "=" SFC_KVARG_VALUES_BOOL " " 2074 SFC_KVARG_DEBUG_INIT "=" SFC_KVARG_VALUES_BOOL); 2075