/*-
 * Copyright (c) 2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_dev.h>
#include <rte_ethdev.h>
#include <rte_pci.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_kvargs.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tx.h"
#include "sfc_flow.h"

static void
sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);

	sfc_log_init(sa, "entry");

	dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
	dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;

	/* Autonegotiation may be disabled */
	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
	if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_1000FDX)
		dev_info->speed_capa |= ETH_LINK_SPEED_1G;
	if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_10000FDX)
		dev_info->speed_capa |= ETH_LINK_SPEED_10G;
	if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_40000FDX)
		dev_info->speed_capa |= ETH_LINK_SPEED_40G;

	dev_info->max_rx_queues = sa->rxq_max;
	dev_info->max_tx_queues = sa->txq_max;

	/* By default packets are dropped if no descriptors are available */
	dev_info->default_rxconf.rx_drop_en = 1;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;

	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM;

	dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOXSUMSCTP;
	if (!encp->enc_hw_tx_insert_vlan_enabled)
		dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
	else
		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_VLAN_INSERT;

#if EFSYS_OPT_RX_SCALE
	if (sa->rss_support != EFX_RX_SCALE_UNAVAILABLE) {
		dev_info->reta_size = EFX_RSS_TBL_SIZE;
		dev_info->hash_key_size = SFC_RSS_KEY_SIZE;
		dev_info->flow_type_rss_offloads = SFC_RSS_OFFLOADS;
	}
#endif

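	/* Advertise TSO only when it is enabled in the adapter (sa->tso) */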
	if (sa->tso)
		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;

	dev_info->rx_desc_lim.nb_max = EFX_RXQ_MAXNDESCS;
	dev_info->rx_desc_lim.nb_min = EFX_RXQ_MINNDESCS;
	/*
	 * The RXQ hardware requires that the descriptor count is a power
	 * of 2, but rx_desc_lim cannot properly describe that constraint.
	 */
	dev_info->rx_desc_lim.nb_align = EFX_RXQ_MINNDESCS;

	dev_info->tx_desc_lim.nb_max = sa->txq_max_entries;
	dev_info->tx_desc_lim.nb_min = EFX_TXQ_MINNDESCS;
	/*
	 * The TXQ hardware requires that the descriptor count is a power
	 * of 2, but tx_desc_lim cannot properly describe that constraint.
	 */
	dev_info->tx_desc_lim.nb_align = EFX_TXQ_MINNDESCS;
}

static const uint32_t *
sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == sfc_recv_pkts)
		return ptypes;

	return NULL;
}

static int
sfc_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *dev_data = dev->data;
	struct sfc_adapter *sa = dev_data->dev_private;
	int rc;

	sfc_log_init(sa, "entry n_rxq=%u n_txq=%u",
		     dev_data->nb_rx_queues, dev_data->nb_tx_queues);

	sfc_adapter_lock(sa);
	switch (sa->state) {
	case SFC_ADAPTER_CONFIGURED:
		sfc_close(sa);
		SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED);
		/* FALLTHROUGH */
	case SFC_ADAPTER_INITIALIZED:
		rc = sfc_configure(sa);
		break;
	default:
		sfc_err(sa, "unexpected adapter state %u to configure",
			sa->state);
		rc = EINVAL;
		break;
	}
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done %d", rc);
	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_start(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	rc = sfc_start(sa);
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done %d", rc);
	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_eth_link *dev_link = &dev->data->dev_link;
	struct rte_eth_link old_link;
	struct rte_eth_link current_link;

	sfc_log_init(sa, "entry");

retry:
	EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
	*(int64_t *)&old_link = rte_atomic64_read((rte_atomic64_t *)dev_link);

	if (sa->state != SFC_ADAPTER_STARTED) {
		sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &current_link);
		if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link,
					 *(uint64_t *)&old_link,
					 *(uint64_t *)&current_link))
			goto retry;
	} else if (wait_to_complete) {
		efx_link_mode_t link_mode;

		if (efx_port_poll(sa->nic, &link_mode) != 0)
			link_mode = EFX_LINK_UNKNOWN;
		sfc_port_link_mode_to_info(link_mode, &current_link);

		if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link,
					 *(uint64_t *)&old_link,
					 *(uint64_t *)&current_link))
			goto retry;
	} else {
		sfc_ev_mgmt_qpoll(sa);
		*(int64_t *)&current_link =
			rte_atomic64_read((rte_atomic64_t *)dev_link);
	}

	if (old_link.link_status != current_link.link_status)
		sfc_info(sa, "Link status is %s",
			 current_link.link_status ? "UP" : "DOWN");
"UP" : "DOWN"); 221 222 return old_link.link_status == current_link.link_status ? 0 : -1; 223 } 224 225 static void 226 sfc_dev_stop(struct rte_eth_dev *dev) 227 { 228 struct sfc_adapter *sa = dev->data->dev_private; 229 230 sfc_log_init(sa, "entry"); 231 232 sfc_adapter_lock(sa); 233 sfc_stop(sa); 234 sfc_adapter_unlock(sa); 235 236 sfc_log_init(sa, "done"); 237 } 238 239 static int 240 sfc_dev_set_link_up(struct rte_eth_dev *dev) 241 { 242 struct sfc_adapter *sa = dev->data->dev_private; 243 int rc; 244 245 sfc_log_init(sa, "entry"); 246 247 sfc_adapter_lock(sa); 248 rc = sfc_start(sa); 249 sfc_adapter_unlock(sa); 250 251 SFC_ASSERT(rc >= 0); 252 return -rc; 253 } 254 255 static int 256 sfc_dev_set_link_down(struct rte_eth_dev *dev) 257 { 258 struct sfc_adapter *sa = dev->data->dev_private; 259 260 sfc_log_init(sa, "entry"); 261 262 sfc_adapter_lock(sa); 263 sfc_stop(sa); 264 sfc_adapter_unlock(sa); 265 266 return 0; 267 } 268 269 static void 270 sfc_dev_close(struct rte_eth_dev *dev) 271 { 272 struct sfc_adapter *sa = dev->data->dev_private; 273 274 sfc_log_init(sa, "entry"); 275 276 sfc_adapter_lock(sa); 277 switch (sa->state) { 278 case SFC_ADAPTER_STARTED: 279 sfc_stop(sa); 280 SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED); 281 /* FALLTHROUGH */ 282 case SFC_ADAPTER_CONFIGURED: 283 sfc_close(sa); 284 SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED); 285 /* FALLTHROUGH */ 286 case SFC_ADAPTER_INITIALIZED: 287 break; 288 default: 289 sfc_err(sa, "unexpected adapter state %u on close", sa->state); 290 break; 291 } 292 sfc_adapter_unlock(sa); 293 294 sfc_log_init(sa, "done"); 295 } 296 297 static void 298 sfc_dev_filter_set(struct rte_eth_dev *dev, enum sfc_dev_filter_mode mode, 299 boolean_t enabled) 300 { 301 struct sfc_port *port; 302 boolean_t *toggle; 303 struct sfc_adapter *sa = dev->data->dev_private; 304 boolean_t allmulti = (mode == SFC_DEV_FILTER_MODE_ALLMULTI); 305 const char *desc = (allmulti) ? "all-multi" : "promiscuous"; 306 307 sfc_adapter_lock(sa); 308 309 port = &sa->port; 310 toggle = (allmulti) ? (&port->allmulti) : (&port->promisc); 311 312 if (*toggle != enabled) { 313 *toggle = enabled; 314 315 if ((sa->state == SFC_ADAPTER_STARTED) && 316 (sfc_set_rx_mode(sa) != 0)) { 317 *toggle = !(enabled); 318 sfc_warn(sa, "Failed to %s %s mode", 319 ((enabled) ? 
"enable" : "disable"), desc); 320 } 321 } 322 323 sfc_adapter_unlock(sa); 324 } 325 326 static void 327 sfc_dev_promisc_enable(struct rte_eth_dev *dev) 328 { 329 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_TRUE); 330 } 331 332 static void 333 sfc_dev_promisc_disable(struct rte_eth_dev *dev) 334 { 335 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_FALSE); 336 } 337 338 static void 339 sfc_dev_allmulti_enable(struct rte_eth_dev *dev) 340 { 341 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_TRUE); 342 } 343 344 static void 345 sfc_dev_allmulti_disable(struct rte_eth_dev *dev) 346 { 347 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_FALSE); 348 } 349 350 static int 351 sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, 352 uint16_t nb_rx_desc, unsigned int socket_id, 353 const struct rte_eth_rxconf *rx_conf, 354 struct rte_mempool *mb_pool) 355 { 356 struct sfc_adapter *sa = dev->data->dev_private; 357 int rc; 358 359 sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u", 360 rx_queue_id, nb_rx_desc, socket_id); 361 362 sfc_adapter_lock(sa); 363 364 rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id, 365 rx_conf, mb_pool); 366 if (rc != 0) 367 goto fail_rx_qinit; 368 369 dev->data->rx_queues[rx_queue_id] = sa->rxq_info[rx_queue_id].rxq; 370 371 sfc_adapter_unlock(sa); 372 373 return 0; 374 375 fail_rx_qinit: 376 sfc_adapter_unlock(sa); 377 SFC_ASSERT(rc > 0); 378 return -rc; 379 } 380 381 static void 382 sfc_rx_queue_release(void *queue) 383 { 384 struct sfc_rxq *rxq = queue; 385 struct sfc_adapter *sa; 386 unsigned int sw_index; 387 388 if (rxq == NULL) 389 return; 390 391 sa = rxq->evq->sa; 392 sfc_adapter_lock(sa); 393 394 sw_index = sfc_rxq_sw_index(rxq); 395 396 sfc_log_init(sa, "RxQ=%u", sw_index); 397 398 sa->eth_dev->data->rx_queues[sw_index] = NULL; 399 400 sfc_rx_qfini(sa, sw_index); 401 402 sfc_adapter_unlock(sa); 403 } 404 405 static int 406 sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, 407 uint16_t nb_tx_desc, unsigned int socket_id, 408 const struct rte_eth_txconf *tx_conf) 409 { 410 struct sfc_adapter *sa = dev->data->dev_private; 411 int rc; 412 413 sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u", 414 tx_queue_id, nb_tx_desc, socket_id); 415 416 sfc_adapter_lock(sa); 417 418 rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf); 419 if (rc != 0) 420 goto fail_tx_qinit; 421 422 dev->data->tx_queues[tx_queue_id] = sa->txq_info[tx_queue_id].txq; 423 424 sfc_adapter_unlock(sa); 425 return 0; 426 427 fail_tx_qinit: 428 sfc_adapter_unlock(sa); 429 SFC_ASSERT(rc > 0); 430 return -rc; 431 } 432 433 static void 434 sfc_tx_queue_release(void *queue) 435 { 436 struct sfc_txq *txq = queue; 437 unsigned int sw_index; 438 struct sfc_adapter *sa; 439 440 if (txq == NULL) 441 return; 442 443 sw_index = sfc_txq_sw_index(txq); 444 445 SFC_ASSERT(txq->evq != NULL); 446 sa = txq->evq->sa; 447 448 sfc_log_init(sa, "TxQ = %u", sw_index); 449 450 sfc_adapter_lock(sa); 451 452 SFC_ASSERT(sw_index < sa->eth_dev->data->nb_tx_queues); 453 sa->eth_dev->data->tx_queues[sw_index] = NULL; 454 455 sfc_tx_qfini(sa, sw_index); 456 457 sfc_adapter_unlock(sa); 458 } 459 460 static void 461 sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 462 { 463 struct sfc_adapter *sa = dev->data->dev_private; 464 struct sfc_port *port = &sa->port; 465 uint64_t *mac_stats; 466 467 rte_spinlock_lock(&port->mac_stats_lock); 468 469 if (sfc_port_update_mac_stats(sa) != 0) 470 goto unlock; 471 472 mac_stats = 

	if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask,
				   EFX_MAC_VADAPTER_RX_UNICAST_PACKETS)) {
		stats->ipackets =
			mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS];
		stats->opackets =
			mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS];
		stats->ibytes =
			mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES];
		stats->obytes =
			mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES];
		stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_OVERFLOW];
		stats->ierrors = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS];
		stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS];
	} else {
		stats->ipackets = mac_stats[EFX_MAC_RX_PKTS];
		stats->opackets = mac_stats[EFX_MAC_TX_PKTS];
		stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS];
		stats->obytes = mac_stats[EFX_MAC_TX_OCTETS];
		/*
		 * Take into account the stats which are supported on EF10.
		 * If a stat is not supported by the current firmware variant
		 * or HW revision, it is guaranteed to be zero in mac_stats.
		 */
		stats->imissed =
			mac_stats[EFX_MAC_RX_NODESC_DROP_CNT] +
			mac_stats[EFX_MAC_PM_TRUNC_BB_OVERFLOW] +
			mac_stats[EFX_MAC_PM_DISCARD_BB_OVERFLOW] +
			mac_stats[EFX_MAC_PM_TRUNC_VFIFO_FULL] +
			mac_stats[EFX_MAC_PM_DISCARD_VFIFO_FULL] +
			mac_stats[EFX_MAC_PM_TRUNC_QBB] +
			mac_stats[EFX_MAC_PM_DISCARD_QBB] +
			mac_stats[EFX_MAC_PM_DISCARD_MAPPING] +
			mac_stats[EFX_MAC_RXDP_Q_DISABLED_PKTS] +
			mac_stats[EFX_MAC_RXDP_DI_DROPPED_PKTS];
		stats->ierrors =
			mac_stats[EFX_MAC_RX_FCS_ERRORS] +
			mac_stats[EFX_MAC_RX_ALIGN_ERRORS] +
			mac_stats[EFX_MAC_RX_JABBER_PKTS];
		/* no oerrors counters supported on EF10 */
	}

unlock:
	rte_spinlock_unlock(&port->mac_stats_lock);
}

static void
sfc_stats_reset(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_port *port = &sa->port;
	int rc;

	if (sa->state != SFC_ADAPTER_STARTED) {
		/*
		 * The operation cannot be done if port is not started; it
		 * will be scheduled to be done during the next port start
		 */
		port->mac_stats_reset_pending = B_TRUE;
		return;
	}

	rc = sfc_port_reset_mac_stats(sa);
	if (rc != 0)
		sfc_err(sa, "failed to reset statistics (rc = %d)", rc);
}

static int
sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
	       unsigned int xstats_count)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_port *port = &sa->port;
	uint64_t *mac_stats;
	int rc;
	unsigned int i;
	int nstats = 0;

	rte_spinlock_lock(&port->mac_stats_lock);

	rc = sfc_port_update_mac_stats(sa);
	if (rc != 0) {
		SFC_ASSERT(rc > 0);
		nstats = -rc;
		goto unlock;
	}

	mac_stats = port->mac_stats_buf;

	for (i = 0; i < EFX_MAC_NSTATS; ++i) {
		if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
			if (xstats != NULL && nstats < (int)xstats_count) {
				xstats[nstats].id = nstats;
				xstats[nstats].value = mac_stats[i];
			}
			nstats++;
		}
	}

unlock:
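	/*
	 * Reached on success and on stats update failure alike; nstats
	 * holds either the number of stats or a negative error code.
	 */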
	rte_spinlock_unlock(&port->mac_stats_lock);

	return nstats;
}

static int
sfc_xstats_get_names(struct rte_eth_dev *dev,
		     struct rte_eth_xstat_name *xstats_names,
		     unsigned int xstats_count)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_port *port = &sa->port;
	unsigned int i;
	unsigned int nstats = 0;

	for (i = 0; i < EFX_MAC_NSTATS; ++i) {
		if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
			if (xstats_names != NULL && nstats < xstats_count)
				strncpy(xstats_names[nstats].name,
					efx_mac_stat_name(sa->nic, i),
					sizeof(xstats_names[0].name));
			nstats++;
		}
	}

	return nstats;
}

static int
sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	unsigned int wanted_fc, link_fc;

	memset(fc_conf, 0, sizeof(*fc_conf));

	sfc_adapter_lock(sa);

	if (sa->state == SFC_ADAPTER_STARTED)
		efx_mac_fcntl_get(sa->nic, &wanted_fc, &link_fc);
	else
		link_fc = sa->port.flow_ctrl;

	switch (link_fc) {
	case 0:
		fc_conf->mode = RTE_FC_NONE;
		break;
	case EFX_FCNTL_RESPOND:
		fc_conf->mode = RTE_FC_RX_PAUSE;
		break;
	case EFX_FCNTL_GENERATE:
		fc_conf->mode = RTE_FC_TX_PAUSE;
		break;
	case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE):
		fc_conf->mode = RTE_FC_FULL;
		break;
	default:
		sfc_err(sa, "%s: unexpected flow control value %#x",
			__func__, link_fc);
	}

	fc_conf->autoneg = sa->port.flow_ctrl_autoneg;

	sfc_adapter_unlock(sa);

	return 0;
}

static int
sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_port *port = &sa->port;
	unsigned int fcntl;
	int rc;

	if (fc_conf->high_water != 0 || fc_conf->low_water != 0 ||
	    fc_conf->pause_time != 0 || fc_conf->send_xon != 0 ||
	    fc_conf->mac_ctrl_frame_fwd != 0) {
		sfc_err(sa, "unsupported flow control settings specified");
		rc = EINVAL;
		goto fail_inval;
	}

	switch (fc_conf->mode) {
	case RTE_FC_NONE:
		fcntl = 0;
		break;
	case RTE_FC_RX_PAUSE:
		fcntl = EFX_FCNTL_RESPOND;
		break;
	case RTE_FC_TX_PAUSE:
		fcntl = EFX_FCNTL_GENERATE;
		break;
	case RTE_FC_FULL:
		fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
		break;
	default:
		rc = EINVAL;
		goto fail_inval;
	}

	sfc_adapter_lock(sa);

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = efx_mac_fcntl_set(sa->nic, fcntl, fc_conf->autoneg);
		if (rc != 0)
			goto fail_mac_fcntl_set;
	}

	port->flow_ctrl = fcntl;
	port->flow_ctrl_autoneg = fc_conf->autoneg;

	sfc_adapter_unlock(sa);

	return 0;

fail_mac_fcntl_set:
	sfc_adapter_unlock(sa);
fail_inval:
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	size_t pdu = EFX_MAC_PDU(mtu);
	size_t old_pdu;
	int rc;

	sfc_log_init(sa, "mtu=%u", mtu);

	rc = EINVAL;
	if (pdu < EFX_MAC_PDU_MIN) {
		sfc_err(sa, "too small MTU %u (PDU size %u less than min %u)",
			(unsigned int)mtu, (unsigned int)pdu,
			EFX_MAC_PDU_MIN);
		goto fail_inval;
	}
	if (pdu > EFX_MAC_PDU_MAX) {
		sfc_err(sa, "too big MTU %u (PDU size %u greater than max %u)",
			(unsigned int)mtu, (unsigned int)pdu,
			EFX_MAC_PDU_MAX);
		goto fail_inval;
	}

	sfc_adapter_lock(sa);

	if (pdu != sa->port.pdu) {
		if (sa->state == SFC_ADAPTER_STARTED) {
			sfc_stop(sa);

			old_pdu = sa->port.pdu;
			sa->port.pdu = pdu;
			rc = sfc_start(sa);
			if (rc != 0)
				goto fail_start;
		} else {
			sa->port.pdu = pdu;
		}
	}

	/*
	 * The driver does not use it, but other PMDs update jumbo_frame
	 * flag and max_rx_pkt_len when MTU is set.
	 */
	dev->data->dev_conf.rxmode.jumbo_frame = (mtu > ETHER_MAX_LEN);
	dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu;

	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
	return 0;

fail_start:
	sa->port.pdu = old_pdu;
	if (sfc_start(sa) != 0)
		sfc_err(sa, "cannot start with neither new (%u) nor old (%u) "
			"PDU max size - port is stopped",
			(unsigned int)pdu, (unsigned int)old_pdu);
	sfc_adapter_unlock(sa);

fail_inval:
	sfc_log_init(sa, "failed %d", rc);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static void
sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	int rc;

	sfc_adapter_lock(sa);

	if (sa->state != SFC_ADAPTER_STARTED) {
		sfc_info(sa, "the port is not started");
		sfc_info(sa, "the new MAC address will be set on port start");

		goto unlock;
	}

	if (encp->enc_allow_set_mac_with_installed_filters) {
		rc = efx_mac_addr_set(sa->nic, mac_addr->addr_bytes);
		if (rc != 0) {
			sfc_err(sa, "cannot set MAC address (rc = %u)", rc);
			goto unlock;
		}

		/*
		 * Changing the MAC address by means of MCDI request
		 * has no effect on received traffic, therefore
		 * we also need to update unicast filters
		 */
		rc = sfc_set_rx_mode(sa);
		if (rc != 0)
			sfc_err(sa, "cannot set filter (rc = %u)", rc);
	} else {
		sfc_warn(sa, "cannot set MAC address with filters installed");
		sfc_warn(sa, "adapter will be restarted to pick the new MAC");
		sfc_warn(sa, "(some traffic may be dropped)");

		/*
		 * Since setting MAC address with filters installed is not
		 * allowed on the adapter, one needs to simply restart adapter
		 * so that the new MAC address will be taken from an outer
		 * storage and set flawlessly by means of sfc_start() call
		 */
		sfc_stop(sa);
		rc = sfc_start(sa);
		if (rc != 0)
			sfc_err(sa, "cannot restart adapter (rc = %u)", rc);
	}

unlock:
	sfc_adapter_unlock(sa);
}

static int
sfc_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set,
		     uint32_t nb_mc_addr)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	uint8_t *mc_addrs_p = NULL;
	int rc;
	unsigned int i;

	if (nb_mc_addr > EFX_MAC_MULTICAST_LIST_MAX) {
		sfc_err(sa, "too many multicast addresses: %u > %u",
			nb_mc_addr, EFX_MAC_MULTICAST_LIST_MAX);
		return -EINVAL;
	}

	if (nb_mc_addr != 0) {
		uint8_t *mc_addrs;

		mc_addrs_p = rte_calloc("mc-addrs", nb_mc_addr,
					EFX_MAC_ADDR_LEN, 0);
		if (mc_addrs_p == NULL)
			return -ENOMEM;

		mc_addrs = mc_addrs_p;
		for (i = 0; i < nb_mc_addr; ++i) {
			(void)rte_memcpy(mc_addrs, mc_addr_set[i].addr_bytes,
					 EFX_MAC_ADDR_LEN);
			mc_addrs += EFX_MAC_ADDR_LEN;
		}
	}

	rc = efx_mac_multicast_list_set(sa->nic, mc_addrs_p, nb_mc_addr);

	rte_free(mc_addrs_p);

	if (rc != 0)
		sfc_err(sa, "cannot set multicast address list (rc = %u)", rc);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

static void
sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		      struct rte_eth_rxq_info *qinfo)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;

	sfc_adapter_lock(sa);

	SFC_ASSERT(rx_queue_id < sa->rxq_count);

	rxq_info = &sa->rxq_info[rx_queue_id];
	rxq = rxq_info->rxq;
	SFC_ASSERT(rxq != NULL);

	qinfo->mp = rxq->refill_mb_pool;
	qinfo->conf.rx_free_thresh = rxq->refill_threshold;
	qinfo->conf.rx_drop_en = 1;
	qinfo->conf.rx_deferred_start = rxq_info->deferred_start;
	qinfo->scattered_rx = (rxq_info->type == EFX_RXQ_TYPE_SCATTER);
	qinfo->nb_desc = rxq_info->entries;

	sfc_adapter_unlock(sa);
}

static void
sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		      struct rte_eth_txq_info *qinfo)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_txq_info *txq_info;

	sfc_adapter_lock(sa);

	SFC_ASSERT(tx_queue_id < sa->txq_count);

	txq_info = &sa->txq_info[tx_queue_id];
	SFC_ASSERT(txq_info->txq != NULL);

	memset(qinfo, 0, sizeof(*qinfo));

	qinfo->conf.txq_flags = txq_info->txq->flags;
	qinfo->conf.tx_free_thresh = txq_info->txq->free_thresh;
	qinfo->conf.tx_deferred_start = txq_info->deferred_start;
	qinfo->nb_desc = txq_info->entries;

	sfc_adapter_unlock(sa);
}

static uint32_t
sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "RxQ=%u", rx_queue_id);

	return sfc_rx_qdesc_npending(sa, rx_queue_id);
}

static int
sfc_rx_descriptor_done(void *queue, uint16_t offset)
{
	struct sfc_rxq *rxq = queue;

	return sfc_rx_qdesc_done(rxq, offset);
}

static int
sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	sfc_log_init(sa, "RxQ=%u", rx_queue_id);

	sfc_adapter_lock(sa);

	rc = EINVAL;
	if (sa->state != SFC_ADAPTER_STARTED)
		goto fail_not_started;

	rc = sfc_rx_qstart(sa, rx_queue_id);
	if (rc != 0)
		goto fail_rx_qstart;

	sa->rxq_info[rx_queue_id].deferred_started = B_TRUE;

	sfc_adapter_unlock(sa);

	return 0;

fail_rx_qstart:
fail_not_started:
	sfc_adapter_unlock(sa);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "RxQ=%u", rx_queue_id);

	sfc_adapter_lock(sa);
	sfc_rx_qstop(sa, rx_queue_id);

	sa->rxq_info[rx_queue_id].deferred_started = B_FALSE;

	sfc_adapter_unlock(sa);

	return 0;
}

static int
sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	sfc_log_init(sa, "TxQ = %u", tx_queue_id);

	sfc_adapter_lock(sa);

	rc = EINVAL;
	if (sa->state != SFC_ADAPTER_STARTED)
		goto fail_not_started;

	rc = sfc_tx_qstart(sa, tx_queue_id);
	if (rc != 0)
		goto fail_tx_qstart;

	sa->txq_info[tx_queue_id].deferred_started = B_TRUE;

	sfc_adapter_unlock(sa);
	return 0;
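
	/*
	 * Error paths: rc holds a positive errno; it is negated on return
	 * to follow the ethdev negative-errno convention.
	 */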
fail_tx_qstart:
fail_not_started:
	sfc_adapter_unlock(sa);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "TxQ = %u", tx_queue_id);

	sfc_adapter_lock(sa);

	sfc_tx_qstop(sa, tx_queue_id);

	sa->txq_info[tx_queue_id].deferred_started = B_FALSE;

	sfc_adapter_unlock(sa);
	return 0;
}

#if EFSYS_OPT_RX_SCALE
static int
sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	if ((sa->rss_channels == 1) ||
	    (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE))
		return -ENOTSUP;

	sfc_adapter_lock(sa);

	/*
	 * Mapping of hash configuration between RTE and EFX is not one-to-one,
	 * hence, conversion is done here to derive a correct set of ETH_RSS
	 * flags which corresponds to the active EFX configuration stored
	 * locally in 'sfc_adapter' and kept up-to-date
	 */
	rss_conf->rss_hf = sfc_efx_to_rte_hash_type(sa->rss_hash_types);
	rss_conf->rss_key_len = SFC_RSS_KEY_SIZE;
	if (rss_conf->rss_key != NULL)
		rte_memcpy(rss_conf->rss_key, sa->rss_key, SFC_RSS_KEY_SIZE);

	sfc_adapter_unlock(sa);

	return 0;
}

static int
sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	unsigned int efx_hash_types;
	int rc = 0;

	if ((sa->rss_channels == 1) ||
	    (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE)) {
		sfc_err(sa, "RSS is not available");
		return -ENOTSUP;
	}

	if ((rss_conf->rss_key != NULL) &&
	    (rss_conf->rss_key_len != sizeof(sa->rss_key))) {
		sfc_err(sa, "RSS key size is wrong (should be %lu)",
			sizeof(sa->rss_key));
		return -EINVAL;
	}

	if ((rss_conf->rss_hf & ~SFC_RSS_OFFLOADS) != 0) {
		sfc_err(sa, "unsupported hash functions requested");
		return -EINVAL;
	}

	sfc_adapter_lock(sa);

	efx_hash_types = sfc_rte_to_efx_hash_type(rss_conf->rss_hf);

	rc = efx_rx_scale_mode_set(sa->nic, EFX_RX_HASHALG_TOEPLITZ,
				   efx_hash_types, B_TRUE);
	if (rc != 0)
		goto fail_scale_mode_set;

	if (rss_conf->rss_key != NULL) {
		if (sa->state == SFC_ADAPTER_STARTED) {
			rc = efx_rx_scale_key_set(sa->nic, rss_conf->rss_key,
						  sizeof(sa->rss_key));
			if (rc != 0)
				goto fail_scale_key_set;
		}

		rte_memcpy(sa->rss_key, rss_conf->rss_key, sizeof(sa->rss_key));
	}

	sa->rss_hash_types = efx_hash_types;

	sfc_adapter_unlock(sa);

	return 0;

fail_scale_key_set:
	if (efx_rx_scale_mode_set(sa->nic, EFX_RX_HASHALG_TOEPLITZ,
				  sa->rss_hash_types, B_TRUE) != 0)
		sfc_err(sa, "failed to restore RSS mode");

fail_scale_mode_set:
	sfc_adapter_unlock(sa);
	return -rc;
}

static int
sfc_dev_rss_reta_query(struct rte_eth_dev *dev,
		       struct rte_eth_rss_reta_entry64 *reta_conf,
		       uint16_t reta_size)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int entry;

	if ((sa->rss_channels == 1) ||
	    (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE))
		return -ENOTSUP;

	if (reta_size != EFX_RSS_TBL_SIZE)
		return -EINVAL;

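	/* Copy out the requested table entries under the adapter lock */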
	sfc_adapter_lock(sa);

	for (entry = 0; entry < reta_size; entry++) {
		int grp = entry / RTE_RETA_GROUP_SIZE;
		int grp_idx = entry % RTE_RETA_GROUP_SIZE;

		if ((reta_conf[grp].mask >> grp_idx) & 1)
			reta_conf[grp].reta[grp_idx] = sa->rss_tbl[entry];
	}

	sfc_adapter_unlock(sa);

	return 0;
}

static int
sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	unsigned int *rss_tbl_new;
	uint16_t entry;
	int rc;

	if ((sa->rss_channels == 1) ||
	    (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE)) {
		sfc_err(sa, "RSS is not available");
		return -ENOTSUP;
	}

	if (reta_size != EFX_RSS_TBL_SIZE) {
		sfc_err(sa, "RETA size is wrong (should be %u)",
			EFX_RSS_TBL_SIZE);
		return -EINVAL;
	}

	rss_tbl_new = rte_zmalloc("rss_tbl_new", sizeof(sa->rss_tbl), 0);
	if (rss_tbl_new == NULL)
		return -ENOMEM;

	sfc_adapter_lock(sa);

	rte_memcpy(rss_tbl_new, sa->rss_tbl, sizeof(sa->rss_tbl));

	for (entry = 0; entry < reta_size; entry++) {
		int grp_idx = entry % RTE_RETA_GROUP_SIZE;
		struct rte_eth_rss_reta_entry64 *grp;

		grp = &reta_conf[entry / RTE_RETA_GROUP_SIZE];

		if (grp->mask & (1ull << grp_idx)) {
			if (grp->reta[grp_idx] >= sa->rss_channels) {
				rc = EINVAL;
				goto bad_reta_entry;
			}
			rss_tbl_new[entry] = grp->reta[grp_idx];
		}
	}

	rc = efx_rx_scale_tbl_set(sa->nic, rss_tbl_new, EFX_RSS_TBL_SIZE);
	if (rc == 0)
		rte_memcpy(sa->rss_tbl, rss_tbl_new, sizeof(sa->rss_tbl));

bad_reta_entry:
	sfc_adapter_unlock(sa);

	rte_free(rss_tbl_new);

	SFC_ASSERT(rc >= 0);
	return -rc;
}
#endif

static int
sfc_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
		    enum rte_filter_op filter_op,
		    void *arg)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc = ENOTSUP;

	sfc_log_init(sa, "entry");

	switch (filter_type) {
	case RTE_ETH_FILTER_NONE:
		sfc_err(sa, "Global filters configuration not supported");
		break;
	case RTE_ETH_FILTER_MACVLAN:
		sfc_err(sa, "MACVLAN filters not supported");
		break;
	case RTE_ETH_FILTER_ETHERTYPE:
		sfc_err(sa, "EtherType filters not supported");
		break;
	case RTE_ETH_FILTER_FLEXIBLE:
		sfc_err(sa, "Flexible filters not supported");
		break;
	case RTE_ETH_FILTER_SYN:
		sfc_err(sa, "SYN filters not supported");
		break;
	case RTE_ETH_FILTER_NTUPLE:
		sfc_err(sa, "NTUPLE filters not supported");
		break;
	case RTE_ETH_FILTER_TUNNEL:
		sfc_err(sa, "Tunnel filters not supported");
		break;
	case RTE_ETH_FILTER_FDIR:
		sfc_err(sa, "Flow Director filters not supported");
		break;
	case RTE_ETH_FILTER_HASH:
		sfc_err(sa, "Hash filters not supported");
		break;
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET) {
			rc = EINVAL;
		} else {
			*(const void **)arg = &sfc_flow_ops;
			rc = 0;
		}
		break;
	default:
		sfc_err(sa, "Unknown filter type %u", filter_type);
		break;
	}

	sfc_log_init(sa, "exit: %d", -rc);
	SFC_ASSERT(rc >= 0);
	return -rc;
}

static const struct eth_dev_ops sfc_eth_dev_ops = {
	.dev_configure = sfc_dev_configure,
	.dev_start = sfc_dev_start,
	.dev_stop = sfc_dev_stop,
	.dev_set_link_up = sfc_dev_set_link_up,
	.dev_set_link_down = sfc_dev_set_link_down,
	.dev_close = sfc_dev_close,
	.promiscuous_enable = sfc_dev_promisc_enable,
	.promiscuous_disable = sfc_dev_promisc_disable,
	.allmulticast_enable = sfc_dev_allmulti_enable,
	.allmulticast_disable = sfc_dev_allmulti_disable,
	.link_update = sfc_dev_link_update,
	.stats_get = sfc_stats_get,
	.stats_reset = sfc_stats_reset,
	.xstats_get = sfc_xstats_get,
	.xstats_reset = sfc_stats_reset,
	.xstats_get_names = sfc_xstats_get_names,
	.dev_infos_get = sfc_dev_infos_get,
	.dev_supported_ptypes_get = sfc_dev_supported_ptypes_get,
	.mtu_set = sfc_dev_set_mtu,
	.rx_queue_start = sfc_rx_queue_start,
	.rx_queue_stop = sfc_rx_queue_stop,
	.tx_queue_start = sfc_tx_queue_start,
	.tx_queue_stop = sfc_tx_queue_stop,
	.rx_queue_setup = sfc_rx_queue_setup,
	.rx_queue_release = sfc_rx_queue_release,
	.rx_queue_count = sfc_rx_queue_count,
	.rx_descriptor_done = sfc_rx_descriptor_done,
	.tx_queue_setup = sfc_tx_queue_setup,
	.tx_queue_release = sfc_tx_queue_release,
	.flow_ctrl_get = sfc_flow_ctrl_get,
	.flow_ctrl_set = sfc_flow_ctrl_set,
	.mac_addr_set = sfc_mac_addr_set,
#if EFSYS_OPT_RX_SCALE
	.reta_update = sfc_dev_rss_reta_update,
	.reta_query = sfc_dev_rss_reta_query,
	.rss_hash_update = sfc_dev_rss_hash_update,
	.rss_hash_conf_get = sfc_dev_rss_hash_conf_get,
#endif
	.filter_ctrl = sfc_dev_filter_ctrl,
	.set_mc_addr_list = sfc_set_mc_addr_list,
	.rxq_info_get = sfc_rx_queue_info_get,
	.txq_info_get = sfc_tx_queue_info_get,
};

static int
sfc_eth_dev_init(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(dev);
	int rc;
	const efx_nic_cfg_t *encp;
	const struct ether_addr *from;

	/* Required for logging */
	sa->eth_dev = dev;

	/* Copy PCI device info to the dev->data */
	rte_eth_copy_pci_info(dev, pci_dev);

	rc = sfc_kvargs_parse(sa);
	if (rc != 0)
		goto fail_kvargs_parse;

	rc = sfc_kvargs_process(sa, SFC_KVARG_DEBUG_INIT,
				sfc_kvarg_bool_handler, &sa->debug_init);
	if (rc != 0)
		goto fail_kvarg_debug_init;

	sfc_log_init(sa, "entry");

	dev->data->mac_addrs = rte_zmalloc("sfc", ETHER_ADDR_LEN, 0);
	if (dev->data->mac_addrs == NULL) {
		rc = ENOMEM;
		goto fail_mac_addrs;
	}

	sfc_adapter_lock_init(sa);
	sfc_adapter_lock(sa);

	sfc_log_init(sa, "attaching");
	rc = sfc_attach(sa);
	if (rc != 0)
		goto fail_attach;

	encp = efx_nic_cfg_get(sa->nic);

	/*
	 * The arguments are in reverse order in comparison to the Linux
	 * kernel ether_addr_copy(). Copy from NIC config to Ethernet
	 * device data.
	 */
	from = (const struct ether_addr *)(encp->enc_mac_addr);
	ether_addr_copy(from, &dev->data->mac_addrs[0]);

	dev->dev_ops = &sfc_eth_dev_ops;
	dev->rx_pkt_burst = &sfc_recv_pkts;
	dev->tx_pkt_burst = &sfc_xmit_pkts;

	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
	return 0;

fail_attach:
	sfc_adapter_unlock(sa);
	sfc_adapter_lock_fini(sa);
	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

fail_mac_addrs:
fail_kvarg_debug_init:
	sfc_kvargs_cleanup(sa);

fail_kvargs_parse:
	sfc_log_init(sa, "failed %d", rc);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_eth_dev_uninit(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);

	sfc_detach(sa);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	sfc_kvargs_cleanup(sa);

	sfc_adapter_unlock(sa);
	sfc_adapter_lock_fini(sa);

	sfc_log_init(sa, "done");

	/* Required for logging, so cleanup last */
	sa->eth_dev = NULL;
	return 0;
}

static const struct rte_pci_id pci_id_sfc_efx_map[] = {
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) },
	{ .vendor_id = 0 /* sentinel */ }
};

static struct eth_driver sfc_efx_pmd = {
	.pci_drv = {
		.id_table = pci_id_sfc_efx_map,
		.drv_flags =
			RTE_PCI_DRV_INTR_LSC |
			RTE_PCI_DRV_NEED_MAPPING,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = sfc_eth_dev_init,
	.eth_dev_uninit = sfc_eth_dev_uninit,
	.dev_private_size = sizeof(struct sfc_adapter),
};

RTE_PMD_REGISTER_PCI(net_sfc_efx, sfc_efx_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map);
RTE_PMD_REGISTER_KMOD_DEP(net_sfc_efx, "* igb_uio | uio_pci_generic | vfio");
RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx,
	SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " "
	SFC_KVARG_MCDI_LOGGING "=" SFC_KVARG_VALUES_BOOL " "
	SFC_KVARG_DEBUG_INIT "=" SFC_KVARG_VALUES_BOOL);