1 /*- 2 * Copyright (c) 2016 Solarflare Communications Inc. 3 * All rights reserved. 4 * 5 * This software was jointly developed between OKTET Labs (under contract 6 * for Solarflare) and Solarflare Communications, Inc. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright notice, 12 * this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright notice, 14 * this list of conditions and the following disclaimer in the documentation 15 * and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 18 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 19 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR 21 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 22 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 26 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, 27 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30 #include <rte_dev.h> 31 #include <rte_ethdev.h> 32 #include <rte_pci.h> 33 34 #include "efx.h" 35 36 #include "sfc.h" 37 #include "sfc_debug.h" 38 #include "sfc_log.h" 39 #include "sfc_kvargs.h" 40 #include "sfc_ev.h" 41 #include "sfc_rx.h" 42 #include "sfc_tx.h" 43 44 45 static void 46 sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 47 { 48 struct sfc_adapter *sa = dev->data->dev_private; 49 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); 50 51 sfc_log_init(sa, "entry"); 52 53 dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device); 54 dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX; 55 56 /* Autonegotiation may be disabled */ 57 dev_info->speed_capa = ETH_LINK_SPEED_FIXED; 58 if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_1000FDX) 59 dev_info->speed_capa |= ETH_LINK_SPEED_1G; 60 if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_10000FDX) 61 dev_info->speed_capa |= ETH_LINK_SPEED_10G; 62 if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_40000FDX) 63 dev_info->speed_capa |= ETH_LINK_SPEED_40G; 64 65 dev_info->max_rx_queues = sa->rxq_max; 66 dev_info->max_tx_queues = sa->txq_max; 67 68 /* By default packets are dropped if no descriptors are available */ 69 dev_info->default_rxconf.rx_drop_en = 1; 70 71 dev_info->rx_offload_capa = 72 DEV_RX_OFFLOAD_IPV4_CKSUM | 73 DEV_RX_OFFLOAD_UDP_CKSUM | 74 DEV_RX_OFFLOAD_TCP_CKSUM; 75 76 dev_info->tx_offload_capa = 77 DEV_TX_OFFLOAD_IPV4_CKSUM | 78 DEV_TX_OFFLOAD_UDP_CKSUM | 79 DEV_TX_OFFLOAD_TCP_CKSUM; 80 81 dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOXSUMSCTP; 82 if (!encp->enc_hw_tx_insert_vlan_enabled) 83 dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOVLANOFFL; 84 else 85 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_VLAN_INSERT; 86 87 #if EFSYS_OPT_RX_SCALE 88 if (sa->rss_support != EFX_RX_SCALE_UNAVAILABLE) { 89 dev_info->reta_size = EFX_RSS_TBL_SIZE; 90 dev_info->hash_key_size = SFC_RSS_KEY_SIZE; 91 dev_info->flow_type_rss_offloads = SFC_RSS_OFFLOADS; 92 } 93 #endif 94 95 if (sa->tso) 96 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO; 97 98 dev_info->rx_desc_lim.nb_max = EFX_RXQ_MAXNDESCS; 99 dev_info->rx_desc_lim.nb_min = EFX_RXQ_MINNDESCS; 100 /* The RXQ hardware requires that the descriptor count is a power 101 * of 2, but rx_desc_lim cannot properly describe that constraint. 102 */ 103 dev_info->rx_desc_lim.nb_align = EFX_RXQ_MINNDESCS; 104 105 dev_info->tx_desc_lim.nb_max = sa->txq_max_entries; 106 dev_info->tx_desc_lim.nb_min = EFX_TXQ_MINNDESCS; 107 /* 108 * The TXQ hardware requires that the descriptor count is a power 109 * of 2, but tx_desc_lim cannot properly describe that constraint 110 */ 111 dev_info->tx_desc_lim.nb_align = EFX_TXQ_MINNDESCS; 112 } 113 114 static const uint32_t * 115 sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev) 116 { 117 static const uint32_t ptypes[] = { 118 RTE_PTYPE_L2_ETHER, 119 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 120 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 121 RTE_PTYPE_L4_TCP, 122 RTE_PTYPE_L4_UDP, 123 RTE_PTYPE_UNKNOWN 124 }; 125 126 if (dev->rx_pkt_burst == sfc_recv_pkts) 127 return ptypes; 128 129 return NULL; 130 } 131 132 static int 133 sfc_dev_configure(struct rte_eth_dev *dev) 134 { 135 struct rte_eth_dev_data *dev_data = dev->data; 136 struct sfc_adapter *sa = dev_data->dev_private; 137 int rc; 138 139 sfc_log_init(sa, "entry n_rxq=%u n_txq=%u", 140 dev_data->nb_rx_queues, dev_data->nb_tx_queues); 141 142 sfc_adapter_lock(sa); 143 switch (sa->state) { 144 case SFC_ADAPTER_CONFIGURED: 145 sfc_close(sa); 146 SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED); 147 /* FALLTHROUGH */ 148 case SFC_ADAPTER_INITIALIZED: 149 rc = sfc_configure(sa); 150 break; 151 default: 152 sfc_err(sa, "unexpected adapter state %u to configure", 153 sa->state); 154 rc = EINVAL; 155 break; 156 } 157 sfc_adapter_unlock(sa); 158 159 sfc_log_init(sa, "done %d", rc); 160 SFC_ASSERT(rc >= 0); 161 return -rc; 162 } 163 164 static int 165 sfc_dev_start(struct rte_eth_dev *dev) 166 { 167 struct sfc_adapter *sa = dev->data->dev_private; 168 int rc; 169 170 sfc_log_init(sa, "entry"); 171 172 sfc_adapter_lock(sa); 173 rc = sfc_start(sa); 174 sfc_adapter_unlock(sa); 175 176 sfc_log_init(sa, "done %d", rc); 177 SFC_ASSERT(rc >= 0); 178 return -rc; 179 } 180 181 static int 182 sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) 183 { 184 struct sfc_adapter *sa = dev->data->dev_private; 185 struct rte_eth_link *dev_link = &dev->data->dev_link; 186 struct rte_eth_link old_link; 187 struct rte_eth_link current_link; 188 189 sfc_log_init(sa, "entry"); 190 191 if (sa->state != SFC_ADAPTER_STARTED) 192 return 0; 193 194 retry: 195 EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t)); 196 *(int64_t *)&old_link = rte_atomic64_read((rte_atomic64_t *)dev_link); 197 198 if (wait_to_complete) { 199 efx_link_mode_t link_mode; 200 201 efx_port_poll(sa->nic, &link_mode); 202 sfc_port_link_mode_to_info(link_mode, ¤t_link); 203 204 if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link, 205 *(uint64_t *)&old_link, 206 *(uint64_t *)¤t_link)) 207 goto retry; 208 } else { 209 sfc_ev_mgmt_qpoll(sa); 210 *(int64_t *)¤t_link = 211 rte_atomic64_read((rte_atomic64_t *)dev_link); 212 } 213 214 if (old_link.link_status != current_link.link_status) 215 sfc_info(sa, "Link status is %s", 216 current_link.link_status ? "UP" : "DOWN"); 217 218 return old_link.link_status == current_link.link_status ? 0 : -1; 219 } 220 221 static void 222 sfc_dev_stop(struct rte_eth_dev *dev) 223 { 224 struct sfc_adapter *sa = dev->data->dev_private; 225 226 sfc_log_init(sa, "entry"); 227 228 sfc_adapter_lock(sa); 229 sfc_stop(sa); 230 sfc_adapter_unlock(sa); 231 232 sfc_log_init(sa, "done"); 233 } 234 235 static int 236 sfc_dev_set_link_up(struct rte_eth_dev *dev) 237 { 238 struct sfc_adapter *sa = dev->data->dev_private; 239 int rc; 240 241 sfc_log_init(sa, "entry"); 242 243 sfc_adapter_lock(sa); 244 rc = sfc_start(sa); 245 sfc_adapter_unlock(sa); 246 247 SFC_ASSERT(rc >= 0); 248 return -rc; 249 } 250 251 static int 252 sfc_dev_set_link_down(struct rte_eth_dev *dev) 253 { 254 struct sfc_adapter *sa = dev->data->dev_private; 255 256 sfc_log_init(sa, "entry"); 257 258 sfc_adapter_lock(sa); 259 sfc_stop(sa); 260 sfc_adapter_unlock(sa); 261 262 return 0; 263 } 264 265 static void 266 sfc_dev_close(struct rte_eth_dev *dev) 267 { 268 struct sfc_adapter *sa = dev->data->dev_private; 269 270 sfc_log_init(sa, "entry"); 271 272 sfc_adapter_lock(sa); 273 switch (sa->state) { 274 case SFC_ADAPTER_STARTED: 275 sfc_stop(sa); 276 SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED); 277 /* FALLTHROUGH */ 278 case SFC_ADAPTER_CONFIGURED: 279 sfc_close(sa); 280 SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED); 281 /* FALLTHROUGH */ 282 case SFC_ADAPTER_INITIALIZED: 283 break; 284 default: 285 sfc_err(sa, "unexpected adapter state %u on close", sa->state); 286 break; 287 } 288 sfc_adapter_unlock(sa); 289 290 sfc_log_init(sa, "done"); 291 } 292 293 static void 294 sfc_dev_filter_set(struct rte_eth_dev *dev, enum sfc_dev_filter_mode mode, 295 boolean_t enabled) 296 { 297 struct sfc_port *port; 298 boolean_t *toggle; 299 struct sfc_adapter *sa = dev->data->dev_private; 300 boolean_t allmulti = (mode == SFC_DEV_FILTER_MODE_ALLMULTI); 301 const char *desc = (allmulti) ? "all-multi" : "promiscuous"; 302 303 sfc_adapter_lock(sa); 304 305 port = &sa->port; 306 toggle = (allmulti) ? (&port->allmulti) : (&port->promisc); 307 308 if (*toggle != enabled) { 309 *toggle = enabled; 310 311 if ((sa->state == SFC_ADAPTER_STARTED) && 312 (sfc_set_rx_mode(sa) != 0)) { 313 *toggle = !(enabled); 314 sfc_warn(sa, "Failed to %s %s mode", 315 ((enabled) ? "enable" : "disable"), desc); 316 } 317 } 318 319 sfc_adapter_unlock(sa); 320 } 321 322 static void 323 sfc_dev_promisc_enable(struct rte_eth_dev *dev) 324 { 325 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_TRUE); 326 } 327 328 static void 329 sfc_dev_promisc_disable(struct rte_eth_dev *dev) 330 { 331 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_FALSE); 332 } 333 334 static void 335 sfc_dev_allmulti_enable(struct rte_eth_dev *dev) 336 { 337 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_TRUE); 338 } 339 340 static void 341 sfc_dev_allmulti_disable(struct rte_eth_dev *dev) 342 { 343 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_FALSE); 344 } 345 346 static int 347 sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, 348 uint16_t nb_rx_desc, unsigned int socket_id, 349 const struct rte_eth_rxconf *rx_conf, 350 struct rte_mempool *mb_pool) 351 { 352 struct sfc_adapter *sa = dev->data->dev_private; 353 int rc; 354 355 sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u", 356 rx_queue_id, nb_rx_desc, socket_id); 357 358 sfc_adapter_lock(sa); 359 360 rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id, 361 rx_conf, mb_pool); 362 if (rc != 0) 363 goto fail_rx_qinit; 364 365 dev->data->rx_queues[rx_queue_id] = sa->rxq_info[rx_queue_id].rxq; 366 367 sfc_adapter_unlock(sa); 368 369 return 0; 370 371 fail_rx_qinit: 372 sfc_adapter_unlock(sa); 373 SFC_ASSERT(rc > 0); 374 return -rc; 375 } 376 377 static void 378 sfc_rx_queue_release(void *queue) 379 { 380 struct sfc_rxq *rxq = queue; 381 struct sfc_adapter *sa; 382 unsigned int sw_index; 383 384 if (rxq == NULL) 385 return; 386 387 sa = rxq->evq->sa; 388 sfc_adapter_lock(sa); 389 390 sw_index = sfc_rxq_sw_index(rxq); 391 392 sfc_log_init(sa, "RxQ=%u", sw_index); 393 394 sa->eth_dev->data->rx_queues[sw_index] = NULL; 395 396 sfc_rx_qfini(sa, sw_index); 397 398 sfc_adapter_unlock(sa); 399 } 400 401 static int 402 sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, 403 uint16_t nb_tx_desc, unsigned int socket_id, 404 const struct rte_eth_txconf *tx_conf) 405 { 406 struct sfc_adapter *sa = dev->data->dev_private; 407 int rc; 408 409 sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u", 410 tx_queue_id, nb_tx_desc, socket_id); 411 412 sfc_adapter_lock(sa); 413 414 rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf); 415 if (rc != 0) 416 goto fail_tx_qinit; 417 418 dev->data->tx_queues[tx_queue_id] = sa->txq_info[tx_queue_id].txq; 419 420 sfc_adapter_unlock(sa); 421 return 0; 422 423 fail_tx_qinit: 424 sfc_adapter_unlock(sa); 425 SFC_ASSERT(rc > 0); 426 return -rc; 427 } 428 429 static void 430 sfc_tx_queue_release(void *queue) 431 { 432 struct sfc_txq *txq = queue; 433 unsigned int sw_index; 434 struct sfc_adapter *sa; 435 436 if (txq == NULL) 437 return; 438 439 sw_index = sfc_txq_sw_index(txq); 440 441 SFC_ASSERT(txq->evq != NULL); 442 sa = txq->evq->sa; 443 444 sfc_log_init(sa, "TxQ = %u", sw_index); 445 446 sfc_adapter_lock(sa); 447 448 SFC_ASSERT(sw_index < sa->eth_dev->data->nb_tx_queues); 449 sa->eth_dev->data->tx_queues[sw_index] = NULL; 450 451 sfc_tx_qfini(sa, sw_index); 452 453 sfc_adapter_unlock(sa); 454 } 455 456 static void 457 sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 458 { 459 struct sfc_adapter *sa = dev->data->dev_private; 460 struct sfc_port *port = &sa->port; 461 uint64_t *mac_stats; 462 463 rte_spinlock_lock(&port->mac_stats_lock); 464 465 if (sfc_port_update_mac_stats(sa) != 0) 466 goto unlock; 467 468 mac_stats = port->mac_stats_buf; 469 470 if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, 471 EFX_MAC_VADAPTER_RX_UNICAST_PACKETS)) { 472 stats->ipackets = 473 mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS] + 474 mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS] + 475 mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS]; 476 stats->opackets = 477 mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS] + 478 mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS] + 479 mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS]; 480 stats->ibytes = 481 mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_BYTES] + 482 mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES] + 483 mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES]; 484 stats->obytes = 485 mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_BYTES] + 486 mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES] + 487 mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES]; 488 stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_OVERFLOW]; 489 stats->ierrors = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS]; 490 stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS]; 491 } else { 492 stats->ipackets = mac_stats[EFX_MAC_RX_PKTS]; 493 stats->opackets = mac_stats[EFX_MAC_TX_PKTS]; 494 stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS]; 495 stats->obytes = mac_stats[EFX_MAC_TX_OCTETS]; 496 /* 497 * Take into account stats which are whenever supported 498 * on EF10. If some stat is not supported by current 499 * firmware variant or HW revision, it is guaranteed 500 * to be zero in mac_stats. 501 */ 502 stats->imissed = 503 mac_stats[EFX_MAC_RX_NODESC_DROP_CNT] + 504 mac_stats[EFX_MAC_PM_TRUNC_BB_OVERFLOW] + 505 mac_stats[EFX_MAC_PM_DISCARD_BB_OVERFLOW] + 506 mac_stats[EFX_MAC_PM_TRUNC_VFIFO_FULL] + 507 mac_stats[EFX_MAC_PM_DISCARD_VFIFO_FULL] + 508 mac_stats[EFX_MAC_PM_TRUNC_QBB] + 509 mac_stats[EFX_MAC_PM_DISCARD_QBB] + 510 mac_stats[EFX_MAC_PM_DISCARD_MAPPING] + 511 mac_stats[EFX_MAC_RXDP_Q_DISABLED_PKTS] + 512 mac_stats[EFX_MAC_RXDP_DI_DROPPED_PKTS]; 513 stats->ierrors = 514 mac_stats[EFX_MAC_RX_FCS_ERRORS] + 515 mac_stats[EFX_MAC_RX_ALIGN_ERRORS] + 516 mac_stats[EFX_MAC_RX_JABBER_PKTS]; 517 /* no oerrors counters supported on EF10 */ 518 } 519 520 unlock: 521 rte_spinlock_unlock(&port->mac_stats_lock); 522 } 523 524 static int 525 sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 526 unsigned int xstats_count) 527 { 528 struct sfc_adapter *sa = dev->data->dev_private; 529 struct sfc_port *port = &sa->port; 530 uint64_t *mac_stats; 531 int rc; 532 unsigned int i; 533 int nstats = 0; 534 535 rte_spinlock_lock(&port->mac_stats_lock); 536 537 rc = sfc_port_update_mac_stats(sa); 538 if (rc != 0) { 539 SFC_ASSERT(rc > 0); 540 nstats = -rc; 541 goto unlock; 542 } 543 544 mac_stats = port->mac_stats_buf; 545 546 for (i = 0; i < EFX_MAC_NSTATS; ++i) { 547 if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) { 548 if (xstats != NULL && nstats < (int)xstats_count) { 549 xstats[nstats].id = nstats; 550 xstats[nstats].value = mac_stats[i]; 551 } 552 nstats++; 553 } 554 } 555 556 unlock: 557 rte_spinlock_unlock(&port->mac_stats_lock); 558 559 return nstats; 560 } 561 562 static int 563 sfc_xstats_get_names(struct rte_eth_dev *dev, 564 struct rte_eth_xstat_name *xstats_names, 565 unsigned int xstats_count) 566 { 567 struct sfc_adapter *sa = dev->data->dev_private; 568 struct sfc_port *port = &sa->port; 569 unsigned int i; 570 unsigned int nstats = 0; 571 572 for (i = 0; i < EFX_MAC_NSTATS; ++i) { 573 if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) { 574 if (xstats_names != NULL && nstats < xstats_count) 575 strncpy(xstats_names[nstats].name, 576 efx_mac_stat_name(sa->nic, i), 577 sizeof(xstats_names[0].name)); 578 nstats++; 579 } 580 } 581 582 return nstats; 583 } 584 585 static int 586 sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 587 { 588 struct sfc_adapter *sa = dev->data->dev_private; 589 unsigned int wanted_fc, link_fc; 590 591 memset(fc_conf, 0, sizeof(*fc_conf)); 592 593 sfc_adapter_lock(sa); 594 595 if (sa->state == SFC_ADAPTER_STARTED) 596 efx_mac_fcntl_get(sa->nic, &wanted_fc, &link_fc); 597 else 598 link_fc = sa->port.flow_ctrl; 599 600 switch (link_fc) { 601 case 0: 602 fc_conf->mode = RTE_FC_NONE; 603 break; 604 case EFX_FCNTL_RESPOND: 605 fc_conf->mode = RTE_FC_RX_PAUSE; 606 break; 607 case EFX_FCNTL_GENERATE: 608 fc_conf->mode = RTE_FC_TX_PAUSE; 609 break; 610 case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE): 611 fc_conf->mode = RTE_FC_FULL; 612 break; 613 default: 614 sfc_err(sa, "%s: unexpected flow control value %#x", 615 __func__, link_fc); 616 } 617 618 fc_conf->autoneg = sa->port.flow_ctrl_autoneg; 619 620 sfc_adapter_unlock(sa); 621 622 return 0; 623 } 624 625 static int 626 sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 627 { 628 struct sfc_adapter *sa = dev->data->dev_private; 629 struct sfc_port *port = &sa->port; 630 unsigned int fcntl; 631 int rc; 632 633 if (fc_conf->high_water != 0 || fc_conf->low_water != 0 || 634 fc_conf->pause_time != 0 || fc_conf->send_xon != 0 || 635 fc_conf->mac_ctrl_frame_fwd != 0) { 636 sfc_err(sa, "unsupported flow control settings specified"); 637 rc = EINVAL; 638 goto fail_inval; 639 } 640 641 switch (fc_conf->mode) { 642 case RTE_FC_NONE: 643 fcntl = 0; 644 break; 645 case RTE_FC_RX_PAUSE: 646 fcntl = EFX_FCNTL_RESPOND; 647 break; 648 case RTE_FC_TX_PAUSE: 649 fcntl = EFX_FCNTL_GENERATE; 650 break; 651 case RTE_FC_FULL: 652 fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE; 653 break; 654 default: 655 rc = EINVAL; 656 goto fail_inval; 657 } 658 659 sfc_adapter_lock(sa); 660 661 if (sa->state == SFC_ADAPTER_STARTED) { 662 rc = efx_mac_fcntl_set(sa->nic, fcntl, fc_conf->autoneg); 663 if (rc != 0) 664 goto fail_mac_fcntl_set; 665 } 666 667 port->flow_ctrl = fcntl; 668 port->flow_ctrl_autoneg = fc_conf->autoneg; 669 670 sfc_adapter_unlock(sa); 671 672 return 0; 673 674 fail_mac_fcntl_set: 675 sfc_adapter_unlock(sa); 676 fail_inval: 677 SFC_ASSERT(rc > 0); 678 return -rc; 679 } 680 681 static int 682 sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) 683 { 684 struct sfc_adapter *sa = dev->data->dev_private; 685 size_t pdu = EFX_MAC_PDU(mtu); 686 size_t old_pdu; 687 int rc; 688 689 sfc_log_init(sa, "mtu=%u", mtu); 690 691 rc = EINVAL; 692 if (pdu < EFX_MAC_PDU_MIN) { 693 sfc_err(sa, "too small MTU %u (PDU size %u less than min %u)", 694 (unsigned int)mtu, (unsigned int)pdu, 695 EFX_MAC_PDU_MIN); 696 goto fail_inval; 697 } 698 if (pdu > EFX_MAC_PDU_MAX) { 699 sfc_err(sa, "too big MTU %u (PDU size %u greater than max %u)", 700 (unsigned int)mtu, (unsigned int)pdu, 701 EFX_MAC_PDU_MAX); 702 goto fail_inval; 703 } 704 705 sfc_adapter_lock(sa); 706 707 if (pdu != sa->port.pdu) { 708 if (sa->state == SFC_ADAPTER_STARTED) { 709 sfc_stop(sa); 710 711 old_pdu = sa->port.pdu; 712 sa->port.pdu = pdu; 713 rc = sfc_start(sa); 714 if (rc != 0) 715 goto fail_start; 716 } else { 717 sa->port.pdu = pdu; 718 } 719 } 720 721 /* 722 * The driver does not use it, but other PMDs update jumbo_frame 723 * flag and max_rx_pkt_len when MTU is set. 724 */ 725 dev->data->dev_conf.rxmode.jumbo_frame = (mtu > ETHER_MAX_LEN); 726 dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu; 727 728 sfc_adapter_unlock(sa); 729 730 sfc_log_init(sa, "done"); 731 return 0; 732 733 fail_start: 734 sa->port.pdu = old_pdu; 735 if (sfc_start(sa) != 0) 736 sfc_err(sa, "cannot start with neither new (%u) nor old (%u) " 737 "PDU max size - port is stopped", 738 (unsigned int)pdu, (unsigned int)old_pdu); 739 sfc_adapter_unlock(sa); 740 741 fail_inval: 742 sfc_log_init(sa, "failed %d", rc); 743 SFC_ASSERT(rc > 0); 744 return -rc; 745 } 746 static void 747 sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) 748 { 749 struct sfc_adapter *sa = dev->data->dev_private; 750 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); 751 int rc; 752 753 sfc_adapter_lock(sa); 754 755 if (sa->state != SFC_ADAPTER_STARTED) { 756 sfc_info(sa, "the port is not started"); 757 sfc_info(sa, "the new MAC address will be set on port start"); 758 759 goto unlock; 760 } 761 762 if (encp->enc_allow_set_mac_with_installed_filters) { 763 rc = efx_mac_addr_set(sa->nic, mac_addr->addr_bytes); 764 if (rc != 0) { 765 sfc_err(sa, "cannot set MAC address (rc = %u)", rc); 766 goto unlock; 767 } 768 769 /* 770 * Changing the MAC address by means of MCDI request 771 * has no effect on received traffic, therefore 772 * we also need to update unicast filters 773 */ 774 rc = sfc_set_rx_mode(sa); 775 if (rc != 0) 776 sfc_err(sa, "cannot set filter (rc = %u)", rc); 777 } else { 778 sfc_warn(sa, "cannot set MAC address with filters installed"); 779 sfc_warn(sa, "adapter will be restarted to pick the new MAC"); 780 sfc_warn(sa, "(some traffic may be dropped)"); 781 782 /* 783 * Since setting MAC address with filters installed is not 784 * allowed on the adapter, one needs to simply restart adapter 785 * so that the new MAC address will be taken from an outer 786 * storage and set flawlessly by means of sfc_start() call 787 */ 788 sfc_stop(sa); 789 rc = sfc_start(sa); 790 if (rc != 0) 791 sfc_err(sa, "cannot restart adapter (rc = %u)", rc); 792 } 793 794 unlock: 795 sfc_adapter_unlock(sa); 796 } 797 798 799 static int 800 sfc_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set, 801 uint32_t nb_mc_addr) 802 { 803 struct sfc_adapter *sa = dev->data->dev_private; 804 uint8_t *mc_addrs_p; 805 uint8_t *mc_addrs; 806 int rc; 807 unsigned int i; 808 809 if (nb_mc_addr > EFX_MAC_MULTICAST_LIST_MAX) { 810 sfc_err(sa, "too many multicast addresses: %u > %u", 811 nb_mc_addr, EFX_MAC_MULTICAST_LIST_MAX); 812 return -EINVAL; 813 } 814 815 mc_addrs_p = rte_calloc("mc-addrs", nb_mc_addr, EFX_MAC_ADDR_LEN, 0); 816 if (mc_addrs_p == NULL) 817 return -ENOMEM; 818 819 mc_addrs = mc_addrs_p; 820 821 for (i = 0; i < nb_mc_addr; ++i) { 822 (void)rte_memcpy(mc_addrs, mc_addr_set[i].addr_bytes, 823 EFX_MAC_ADDR_LEN); 824 mc_addrs += EFX_MAC_ADDR_LEN; 825 } 826 827 rc = efx_mac_multicast_list_set(sa->nic, mc_addrs_p, nb_mc_addr); 828 829 rte_free(mc_addrs_p); 830 831 if (rc != 0) 832 sfc_err(sa, "cannot set multicast address list (rc = %u)", rc); 833 834 SFC_ASSERT(rc > 0); 835 return -rc; 836 } 837 838 static void 839 sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id, 840 struct rte_eth_rxq_info *qinfo) 841 { 842 struct sfc_adapter *sa = dev->data->dev_private; 843 struct sfc_rxq_info *rxq_info; 844 struct sfc_rxq *rxq; 845 846 sfc_adapter_lock(sa); 847 848 SFC_ASSERT(rx_queue_id < sa->rxq_count); 849 850 rxq_info = &sa->rxq_info[rx_queue_id]; 851 rxq = rxq_info->rxq; 852 SFC_ASSERT(rxq != NULL); 853 854 qinfo->mp = rxq->refill_mb_pool; 855 qinfo->conf.rx_free_thresh = rxq->refill_threshold; 856 qinfo->conf.rx_drop_en = 1; 857 qinfo->conf.rx_deferred_start = rxq_info->deferred_start; 858 qinfo->scattered_rx = (rxq_info->type == EFX_RXQ_TYPE_SCATTER); 859 qinfo->nb_desc = rxq_info->entries; 860 861 sfc_adapter_unlock(sa); 862 } 863 864 static void 865 sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id, 866 struct rte_eth_txq_info *qinfo) 867 { 868 struct sfc_adapter *sa = dev->data->dev_private; 869 struct sfc_txq_info *txq_info; 870 871 sfc_adapter_lock(sa); 872 873 SFC_ASSERT(tx_queue_id < sa->txq_count); 874 875 txq_info = &sa->txq_info[tx_queue_id]; 876 SFC_ASSERT(txq_info->txq != NULL); 877 878 memset(qinfo, 0, sizeof(*qinfo)); 879 880 qinfo->conf.txq_flags = txq_info->txq->flags; 881 qinfo->conf.tx_free_thresh = txq_info->txq->free_thresh; 882 qinfo->conf.tx_deferred_start = txq_info->deferred_start; 883 qinfo->nb_desc = txq_info->entries; 884 885 sfc_adapter_unlock(sa); 886 } 887 888 static uint32_t 889 sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) 890 { 891 struct sfc_adapter *sa = dev->data->dev_private; 892 893 sfc_log_init(sa, "RxQ=%u", rx_queue_id); 894 895 return sfc_rx_qdesc_npending(sa, rx_queue_id); 896 } 897 898 static int 899 sfc_rx_descriptor_done(void *queue, uint16_t offset) 900 { 901 struct sfc_rxq *rxq = queue; 902 903 return sfc_rx_qdesc_done(rxq, offset); 904 } 905 906 static int 907 sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) 908 { 909 struct sfc_adapter *sa = dev->data->dev_private; 910 int rc; 911 912 sfc_log_init(sa, "RxQ=%u", rx_queue_id); 913 914 sfc_adapter_lock(sa); 915 916 rc = EINVAL; 917 if (sa->state != SFC_ADAPTER_STARTED) 918 goto fail_not_started; 919 920 rc = sfc_rx_qstart(sa, rx_queue_id); 921 if (rc != 0) 922 goto fail_rx_qstart; 923 924 sa->rxq_info[rx_queue_id].deferred_started = B_TRUE; 925 926 sfc_adapter_unlock(sa); 927 928 return 0; 929 930 fail_rx_qstart: 931 fail_not_started: 932 sfc_adapter_unlock(sa); 933 SFC_ASSERT(rc > 0); 934 return -rc; 935 } 936 937 static int 938 sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) 939 { 940 struct sfc_adapter *sa = dev->data->dev_private; 941 942 sfc_log_init(sa, "RxQ=%u", rx_queue_id); 943 944 sfc_adapter_lock(sa); 945 sfc_rx_qstop(sa, rx_queue_id); 946 947 sa->rxq_info[rx_queue_id].deferred_started = B_FALSE; 948 949 sfc_adapter_unlock(sa); 950 951 return 0; 952 } 953 954 static int 955 sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) 956 { 957 struct sfc_adapter *sa = dev->data->dev_private; 958 int rc; 959 960 sfc_log_init(sa, "TxQ = %u", tx_queue_id); 961 962 sfc_adapter_lock(sa); 963 964 rc = EINVAL; 965 if (sa->state != SFC_ADAPTER_STARTED) 966 goto fail_not_started; 967 968 rc = sfc_tx_qstart(sa, tx_queue_id); 969 if (rc != 0) 970 goto fail_tx_qstart; 971 972 sa->txq_info[tx_queue_id].deferred_started = B_TRUE; 973 974 sfc_adapter_unlock(sa); 975 return 0; 976 977 fail_tx_qstart: 978 979 fail_not_started: 980 sfc_adapter_unlock(sa); 981 SFC_ASSERT(rc > 0); 982 return -rc; 983 } 984 985 static int 986 sfc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) 987 { 988 struct sfc_adapter *sa = dev->data->dev_private; 989 990 sfc_log_init(sa, "TxQ = %u", tx_queue_id); 991 992 sfc_adapter_lock(sa); 993 994 sfc_tx_qstop(sa, tx_queue_id); 995 996 sa->txq_info[tx_queue_id].deferred_started = B_FALSE; 997 998 sfc_adapter_unlock(sa); 999 return 0; 1000 } 1001 1002 #if EFSYS_OPT_RX_SCALE 1003 static int 1004 sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev, 1005 struct rte_eth_rss_conf *rss_conf) 1006 { 1007 struct sfc_adapter *sa = dev->data->dev_private; 1008 1009 if ((sa->rss_channels == 1) || 1010 (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE)) 1011 return -ENOTSUP; 1012 1013 sfc_adapter_lock(sa); 1014 1015 /* 1016 * Mapping of hash configuration between RTE and EFX is not one-to-one, 1017 * hence, conversion is done here to derive a correct set of ETH_RSS 1018 * flags which corresponds to the active EFX configuration stored 1019 * locally in 'sfc_adapter' and kept up-to-date 1020 */ 1021 rss_conf->rss_hf = sfc_efx_to_rte_hash_type(sa->rss_hash_types); 1022 rss_conf->rss_key_len = SFC_RSS_KEY_SIZE; 1023 if (rss_conf->rss_key != NULL) 1024 rte_memcpy(rss_conf->rss_key, sa->rss_key, SFC_RSS_KEY_SIZE); 1025 1026 sfc_adapter_unlock(sa); 1027 1028 return 0; 1029 } 1030 1031 static int 1032 sfc_dev_rss_hash_update(struct rte_eth_dev *dev, 1033 struct rte_eth_rss_conf *rss_conf) 1034 { 1035 struct sfc_adapter *sa = dev->data->dev_private; 1036 unsigned int efx_hash_types; 1037 int rc = 0; 1038 1039 if ((sa->rss_channels == 1) || 1040 (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE)) { 1041 sfc_err(sa, "RSS is not available"); 1042 return -ENOTSUP; 1043 } 1044 1045 if ((rss_conf->rss_key != NULL) && 1046 (rss_conf->rss_key_len != sizeof(sa->rss_key))) { 1047 sfc_err(sa, "RSS key size is wrong (should be %lu)", 1048 sizeof(sa->rss_key)); 1049 return -EINVAL; 1050 } 1051 1052 if ((rss_conf->rss_hf & ~SFC_RSS_OFFLOADS) != 0) { 1053 sfc_err(sa, "unsupported hash functions requested"); 1054 return -EINVAL; 1055 } 1056 1057 sfc_adapter_lock(sa); 1058 1059 efx_hash_types = sfc_rte_to_efx_hash_type(rss_conf->rss_hf); 1060 1061 rc = efx_rx_scale_mode_set(sa->nic, EFX_RX_HASHALG_TOEPLITZ, 1062 efx_hash_types, B_TRUE); 1063 if (rc != 0) 1064 goto fail_scale_mode_set; 1065 1066 if (rss_conf->rss_key != NULL) { 1067 if (sa->state == SFC_ADAPTER_STARTED) { 1068 rc = efx_rx_scale_key_set(sa->nic, rss_conf->rss_key, 1069 sizeof(sa->rss_key)); 1070 if (rc != 0) 1071 goto fail_scale_key_set; 1072 } 1073 1074 rte_memcpy(sa->rss_key, rss_conf->rss_key, sizeof(sa->rss_key)); 1075 } 1076 1077 sa->rss_hash_types = efx_hash_types; 1078 1079 sfc_adapter_unlock(sa); 1080 1081 return 0; 1082 1083 fail_scale_key_set: 1084 if (efx_rx_scale_mode_set(sa->nic, EFX_RX_HASHALG_TOEPLITZ, 1085 sa->rss_hash_types, B_TRUE) != 0) 1086 sfc_err(sa, "failed to restore RSS mode"); 1087 1088 fail_scale_mode_set: 1089 sfc_adapter_unlock(sa); 1090 return -rc; 1091 } 1092 1093 static int 1094 sfc_dev_rss_reta_query(struct rte_eth_dev *dev, 1095 struct rte_eth_rss_reta_entry64 *reta_conf, 1096 uint16_t reta_size) 1097 { 1098 struct sfc_adapter *sa = dev->data->dev_private; 1099 int entry; 1100 1101 if ((sa->rss_channels == 1) || 1102 (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE)) 1103 return -ENOTSUP; 1104 1105 if (reta_size != EFX_RSS_TBL_SIZE) 1106 return -EINVAL; 1107 1108 sfc_adapter_lock(sa); 1109 1110 for (entry = 0; entry < reta_size; entry++) { 1111 int grp = entry / RTE_RETA_GROUP_SIZE; 1112 int grp_idx = entry % RTE_RETA_GROUP_SIZE; 1113 1114 if ((reta_conf[grp].mask >> grp_idx) & 1) 1115 reta_conf[grp].reta[grp_idx] = sa->rss_tbl[entry]; 1116 } 1117 1118 sfc_adapter_unlock(sa); 1119 1120 return 0; 1121 } 1122 1123 static int 1124 sfc_dev_rss_reta_update(struct rte_eth_dev *dev, 1125 struct rte_eth_rss_reta_entry64 *reta_conf, 1126 uint16_t reta_size) 1127 { 1128 struct sfc_adapter *sa = dev->data->dev_private; 1129 unsigned int *rss_tbl_new; 1130 uint16_t entry; 1131 int rc; 1132 1133 1134 if ((sa->rss_channels == 1) || 1135 (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE)) { 1136 sfc_err(sa, "RSS is not available"); 1137 return -ENOTSUP; 1138 } 1139 1140 if (reta_size != EFX_RSS_TBL_SIZE) { 1141 sfc_err(sa, "RETA size is wrong (should be %u)", 1142 EFX_RSS_TBL_SIZE); 1143 return -EINVAL; 1144 } 1145 1146 rss_tbl_new = rte_zmalloc("rss_tbl_new", sizeof(sa->rss_tbl), 0); 1147 if (rss_tbl_new == NULL) 1148 return -ENOMEM; 1149 1150 sfc_adapter_lock(sa); 1151 1152 rte_memcpy(rss_tbl_new, sa->rss_tbl, sizeof(sa->rss_tbl)); 1153 1154 for (entry = 0; entry < reta_size; entry++) { 1155 int grp_idx = entry % RTE_RETA_GROUP_SIZE; 1156 struct rte_eth_rss_reta_entry64 *grp; 1157 1158 grp = &reta_conf[entry / RTE_RETA_GROUP_SIZE]; 1159 1160 if (grp->mask & (1ull << grp_idx)) { 1161 if (grp->reta[grp_idx] >= sa->rss_channels) { 1162 rc = EINVAL; 1163 goto bad_reta_entry; 1164 } 1165 rss_tbl_new[entry] = grp->reta[grp_idx]; 1166 } 1167 } 1168 1169 rc = efx_rx_scale_tbl_set(sa->nic, rss_tbl_new, EFX_RSS_TBL_SIZE); 1170 if (rc == 0) 1171 rte_memcpy(sa->rss_tbl, rss_tbl_new, sizeof(sa->rss_tbl)); 1172 1173 bad_reta_entry: 1174 sfc_adapter_unlock(sa); 1175 1176 rte_free(rss_tbl_new); 1177 1178 SFC_ASSERT(rc >= 0); 1179 return -rc; 1180 } 1181 #endif 1182 1183 static const struct eth_dev_ops sfc_eth_dev_ops = { 1184 .dev_configure = sfc_dev_configure, 1185 .dev_start = sfc_dev_start, 1186 .dev_stop = sfc_dev_stop, 1187 .dev_set_link_up = sfc_dev_set_link_up, 1188 .dev_set_link_down = sfc_dev_set_link_down, 1189 .dev_close = sfc_dev_close, 1190 .promiscuous_enable = sfc_dev_promisc_enable, 1191 .promiscuous_disable = sfc_dev_promisc_disable, 1192 .allmulticast_enable = sfc_dev_allmulti_enable, 1193 .allmulticast_disable = sfc_dev_allmulti_disable, 1194 .link_update = sfc_dev_link_update, 1195 .stats_get = sfc_stats_get, 1196 .xstats_get = sfc_xstats_get, 1197 .xstats_get_names = sfc_xstats_get_names, 1198 .dev_infos_get = sfc_dev_infos_get, 1199 .dev_supported_ptypes_get = sfc_dev_supported_ptypes_get, 1200 .mtu_set = sfc_dev_set_mtu, 1201 .rx_queue_start = sfc_rx_queue_start, 1202 .rx_queue_stop = sfc_rx_queue_stop, 1203 .tx_queue_start = sfc_tx_queue_start, 1204 .tx_queue_stop = sfc_tx_queue_stop, 1205 .rx_queue_setup = sfc_rx_queue_setup, 1206 .rx_queue_release = sfc_rx_queue_release, 1207 .rx_queue_count = sfc_rx_queue_count, 1208 .rx_descriptor_done = sfc_rx_descriptor_done, 1209 .tx_queue_setup = sfc_tx_queue_setup, 1210 .tx_queue_release = sfc_tx_queue_release, 1211 .flow_ctrl_get = sfc_flow_ctrl_get, 1212 .flow_ctrl_set = sfc_flow_ctrl_set, 1213 .mac_addr_set = sfc_mac_addr_set, 1214 #if EFSYS_OPT_RX_SCALE 1215 .reta_update = sfc_dev_rss_reta_update, 1216 .reta_query = sfc_dev_rss_reta_query, 1217 .rss_hash_update = sfc_dev_rss_hash_update, 1218 .rss_hash_conf_get = sfc_dev_rss_hash_conf_get, 1219 #endif 1220 .set_mc_addr_list = sfc_set_mc_addr_list, 1221 .rxq_info_get = sfc_rx_queue_info_get, 1222 .txq_info_get = sfc_tx_queue_info_get, 1223 }; 1224 1225 static int 1226 sfc_eth_dev_init(struct rte_eth_dev *dev) 1227 { 1228 struct sfc_adapter *sa = dev->data->dev_private; 1229 struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(dev); 1230 int rc; 1231 const efx_nic_cfg_t *encp; 1232 const struct ether_addr *from; 1233 1234 /* Required for logging */ 1235 sa->eth_dev = dev; 1236 1237 /* Copy PCI device info to the dev->data */ 1238 rte_eth_copy_pci_info(dev, pci_dev); 1239 1240 rc = sfc_kvargs_parse(sa); 1241 if (rc != 0) 1242 goto fail_kvargs_parse; 1243 1244 rc = sfc_kvargs_process(sa, SFC_KVARG_DEBUG_INIT, 1245 sfc_kvarg_bool_handler, &sa->debug_init); 1246 if (rc != 0) 1247 goto fail_kvarg_debug_init; 1248 1249 sfc_log_init(sa, "entry"); 1250 1251 dev->data->mac_addrs = rte_zmalloc("sfc", ETHER_ADDR_LEN, 0); 1252 if (dev->data->mac_addrs == NULL) { 1253 rc = ENOMEM; 1254 goto fail_mac_addrs; 1255 } 1256 1257 sfc_adapter_lock_init(sa); 1258 sfc_adapter_lock(sa); 1259 1260 sfc_log_init(sa, "attaching"); 1261 rc = sfc_attach(sa); 1262 if (rc != 0) 1263 goto fail_attach; 1264 1265 encp = efx_nic_cfg_get(sa->nic); 1266 1267 /* 1268 * The arguments are really reverse order in comparison to 1269 * Linux kernel. Copy from NIC config to Ethernet device data. 1270 */ 1271 from = (const struct ether_addr *)(encp->enc_mac_addr); 1272 ether_addr_copy(from, &dev->data->mac_addrs[0]); 1273 1274 dev->dev_ops = &sfc_eth_dev_ops; 1275 dev->rx_pkt_burst = &sfc_recv_pkts; 1276 dev->tx_pkt_burst = &sfc_xmit_pkts; 1277 1278 sfc_adapter_unlock(sa); 1279 1280 sfc_log_init(sa, "done"); 1281 return 0; 1282 1283 fail_attach: 1284 sfc_adapter_unlock(sa); 1285 sfc_adapter_lock_fini(sa); 1286 rte_free(dev->data->mac_addrs); 1287 dev->data->mac_addrs = NULL; 1288 1289 fail_mac_addrs: 1290 fail_kvarg_debug_init: 1291 sfc_kvargs_cleanup(sa); 1292 1293 fail_kvargs_parse: 1294 sfc_log_init(sa, "failed %d", rc); 1295 SFC_ASSERT(rc > 0); 1296 return -rc; 1297 } 1298 1299 static int 1300 sfc_eth_dev_uninit(struct rte_eth_dev *dev) 1301 { 1302 struct sfc_adapter *sa = dev->data->dev_private; 1303 1304 sfc_log_init(sa, "entry"); 1305 1306 sfc_adapter_lock(sa); 1307 1308 sfc_detach(sa); 1309 1310 rte_free(dev->data->mac_addrs); 1311 dev->data->mac_addrs = NULL; 1312 1313 dev->dev_ops = NULL; 1314 dev->rx_pkt_burst = NULL; 1315 dev->tx_pkt_burst = NULL; 1316 1317 sfc_kvargs_cleanup(sa); 1318 1319 sfc_adapter_unlock(sa); 1320 sfc_adapter_lock_fini(sa); 1321 1322 sfc_log_init(sa, "done"); 1323 1324 /* Required for logging, so cleanup last */ 1325 sa->eth_dev = NULL; 1326 return 0; 1327 } 1328 1329 static const struct rte_pci_id pci_id_sfc_efx_map[] = { 1330 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE) }, 1331 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT) }, 1332 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) }, 1333 { .vendor_id = 0 /* sentinel */ } 1334 }; 1335 1336 static struct eth_driver sfc_efx_pmd = { 1337 .pci_drv = { 1338 .id_table = pci_id_sfc_efx_map, 1339 .drv_flags = 1340 RTE_PCI_DRV_INTR_LSC | 1341 RTE_PCI_DRV_NEED_MAPPING, 1342 .probe = rte_eth_dev_pci_probe, 1343 .remove = rte_eth_dev_pci_remove, 1344 }, 1345 .eth_dev_init = sfc_eth_dev_init, 1346 .eth_dev_uninit = sfc_eth_dev_uninit, 1347 .dev_private_size = sizeof(struct sfc_adapter), 1348 }; 1349 1350 RTE_PMD_REGISTER_PCI(net_sfc_efx, sfc_efx_pmd.pci_drv); 1351 RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map); 1352 RTE_PMD_REGISTER_KMOD_DEP(net_sfc_efx, "* igb_uio | uio_pci_generic | vfio"); 1353 RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx, 1354 SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " " 1355 SFC_KVARG_MCDI_LOGGING "=" SFC_KVARG_VALUES_BOOL " " 1356 SFC_KVARG_DEBUG_INIT "=" SFC_KVARG_VALUES_BOOL); 1357