/*-
 * Copyright (c) 2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_dev.h>
#include <rte_ethdev.h>
#include <rte_pci.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_kvargs.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tx.h"


static void
sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);

	sfc_log_init(sa, "entry");

	dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
	dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;

	/* Autonegotiation may be disabled */
	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
	if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_1000FDX)
		dev_info->speed_capa |= ETH_LINK_SPEED_1G;
	if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_10000FDX)
		dev_info->speed_capa |= ETH_LINK_SPEED_10G;
	if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_40000FDX)
		dev_info->speed_capa |= ETH_LINK_SPEED_40G;

	dev_info->max_rx_queues = sa->rxq_max;
	dev_info->max_tx_queues = sa->txq_max;

	/* By default packets are dropped if no descriptors are available */
	dev_info->default_rxconf.rx_drop_en = 1;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;

	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM;

	dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOXSUMSCTP;
	if (!encp->enc_hw_tx_insert_vlan_enabled)
		dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
	else
		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_VLAN_INSERT;

#if EFSYS_OPT_RX_SCALE
	if (sa->rss_support != EFX_RX_SCALE_UNAVAILABLE) {
		dev_info->reta_size = EFX_RSS_TBL_SIZE;
		dev_info->hash_key_size = SFC_RSS_KEY_SIZE;
		dev_info->flow_type_rss_offloads = SFC_RSS_OFFLOADS;
	}
#endif

	if (sa->tso)
		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;

	dev_info->rx_desc_lim.nb_max = EFX_RXQ_MAXNDESCS;
	dev_info->rx_desc_lim.nb_min = EFX_RXQ_MINNDESCS;
	/* The RXQ hardware requires that the descriptor count is a power
	 * of 2, but rx_desc_lim cannot properly describe that constraint.
	 */
	dev_info->rx_desc_lim.nb_align = EFX_RXQ_MINNDESCS;

	dev_info->tx_desc_lim.nb_max = sa->txq_max_entries;
	dev_info->tx_desc_lim.nb_min = EFX_TXQ_MINNDESCS;
	/*
	 * The TXQ hardware requires that the descriptor count is a power
	 * of 2, but tx_desc_lim cannot properly describe that constraint.
	 */
	dev_info->tx_desc_lim.nb_align = EFX_TXQ_MINNDESCS;
}

static const uint32_t *
sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == sfc_recv_pkts)
		return ptypes;

	return NULL;
}

static int
sfc_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *dev_data = dev->data;
	struct sfc_adapter *sa = dev_data->dev_private;
	int rc;

	sfc_log_init(sa, "entry n_rxq=%u n_txq=%u",
		     dev_data->nb_rx_queues, dev_data->nb_tx_queues);

	sfc_adapter_lock(sa);
	switch (sa->state) {
	case SFC_ADAPTER_CONFIGURED:
		sfc_close(sa);
		SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED);
		/* FALLTHROUGH */
	case SFC_ADAPTER_INITIALIZED:
		rc = sfc_configure(sa);
		break;
	default:
		sfc_err(sa, "unexpected adapter state %u to configure",
			sa->state);
		rc = EINVAL;
		break;
	}
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done %d", rc);
	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_start(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	rc = sfc_start(sa);
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done %d", rc);
	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_eth_link *dev_link = &dev->data->dev_link;
	struct rte_eth_link old_link;
	struct rte_eth_link current_link;

	sfc_log_init(sa, "entry");

retry:
	EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
	*(int64_t *)&old_link = rte_atomic64_read((rte_atomic64_t *)dev_link);

	if (sa->state != SFC_ADAPTER_STARTED) {
		sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &current_link);
		if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link,
					 *(uint64_t *)&old_link,
					 *(uint64_t *)&current_link))
			goto retry;
	} else if (wait_to_complete) {
		efx_link_mode_t link_mode;

		if (efx_port_poll(sa->nic, &link_mode) != 0)
			link_mode = EFX_LINK_UNKNOWN;
		sfc_port_link_mode_to_info(link_mode, &current_link);

		if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link,
					 *(uint64_t *)&old_link,
					 *(uint64_t *)&current_link))
			goto retry;
	} else {
		sfc_ev_mgmt_qpoll(sa);
		*(int64_t *)&current_link =
			rte_atomic64_read((rte_atomic64_t *)dev_link);
	}

	if (old_link.link_status != current_link.link_status)
		sfc_info(sa, "Link status is %s",
"UP" : "DOWN"); 221 222 return old_link.link_status == current_link.link_status ? 0 : -1; 223 } 224 225 static void 226 sfc_dev_stop(struct rte_eth_dev *dev) 227 { 228 struct sfc_adapter *sa = dev->data->dev_private; 229 230 sfc_log_init(sa, "entry"); 231 232 sfc_adapter_lock(sa); 233 sfc_stop(sa); 234 sfc_adapter_unlock(sa); 235 236 sfc_log_init(sa, "done"); 237 } 238 239 static int 240 sfc_dev_set_link_up(struct rte_eth_dev *dev) 241 { 242 struct sfc_adapter *sa = dev->data->dev_private; 243 int rc; 244 245 sfc_log_init(sa, "entry"); 246 247 sfc_adapter_lock(sa); 248 rc = sfc_start(sa); 249 sfc_adapter_unlock(sa); 250 251 SFC_ASSERT(rc >= 0); 252 return -rc; 253 } 254 255 static int 256 sfc_dev_set_link_down(struct rte_eth_dev *dev) 257 { 258 struct sfc_adapter *sa = dev->data->dev_private; 259 260 sfc_log_init(sa, "entry"); 261 262 sfc_adapter_lock(sa); 263 sfc_stop(sa); 264 sfc_adapter_unlock(sa); 265 266 return 0; 267 } 268 269 static void 270 sfc_dev_close(struct rte_eth_dev *dev) 271 { 272 struct sfc_adapter *sa = dev->data->dev_private; 273 274 sfc_log_init(sa, "entry"); 275 276 sfc_adapter_lock(sa); 277 switch (sa->state) { 278 case SFC_ADAPTER_STARTED: 279 sfc_stop(sa); 280 SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED); 281 /* FALLTHROUGH */ 282 case SFC_ADAPTER_CONFIGURED: 283 sfc_close(sa); 284 SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED); 285 /* FALLTHROUGH */ 286 case SFC_ADAPTER_INITIALIZED: 287 break; 288 default: 289 sfc_err(sa, "unexpected adapter state %u on close", sa->state); 290 break; 291 } 292 sfc_adapter_unlock(sa); 293 294 sfc_log_init(sa, "done"); 295 } 296 297 static void 298 sfc_dev_filter_set(struct rte_eth_dev *dev, enum sfc_dev_filter_mode mode, 299 boolean_t enabled) 300 { 301 struct sfc_port *port; 302 boolean_t *toggle; 303 struct sfc_adapter *sa = dev->data->dev_private; 304 boolean_t allmulti = (mode == SFC_DEV_FILTER_MODE_ALLMULTI); 305 const char *desc = (allmulti) ? "all-multi" : "promiscuous"; 306 307 sfc_adapter_lock(sa); 308 309 port = &sa->port; 310 toggle = (allmulti) ? (&port->allmulti) : (&port->promisc); 311 312 if (*toggle != enabled) { 313 *toggle = enabled; 314 315 if ((sa->state == SFC_ADAPTER_STARTED) && 316 (sfc_set_rx_mode(sa) != 0)) { 317 *toggle = !(enabled); 318 sfc_warn(sa, "Failed to %s %s mode", 319 ((enabled) ? 
"enable" : "disable"), desc); 320 } 321 } 322 323 sfc_adapter_unlock(sa); 324 } 325 326 static void 327 sfc_dev_promisc_enable(struct rte_eth_dev *dev) 328 { 329 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_TRUE); 330 } 331 332 static void 333 sfc_dev_promisc_disable(struct rte_eth_dev *dev) 334 { 335 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_FALSE); 336 } 337 338 static void 339 sfc_dev_allmulti_enable(struct rte_eth_dev *dev) 340 { 341 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_TRUE); 342 } 343 344 static void 345 sfc_dev_allmulti_disable(struct rte_eth_dev *dev) 346 { 347 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_FALSE); 348 } 349 350 static int 351 sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, 352 uint16_t nb_rx_desc, unsigned int socket_id, 353 const struct rte_eth_rxconf *rx_conf, 354 struct rte_mempool *mb_pool) 355 { 356 struct sfc_adapter *sa = dev->data->dev_private; 357 int rc; 358 359 sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u", 360 rx_queue_id, nb_rx_desc, socket_id); 361 362 sfc_adapter_lock(sa); 363 364 rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id, 365 rx_conf, mb_pool); 366 if (rc != 0) 367 goto fail_rx_qinit; 368 369 dev->data->rx_queues[rx_queue_id] = sa->rxq_info[rx_queue_id].rxq; 370 371 sfc_adapter_unlock(sa); 372 373 return 0; 374 375 fail_rx_qinit: 376 sfc_adapter_unlock(sa); 377 SFC_ASSERT(rc > 0); 378 return -rc; 379 } 380 381 static void 382 sfc_rx_queue_release(void *queue) 383 { 384 struct sfc_rxq *rxq = queue; 385 struct sfc_adapter *sa; 386 unsigned int sw_index; 387 388 if (rxq == NULL) 389 return; 390 391 sa = rxq->evq->sa; 392 sfc_adapter_lock(sa); 393 394 sw_index = sfc_rxq_sw_index(rxq); 395 396 sfc_log_init(sa, "RxQ=%u", sw_index); 397 398 sa->eth_dev->data->rx_queues[sw_index] = NULL; 399 400 sfc_rx_qfini(sa, sw_index); 401 402 sfc_adapter_unlock(sa); 403 } 404 405 static int 406 sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, 407 uint16_t nb_tx_desc, unsigned int socket_id, 408 const struct rte_eth_txconf *tx_conf) 409 { 410 struct sfc_adapter *sa = dev->data->dev_private; 411 int rc; 412 413 sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u", 414 tx_queue_id, nb_tx_desc, socket_id); 415 416 sfc_adapter_lock(sa); 417 418 rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf); 419 if (rc != 0) 420 goto fail_tx_qinit; 421 422 dev->data->tx_queues[tx_queue_id] = sa->txq_info[tx_queue_id].txq; 423 424 sfc_adapter_unlock(sa); 425 return 0; 426 427 fail_tx_qinit: 428 sfc_adapter_unlock(sa); 429 SFC_ASSERT(rc > 0); 430 return -rc; 431 } 432 433 static void 434 sfc_tx_queue_release(void *queue) 435 { 436 struct sfc_txq *txq = queue; 437 unsigned int sw_index; 438 struct sfc_adapter *sa; 439 440 if (txq == NULL) 441 return; 442 443 sw_index = sfc_txq_sw_index(txq); 444 445 SFC_ASSERT(txq->evq != NULL); 446 sa = txq->evq->sa; 447 448 sfc_log_init(sa, "TxQ = %u", sw_index); 449 450 sfc_adapter_lock(sa); 451 452 SFC_ASSERT(sw_index < sa->eth_dev->data->nb_tx_queues); 453 sa->eth_dev->data->tx_queues[sw_index] = NULL; 454 455 sfc_tx_qfini(sa, sw_index); 456 457 sfc_adapter_unlock(sa); 458 } 459 460 static void 461 sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 462 { 463 struct sfc_adapter *sa = dev->data->dev_private; 464 struct sfc_port *port = &sa->port; 465 uint64_t *mac_stats; 466 467 rte_spinlock_lock(&port->mac_stats_lock); 468 469 if (sfc_port_update_mac_stats(sa) != 0) 470 goto unlock; 471 472 mac_stats = 
	mac_stats = port->mac_stats_buf;

	if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask,
				   EFX_MAC_VADAPTER_RX_UNICAST_PACKETS)) {
		stats->ipackets =
			mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS];
		stats->opackets =
			mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS];
		stats->ibytes =
			mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES];
		stats->obytes =
			mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES];
		stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_OVERFLOW];
		stats->ierrors = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS];
		stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS];
	} else {
		stats->ipackets = mac_stats[EFX_MAC_RX_PKTS];
		stats->opackets = mac_stats[EFX_MAC_TX_PKTS];
		stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS];
		stats->obytes = mac_stats[EFX_MAC_TX_OCTETS];
		/*
		 * Take into account stats which are available on EF10
		 * whenever supported. If some stat is not supported by
		 * the current firmware variant or HW revision, it is
		 * guaranteed to be zero in mac_stats.
		 */
		stats->imissed =
			mac_stats[EFX_MAC_RX_NODESC_DROP_CNT] +
			mac_stats[EFX_MAC_PM_TRUNC_BB_OVERFLOW] +
			mac_stats[EFX_MAC_PM_DISCARD_BB_OVERFLOW] +
			mac_stats[EFX_MAC_PM_TRUNC_VFIFO_FULL] +
			mac_stats[EFX_MAC_PM_DISCARD_VFIFO_FULL] +
			mac_stats[EFX_MAC_PM_TRUNC_QBB] +
			mac_stats[EFX_MAC_PM_DISCARD_QBB] +
			mac_stats[EFX_MAC_PM_DISCARD_MAPPING] +
			mac_stats[EFX_MAC_RXDP_Q_DISABLED_PKTS] +
			mac_stats[EFX_MAC_RXDP_DI_DROPPED_PKTS];
		stats->ierrors =
			mac_stats[EFX_MAC_RX_FCS_ERRORS] +
			mac_stats[EFX_MAC_RX_ALIGN_ERRORS] +
			mac_stats[EFX_MAC_RX_JABBER_PKTS];
		/* no oerrors counters supported on EF10 */
	}

unlock:
	rte_spinlock_unlock(&port->mac_stats_lock);
}

static int
sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
	       unsigned int xstats_count)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_port *port = &sa->port;
	uint64_t *mac_stats;
	int rc;
	unsigned int i;
	int nstats = 0;

	rte_spinlock_lock(&port->mac_stats_lock);

	rc = sfc_port_update_mac_stats(sa);
	if (rc != 0) {
		SFC_ASSERT(rc > 0);
		nstats = -rc;
		goto unlock;
	}

	mac_stats = port->mac_stats_buf;

	for (i = 0; i < EFX_MAC_NSTATS; ++i) {
		if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
			if (xstats != NULL && nstats < (int)xstats_count) {
				xstats[nstats].id = nstats;
				xstats[nstats].value = mac_stats[i];
			}
			nstats++;
		}
	}

unlock:
	rte_spinlock_unlock(&port->mac_stats_lock);

	return nstats;
}

static int
sfc_xstats_get_names(struct rte_eth_dev *dev,
		     struct rte_eth_xstat_name *xstats_names,
		     unsigned int xstats_count)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_port *port = &sa->port;
	unsigned int i;
	unsigned int nstats = 0;

	for (i = 0; i < EFX_MAC_NSTATS; ++i) {
		if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
			if (xstats_names != NULL && nstats < xstats_count)
				strncpy(xstats_names[nstats].name,
					efx_mac_stat_name(sa->nic, i),
					sizeof(xstats_names[0].name));
			nstats++;
		}
	}

	return nstats;
}

static int
sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	unsigned int wanted_fc, link_fc;

	memset(fc_conf, 0, sizeof(*fc_conf));

	sfc_adapter_lock(sa);

	if (sa->state == SFC_ADAPTER_STARTED)
		efx_mac_fcntl_get(sa->nic, &wanted_fc, &link_fc);
	else
		link_fc = sa->port.flow_ctrl;

	switch (link_fc) {
	case 0:
		fc_conf->mode = RTE_FC_NONE;
		break;
	case EFX_FCNTL_RESPOND:
		fc_conf->mode = RTE_FC_RX_PAUSE;
		break;
	case EFX_FCNTL_GENERATE:
		fc_conf->mode = RTE_FC_TX_PAUSE;
		break;
	case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE):
		fc_conf->mode = RTE_FC_FULL;
		break;
	default:
		sfc_err(sa, "%s: unexpected flow control value %#x",
			__func__, link_fc);
	}

	fc_conf->autoneg = sa->port.flow_ctrl_autoneg;

	sfc_adapter_unlock(sa);

	return 0;
}

static int
sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_port *port = &sa->port;
	unsigned int fcntl;
	int rc;

	if (fc_conf->high_water != 0 || fc_conf->low_water != 0 ||
	    fc_conf->pause_time != 0 || fc_conf->send_xon != 0 ||
	    fc_conf->mac_ctrl_frame_fwd != 0) {
		sfc_err(sa, "unsupported flow control settings specified");
		rc = EINVAL;
		goto fail_inval;
	}

	switch (fc_conf->mode) {
	case RTE_FC_NONE:
		fcntl = 0;
		break;
	case RTE_FC_RX_PAUSE:
		fcntl = EFX_FCNTL_RESPOND;
		break;
	case RTE_FC_TX_PAUSE:
		fcntl = EFX_FCNTL_GENERATE;
		break;
	case RTE_FC_FULL:
		fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
		break;
	default:
		rc = EINVAL;
		goto fail_inval;
	}

	sfc_adapter_lock(sa);

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = efx_mac_fcntl_set(sa->nic, fcntl, fc_conf->autoneg);
		if (rc != 0)
			goto fail_mac_fcntl_set;
	}

	port->flow_ctrl = fcntl;
	port->flow_ctrl_autoneg = fc_conf->autoneg;

	sfc_adapter_unlock(sa);

	return 0;

fail_mac_fcntl_set:
	sfc_adapter_unlock(sa);
fail_inval:
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	size_t pdu = EFX_MAC_PDU(mtu);
	size_t old_pdu;
	int rc;

	sfc_log_init(sa, "mtu=%u", mtu);

	rc = EINVAL;
	if (pdu < EFX_MAC_PDU_MIN) {
		sfc_err(sa, "too small MTU %u (PDU size %u less than min %u)",
			(unsigned int)mtu, (unsigned int)pdu,
			EFX_MAC_PDU_MIN);
		goto fail_inval;
	}
	if (pdu > EFX_MAC_PDU_MAX) {
		sfc_err(sa, "too big MTU %u (PDU size %u greater than max %u)",
			(unsigned int)mtu, (unsigned int)pdu,
			EFX_MAC_PDU_MAX);
		goto fail_inval;
	}

	sfc_adapter_lock(sa);

	if (pdu != sa->port.pdu) {
		if (sa->state == SFC_ADAPTER_STARTED) {
			sfc_stop(sa);

			old_pdu = sa->port.pdu;
			sa->port.pdu = pdu;
			rc = sfc_start(sa);
			if (rc != 0)
				goto fail_start;
		} else {
			sa->port.pdu = pdu;
		}
	}

	/*
	 * The driver does not use it, but other PMDs update jumbo_frame
	 * flag and max_rx_pkt_len when MTU is set.
	 */
	dev->data->dev_conf.rxmode.jumbo_frame = (mtu > ETHER_MAX_LEN);
	dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu;

	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
	return 0;

fail_start:
	sa->port.pdu = old_pdu;
	if (sfc_start(sa) != 0)
		sfc_err(sa, "cannot start with neither new (%u) nor old (%u) "
			"PDU max size - port is stopped",
			(unsigned int)pdu, (unsigned int)old_pdu);
	sfc_adapter_unlock(sa);

fail_inval:
	sfc_log_init(sa, "failed %d", rc);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static void
sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	int rc;

	sfc_adapter_lock(sa);

	if (sa->state != SFC_ADAPTER_STARTED) {
		sfc_info(sa, "the port is not started");
		sfc_info(sa, "the new MAC address will be set on port start");

		goto unlock;
	}

	if (encp->enc_allow_set_mac_with_installed_filters) {
		rc = efx_mac_addr_set(sa->nic, mac_addr->addr_bytes);
		if (rc != 0) {
			sfc_err(sa, "cannot set MAC address (rc = %u)", rc);
			goto unlock;
		}

		/*
		 * Changing the MAC address by means of MCDI request
		 * has no effect on received traffic, therefore
		 * we also need to update unicast filters
		 */
		rc = sfc_set_rx_mode(sa);
		if (rc != 0)
			sfc_err(sa, "cannot set filter (rc = %u)", rc);
	} else {
		sfc_warn(sa, "cannot set MAC address with filters installed");
		sfc_warn(sa, "adapter will be restarted to pick the new MAC");
		sfc_warn(sa, "(some traffic may be dropped)");

		/*
		 * Since setting MAC address with filters installed is not
		 * allowed on the adapter, one needs to simply restart adapter
		 * so that the new MAC address will be taken from an outer
		 * storage and set flawlessly by means of sfc_start() call
		 */
		sfc_stop(sa);
		rc = sfc_start(sa);
		if (rc != 0)
			sfc_err(sa, "cannot restart adapter (rc = %u)", rc);
	}

unlock:
	sfc_adapter_unlock(sa);
}

static int
sfc_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set,
		     uint32_t nb_mc_addr)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	uint8_t *mc_addrs_p = NULL;
	int rc;
	unsigned int i;

	if (nb_mc_addr > EFX_MAC_MULTICAST_LIST_MAX) {
		sfc_err(sa, "too many multicast addresses: %u > %u",
			nb_mc_addr, EFX_MAC_MULTICAST_LIST_MAX);
		return -EINVAL;
	}

	if (nb_mc_addr != 0) {
		uint8_t *mc_addrs;

		mc_addrs_p = rte_calloc("mc-addrs", nb_mc_addr,
					EFX_MAC_ADDR_LEN, 0);
		if (mc_addrs_p == NULL)
			return -ENOMEM;

		mc_addrs = mc_addrs_p;
		for (i = 0; i < nb_mc_addr; ++i) {
			(void)rte_memcpy(mc_addrs, mc_addr_set[i].addr_bytes,
					 EFX_MAC_ADDR_LEN);
			mc_addrs += EFX_MAC_ADDR_LEN;
		}
	}

	rc = efx_mac_multicast_list_set(sa->nic, mc_addrs_p, nb_mc_addr);

	rte_free(mc_addrs_p);

	if (rc != 0)
		sfc_err(sa, "cannot set multicast address list (rc = %u)", rc);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

static void
sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		      struct rte_eth_rxq_info *qinfo)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;

	sfc_adapter_lock(sa);

	SFC_ASSERT(rx_queue_id < sa->rxq_count);

	rxq_info = &sa->rxq_info[rx_queue_id];
	rxq = rxq_info->rxq;
	SFC_ASSERT(rxq != NULL);

	qinfo->mp = rxq->refill_mb_pool;
	qinfo->conf.rx_free_thresh = rxq->refill_threshold;
	qinfo->conf.rx_drop_en = 1;
	qinfo->conf.rx_deferred_start = rxq_info->deferred_start;
	qinfo->scattered_rx = (rxq_info->type == EFX_RXQ_TYPE_SCATTER);
	qinfo->nb_desc = rxq_info->entries;

	sfc_adapter_unlock(sa);
}

static void
sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		      struct rte_eth_txq_info *qinfo)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_txq_info *txq_info;

	sfc_adapter_lock(sa);

	SFC_ASSERT(tx_queue_id < sa->txq_count);

	txq_info = &sa->txq_info[tx_queue_id];
	SFC_ASSERT(txq_info->txq != NULL);

	memset(qinfo, 0, sizeof(*qinfo));

	qinfo->conf.txq_flags = txq_info->txq->flags;
	qinfo->conf.tx_free_thresh = txq_info->txq->free_thresh;
	qinfo->conf.tx_deferred_start = txq_info->deferred_start;
	qinfo->nb_desc = txq_info->entries;

	sfc_adapter_unlock(sa);
}

static uint32_t
sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "RxQ=%u", rx_queue_id);

	return sfc_rx_qdesc_npending(sa, rx_queue_id);
}

static int
sfc_rx_descriptor_done(void *queue, uint16_t offset)
{
	struct sfc_rxq *rxq = queue;

	return sfc_rx_qdesc_done(rxq, offset);
}

static int
sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	sfc_log_init(sa, "RxQ=%u", rx_queue_id);

	sfc_adapter_lock(sa);

	rc = EINVAL;
	if (sa->state != SFC_ADAPTER_STARTED)
		goto fail_not_started;

	rc = sfc_rx_qstart(sa, rx_queue_id);
	if (rc != 0)
		goto fail_rx_qstart;

	sa->rxq_info[rx_queue_id].deferred_started = B_TRUE;

	sfc_adapter_unlock(sa);

	return 0;

fail_rx_qstart:
fail_not_started:
	sfc_adapter_unlock(sa);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "RxQ=%u", rx_queue_id);

	sfc_adapter_lock(sa);
	sfc_rx_qstop(sa, rx_queue_id);

	sa->rxq_info[rx_queue_id].deferred_started = B_FALSE;

	sfc_adapter_unlock(sa);

	return 0;
}

static int
sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	sfc_log_init(sa, "TxQ = %u", tx_queue_id);

	sfc_adapter_lock(sa);

	rc = EINVAL;
	if (sa->state != SFC_ADAPTER_STARTED)
		goto fail_not_started;

	rc = sfc_tx_qstart(sa, tx_queue_id);
	if (rc != 0)
		goto fail_tx_qstart;

	sa->txq_info[tx_queue_id].deferred_started = B_TRUE;

	sfc_adapter_unlock(sa);
	return 0;

fail_tx_qstart:

fail_not_started:
	sfc_adapter_unlock(sa);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "TxQ = %u", tx_queue_id);

	sfc_adapter_lock(sa);

	sfc_tx_qstop(sa, tx_queue_id);

	sa->txq_info[tx_queue_id].deferred_started = B_FALSE;

	sfc_adapter_unlock(sa);
	return 0;
}

#if EFSYS_OPT_RX_SCALE
static int
sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	if ((sa->rss_channels == 1) ||
	    (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE))
		return -ENOTSUP;

	sfc_adapter_lock(sa);

	/*
	 * Mapping of hash configuration between RTE and EFX is not one-to-one,
	 * hence, conversion is done here to derive a correct set of ETH_RSS
	 * flags which corresponds to the active EFX configuration stored
	 * locally in 'sfc_adapter' and kept up-to-date
	 */
	rss_conf->rss_hf = sfc_efx_to_rte_hash_type(sa->rss_hash_types);
	rss_conf->rss_key_len = SFC_RSS_KEY_SIZE;
	if (rss_conf->rss_key != NULL)
		rte_memcpy(rss_conf->rss_key, sa->rss_key, SFC_RSS_KEY_SIZE);

	sfc_adapter_unlock(sa);

	return 0;
}

static int
sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	unsigned int efx_hash_types;
	int rc = 0;

	if ((sa->rss_channels == 1) ||
	    (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE)) {
		sfc_err(sa, "RSS is not available");
		return -ENOTSUP;
	}

	if ((rss_conf->rss_key != NULL) &&
	    (rss_conf->rss_key_len != sizeof(sa->rss_key))) {
		sfc_err(sa, "RSS key size is wrong (should be %lu)",
			sizeof(sa->rss_key));
		return -EINVAL;
	}

	if ((rss_conf->rss_hf & ~SFC_RSS_OFFLOADS) != 0) {
		sfc_err(sa, "unsupported hash functions requested");
		return -EINVAL;
	}

	sfc_adapter_lock(sa);

	efx_hash_types = sfc_rte_to_efx_hash_type(rss_conf->rss_hf);

	rc = efx_rx_scale_mode_set(sa->nic, EFX_RX_HASHALG_TOEPLITZ,
				   efx_hash_types, B_TRUE);
	if (rc != 0)
		goto fail_scale_mode_set;

	if (rss_conf->rss_key != NULL) {
		if (sa->state == SFC_ADAPTER_STARTED) {
			rc = efx_rx_scale_key_set(sa->nic, rss_conf->rss_key,
						  sizeof(sa->rss_key));
			if (rc != 0)
				goto fail_scale_key_set;
		}

		rte_memcpy(sa->rss_key, rss_conf->rss_key, sizeof(sa->rss_key));
	}

	sa->rss_hash_types = efx_hash_types;

	sfc_adapter_unlock(sa);

	return 0;

fail_scale_key_set:
	if (efx_rx_scale_mode_set(sa->nic, EFX_RX_HASHALG_TOEPLITZ,
				  sa->rss_hash_types, B_TRUE) != 0)
		sfc_err(sa, "failed to restore RSS mode");

fail_scale_mode_set:
	sfc_adapter_unlock(sa);
	return -rc;
}

static int
sfc_dev_rss_reta_query(struct rte_eth_dev *dev,
		       struct rte_eth_rss_reta_entry64 *reta_conf,
		       uint16_t reta_size)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int entry;

	if ((sa->rss_channels == 1) ||
	    (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE))
		return -ENOTSUP;

	if (reta_size != EFX_RSS_TBL_SIZE)
		return -EINVAL;

	sfc_adapter_lock(sa);

	for (entry = 0; entry < reta_size; entry++) {
		int grp = entry / RTE_RETA_GROUP_SIZE;
		int grp_idx = entry % RTE_RETA_GROUP_SIZE;

		if ((reta_conf[grp].mask >> grp_idx) & 1)
			reta_conf[grp].reta[grp_idx] = sa->rss_tbl[entry];
	}

	sfc_adapter_unlock(sa);

	return 0;
}

static int
sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	unsigned int *rss_tbl_new;
	uint16_t entry;
	int rc;

	if ((sa->rss_channels == 1) ||
	    (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE)) {
		sfc_err(sa, "RSS is not available");
		return -ENOTSUP;
	}

	if (reta_size != EFX_RSS_TBL_SIZE) {
		sfc_err(sa, "RETA size is wrong (should be %u)",
			EFX_RSS_TBL_SIZE);
		return -EINVAL;
	}

	rss_tbl_new = rte_zmalloc("rss_tbl_new", sizeof(sa->rss_tbl), 0);
	if (rss_tbl_new == NULL)
		return -ENOMEM;

	sfc_adapter_lock(sa);

	rte_memcpy(rss_tbl_new, sa->rss_tbl, sizeof(sa->rss_tbl));

	for (entry = 0; entry < reta_size; entry++) {
		int grp_idx = entry % RTE_RETA_GROUP_SIZE;
		struct rte_eth_rss_reta_entry64 *grp;

		grp = &reta_conf[entry / RTE_RETA_GROUP_SIZE];

		if (grp->mask & (1ull << grp_idx)) {
			if (grp->reta[grp_idx] >= sa->rss_channels) {
				rc = EINVAL;
				goto bad_reta_entry;
			}
			rss_tbl_new[entry] = grp->reta[grp_idx];
		}
	}

	rc = efx_rx_scale_tbl_set(sa->nic, rss_tbl_new, EFX_RSS_TBL_SIZE);
	if (rc == 0)
		rte_memcpy(sa->rss_tbl, rss_tbl_new, sizeof(sa->rss_tbl));

bad_reta_entry:
	sfc_adapter_unlock(sa);

	rte_free(rss_tbl_new);

	SFC_ASSERT(rc >= 0);
	return -rc;
}
#endif

static const struct eth_dev_ops sfc_eth_dev_ops = {
	.dev_configure = sfc_dev_configure,
	.dev_start = sfc_dev_start,
	.dev_stop = sfc_dev_stop,
	.dev_set_link_up = sfc_dev_set_link_up,
	.dev_set_link_down = sfc_dev_set_link_down,
	.dev_close = sfc_dev_close,
	.promiscuous_enable = sfc_dev_promisc_enable,
	.promiscuous_disable = sfc_dev_promisc_disable,
	.allmulticast_enable = sfc_dev_allmulti_enable,
	.allmulticast_disable = sfc_dev_allmulti_disable,
	.link_update = sfc_dev_link_update,
	.stats_get = sfc_stats_get,
	.xstats_get = sfc_xstats_get,
	.xstats_get_names = sfc_xstats_get_names,
	.dev_infos_get = sfc_dev_infos_get,
	.dev_supported_ptypes_get = sfc_dev_supported_ptypes_get,
	.mtu_set = sfc_dev_set_mtu,
	.rx_queue_start = sfc_rx_queue_start,
	.rx_queue_stop = sfc_rx_queue_stop,
	.tx_queue_start = sfc_tx_queue_start,
	.tx_queue_stop = sfc_tx_queue_stop,
	.rx_queue_setup = sfc_rx_queue_setup,
	.rx_queue_release = sfc_rx_queue_release,
	.rx_queue_count = sfc_rx_queue_count,
	.rx_descriptor_done = sfc_rx_descriptor_done,
	.tx_queue_setup = sfc_tx_queue_setup,
	.tx_queue_release = sfc_tx_queue_release,
	.flow_ctrl_get = sfc_flow_ctrl_get,
	.flow_ctrl_set = sfc_flow_ctrl_set,
	.mac_addr_set = sfc_mac_addr_set,
#if EFSYS_OPT_RX_SCALE
	.reta_update = sfc_dev_rss_reta_update,
	.reta_query = sfc_dev_rss_reta_query,
	.rss_hash_update = sfc_dev_rss_hash_update,
	.rss_hash_conf_get = sfc_dev_rss_hash_conf_get,
#endif
	.set_mc_addr_list = sfc_set_mc_addr_list,
	.rxq_info_get = sfc_rx_queue_info_get,
	.txq_info_get = sfc_tx_queue_info_get,
};

static int
sfc_eth_dev_init(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(dev);
	int rc;
	const efx_nic_cfg_t *encp;
	const struct ether_addr *from;

	/* Required for logging */
	sa->eth_dev = dev;

	/* Copy PCI device info to the dev->data */
	rte_eth_copy_pci_info(dev, pci_dev);

	rc = sfc_kvargs_parse(sa);
	if (rc != 0)
		goto fail_kvargs_parse;

	rc = sfc_kvargs_process(sa, SFC_KVARG_DEBUG_INIT,
				sfc_kvarg_bool_handler, &sa->debug_init);
	if (rc != 0)
		goto fail_kvarg_debug_init;

	sfc_log_init(sa, "entry");

	dev->data->mac_addrs = rte_zmalloc("sfc", ETHER_ADDR_LEN, 0);
	if (dev->data->mac_addrs == NULL) {
		rc = ENOMEM;
		goto fail_mac_addrs;
	}

	sfc_adapter_lock_init(sa);
	sfc_adapter_lock(sa);

	sfc_log_init(sa, "attaching");
	rc = sfc_attach(sa);
	if (rc != 0)
		goto fail_attach;

	encp = efx_nic_cfg_get(sa->nic);

	/*
	 * The arguments are in reverse order compared to the Linux
	 * kernel version. Copy from NIC config to Ethernet device data.
	 */
	from = (const struct ether_addr *)(encp->enc_mac_addr);
	ether_addr_copy(from, &dev->data->mac_addrs[0]);

	dev->dev_ops = &sfc_eth_dev_ops;
	dev->rx_pkt_burst = &sfc_recv_pkts;
	dev->tx_pkt_burst = &sfc_xmit_pkts;

	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
	return 0;

fail_attach:
	sfc_adapter_unlock(sa);
	sfc_adapter_lock_fini(sa);
	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

fail_mac_addrs:
fail_kvarg_debug_init:
	sfc_kvargs_cleanup(sa);

fail_kvargs_parse:
	sfc_log_init(sa, "failed %d", rc);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_eth_dev_uninit(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);

	sfc_detach(sa);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	sfc_kvargs_cleanup(sa);

	sfc_adapter_unlock(sa);
	sfc_adapter_lock_fini(sa);

	sfc_log_init(sa, "done");

	/* Required for logging, so cleanup last */
	sa->eth_dev = NULL;
	return 0;
}

static const struct rte_pci_id pci_id_sfc_efx_map[] = {
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) },
	{ .vendor_id = 0 /* sentinel */ }
};

static struct eth_driver sfc_efx_pmd = {
	.pci_drv = {
		.id_table = pci_id_sfc_efx_map,
		.drv_flags =
			RTE_PCI_DRV_INTR_LSC |
			RTE_PCI_DRV_NEED_MAPPING,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = sfc_eth_dev_init,
	.eth_dev_uninit = sfc_eth_dev_uninit,
	.dev_private_size = sizeof(struct sfc_adapter),
};

RTE_PMD_REGISTER_PCI(net_sfc_efx, sfc_efx_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map);
RTE_PMD_REGISTER_KMOD_DEP(net_sfc_efx, "* igb_uio | uio_pci_generic | vfio");
RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx,
	SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " "
	SFC_KVARG_MCDI_LOGGING "=" SFC_KVARG_VALUES_BOOL " "
	SFC_KVARG_DEBUG_INIT "=" SFC_KVARG_VALUES_BOOL);