1 /*- 2 * Copyright (c) 2016 Solarflare Communications Inc. 3 * All rights reserved. 4 * 5 * This software was jointly developed between OKTET Labs (under contract 6 * for Solarflare) and Solarflare Communications, Inc. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright notice, 12 * this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright notice, 14 * this list of conditions and the following disclaimer in the documentation 15 * and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 18 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 19 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR 21 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 22 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 26 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, 27 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30 #include <rte_dev.h> 31 #include <rte_ethdev.h> 32 #include <rte_pci.h> 33 34 #include "efx.h" 35 36 #include "sfc.h" 37 #include "sfc_debug.h" 38 #include "sfc_log.h" 39 #include "sfc_kvargs.h" 40 #include "sfc_ev.h" 41 #include "sfc_rx.h" 42 #include "sfc_tx.h" 43 44 45 static void 46 sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 47 { 48 struct sfc_adapter *sa = dev->data->dev_private; 49 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); 50 51 sfc_log_init(sa, "entry"); 52 53 dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device); 54 dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX; 55 56 /* Autonegotiation may be disabled */ 57 dev_info->speed_capa = ETH_LINK_SPEED_FIXED; 58 if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_1000FDX) 59 dev_info->speed_capa |= ETH_LINK_SPEED_1G; 60 if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_10000FDX) 61 dev_info->speed_capa |= ETH_LINK_SPEED_10G; 62 if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_40000FDX) 63 dev_info->speed_capa |= ETH_LINK_SPEED_40G; 64 65 dev_info->max_rx_queues = sa->rxq_max; 66 dev_info->max_tx_queues = sa->txq_max; 67 68 /* By default packets are dropped if no descriptors are available */ 69 dev_info->default_rxconf.rx_drop_en = 1; 70 71 dev_info->rx_offload_capa = 72 DEV_RX_OFFLOAD_IPV4_CKSUM | 73 DEV_RX_OFFLOAD_UDP_CKSUM | 74 DEV_RX_OFFLOAD_TCP_CKSUM; 75 76 dev_info->tx_offload_capa = 77 DEV_TX_OFFLOAD_IPV4_CKSUM | 78 DEV_TX_OFFLOAD_UDP_CKSUM | 79 DEV_TX_OFFLOAD_TCP_CKSUM; 80 81 dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOXSUMSCTP; 82 if (!encp->enc_hw_tx_insert_vlan_enabled) 83 dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOVLANOFFL; 84 else 85 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_VLAN_INSERT; 86 87 #if EFSYS_OPT_RX_SCALE 88 if (sa->rss_support != EFX_RX_SCALE_UNAVAILABLE) { 89 dev_info->reta_size = EFX_RSS_TBL_SIZE; 90 dev_info->hash_key_size = SFC_RSS_KEY_SIZE; 91 dev_info->flow_type_rss_offloads = SFC_RSS_OFFLOADS; 92 } 93 #endif 94 95 dev_info->rx_desc_lim.nb_max = EFX_RXQ_MAXNDESCS; 96 dev_info->rx_desc_lim.nb_min = EFX_RXQ_MINNDESCS; 97 /* The RXQ hardware requires that the descriptor count is a power 98 * of 2, but rx_desc_lim cannot properly describe that constraint. 99 */ 100 dev_info->rx_desc_lim.nb_align = EFX_RXQ_MINNDESCS; 101 102 dev_info->tx_desc_lim.nb_max = sa->txq_max_entries; 103 dev_info->tx_desc_lim.nb_min = EFX_TXQ_MINNDESCS; 104 /* 105 * The TXQ hardware requires that the descriptor count is a power 106 * of 2, but tx_desc_lim cannot properly describe that constraint 107 */ 108 dev_info->tx_desc_lim.nb_align = EFX_TXQ_MINNDESCS; 109 } 110 111 static const uint32_t * 112 sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev) 113 { 114 static const uint32_t ptypes[] = { 115 RTE_PTYPE_L2_ETHER, 116 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 117 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 118 RTE_PTYPE_L4_TCP, 119 RTE_PTYPE_L4_UDP, 120 RTE_PTYPE_UNKNOWN 121 }; 122 123 if (dev->rx_pkt_burst == sfc_recv_pkts) 124 return ptypes; 125 126 return NULL; 127 } 128 129 static int 130 sfc_dev_configure(struct rte_eth_dev *dev) 131 { 132 struct rte_eth_dev_data *dev_data = dev->data; 133 struct sfc_adapter *sa = dev_data->dev_private; 134 int rc; 135 136 sfc_log_init(sa, "entry n_rxq=%u n_txq=%u", 137 dev_data->nb_rx_queues, dev_data->nb_tx_queues); 138 139 sfc_adapter_lock(sa); 140 switch (sa->state) { 141 case SFC_ADAPTER_CONFIGURED: 142 sfc_close(sa); 143 SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED); 144 /* FALLTHROUGH */ 145 case SFC_ADAPTER_INITIALIZED: 146 rc = sfc_configure(sa); 147 break; 148 default: 149 sfc_err(sa, "unexpected adapter state %u to configure", 150 sa->state); 151 rc = EINVAL; 152 break; 153 } 154 sfc_adapter_unlock(sa); 155 156 sfc_log_init(sa, "done %d", rc); 157 SFC_ASSERT(rc >= 0); 158 return -rc; 159 } 160 161 static int 162 sfc_dev_start(struct rte_eth_dev *dev) 163 { 164 struct sfc_adapter *sa = dev->data->dev_private; 165 int rc; 166 167 sfc_log_init(sa, "entry"); 168 169 sfc_adapter_lock(sa); 170 rc = sfc_start(sa); 171 sfc_adapter_unlock(sa); 172 173 sfc_log_init(sa, "done %d", rc); 174 SFC_ASSERT(rc >= 0); 175 return -rc; 176 } 177 178 static int 179 sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) 180 { 181 struct sfc_adapter *sa = dev->data->dev_private; 182 struct rte_eth_link *dev_link = &dev->data->dev_link; 183 struct rte_eth_link old_link; 184 struct rte_eth_link current_link; 185 186 sfc_log_init(sa, "entry"); 187 188 if (sa->state != SFC_ADAPTER_STARTED) 189 return 0; 190 191 retry: 192 EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t)); 193 *(int64_t *)&old_link = rte_atomic64_read((rte_atomic64_t *)dev_link); 194 195 if (wait_to_complete) { 196 efx_link_mode_t link_mode; 197 198 efx_port_poll(sa->nic, &link_mode); 199 sfc_port_link_mode_to_info(link_mode, ¤t_link); 200 201 if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link, 202 *(uint64_t *)&old_link, 203 *(uint64_t *)¤t_link)) 204 goto retry; 205 } else { 206 sfc_ev_mgmt_qpoll(sa); 207 *(int64_t *)¤t_link = 208 rte_atomic64_read((rte_atomic64_t *)dev_link); 209 } 210 211 if (old_link.link_status != current_link.link_status) 212 sfc_info(sa, "Link status is %s", 213 current_link.link_status ? "UP" : "DOWN"); 214 215 return old_link.link_status == current_link.link_status ? 0 : -1; 216 } 217 218 static void 219 sfc_dev_stop(struct rte_eth_dev *dev) 220 { 221 struct sfc_adapter *sa = dev->data->dev_private; 222 223 sfc_log_init(sa, "entry"); 224 225 sfc_adapter_lock(sa); 226 sfc_stop(sa); 227 sfc_adapter_unlock(sa); 228 229 sfc_log_init(sa, "done"); 230 } 231 232 static int 233 sfc_dev_set_link_up(struct rte_eth_dev *dev) 234 { 235 struct sfc_adapter *sa = dev->data->dev_private; 236 int rc; 237 238 sfc_log_init(sa, "entry"); 239 240 sfc_adapter_lock(sa); 241 rc = sfc_start(sa); 242 sfc_adapter_unlock(sa); 243 244 SFC_ASSERT(rc >= 0); 245 return -rc; 246 } 247 248 static int 249 sfc_dev_set_link_down(struct rte_eth_dev *dev) 250 { 251 struct sfc_adapter *sa = dev->data->dev_private; 252 253 sfc_log_init(sa, "entry"); 254 255 sfc_adapter_lock(sa); 256 sfc_stop(sa); 257 sfc_adapter_unlock(sa); 258 259 return 0; 260 } 261 262 static void 263 sfc_dev_close(struct rte_eth_dev *dev) 264 { 265 struct sfc_adapter *sa = dev->data->dev_private; 266 267 sfc_log_init(sa, "entry"); 268 269 sfc_adapter_lock(sa); 270 switch (sa->state) { 271 case SFC_ADAPTER_STARTED: 272 sfc_stop(sa); 273 SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED); 274 /* FALLTHROUGH */ 275 case SFC_ADAPTER_CONFIGURED: 276 sfc_close(sa); 277 SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED); 278 /* FALLTHROUGH */ 279 case SFC_ADAPTER_INITIALIZED: 280 break; 281 default: 282 sfc_err(sa, "unexpected adapter state %u on close", sa->state); 283 break; 284 } 285 sfc_adapter_unlock(sa); 286 287 sfc_log_init(sa, "done"); 288 } 289 290 static void 291 sfc_dev_filter_set(struct rte_eth_dev *dev, enum sfc_dev_filter_mode mode, 292 boolean_t enabled) 293 { 294 struct sfc_port *port; 295 boolean_t *toggle; 296 struct sfc_adapter *sa = dev->data->dev_private; 297 boolean_t allmulti = (mode == SFC_DEV_FILTER_MODE_ALLMULTI); 298 const char *desc = (allmulti) ? "all-multi" : "promiscuous"; 299 300 sfc_adapter_lock(sa); 301 302 port = &sa->port; 303 toggle = (allmulti) ? (&port->allmulti) : (&port->promisc); 304 305 if (*toggle != enabled) { 306 *toggle = enabled; 307 308 if ((sa->state == SFC_ADAPTER_STARTED) && 309 (sfc_set_rx_mode(sa) != 0)) { 310 *toggle = !(enabled); 311 sfc_warn(sa, "Failed to %s %s mode", 312 ((enabled) ? "enable" : "disable"), desc); 313 } 314 } 315 316 sfc_adapter_unlock(sa); 317 } 318 319 static void 320 sfc_dev_promisc_enable(struct rte_eth_dev *dev) 321 { 322 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_TRUE); 323 } 324 325 static void 326 sfc_dev_promisc_disable(struct rte_eth_dev *dev) 327 { 328 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_FALSE); 329 } 330 331 static void 332 sfc_dev_allmulti_enable(struct rte_eth_dev *dev) 333 { 334 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_TRUE); 335 } 336 337 static void 338 sfc_dev_allmulti_disable(struct rte_eth_dev *dev) 339 { 340 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_FALSE); 341 } 342 343 static int 344 sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, 345 uint16_t nb_rx_desc, unsigned int socket_id, 346 const struct rte_eth_rxconf *rx_conf, 347 struct rte_mempool *mb_pool) 348 { 349 struct sfc_adapter *sa = dev->data->dev_private; 350 int rc; 351 352 sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u", 353 rx_queue_id, nb_rx_desc, socket_id); 354 355 sfc_adapter_lock(sa); 356 357 rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id, 358 rx_conf, mb_pool); 359 if (rc != 0) 360 goto fail_rx_qinit; 361 362 dev->data->rx_queues[rx_queue_id] = sa->rxq_info[rx_queue_id].rxq; 363 364 sfc_adapter_unlock(sa); 365 366 return 0; 367 368 fail_rx_qinit: 369 sfc_adapter_unlock(sa); 370 SFC_ASSERT(rc > 0); 371 return -rc; 372 } 373 374 static void 375 sfc_rx_queue_release(void *queue) 376 { 377 struct sfc_rxq *rxq = queue; 378 struct sfc_adapter *sa; 379 unsigned int sw_index; 380 381 if (rxq == NULL) 382 return; 383 384 sa = rxq->evq->sa; 385 sfc_adapter_lock(sa); 386 387 sw_index = sfc_rxq_sw_index(rxq); 388 389 sfc_log_init(sa, "RxQ=%u", sw_index); 390 391 sa->eth_dev->data->rx_queues[sw_index] = NULL; 392 393 sfc_rx_qfini(sa, sw_index); 394 395 sfc_adapter_unlock(sa); 396 } 397 398 static int 399 sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, 400 uint16_t nb_tx_desc, unsigned int socket_id, 401 const struct rte_eth_txconf *tx_conf) 402 { 403 struct sfc_adapter *sa = dev->data->dev_private; 404 int rc; 405 406 sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u", 407 tx_queue_id, nb_tx_desc, socket_id); 408 409 sfc_adapter_lock(sa); 410 411 rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf); 412 if (rc != 0) 413 goto fail_tx_qinit; 414 415 dev->data->tx_queues[tx_queue_id] = sa->txq_info[tx_queue_id].txq; 416 417 sfc_adapter_unlock(sa); 418 return 0; 419 420 fail_tx_qinit: 421 sfc_adapter_unlock(sa); 422 SFC_ASSERT(rc > 0); 423 return -rc; 424 } 425 426 static void 427 sfc_tx_queue_release(void *queue) 428 { 429 struct sfc_txq *txq = queue; 430 unsigned int sw_index; 431 struct sfc_adapter *sa; 432 433 if (txq == NULL) 434 return; 435 436 sw_index = sfc_txq_sw_index(txq); 437 438 SFC_ASSERT(txq->evq != NULL); 439 sa = txq->evq->sa; 440 441 sfc_log_init(sa, "TxQ = %u", sw_index); 442 443 sfc_adapter_lock(sa); 444 445 SFC_ASSERT(sw_index < sa->eth_dev->data->nb_tx_queues); 446 sa->eth_dev->data->tx_queues[sw_index] = NULL; 447 448 sfc_tx_qfini(sa, sw_index); 449 450 sfc_adapter_unlock(sa); 451 } 452 453 static void 454 sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 455 { 456 struct sfc_adapter *sa = dev->data->dev_private; 457 struct sfc_port *port = &sa->port; 458 uint64_t *mac_stats; 459 460 rte_spinlock_lock(&port->mac_stats_lock); 461 462 if (sfc_port_update_mac_stats(sa) != 0) 463 goto unlock; 464 465 mac_stats = port->mac_stats_buf; 466 467 if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, 468 EFX_MAC_VADAPTER_RX_UNICAST_PACKETS)) { 469 stats->ipackets = 470 mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS] + 471 mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS] + 472 mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS]; 473 stats->opackets = 474 mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS] + 475 mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS] + 476 mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS]; 477 stats->ibytes = 478 mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_BYTES] + 479 mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES] + 480 mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES]; 481 stats->obytes = 482 mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_BYTES] + 483 mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES] + 484 mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES]; 485 stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_OVERFLOW]; 486 stats->ierrors = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS]; 487 stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS]; 488 } else { 489 stats->ipackets = mac_stats[EFX_MAC_RX_PKTS]; 490 stats->opackets = mac_stats[EFX_MAC_TX_PKTS]; 491 stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS]; 492 stats->obytes = mac_stats[EFX_MAC_TX_OCTETS]; 493 /* 494 * Take into account stats which are whenever supported 495 * on EF10. If some stat is not supported by current 496 * firmware variant or HW revision, it is guaranteed 497 * to be zero in mac_stats. 498 */ 499 stats->imissed = 500 mac_stats[EFX_MAC_RX_NODESC_DROP_CNT] + 501 mac_stats[EFX_MAC_PM_TRUNC_BB_OVERFLOW] + 502 mac_stats[EFX_MAC_PM_DISCARD_BB_OVERFLOW] + 503 mac_stats[EFX_MAC_PM_TRUNC_VFIFO_FULL] + 504 mac_stats[EFX_MAC_PM_DISCARD_VFIFO_FULL] + 505 mac_stats[EFX_MAC_PM_TRUNC_QBB] + 506 mac_stats[EFX_MAC_PM_DISCARD_QBB] + 507 mac_stats[EFX_MAC_PM_DISCARD_MAPPING] + 508 mac_stats[EFX_MAC_RXDP_Q_DISABLED_PKTS] + 509 mac_stats[EFX_MAC_RXDP_DI_DROPPED_PKTS]; 510 stats->ierrors = 511 mac_stats[EFX_MAC_RX_FCS_ERRORS] + 512 mac_stats[EFX_MAC_RX_ALIGN_ERRORS] + 513 mac_stats[EFX_MAC_RX_JABBER_PKTS]; 514 /* no oerrors counters supported on EF10 */ 515 } 516 517 unlock: 518 rte_spinlock_unlock(&port->mac_stats_lock); 519 } 520 521 static int 522 sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 523 unsigned int xstats_count) 524 { 525 struct sfc_adapter *sa = dev->data->dev_private; 526 struct sfc_port *port = &sa->port; 527 uint64_t *mac_stats; 528 int rc; 529 unsigned int i; 530 int nstats = 0; 531 532 rte_spinlock_lock(&port->mac_stats_lock); 533 534 rc = sfc_port_update_mac_stats(sa); 535 if (rc != 0) { 536 SFC_ASSERT(rc > 0); 537 nstats = -rc; 538 goto unlock; 539 } 540 541 mac_stats = port->mac_stats_buf; 542 543 for (i = 0; i < EFX_MAC_NSTATS; ++i) { 544 if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) { 545 if (xstats != NULL && nstats < (int)xstats_count) { 546 xstats[nstats].id = nstats; 547 xstats[nstats].value = mac_stats[i]; 548 } 549 nstats++; 550 } 551 } 552 553 unlock: 554 rte_spinlock_unlock(&port->mac_stats_lock); 555 556 return nstats; 557 } 558 559 static int 560 sfc_xstats_get_names(struct rte_eth_dev *dev, 561 struct rte_eth_xstat_name *xstats_names, 562 unsigned int xstats_count) 563 { 564 struct sfc_adapter *sa = dev->data->dev_private; 565 struct sfc_port *port = &sa->port; 566 unsigned int i; 567 unsigned int nstats = 0; 568 569 for (i = 0; i < EFX_MAC_NSTATS; ++i) { 570 if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) { 571 if (xstats_names != NULL && nstats < xstats_count) 572 strncpy(xstats_names[nstats].name, 573 efx_mac_stat_name(sa->nic, i), 574 sizeof(xstats_names[0].name)); 575 nstats++; 576 } 577 } 578 579 return nstats; 580 } 581 582 static int 583 sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 584 { 585 struct sfc_adapter *sa = dev->data->dev_private; 586 unsigned int wanted_fc, link_fc; 587 588 memset(fc_conf, 0, sizeof(*fc_conf)); 589 590 sfc_adapter_lock(sa); 591 592 if (sa->state == SFC_ADAPTER_STARTED) 593 efx_mac_fcntl_get(sa->nic, &wanted_fc, &link_fc); 594 else 595 link_fc = sa->port.flow_ctrl; 596 597 switch (link_fc) { 598 case 0: 599 fc_conf->mode = RTE_FC_NONE; 600 break; 601 case EFX_FCNTL_RESPOND: 602 fc_conf->mode = RTE_FC_RX_PAUSE; 603 break; 604 case EFX_FCNTL_GENERATE: 605 fc_conf->mode = RTE_FC_TX_PAUSE; 606 break; 607 case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE): 608 fc_conf->mode = RTE_FC_FULL; 609 break; 610 default: 611 sfc_err(sa, "%s: unexpected flow control value %#x", 612 __func__, link_fc); 613 } 614 615 fc_conf->autoneg = sa->port.flow_ctrl_autoneg; 616 617 sfc_adapter_unlock(sa); 618 619 return 0; 620 } 621 622 static int 623 sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 624 { 625 struct sfc_adapter *sa = dev->data->dev_private; 626 struct sfc_port *port = &sa->port; 627 unsigned int fcntl; 628 int rc; 629 630 if (fc_conf->high_water != 0 || fc_conf->low_water != 0 || 631 fc_conf->pause_time != 0 || fc_conf->send_xon != 0 || 632 fc_conf->mac_ctrl_frame_fwd != 0) { 633 sfc_err(sa, "unsupported flow control settings specified"); 634 rc = EINVAL; 635 goto fail_inval; 636 } 637 638 switch (fc_conf->mode) { 639 case RTE_FC_NONE: 640 fcntl = 0; 641 break; 642 case RTE_FC_RX_PAUSE: 643 fcntl = EFX_FCNTL_RESPOND; 644 break; 645 case RTE_FC_TX_PAUSE: 646 fcntl = EFX_FCNTL_GENERATE; 647 break; 648 case RTE_FC_FULL: 649 fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE; 650 break; 651 default: 652 rc = EINVAL; 653 goto fail_inval; 654 } 655 656 sfc_adapter_lock(sa); 657 658 if (sa->state == SFC_ADAPTER_STARTED) { 659 rc = efx_mac_fcntl_set(sa->nic, fcntl, fc_conf->autoneg); 660 if (rc != 0) 661 goto fail_mac_fcntl_set; 662 } 663 664 port->flow_ctrl = fcntl; 665 port->flow_ctrl_autoneg = fc_conf->autoneg; 666 667 sfc_adapter_unlock(sa); 668 669 return 0; 670 671 fail_mac_fcntl_set: 672 sfc_adapter_unlock(sa); 673 fail_inval: 674 SFC_ASSERT(rc > 0); 675 return -rc; 676 } 677 678 static int 679 sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) 680 { 681 struct sfc_adapter *sa = dev->data->dev_private; 682 size_t pdu = EFX_MAC_PDU(mtu); 683 size_t old_pdu; 684 int rc; 685 686 sfc_log_init(sa, "mtu=%u", mtu); 687 688 rc = EINVAL; 689 if (pdu < EFX_MAC_PDU_MIN) { 690 sfc_err(sa, "too small MTU %u (PDU size %u less than min %u)", 691 (unsigned int)mtu, (unsigned int)pdu, 692 EFX_MAC_PDU_MIN); 693 goto fail_inval; 694 } 695 if (pdu > EFX_MAC_PDU_MAX) { 696 sfc_err(sa, "too big MTU %u (PDU size %u greater than max %u)", 697 (unsigned int)mtu, (unsigned int)pdu, 698 EFX_MAC_PDU_MAX); 699 goto fail_inval; 700 } 701 702 sfc_adapter_lock(sa); 703 704 if (pdu != sa->port.pdu) { 705 if (sa->state == SFC_ADAPTER_STARTED) { 706 sfc_stop(sa); 707 708 old_pdu = sa->port.pdu; 709 sa->port.pdu = pdu; 710 rc = sfc_start(sa); 711 if (rc != 0) 712 goto fail_start; 713 } else { 714 sa->port.pdu = pdu; 715 } 716 } 717 718 /* 719 * The driver does not use it, but other PMDs update jumbo_frame 720 * flag and max_rx_pkt_len when MTU is set. 721 */ 722 dev->data->dev_conf.rxmode.jumbo_frame = (mtu > ETHER_MAX_LEN); 723 dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu; 724 725 sfc_adapter_unlock(sa); 726 727 sfc_log_init(sa, "done"); 728 return 0; 729 730 fail_start: 731 sa->port.pdu = old_pdu; 732 if (sfc_start(sa) != 0) 733 sfc_err(sa, "cannot start with neither new (%u) nor old (%u) " 734 "PDU max size - port is stopped", 735 (unsigned int)pdu, (unsigned int)old_pdu); 736 sfc_adapter_unlock(sa); 737 738 fail_inval: 739 sfc_log_init(sa, "failed %d", rc); 740 SFC_ASSERT(rc > 0); 741 return -rc; 742 } 743 static void 744 sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) 745 { 746 struct sfc_adapter *sa = dev->data->dev_private; 747 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); 748 int rc; 749 750 sfc_adapter_lock(sa); 751 752 if (sa->state != SFC_ADAPTER_STARTED) { 753 sfc_info(sa, "the port is not started"); 754 sfc_info(sa, "the new MAC address will be set on port start"); 755 756 goto unlock; 757 } 758 759 if (encp->enc_allow_set_mac_with_installed_filters) { 760 rc = efx_mac_addr_set(sa->nic, mac_addr->addr_bytes); 761 if (rc != 0) { 762 sfc_err(sa, "cannot set MAC address (rc = %u)", rc); 763 goto unlock; 764 } 765 766 /* 767 * Changing the MAC address by means of MCDI request 768 * has no effect on received traffic, therefore 769 * we also need to update unicast filters 770 */ 771 rc = sfc_set_rx_mode(sa); 772 if (rc != 0) 773 sfc_err(sa, "cannot set filter (rc = %u)", rc); 774 } else { 775 sfc_warn(sa, "cannot set MAC address with filters installed"); 776 sfc_warn(sa, "adapter will be restarted to pick the new MAC"); 777 sfc_warn(sa, "(some traffic may be dropped)"); 778 779 /* 780 * Since setting MAC address with filters installed is not 781 * allowed on the adapter, one needs to simply restart adapter 782 * so that the new MAC address will be taken from an outer 783 * storage and set flawlessly by means of sfc_start() call 784 */ 785 sfc_stop(sa); 786 rc = sfc_start(sa); 787 if (rc != 0) 788 sfc_err(sa, "cannot restart adapter (rc = %u)", rc); 789 } 790 791 unlock: 792 sfc_adapter_unlock(sa); 793 } 794 795 796 static int 797 sfc_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set, 798 uint32_t nb_mc_addr) 799 { 800 struct sfc_adapter *sa = dev->data->dev_private; 801 uint8_t *mc_addrs_p; 802 uint8_t *mc_addrs; 803 int rc; 804 unsigned int i; 805 806 if (nb_mc_addr > EFX_MAC_MULTICAST_LIST_MAX) { 807 sfc_err(sa, "too many multicast addresses: %u > %u", 808 nb_mc_addr, EFX_MAC_MULTICAST_LIST_MAX); 809 return -EINVAL; 810 } 811 812 mc_addrs_p = rte_calloc("mc-addrs", nb_mc_addr, EFX_MAC_ADDR_LEN, 0); 813 if (mc_addrs_p == NULL) 814 return -ENOMEM; 815 816 mc_addrs = mc_addrs_p; 817 818 for (i = 0; i < nb_mc_addr; ++i) { 819 (void)rte_memcpy(mc_addrs, mc_addr_set[i].addr_bytes, 820 EFX_MAC_ADDR_LEN); 821 mc_addrs += EFX_MAC_ADDR_LEN; 822 } 823 824 rc = efx_mac_multicast_list_set(sa->nic, mc_addrs_p, nb_mc_addr); 825 826 rte_free(mc_addrs_p); 827 828 if (rc != 0) 829 sfc_err(sa, "cannot set multicast address list (rc = %u)", rc); 830 831 SFC_ASSERT(rc > 0); 832 return -rc; 833 } 834 835 static void 836 sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id, 837 struct rte_eth_rxq_info *qinfo) 838 { 839 struct sfc_adapter *sa = dev->data->dev_private; 840 struct sfc_rxq_info *rxq_info; 841 struct sfc_rxq *rxq; 842 843 sfc_adapter_lock(sa); 844 845 SFC_ASSERT(rx_queue_id < sa->rxq_count); 846 847 rxq_info = &sa->rxq_info[rx_queue_id]; 848 rxq = rxq_info->rxq; 849 SFC_ASSERT(rxq != NULL); 850 851 qinfo->mp = rxq->refill_mb_pool; 852 qinfo->conf.rx_free_thresh = rxq->refill_threshold; 853 qinfo->conf.rx_drop_en = 1; 854 qinfo->conf.rx_deferred_start = rxq_info->deferred_start; 855 qinfo->scattered_rx = (rxq_info->type == EFX_RXQ_TYPE_SCATTER); 856 qinfo->nb_desc = rxq_info->entries; 857 858 sfc_adapter_unlock(sa); 859 } 860 861 static void 862 sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id, 863 struct rte_eth_txq_info *qinfo) 864 { 865 struct sfc_adapter *sa = dev->data->dev_private; 866 struct sfc_txq_info *txq_info; 867 868 sfc_adapter_lock(sa); 869 870 SFC_ASSERT(tx_queue_id < sa->txq_count); 871 872 txq_info = &sa->txq_info[tx_queue_id]; 873 SFC_ASSERT(txq_info->txq != NULL); 874 875 memset(qinfo, 0, sizeof(*qinfo)); 876 877 qinfo->conf.txq_flags = txq_info->txq->flags; 878 qinfo->conf.tx_free_thresh = txq_info->txq->free_thresh; 879 qinfo->conf.tx_deferred_start = txq_info->deferred_start; 880 qinfo->nb_desc = txq_info->entries; 881 882 sfc_adapter_unlock(sa); 883 } 884 885 static uint32_t 886 sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) 887 { 888 struct sfc_adapter *sa = dev->data->dev_private; 889 890 sfc_log_init(sa, "RxQ=%u", rx_queue_id); 891 892 return sfc_rx_qdesc_npending(sa, rx_queue_id); 893 } 894 895 static int 896 sfc_rx_descriptor_done(void *queue, uint16_t offset) 897 { 898 struct sfc_rxq *rxq = queue; 899 900 return sfc_rx_qdesc_done(rxq, offset); 901 } 902 903 static int 904 sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) 905 { 906 struct sfc_adapter *sa = dev->data->dev_private; 907 int rc; 908 909 sfc_log_init(sa, "RxQ=%u", rx_queue_id); 910 911 sfc_adapter_lock(sa); 912 913 rc = EINVAL; 914 if (sa->state != SFC_ADAPTER_STARTED) 915 goto fail_not_started; 916 917 rc = sfc_rx_qstart(sa, rx_queue_id); 918 if (rc != 0) 919 goto fail_rx_qstart; 920 921 sa->rxq_info[rx_queue_id].deferred_started = B_TRUE; 922 923 sfc_adapter_unlock(sa); 924 925 return 0; 926 927 fail_rx_qstart: 928 fail_not_started: 929 sfc_adapter_unlock(sa); 930 SFC_ASSERT(rc > 0); 931 return -rc; 932 } 933 934 static int 935 sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) 936 { 937 struct sfc_adapter *sa = dev->data->dev_private; 938 939 sfc_log_init(sa, "RxQ=%u", rx_queue_id); 940 941 sfc_adapter_lock(sa); 942 sfc_rx_qstop(sa, rx_queue_id); 943 944 sa->rxq_info[rx_queue_id].deferred_started = B_FALSE; 945 946 sfc_adapter_unlock(sa); 947 948 return 0; 949 } 950 951 static int 952 sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) 953 { 954 struct sfc_adapter *sa = dev->data->dev_private; 955 int rc; 956 957 sfc_log_init(sa, "TxQ = %u", tx_queue_id); 958 959 sfc_adapter_lock(sa); 960 961 rc = EINVAL; 962 if (sa->state != SFC_ADAPTER_STARTED) 963 goto fail_not_started; 964 965 rc = sfc_tx_qstart(sa, tx_queue_id); 966 if (rc != 0) 967 goto fail_tx_qstart; 968 969 sa->txq_info[tx_queue_id].deferred_started = B_TRUE; 970 971 sfc_adapter_unlock(sa); 972 return 0; 973 974 fail_tx_qstart: 975 976 fail_not_started: 977 sfc_adapter_unlock(sa); 978 SFC_ASSERT(rc > 0); 979 return -rc; 980 } 981 982 static int 983 sfc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) 984 { 985 struct sfc_adapter *sa = dev->data->dev_private; 986 987 sfc_log_init(sa, "TxQ = %u", tx_queue_id); 988 989 sfc_adapter_lock(sa); 990 991 sfc_tx_qstop(sa, tx_queue_id); 992 993 sa->txq_info[tx_queue_id].deferred_started = B_FALSE; 994 995 sfc_adapter_unlock(sa); 996 return 0; 997 } 998 999 #if EFSYS_OPT_RX_SCALE 1000 static int 1001 sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev, 1002 struct rte_eth_rss_conf *rss_conf) 1003 { 1004 struct sfc_adapter *sa = dev->data->dev_private; 1005 1006 if ((sa->rss_channels == 1) || 1007 (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE)) 1008 return -ENOTSUP; 1009 1010 sfc_adapter_lock(sa); 1011 1012 /* 1013 * Mapping of hash configuration between RTE and EFX is not one-to-one, 1014 * hence, conversion is done here to derive a correct set of ETH_RSS 1015 * flags which corresponds to the active EFX configuration stored 1016 * locally in 'sfc_adapter' and kept up-to-date 1017 */ 1018 rss_conf->rss_hf = sfc_efx_to_rte_hash_type(sa->rss_hash_types); 1019 rss_conf->rss_key_len = SFC_RSS_KEY_SIZE; 1020 if (rss_conf->rss_key != NULL) 1021 rte_memcpy(rss_conf->rss_key, sa->rss_key, SFC_RSS_KEY_SIZE); 1022 1023 sfc_adapter_unlock(sa); 1024 1025 return 0; 1026 } 1027 1028 static int 1029 sfc_dev_rss_hash_update(struct rte_eth_dev *dev, 1030 struct rte_eth_rss_conf *rss_conf) 1031 { 1032 struct sfc_adapter *sa = dev->data->dev_private; 1033 unsigned int efx_hash_types; 1034 int rc = 0; 1035 1036 if ((sa->rss_channels == 1) || 1037 (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE)) { 1038 sfc_err(sa, "RSS is not available"); 1039 return -ENOTSUP; 1040 } 1041 1042 if ((rss_conf->rss_key != NULL) && 1043 (rss_conf->rss_key_len != sizeof(sa->rss_key))) { 1044 sfc_err(sa, "RSS key size is wrong (should be %lu)", 1045 sizeof(sa->rss_key)); 1046 return -EINVAL; 1047 } 1048 1049 if ((rss_conf->rss_hf & ~SFC_RSS_OFFLOADS) != 0) { 1050 sfc_err(sa, "unsupported hash functions requested"); 1051 return -EINVAL; 1052 } 1053 1054 sfc_adapter_lock(sa); 1055 1056 efx_hash_types = sfc_rte_to_efx_hash_type(rss_conf->rss_hf); 1057 1058 rc = efx_rx_scale_mode_set(sa->nic, EFX_RX_HASHALG_TOEPLITZ, 1059 efx_hash_types, B_TRUE); 1060 if (rc != 0) 1061 goto fail_scale_mode_set; 1062 1063 if (rss_conf->rss_key != NULL) { 1064 if (sa->state == SFC_ADAPTER_STARTED) { 1065 rc = efx_rx_scale_key_set(sa->nic, rss_conf->rss_key, 1066 sizeof(sa->rss_key)); 1067 if (rc != 0) 1068 goto fail_scale_key_set; 1069 } 1070 1071 rte_memcpy(sa->rss_key, rss_conf->rss_key, sizeof(sa->rss_key)); 1072 } 1073 1074 sa->rss_hash_types = efx_hash_types; 1075 1076 sfc_adapter_unlock(sa); 1077 1078 return 0; 1079 1080 fail_scale_key_set: 1081 if (efx_rx_scale_mode_set(sa->nic, EFX_RX_HASHALG_TOEPLITZ, 1082 sa->rss_hash_types, B_TRUE) != 0) 1083 sfc_err(sa, "failed to restore RSS mode"); 1084 1085 fail_scale_mode_set: 1086 sfc_adapter_unlock(sa); 1087 return -rc; 1088 } 1089 #endif 1090 1091 static const struct eth_dev_ops sfc_eth_dev_ops = { 1092 .dev_configure = sfc_dev_configure, 1093 .dev_start = sfc_dev_start, 1094 .dev_stop = sfc_dev_stop, 1095 .dev_set_link_up = sfc_dev_set_link_up, 1096 .dev_set_link_down = sfc_dev_set_link_down, 1097 .dev_close = sfc_dev_close, 1098 .promiscuous_enable = sfc_dev_promisc_enable, 1099 .promiscuous_disable = sfc_dev_promisc_disable, 1100 .allmulticast_enable = sfc_dev_allmulti_enable, 1101 .allmulticast_disable = sfc_dev_allmulti_disable, 1102 .link_update = sfc_dev_link_update, 1103 .stats_get = sfc_stats_get, 1104 .xstats_get = sfc_xstats_get, 1105 .xstats_get_names = sfc_xstats_get_names, 1106 .dev_infos_get = sfc_dev_infos_get, 1107 .dev_supported_ptypes_get = sfc_dev_supported_ptypes_get, 1108 .mtu_set = sfc_dev_set_mtu, 1109 .rx_queue_start = sfc_rx_queue_start, 1110 .rx_queue_stop = sfc_rx_queue_stop, 1111 .tx_queue_start = sfc_tx_queue_start, 1112 .tx_queue_stop = sfc_tx_queue_stop, 1113 .rx_queue_setup = sfc_rx_queue_setup, 1114 .rx_queue_release = sfc_rx_queue_release, 1115 .rx_queue_count = sfc_rx_queue_count, 1116 .rx_descriptor_done = sfc_rx_descriptor_done, 1117 .tx_queue_setup = sfc_tx_queue_setup, 1118 .tx_queue_release = sfc_tx_queue_release, 1119 .flow_ctrl_get = sfc_flow_ctrl_get, 1120 .flow_ctrl_set = sfc_flow_ctrl_set, 1121 .mac_addr_set = sfc_mac_addr_set, 1122 #if EFSYS_OPT_RX_SCALE 1123 .rss_hash_update = sfc_dev_rss_hash_update, 1124 .rss_hash_conf_get = sfc_dev_rss_hash_conf_get, 1125 #endif 1126 .set_mc_addr_list = sfc_set_mc_addr_list, 1127 .rxq_info_get = sfc_rx_queue_info_get, 1128 .txq_info_get = sfc_tx_queue_info_get, 1129 }; 1130 1131 static int 1132 sfc_eth_dev_init(struct rte_eth_dev *dev) 1133 { 1134 struct sfc_adapter *sa = dev->data->dev_private; 1135 struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(dev); 1136 int rc; 1137 const efx_nic_cfg_t *encp; 1138 const struct ether_addr *from; 1139 1140 /* Required for logging */ 1141 sa->eth_dev = dev; 1142 1143 /* Copy PCI device info to the dev->data */ 1144 rte_eth_copy_pci_info(dev, pci_dev); 1145 1146 rc = sfc_kvargs_parse(sa); 1147 if (rc != 0) 1148 goto fail_kvargs_parse; 1149 1150 rc = sfc_kvargs_process(sa, SFC_KVARG_DEBUG_INIT, 1151 sfc_kvarg_bool_handler, &sa->debug_init); 1152 if (rc != 0) 1153 goto fail_kvarg_debug_init; 1154 1155 sfc_log_init(sa, "entry"); 1156 1157 dev->data->mac_addrs = rte_zmalloc("sfc", ETHER_ADDR_LEN, 0); 1158 if (dev->data->mac_addrs == NULL) { 1159 rc = ENOMEM; 1160 goto fail_mac_addrs; 1161 } 1162 1163 sfc_adapter_lock_init(sa); 1164 sfc_adapter_lock(sa); 1165 1166 sfc_log_init(sa, "attaching"); 1167 rc = sfc_attach(sa); 1168 if (rc != 0) 1169 goto fail_attach; 1170 1171 encp = efx_nic_cfg_get(sa->nic); 1172 1173 /* 1174 * The arguments are really reverse order in comparison to 1175 * Linux kernel. Copy from NIC config to Ethernet device data. 1176 */ 1177 from = (const struct ether_addr *)(encp->enc_mac_addr); 1178 ether_addr_copy(from, &dev->data->mac_addrs[0]); 1179 1180 dev->dev_ops = &sfc_eth_dev_ops; 1181 dev->rx_pkt_burst = &sfc_recv_pkts; 1182 dev->tx_pkt_burst = &sfc_xmit_pkts; 1183 1184 sfc_adapter_unlock(sa); 1185 1186 sfc_log_init(sa, "done"); 1187 return 0; 1188 1189 fail_attach: 1190 sfc_adapter_unlock(sa); 1191 sfc_adapter_lock_fini(sa); 1192 rte_free(dev->data->mac_addrs); 1193 dev->data->mac_addrs = NULL; 1194 1195 fail_mac_addrs: 1196 fail_kvarg_debug_init: 1197 sfc_kvargs_cleanup(sa); 1198 1199 fail_kvargs_parse: 1200 sfc_log_init(sa, "failed %d", rc); 1201 SFC_ASSERT(rc > 0); 1202 return -rc; 1203 } 1204 1205 static int 1206 sfc_eth_dev_uninit(struct rte_eth_dev *dev) 1207 { 1208 struct sfc_adapter *sa = dev->data->dev_private; 1209 1210 sfc_log_init(sa, "entry"); 1211 1212 sfc_adapter_lock(sa); 1213 1214 sfc_detach(sa); 1215 1216 rte_free(dev->data->mac_addrs); 1217 dev->data->mac_addrs = NULL; 1218 1219 dev->dev_ops = NULL; 1220 dev->rx_pkt_burst = NULL; 1221 dev->tx_pkt_burst = NULL; 1222 1223 sfc_kvargs_cleanup(sa); 1224 1225 sfc_adapter_unlock(sa); 1226 sfc_adapter_lock_fini(sa); 1227 1228 sfc_log_init(sa, "done"); 1229 1230 /* Required for logging, so cleanup last */ 1231 sa->eth_dev = NULL; 1232 return 0; 1233 } 1234 1235 static const struct rte_pci_id pci_id_sfc_efx_map[] = { 1236 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE) }, 1237 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT) }, 1238 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) }, 1239 { .vendor_id = 0 /* sentinel */ } 1240 }; 1241 1242 static struct eth_driver sfc_efx_pmd = { 1243 .pci_drv = { 1244 .id_table = pci_id_sfc_efx_map, 1245 .drv_flags = 1246 RTE_PCI_DRV_INTR_LSC | 1247 RTE_PCI_DRV_NEED_MAPPING, 1248 .probe = rte_eth_dev_pci_probe, 1249 .remove = rte_eth_dev_pci_remove, 1250 }, 1251 .eth_dev_init = sfc_eth_dev_init, 1252 .eth_dev_uninit = sfc_eth_dev_uninit, 1253 .dev_private_size = sizeof(struct sfc_adapter), 1254 }; 1255 1256 RTE_PMD_REGISTER_PCI(net_sfc_efx, sfc_efx_pmd.pci_drv); 1257 RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map); 1258 RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx, 1259 SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " " 1260 SFC_KVARG_MCDI_LOGGING "=" SFC_KVARG_VALUES_BOOL " " 1261 SFC_KVARG_DEBUG_INIT "=" SFC_KVARG_VALUES_BOOL); 1262