/*-
 * Copyright (c) 2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_dev.h>
#include <rte_ethdev.h>
#include <rte_pci.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_kvargs.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tx.h"


static void
sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "entry");

	dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
	dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;

	/* Autonegotiation may be disabled */
	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
	if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_1000FDX)
		dev_info->speed_capa |= ETH_LINK_SPEED_1G;
	if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_10000FDX)
		dev_info->speed_capa |= ETH_LINK_SPEED_10G;
	if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_40000FDX)
		dev_info->speed_capa |= ETH_LINK_SPEED_40G;

	dev_info->max_rx_queues = sa->rxq_max;
	dev_info->max_tx_queues = sa->txq_max;

	/* By default packets are dropped if no descriptors are available */
	dev_info->default_rxconf.rx_drop_en = 1;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;

	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM;

	dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOVLANOFFL |
					     ETH_TXQ_FLAGS_NOXSUMSCTP;

	dev_info->rx_desc_lim.nb_max = EFX_RXQ_MAXNDESCS;
	dev_info->rx_desc_lim.nb_min = EFX_RXQ_MINNDESCS;
	/*
	 * The RXQ hardware requires that the descriptor count is a power
	 * of 2, but rx_desc_lim cannot properly describe that constraint.
	 */
	dev_info->rx_desc_lim.nb_align = EFX_RXQ_MINNDESCS;

	dev_info->tx_desc_lim.nb_max = sa->txq_max_entries;
	dev_info->tx_desc_lim.nb_min = EFX_TXQ_MINNDESCS;
	/*
	 * The TXQ hardware requires that the descriptor count is a power
	 * of 2, but tx_desc_lim cannot properly describe that constraint.
	 */
	dev_info->tx_desc_lim.nb_align = EFX_TXQ_MINNDESCS;
}
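
/*
 * Editorial note with an illustrative sketch (not part of the driver):
 * nb_align cannot express the power-of-2 constraint mentioned above, so
 * an application that wants a ring size the hardware will accept can
 * round up on its own before calling rte_eth_rx_queue_setup(), e.g.:
 *
 *	nb_rx_desc = RTE_MIN(rte_align32pow2(wanted_desc),
 *			     dev_info.rx_desc_lim.nb_max);
 *
 * rte_align32pow2() is the generic round-up-to-power-of-2 helper from
 * rte_common.h; "wanted_desc" is a hypothetical application variable.
 */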

static const uint32_t *
sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == sfc_recv_pkts)
		return ptypes;

	return NULL;
}

static int
sfc_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *dev_data = dev->data;
	struct sfc_adapter *sa = dev_data->dev_private;
	int rc;

	sfc_log_init(sa, "entry n_rxq=%u n_txq=%u",
		     dev_data->nb_rx_queues, dev_data->nb_tx_queues);

	sfc_adapter_lock(sa);
	switch (sa->state) {
	case SFC_ADAPTER_CONFIGURED:
		sfc_close(sa);
		SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED);
		/* FALLTHROUGH */
	case SFC_ADAPTER_INITIALIZED:
		rc = sfc_configure(sa);
		break;
	default:
		sfc_err(sa, "unexpected adapter state %u to configure",
			sa->state);
		rc = EINVAL;
		break;
	}
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done %d", rc);
	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_start(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	rc = sfc_start(sa);
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done %d", rc);
	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_eth_link *dev_link = &dev->data->dev_link;
	struct rte_eth_link old_link;
	struct rte_eth_link current_link;

	sfc_log_init(sa, "entry");

	if (sa->state != SFC_ADAPTER_STARTED)
		return 0;

retry:
	EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
	*(int64_t *)&old_link = rte_atomic64_read((rte_atomic64_t *)dev_link);

	if (wait_to_complete) {
		efx_link_mode_t link_mode;

		efx_port_poll(sa->nic, &link_mode);
		sfc_port_link_mode_to_info(link_mode, &current_link);

		if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link,
					 *(uint64_t *)&old_link,
					 *(uint64_t *)&current_link))
			goto retry;
	} else {
		sfc_ev_mgmt_qpoll(sa);
		*(int64_t *)&current_link =
			rte_atomic64_read((rte_atomic64_t *)dev_link);
	}

	if (old_link.link_status != current_link.link_status)
		sfc_info(sa, "Link status is %s",
			 current_link.link_status ? "UP" : "DOWN");

	return old_link.link_status == current_link.link_status ? 0 : -1;
}
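
/*
 * Editorial note on the lock-free update in sfc_dev_link_update() above:
 * struct rte_eth_link is exactly 64 bits wide (the EFX_STATIC_ASSERT
 * enforces this), so the whole link record can be published atomically
 * with a single rte_atomic64_cmpset(). The cmpset fails, and the retry
 * loop starts over, whenever a concurrent writer (e.g. the management
 * event queue poll) updated dev_link between the rte_atomic64_read()
 * and the cmpset.
 */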

static void
sfc_dev_stop(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	sfc_stop(sa);
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
}

static int
sfc_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	rc = sfc_start(sa);
	sfc_adapter_unlock(sa);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	sfc_stop(sa);
	sfc_adapter_unlock(sa);

	return 0;
}

static void
sfc_dev_close(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	switch (sa->state) {
	case SFC_ADAPTER_STARTED:
		sfc_stop(sa);
		SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
		/* FALLTHROUGH */
	case SFC_ADAPTER_CONFIGURED:
		sfc_close(sa);
		SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED);
		/* FALLTHROUGH */
	case SFC_ADAPTER_INITIALIZED:
		break;
	default:
		sfc_err(sa, "unexpected adapter state %u on close", sa->state);
		break;
	}
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
}

static void
sfc_dev_filter_set(struct rte_eth_dev *dev, enum sfc_dev_filter_mode mode,
		   boolean_t enabled)
{
	struct sfc_port *port;
	boolean_t *toggle;
	struct sfc_adapter *sa = dev->data->dev_private;
	boolean_t allmulti = (mode == SFC_DEV_FILTER_MODE_ALLMULTI);
	const char *desc = (allmulti) ? "all-multi" : "promiscuous";

	sfc_adapter_lock(sa);

	port = &sa->port;
	toggle = (allmulti) ? (&port->allmulti) : (&port->promisc);

	if (*toggle != enabled) {
		*toggle = enabled;

		if ((sa->state == SFC_ADAPTER_STARTED) &&
		    (sfc_set_rx_mode(sa) != 0)) {
			*toggle = !(enabled);
			sfc_warn(sa, "Failed to %s %s mode",
				 ((enabled) ? "enable" : "disable"), desc);
		}
	}

	sfc_adapter_unlock(sa);
}
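
/*
 * Illustrative usage (not part of the driver): applications reach the
 * filter-mode wrappers below through the generic ethdev API, e.g.:
 *
 *	rte_eth_promiscuous_enable(port_id);
 *	rte_eth_allmulticast_disable(port_id);
 *
 * "port_id" stands for a hypothetical application port identifier.
 */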
"enable" : "disable"), desc); 301 } 302 } 303 304 sfc_adapter_unlock(sa); 305 } 306 307 static void 308 sfc_dev_promisc_enable(struct rte_eth_dev *dev) 309 { 310 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_TRUE); 311 } 312 313 static void 314 sfc_dev_promisc_disable(struct rte_eth_dev *dev) 315 { 316 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_FALSE); 317 } 318 319 static void 320 sfc_dev_allmulti_enable(struct rte_eth_dev *dev) 321 { 322 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_TRUE); 323 } 324 325 static void 326 sfc_dev_allmulti_disable(struct rte_eth_dev *dev) 327 { 328 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_FALSE); 329 } 330 331 static int 332 sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, 333 uint16_t nb_rx_desc, unsigned int socket_id, 334 const struct rte_eth_rxconf *rx_conf, 335 struct rte_mempool *mb_pool) 336 { 337 struct sfc_adapter *sa = dev->data->dev_private; 338 int rc; 339 340 sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u", 341 rx_queue_id, nb_rx_desc, socket_id); 342 343 sfc_adapter_lock(sa); 344 345 rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id, 346 rx_conf, mb_pool); 347 if (rc != 0) 348 goto fail_rx_qinit; 349 350 dev->data->rx_queues[rx_queue_id] = sa->rxq_info[rx_queue_id].rxq; 351 352 sfc_adapter_unlock(sa); 353 354 return 0; 355 356 fail_rx_qinit: 357 sfc_adapter_unlock(sa); 358 SFC_ASSERT(rc > 0); 359 return -rc; 360 } 361 362 static void 363 sfc_rx_queue_release(void *queue) 364 { 365 struct sfc_rxq *rxq = queue; 366 struct sfc_adapter *sa; 367 unsigned int sw_index; 368 369 if (rxq == NULL) 370 return; 371 372 sa = rxq->evq->sa; 373 sfc_adapter_lock(sa); 374 375 sw_index = sfc_rxq_sw_index(rxq); 376 377 sfc_log_init(sa, "RxQ=%u", sw_index); 378 379 sa->eth_dev->data->rx_queues[sw_index] = NULL; 380 381 sfc_rx_qfini(sa, sw_index); 382 383 sfc_adapter_unlock(sa); 384 } 385 386 static int 387 sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, 388 uint16_t nb_tx_desc, unsigned int socket_id, 389 const struct rte_eth_txconf *tx_conf) 390 { 391 struct sfc_adapter *sa = dev->data->dev_private; 392 int rc; 393 394 sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u", 395 tx_queue_id, nb_tx_desc, socket_id); 396 397 sfc_adapter_lock(sa); 398 399 rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf); 400 if (rc != 0) 401 goto fail_tx_qinit; 402 403 dev->data->tx_queues[tx_queue_id] = sa->txq_info[tx_queue_id].txq; 404 405 sfc_adapter_unlock(sa); 406 return 0; 407 408 fail_tx_qinit: 409 sfc_adapter_unlock(sa); 410 SFC_ASSERT(rc > 0); 411 return -rc; 412 } 413 414 static void 415 sfc_tx_queue_release(void *queue) 416 { 417 struct sfc_txq *txq = queue; 418 unsigned int sw_index; 419 struct sfc_adapter *sa; 420 421 if (txq == NULL) 422 return; 423 424 sw_index = sfc_txq_sw_index(txq); 425 426 SFC_ASSERT(txq->evq != NULL); 427 sa = txq->evq->sa; 428 429 sfc_log_init(sa, "TxQ = %u", sw_index); 430 431 sfc_adapter_lock(sa); 432 433 SFC_ASSERT(sw_index < sa->eth_dev->data->nb_tx_queues); 434 sa->eth_dev->data->tx_queues[sw_index] = NULL; 435 436 sfc_tx_qfini(sa, sw_index); 437 438 sfc_adapter_unlock(sa); 439 } 440 441 static void 442 sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 443 { 444 struct sfc_adapter *sa = dev->data->dev_private; 445 struct sfc_port *port = &sa->port; 446 uint64_t *mac_stats; 447 448 rte_spinlock_lock(&port->mac_stats_lock); 449 450 if (sfc_port_update_mac_stats(sa) != 0) 451 goto unlock; 452 453 mac_stats = 

static int
sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
	       unsigned int xstats_count)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_port *port = &sa->port;
	uint64_t *mac_stats;
	int rc;
	unsigned int i;
	int nstats = 0;

	rte_spinlock_lock(&port->mac_stats_lock);

	rc = sfc_port_update_mac_stats(sa);
	if (rc != 0) {
		SFC_ASSERT(rc > 0);
		nstats = -rc;
		goto unlock;
	}

	mac_stats = port->mac_stats_buf;

	for (i = 0; i < EFX_MAC_NSTATS; ++i) {
		if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
			if (xstats != NULL && nstats < (int)xstats_count) {
				xstats[nstats].id = nstats;
				xstats[nstats].value = mac_stats[i];
			}
			nstats++;
		}
	}

unlock:
	rte_spinlock_unlock(&port->mac_stats_lock);

	return nstats;
}

static int
sfc_xstats_get_names(struct rte_eth_dev *dev,
		     struct rte_eth_xstat_name *xstats_names,
		     unsigned int xstats_count)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_port *port = &sa->port;
	unsigned int i;
	unsigned int nstats = 0;

	for (i = 0; i < EFX_MAC_NSTATS; ++i) {
		if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
			if (xstats_names != NULL && nstats < xstats_count)
				strncpy(xstats_names[nstats].name,
					efx_mac_stat_name(sa->nic, i),
					sizeof(xstats_names[0].name));
			nstats++;
		}
	}

	return nstats;
}
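
/*
 * Illustrative usage (not part of the driver): both xstats callbacks
 * above return the required entry count when the array argument is NULL
 * or too small, so the usual application pattern is two calls each:
 *
 *	int n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *	(allocate n name and value entries, then)
 *	rte_eth_xstats_get_names(port_id, names, n);
 *	rte_eth_xstats_get(port_id, values, n);
 */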

static int
sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	unsigned int wanted_fc, link_fc;

	memset(fc_conf, 0, sizeof(*fc_conf));

	sfc_adapter_lock(sa);

	if (sa->state == SFC_ADAPTER_STARTED)
		efx_mac_fcntl_get(sa->nic, &wanted_fc, &link_fc);
	else
		link_fc = sa->port.flow_ctrl;

	switch (link_fc) {
	case 0:
		fc_conf->mode = RTE_FC_NONE;
		break;
	case EFX_FCNTL_RESPOND:
		fc_conf->mode = RTE_FC_RX_PAUSE;
		break;
	case EFX_FCNTL_GENERATE:
		fc_conf->mode = RTE_FC_TX_PAUSE;
		break;
	case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE):
		fc_conf->mode = RTE_FC_FULL;
		break;
	default:
		sfc_err(sa, "%s: unexpected flow control value %#x",
			__func__, link_fc);
	}

	fc_conf->autoneg = sa->port.flow_ctrl_autoneg;

	sfc_adapter_unlock(sa);

	return 0;
}

static int
sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_port *port = &sa->port;
	unsigned int fcntl;
	int rc;

	if (fc_conf->high_water != 0 || fc_conf->low_water != 0 ||
	    fc_conf->pause_time != 0 || fc_conf->send_xon != 0 ||
	    fc_conf->mac_ctrl_frame_fwd != 0) {
		sfc_err(sa, "unsupported flow control settings specified");
		rc = EINVAL;
		goto fail_inval;
	}

	switch (fc_conf->mode) {
	case RTE_FC_NONE:
		fcntl = 0;
		break;
	case RTE_FC_RX_PAUSE:
		fcntl = EFX_FCNTL_RESPOND;
		break;
	case RTE_FC_TX_PAUSE:
		fcntl = EFX_FCNTL_GENERATE;
		break;
	case RTE_FC_FULL:
		fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
		break;
	default:
		rc = EINVAL;
		goto fail_inval;
	}

	sfc_adapter_lock(sa);

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = efx_mac_fcntl_set(sa->nic, fcntl, fc_conf->autoneg);
		if (rc != 0)
			goto fail_mac_fcntl_set;
	}

	port->flow_ctrl = fcntl;
	port->flow_ctrl_autoneg = fc_conf->autoneg;

	sfc_adapter_unlock(sa);

	return 0;

fail_mac_fcntl_set:
	sfc_adapter_unlock(sa);
fail_inval:
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	size_t pdu = EFX_MAC_PDU(mtu);
	size_t old_pdu;
	int rc;

	sfc_log_init(sa, "mtu=%u", mtu);

	rc = EINVAL;
	if (pdu < EFX_MAC_PDU_MIN) {
		sfc_err(sa, "too small MTU %u (PDU size %u less than min %u)",
			(unsigned int)mtu, (unsigned int)pdu,
			EFX_MAC_PDU_MIN);
		goto fail_inval;
	}
	if (pdu > EFX_MAC_PDU_MAX) {
		sfc_err(sa, "too big MTU %u (PDU size %u greater than max %u)",
			(unsigned int)mtu, (unsigned int)pdu,
			EFX_MAC_PDU_MAX);
		goto fail_inval;
	}

	sfc_adapter_lock(sa);

	if (pdu != sa->port.pdu) {
		if (sa->state == SFC_ADAPTER_STARTED) {
			sfc_stop(sa);

			old_pdu = sa->port.pdu;
			sa->port.pdu = pdu;
			rc = sfc_start(sa);
			if (rc != 0)
				goto fail_start;
		} else {
			sa->port.pdu = pdu;
		}
	}

	/*
	 * The driver does not use it, but other PMDs update jumbo_frame
	 * flag and max_rx_pkt_len when MTU is set.
	 */
	dev->data->dev_conf.rxmode.jumbo_frame = (mtu > ETHER_MAX_LEN);
	dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu;

	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
	return 0;

fail_start:
	sa->port.pdu = old_pdu;
	if (sfc_start(sa) != 0)
		sfc_err(sa, "cannot start with neither new (%u) nor old (%u) "
			"PDU max size - port is stopped",
			(unsigned int)pdu, (unsigned int)old_pdu);
	sfc_adapter_unlock(sa);

fail_inval:
	sfc_log_init(sa, "failed %d", rc);
	SFC_ASSERT(rc > 0);
	return -rc;
}
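
/*
 * Illustrative usage (not part of the driver): callers of
 * rte_eth_dev_set_mtu() see the negated-errno convention used above,
 * e.g. an MTU whose PDU falls outside [EFX_MAC_PDU_MIN, EFX_MAC_PDU_MAX]
 * comes back as -EINVAL:
 *
 *	if (rte_eth_dev_set_mtu(port_id, 9000) != 0)
 *		(handle the error, e.g. MTU out of the supported range)
 */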

static void
sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	int rc;

	sfc_adapter_lock(sa);

	if (sa->state != SFC_ADAPTER_STARTED) {
		sfc_info(sa, "the port is not started");
		sfc_info(sa, "the new MAC address will be set on port start");

		goto unlock;
	}

	if (encp->enc_allow_set_mac_with_installed_filters) {
		rc = efx_mac_addr_set(sa->nic, mac_addr->addr_bytes);
		if (rc != 0) {
			sfc_err(sa, "cannot set MAC address (rc = %u)", rc);
			goto unlock;
		}

		/*
		 * Changing the MAC address by means of an MCDI request
		 * has no effect on received traffic, therefore
		 * we also need to update unicast filters.
		 */
		rc = sfc_set_rx_mode(sa);
		if (rc != 0)
			sfc_err(sa, "cannot set filter (rc = %u)", rc);
	} else {
		sfc_warn(sa, "cannot set MAC address with filters installed");
		sfc_warn(sa, "adapter will be restarted to pick the new MAC");
		sfc_warn(sa, "(some traffic may be dropped)");

		/*
		 * Since setting the MAC address with filters installed is
		 * not allowed on the adapter, simply restart the adapter:
		 * the new MAC address is then picked up from the device
		 * data and applied by the sfc_start() call.
		 */
		sfc_stop(sa);
		rc = sfc_start(sa);
		if (rc != 0)
			sfc_err(sa, "cannot restart adapter (rc = %u)", rc);
	}

unlock:
	sfc_adapter_unlock(sa);
}


static int
sfc_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set,
		     uint32_t nb_mc_addr)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	uint8_t *mc_addrs_p;
	uint8_t *mc_addrs;
	int rc;
	unsigned int i;

	if (nb_mc_addr > EFX_MAC_MULTICAST_LIST_MAX) {
		sfc_err(sa, "too many multicast addresses: %u > %u",
			nb_mc_addr, EFX_MAC_MULTICAST_LIST_MAX);
		return -EINVAL;
	}

	mc_addrs_p = rte_calloc("mc-addrs", nb_mc_addr, EFX_MAC_ADDR_LEN, 0);
	if (mc_addrs_p == NULL)
		return -ENOMEM;

	mc_addrs = mc_addrs_p;

	for (i = 0; i < nb_mc_addr; ++i) {
		(void)rte_memcpy(mc_addrs, mc_addr_set[i].addr_bytes,
				 EFX_MAC_ADDR_LEN);
		mc_addrs += EFX_MAC_ADDR_LEN;
	}

	rc = efx_mac_multicast_list_set(sa->nic, mc_addrs_p, nb_mc_addr);

	rte_free(mc_addrs_p);

	if (rc != 0)
		sfc_err(sa, "cannot set multicast address list (rc = %u)", rc);

	/* rc is zero on success, so the assertion must allow equality */
	SFC_ASSERT(rc >= 0);
	return -rc;
}

static void
sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		      struct rte_eth_rxq_info *qinfo)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;

	sfc_adapter_lock(sa);

	SFC_ASSERT(rx_queue_id < sa->rxq_count);

	rxq_info = &sa->rxq_info[rx_queue_id];
	rxq = rxq_info->rxq;
	SFC_ASSERT(rxq != NULL);

	qinfo->mp = rxq->refill_mb_pool;
	qinfo->conf.rx_free_thresh = rxq->refill_threshold;
	qinfo->conf.rx_drop_en = 1;
	qinfo->nb_desc = rxq_info->entries;

	sfc_adapter_unlock(sa);
}
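
/*
 * Illustrative usage (not part of the driver): sfc_rx_queue_info_get()
 * above backs the generic per-queue query, e.g.:
 *
 *	struct rte_eth_rxq_info qinfo;
 *
 *	rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
 */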

static const struct eth_dev_ops sfc_eth_dev_ops = {
	.dev_configure			= sfc_dev_configure,
	.dev_start			= sfc_dev_start,
	.dev_stop			= sfc_dev_stop,
	.dev_set_link_up		= sfc_dev_set_link_up,
	.dev_set_link_down		= sfc_dev_set_link_down,
	.dev_close			= sfc_dev_close,
	.promiscuous_enable		= sfc_dev_promisc_enable,
	.promiscuous_disable		= sfc_dev_promisc_disable,
	.allmulticast_enable		= sfc_dev_allmulti_enable,
	.allmulticast_disable		= sfc_dev_allmulti_disable,
	.link_update			= sfc_dev_link_update,
	.stats_get			= sfc_stats_get,
	.xstats_get			= sfc_xstats_get,
	.xstats_get_names		= sfc_xstats_get_names,
	.dev_infos_get			= sfc_dev_infos_get,
	.dev_supported_ptypes_get	= sfc_dev_supported_ptypes_get,
	.mtu_set			= sfc_dev_set_mtu,
	.rx_queue_setup			= sfc_rx_queue_setup,
	.rx_queue_release		= sfc_rx_queue_release,
	.tx_queue_setup			= sfc_tx_queue_setup,
	.tx_queue_release		= sfc_tx_queue_release,
	.flow_ctrl_get			= sfc_flow_ctrl_get,
	.flow_ctrl_set			= sfc_flow_ctrl_set,
	.mac_addr_set			= sfc_mac_addr_set,
	.set_mc_addr_list		= sfc_set_mc_addr_list,
	.rxq_info_get			= sfc_rx_queue_info_get,
};
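
/*
 * Editorial note: the ethdev layer dispatches the generic API through
 * the table above, e.g. rte_eth_dev_info_get() lands in
 * sfc_dev_infos_get(). Callbacks left unset (for example, there is no
 * .txq_info_get here) typically make the corresponding generic call
 * fail with -ENOTSUP.
 */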

static int
sfc_eth_dev_init(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(dev);
	int rc;
	const efx_nic_cfg_t *encp;
	const struct ether_addr *from;

	/* Required for logging */
	sa->eth_dev = dev;

	/* Copy PCI device info to the dev->data */
	rte_eth_copy_pci_info(dev, pci_dev);

	rc = sfc_kvargs_parse(sa);
	if (rc != 0)
		goto fail_kvargs_parse;

	rc = sfc_kvargs_process(sa, SFC_KVARG_DEBUG_INIT,
				sfc_kvarg_bool_handler, &sa->debug_init);
	if (rc != 0)
		goto fail_kvarg_debug_init;

	sfc_log_init(sa, "entry");

	dev->data->mac_addrs = rte_zmalloc("sfc", ETHER_ADDR_LEN, 0);
	if (dev->data->mac_addrs == NULL) {
		rc = ENOMEM;
		goto fail_mac_addrs;
	}

	sfc_adapter_lock_init(sa);
	sfc_adapter_lock(sa);

	sfc_log_init(sa, "attaching");
	rc = sfc_attach(sa);
	if (rc != 0)
		goto fail_attach;

	encp = efx_nic_cfg_get(sa->nic);

	/*
	 * ether_addr_copy() takes (from, to), the reverse of the Linux
	 * kernel's argument order. Copy from the NIC config to the
	 * Ethernet device data.
	 */
	from = (const struct ether_addr *)(encp->enc_mac_addr);
	ether_addr_copy(from, &dev->data->mac_addrs[0]);

	dev->dev_ops = &sfc_eth_dev_ops;
	dev->rx_pkt_burst = &sfc_recv_pkts;
	dev->tx_pkt_burst = &sfc_xmit_pkts;

	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
	return 0;

fail_attach:
	sfc_adapter_unlock(sa);
	sfc_adapter_lock_fini(sa);
	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

fail_mac_addrs:
fail_kvarg_debug_init:
	sfc_kvargs_cleanup(sa);

fail_kvargs_parse:
	sfc_log_init(sa, "failed %d", rc);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_eth_dev_uninit(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);

	sfc_detach(sa);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	sfc_kvargs_cleanup(sa);

	sfc_adapter_unlock(sa);
	sfc_adapter_lock_fini(sa);

	sfc_log_init(sa, "done");

	/* Required for logging, so cleanup last */
	sa->eth_dev = NULL;
	return 0;
}

static const struct rte_pci_id pci_id_sfc_efx_map[] = {
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) },
	{ .vendor_id = 0 /* sentinel */ }
};

static struct eth_driver sfc_efx_pmd = {
	.pci_drv = {
		.id_table = pci_id_sfc_efx_map,
		.drv_flags =
			RTE_PCI_DRV_INTR_LSC |
			RTE_PCI_DRV_NEED_MAPPING,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = sfc_eth_dev_init,
	.eth_dev_uninit = sfc_eth_dev_uninit,
	.dev_private_size = sizeof(struct sfc_adapter),
};

RTE_PMD_REGISTER_PCI(net_sfc_efx, sfc_efx_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map);
RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx,
	SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " "
	SFC_KVARG_MCDI_LOGGING "=" SFC_KVARG_VALUES_BOOL " "
	SFC_KVARG_DEBUG_INIT "=" SFC_KVARG_VALUES_BOOL);
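
/*
 * Editorial example (not part of the driver): the parameters registered
 * above are passed per device on the EAL command line, e.g., assuming
 * SFC_KVARG_DEBUG_INIT expands to "debug_init" and SFC_KVARG_MCDI_LOGGING
 * to "mcdi_logging" (see sfc_kvargs.h for the actual strings):
 *
 *	testpmd -w 0000:01:00.0,debug_init=1,mcdi_logging=1 -- ...
 */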