163d588ffSAndrew Rybchenko /*- 263d588ffSAndrew Rybchenko * Copyright (c) 2016 Solarflare Communications Inc. 363d588ffSAndrew Rybchenko * All rights reserved. 463d588ffSAndrew Rybchenko * 563d588ffSAndrew Rybchenko * This software was jointly developed between OKTET Labs (under contract 663d588ffSAndrew Rybchenko * for Solarflare) and Solarflare Communications, Inc. 763d588ffSAndrew Rybchenko * 863d588ffSAndrew Rybchenko * Redistribution and use in source and binary forms, with or without 963d588ffSAndrew Rybchenko * modification, are permitted provided that the following conditions are met: 1063d588ffSAndrew Rybchenko * 1163d588ffSAndrew Rybchenko * 1. Redistributions of source code must retain the above copyright notice, 1263d588ffSAndrew Rybchenko * this list of conditions and the following disclaimer. 1363d588ffSAndrew Rybchenko * 2. Redistributions in binary form must reproduce the above copyright notice, 1463d588ffSAndrew Rybchenko * this list of conditions and the following disclaimer in the documentation 1563d588ffSAndrew Rybchenko * and/or other materials provided with the distribution. 1663d588ffSAndrew Rybchenko * 1763d588ffSAndrew Rybchenko * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 1863d588ffSAndrew Rybchenko * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 1963d588ffSAndrew Rybchenko * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 2063d588ffSAndrew Rybchenko * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR 2163d588ffSAndrew Rybchenko * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 2263d588ffSAndrew Rybchenko * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 2363d588ffSAndrew Rybchenko * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 2463d588ffSAndrew Rybchenko * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 2563d588ffSAndrew Rybchenko * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 2663d588ffSAndrew Rybchenko * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, 2763d588ffSAndrew Rybchenko * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 2863d588ffSAndrew Rybchenko */ 2963d588ffSAndrew Rybchenko 3063d588ffSAndrew Rybchenko #include <rte_dev.h> 3163d588ffSAndrew Rybchenko #include <rte_ethdev.h> 3263d588ffSAndrew Rybchenko #include <rte_pci.h> 3363d588ffSAndrew Rybchenko 34ba641f20SAndrew Rybchenko #include "efx.h" 35ba641f20SAndrew Rybchenko 3663d588ffSAndrew Rybchenko #include "sfc.h" 3763d588ffSAndrew Rybchenko #include "sfc_debug.h" 3863d588ffSAndrew Rybchenko #include "sfc_log.h" 3963d588ffSAndrew Rybchenko #include "sfc_kvargs.h" 40886f8d8aSArtem Andreev #include "sfc_ev.h" 41ce35b05cSAndrew Rybchenko #include "sfc_rx.h" 42b1b7ad93SIvan Malov #include "sfc_tx.h" 4363d588ffSAndrew Rybchenko 4463d588ffSAndrew Rybchenko 4563d588ffSAndrew Rybchenko static void 4663d588ffSAndrew Rybchenko sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 4763d588ffSAndrew Rybchenko { 4863d588ffSAndrew Rybchenko struct sfc_adapter *sa = dev->data->dev_private; 4963d588ffSAndrew Rybchenko 5063d588ffSAndrew Rybchenko sfc_log_init(sa, "entry"); 5163d588ffSAndrew Rybchenko 5263d588ffSAndrew Rybchenko dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device); 5303ed2119SAndrew Rybchenko dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX; 54a8e64c6bSAndrew Rybchenko 55d23f3a89SAndrew Rybchenko /* 
Autonegotiation may be disabled */ 56d23f3a89SAndrew Rybchenko dev_info->speed_capa = ETH_LINK_SPEED_FIXED; 57d23f3a89SAndrew Rybchenko if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_1000FDX) 58d23f3a89SAndrew Rybchenko dev_info->speed_capa |= ETH_LINK_SPEED_1G; 59d23f3a89SAndrew Rybchenko if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_10000FDX) 60d23f3a89SAndrew Rybchenko dev_info->speed_capa |= ETH_LINK_SPEED_10G; 61d23f3a89SAndrew Rybchenko if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_40000FDX) 62d23f3a89SAndrew Rybchenko dev_info->speed_capa |= ETH_LINK_SPEED_40G; 63d23f3a89SAndrew Rybchenko 64ce35b05cSAndrew Rybchenko dev_info->max_rx_queues = sa->rxq_max; 65a8ad8cf8SIvan Malov dev_info->max_tx_queues = sa->txq_max; 66ce35b05cSAndrew Rybchenko 67a8e64c6bSAndrew Rybchenko /* By default packets are dropped if no descriptors are available */ 68a8e64c6bSAndrew Rybchenko dev_info->default_rxconf.rx_drop_en = 1; 69a8e64c6bSAndrew Rybchenko 70a8ad8cf8SIvan Malov dev_info->tx_offload_capa = 71a8ad8cf8SIvan Malov DEV_TX_OFFLOAD_IPV4_CKSUM | 72a8ad8cf8SIvan Malov DEV_TX_OFFLOAD_UDP_CKSUM | 73a8ad8cf8SIvan Malov DEV_TX_OFFLOAD_TCP_CKSUM; 74a8ad8cf8SIvan Malov 75a8ad8cf8SIvan Malov dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOVLANOFFL | 76a8ad8cf8SIvan Malov ETH_TXQ_FLAGS_NOXSUMSCTP; 77a8ad8cf8SIvan Malov 78a8e64c6bSAndrew Rybchenko dev_info->rx_desc_lim.nb_max = EFX_RXQ_MAXNDESCS; 79a8e64c6bSAndrew Rybchenko dev_info->rx_desc_lim.nb_min = EFX_RXQ_MINNDESCS; 80a8e64c6bSAndrew Rybchenko /* The RXQ hardware requires that the descriptor count is a power 81a8e64c6bSAndrew Rybchenko * of 2, but rx_desc_lim cannot properly describe that constraint. 
82a8e64c6bSAndrew Rybchenko */ 83a8e64c6bSAndrew Rybchenko dev_info->rx_desc_lim.nb_align = EFX_RXQ_MINNDESCS; 84a8ad8cf8SIvan Malov 85a8ad8cf8SIvan Malov dev_info->tx_desc_lim.nb_max = sa->txq_max_entries; 86a8ad8cf8SIvan Malov dev_info->tx_desc_lim.nb_min = EFX_TXQ_MINNDESCS; 87a8ad8cf8SIvan Malov /* 88a8ad8cf8SIvan Malov * The TXQ hardware requires that the descriptor count is a power 89a8ad8cf8SIvan Malov * of 2, but tx_desc_lim cannot properly describe that constraint 90a8ad8cf8SIvan Malov */ 91a8ad8cf8SIvan Malov dev_info->tx_desc_lim.nb_align = EFX_TXQ_MINNDESCS; 9263d588ffSAndrew Rybchenko } 9363d588ffSAndrew Rybchenko 94aaa3f5f0SAndrew Rybchenko static int 95aaa3f5f0SAndrew Rybchenko sfc_dev_configure(struct rte_eth_dev *dev) 96aaa3f5f0SAndrew Rybchenko { 97aaa3f5f0SAndrew Rybchenko struct rte_eth_dev_data *dev_data = dev->data; 98aaa3f5f0SAndrew Rybchenko struct sfc_adapter *sa = dev_data->dev_private; 99aaa3f5f0SAndrew Rybchenko int rc; 100aaa3f5f0SAndrew Rybchenko 101aaa3f5f0SAndrew Rybchenko sfc_log_init(sa, "entry n_rxq=%u n_txq=%u", 102aaa3f5f0SAndrew Rybchenko dev_data->nb_rx_queues, dev_data->nb_tx_queues); 103aaa3f5f0SAndrew Rybchenko 104aaa3f5f0SAndrew Rybchenko sfc_adapter_lock(sa); 105aaa3f5f0SAndrew Rybchenko switch (sa->state) { 106aaa3f5f0SAndrew Rybchenko case SFC_ADAPTER_CONFIGURED: 107aaa3f5f0SAndrew Rybchenko sfc_close(sa); 108aaa3f5f0SAndrew Rybchenko SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED); 109aaa3f5f0SAndrew Rybchenko /* FALLTHROUGH */ 110aaa3f5f0SAndrew Rybchenko case SFC_ADAPTER_INITIALIZED: 111aaa3f5f0SAndrew Rybchenko rc = sfc_configure(sa); 112aaa3f5f0SAndrew Rybchenko break; 113aaa3f5f0SAndrew Rybchenko default: 114aaa3f5f0SAndrew Rybchenko sfc_err(sa, "unexpected adapter state %u to configure", 115aaa3f5f0SAndrew Rybchenko sa->state); 116aaa3f5f0SAndrew Rybchenko rc = EINVAL; 117aaa3f5f0SAndrew Rybchenko break; 118aaa3f5f0SAndrew Rybchenko } 119aaa3f5f0SAndrew Rybchenko sfc_adapter_unlock(sa); 120aaa3f5f0SAndrew 
Rybchenko 121aaa3f5f0SAndrew Rybchenko sfc_log_init(sa, "done %d", rc); 122aaa3f5f0SAndrew Rybchenko SFC_ASSERT(rc >= 0); 123aaa3f5f0SAndrew Rybchenko return -rc; 124aaa3f5f0SAndrew Rybchenko } 125aaa3f5f0SAndrew Rybchenko 12693fcf09bSAndrew Rybchenko static int 12793fcf09bSAndrew Rybchenko sfc_dev_start(struct rte_eth_dev *dev) 12893fcf09bSAndrew Rybchenko { 12993fcf09bSAndrew Rybchenko struct sfc_adapter *sa = dev->data->dev_private; 13093fcf09bSAndrew Rybchenko int rc; 13193fcf09bSAndrew Rybchenko 13293fcf09bSAndrew Rybchenko sfc_log_init(sa, "entry"); 13393fcf09bSAndrew Rybchenko 13493fcf09bSAndrew Rybchenko sfc_adapter_lock(sa); 13593fcf09bSAndrew Rybchenko rc = sfc_start(sa); 13693fcf09bSAndrew Rybchenko sfc_adapter_unlock(sa); 13793fcf09bSAndrew Rybchenko 13893fcf09bSAndrew Rybchenko sfc_log_init(sa, "done %d", rc); 13993fcf09bSAndrew Rybchenko SFC_ASSERT(rc >= 0); 14093fcf09bSAndrew Rybchenko return -rc; 14193fcf09bSAndrew Rybchenko } 14293fcf09bSAndrew Rybchenko 143886f8d8aSArtem Andreev static int 144886f8d8aSArtem Andreev sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) 145886f8d8aSArtem Andreev { 146886f8d8aSArtem Andreev struct sfc_adapter *sa = dev->data->dev_private; 147886f8d8aSArtem Andreev struct rte_eth_link *dev_link = &dev->data->dev_link; 148886f8d8aSArtem Andreev struct rte_eth_link old_link; 149886f8d8aSArtem Andreev struct rte_eth_link current_link; 150886f8d8aSArtem Andreev 151886f8d8aSArtem Andreev sfc_log_init(sa, "entry"); 152886f8d8aSArtem Andreev 153886f8d8aSArtem Andreev if (sa->state != SFC_ADAPTER_STARTED) 154886f8d8aSArtem Andreev return 0; 155886f8d8aSArtem Andreev 156886f8d8aSArtem Andreev retry: 157886f8d8aSArtem Andreev EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t)); 158886f8d8aSArtem Andreev *(int64_t *)&old_link = rte_atomic64_read((rte_atomic64_t *)dev_link); 159886f8d8aSArtem Andreev 160886f8d8aSArtem Andreev if (wait_to_complete) { 161886f8d8aSArtem Andreev efx_link_mode_t link_mode; 
162886f8d8aSArtem Andreev 163886f8d8aSArtem Andreev efx_port_poll(sa->nic, &link_mode); 164886f8d8aSArtem Andreev sfc_port_link_mode_to_info(link_mode, ¤t_link); 165886f8d8aSArtem Andreev 166886f8d8aSArtem Andreev if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link, 167886f8d8aSArtem Andreev *(uint64_t *)&old_link, 168886f8d8aSArtem Andreev *(uint64_t *)¤t_link)) 169886f8d8aSArtem Andreev goto retry; 170886f8d8aSArtem Andreev } else { 171886f8d8aSArtem Andreev sfc_ev_mgmt_qpoll(sa); 172886f8d8aSArtem Andreev *(int64_t *)¤t_link = 173886f8d8aSArtem Andreev rte_atomic64_read((rte_atomic64_t *)dev_link); 174886f8d8aSArtem Andreev } 175886f8d8aSArtem Andreev 176886f8d8aSArtem Andreev if (old_link.link_status != current_link.link_status) 177886f8d8aSArtem Andreev sfc_info(sa, "Link status is %s", 178886f8d8aSArtem Andreev current_link.link_status ? "UP" : "DOWN"); 179886f8d8aSArtem Andreev 180886f8d8aSArtem Andreev return old_link.link_status == current_link.link_status ? 0 : -1; 181886f8d8aSArtem Andreev } 182886f8d8aSArtem Andreev 18393fcf09bSAndrew Rybchenko static void 18493fcf09bSAndrew Rybchenko sfc_dev_stop(struct rte_eth_dev *dev) 18593fcf09bSAndrew Rybchenko { 18693fcf09bSAndrew Rybchenko struct sfc_adapter *sa = dev->data->dev_private; 18793fcf09bSAndrew Rybchenko 18893fcf09bSAndrew Rybchenko sfc_log_init(sa, "entry"); 18993fcf09bSAndrew Rybchenko 19093fcf09bSAndrew Rybchenko sfc_adapter_lock(sa); 19193fcf09bSAndrew Rybchenko sfc_stop(sa); 19293fcf09bSAndrew Rybchenko sfc_adapter_unlock(sa); 19393fcf09bSAndrew Rybchenko 19493fcf09bSAndrew Rybchenko sfc_log_init(sa, "done"); 19593fcf09bSAndrew Rybchenko } 19693fcf09bSAndrew Rybchenko 1972a05f337SArtem Andreev static int 1982a05f337SArtem Andreev sfc_dev_set_link_up(struct rte_eth_dev *dev) 1992a05f337SArtem Andreev { 2002a05f337SArtem Andreev struct sfc_adapter *sa = dev->data->dev_private; 2012a05f337SArtem Andreev int rc; 2022a05f337SArtem Andreev 2032a05f337SArtem Andreev sfc_log_init(sa, "entry"); 
2042a05f337SArtem Andreev 2052a05f337SArtem Andreev sfc_adapter_lock(sa); 2062a05f337SArtem Andreev rc = sfc_start(sa); 2072a05f337SArtem Andreev sfc_adapter_unlock(sa); 2082a05f337SArtem Andreev 2092a05f337SArtem Andreev SFC_ASSERT(rc >= 0); 2102a05f337SArtem Andreev return -rc; 2112a05f337SArtem Andreev } 2122a05f337SArtem Andreev 2132a05f337SArtem Andreev static int 2142a05f337SArtem Andreev sfc_dev_set_link_down(struct rte_eth_dev *dev) 2152a05f337SArtem Andreev { 2162a05f337SArtem Andreev struct sfc_adapter *sa = dev->data->dev_private; 2172a05f337SArtem Andreev 2182a05f337SArtem Andreev sfc_log_init(sa, "entry"); 2192a05f337SArtem Andreev 2202a05f337SArtem Andreev sfc_adapter_lock(sa); 2212a05f337SArtem Andreev sfc_stop(sa); 2222a05f337SArtem Andreev sfc_adapter_unlock(sa); 2232a05f337SArtem Andreev 2242a05f337SArtem Andreev return 0; 2252a05f337SArtem Andreev } 2262a05f337SArtem Andreev 227aaa3f5f0SAndrew Rybchenko static void 228aaa3f5f0SAndrew Rybchenko sfc_dev_close(struct rte_eth_dev *dev) 229aaa3f5f0SAndrew Rybchenko { 230aaa3f5f0SAndrew Rybchenko struct sfc_adapter *sa = dev->data->dev_private; 231aaa3f5f0SAndrew Rybchenko 232aaa3f5f0SAndrew Rybchenko sfc_log_init(sa, "entry"); 233aaa3f5f0SAndrew Rybchenko 234aaa3f5f0SAndrew Rybchenko sfc_adapter_lock(sa); 235aaa3f5f0SAndrew Rybchenko switch (sa->state) { 23693fcf09bSAndrew Rybchenko case SFC_ADAPTER_STARTED: 23793fcf09bSAndrew Rybchenko sfc_stop(sa); 23893fcf09bSAndrew Rybchenko SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED); 23993fcf09bSAndrew Rybchenko /* FALLTHROUGH */ 240aaa3f5f0SAndrew Rybchenko case SFC_ADAPTER_CONFIGURED: 241aaa3f5f0SAndrew Rybchenko sfc_close(sa); 242aaa3f5f0SAndrew Rybchenko SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED); 243aaa3f5f0SAndrew Rybchenko /* FALLTHROUGH */ 244aaa3f5f0SAndrew Rybchenko case SFC_ADAPTER_INITIALIZED: 245aaa3f5f0SAndrew Rybchenko break; 246aaa3f5f0SAndrew Rybchenko default: 247aaa3f5f0SAndrew Rybchenko sfc_err(sa, "unexpected adapter state %u on 
close", sa->state); 248aaa3f5f0SAndrew Rybchenko break; 249aaa3f5f0SAndrew Rybchenko } 250aaa3f5f0SAndrew Rybchenko sfc_adapter_unlock(sa); 251aaa3f5f0SAndrew Rybchenko 252aaa3f5f0SAndrew Rybchenko sfc_log_init(sa, "done"); 253aaa3f5f0SAndrew Rybchenko } 254aaa3f5f0SAndrew Rybchenko 255*f3de3840SIvan Malov static void 256*f3de3840SIvan Malov sfc_dev_filter_set(struct rte_eth_dev *dev, enum sfc_dev_filter_mode mode, 257*f3de3840SIvan Malov boolean_t enabled) 258*f3de3840SIvan Malov { 259*f3de3840SIvan Malov struct sfc_port *port; 260*f3de3840SIvan Malov boolean_t *toggle; 261*f3de3840SIvan Malov struct sfc_adapter *sa = dev->data->dev_private; 262*f3de3840SIvan Malov boolean_t allmulti = (mode == SFC_DEV_FILTER_MODE_ALLMULTI); 263*f3de3840SIvan Malov const char *desc = (allmulti) ? "all-multi" : "promiscuous"; 264*f3de3840SIvan Malov 265*f3de3840SIvan Malov sfc_adapter_lock(sa); 266*f3de3840SIvan Malov 267*f3de3840SIvan Malov port = &sa->port; 268*f3de3840SIvan Malov toggle = (allmulti) ? (&port->allmulti) : (&port->promisc); 269*f3de3840SIvan Malov 270*f3de3840SIvan Malov if (*toggle != enabled) { 271*f3de3840SIvan Malov *toggle = enabled; 272*f3de3840SIvan Malov 273*f3de3840SIvan Malov if ((sa->state == SFC_ADAPTER_STARTED) && 274*f3de3840SIvan Malov (sfc_set_rx_mode(sa) != 0)) { 275*f3de3840SIvan Malov *toggle = !(enabled); 276*f3de3840SIvan Malov sfc_warn(sa, "Failed to %s %s mode", 277*f3de3840SIvan Malov ((enabled) ? 
"enable" : "disable"), desc); 278*f3de3840SIvan Malov } 279*f3de3840SIvan Malov } 280*f3de3840SIvan Malov 281*f3de3840SIvan Malov sfc_adapter_unlock(sa); 282*f3de3840SIvan Malov } 283*f3de3840SIvan Malov 284*f3de3840SIvan Malov static void 285*f3de3840SIvan Malov sfc_dev_promisc_enable(struct rte_eth_dev *dev) 286*f3de3840SIvan Malov { 287*f3de3840SIvan Malov sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_TRUE); 288*f3de3840SIvan Malov } 289*f3de3840SIvan Malov 290*f3de3840SIvan Malov static void 291*f3de3840SIvan Malov sfc_dev_promisc_disable(struct rte_eth_dev *dev) 292*f3de3840SIvan Malov { 293*f3de3840SIvan Malov sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_FALSE); 294*f3de3840SIvan Malov } 295*f3de3840SIvan Malov 296*f3de3840SIvan Malov static void 297*f3de3840SIvan Malov sfc_dev_allmulti_enable(struct rte_eth_dev *dev) 298*f3de3840SIvan Malov { 299*f3de3840SIvan Malov sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_TRUE); 300*f3de3840SIvan Malov } 301*f3de3840SIvan Malov 302*f3de3840SIvan Malov static void 303*f3de3840SIvan Malov sfc_dev_allmulti_disable(struct rte_eth_dev *dev) 304*f3de3840SIvan Malov { 305*f3de3840SIvan Malov sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_FALSE); 306*f3de3840SIvan Malov } 307*f3de3840SIvan Malov 308ce35b05cSAndrew Rybchenko static int 309ce35b05cSAndrew Rybchenko sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, 310ce35b05cSAndrew Rybchenko uint16_t nb_rx_desc, unsigned int socket_id, 311ce35b05cSAndrew Rybchenko const struct rte_eth_rxconf *rx_conf, 312ce35b05cSAndrew Rybchenko struct rte_mempool *mb_pool) 313ce35b05cSAndrew Rybchenko { 314ce35b05cSAndrew Rybchenko struct sfc_adapter *sa = dev->data->dev_private; 315ce35b05cSAndrew Rybchenko int rc; 316ce35b05cSAndrew Rybchenko 317ce35b05cSAndrew Rybchenko sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u", 318ce35b05cSAndrew Rybchenko rx_queue_id, nb_rx_desc, socket_id); 319ce35b05cSAndrew Rybchenko 320ce35b05cSAndrew 
Rybchenko sfc_adapter_lock(sa); 321ce35b05cSAndrew Rybchenko 322ce35b05cSAndrew Rybchenko rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id, 323ce35b05cSAndrew Rybchenko rx_conf, mb_pool); 324ce35b05cSAndrew Rybchenko if (rc != 0) 325ce35b05cSAndrew Rybchenko goto fail_rx_qinit; 326ce35b05cSAndrew Rybchenko 327ce35b05cSAndrew Rybchenko dev->data->rx_queues[rx_queue_id] = sa->rxq_info[rx_queue_id].rxq; 328ce35b05cSAndrew Rybchenko 329ce35b05cSAndrew Rybchenko sfc_adapter_unlock(sa); 330ce35b05cSAndrew Rybchenko 331ce35b05cSAndrew Rybchenko return 0; 332ce35b05cSAndrew Rybchenko 333ce35b05cSAndrew Rybchenko fail_rx_qinit: 334ce35b05cSAndrew Rybchenko sfc_adapter_unlock(sa); 335ce35b05cSAndrew Rybchenko SFC_ASSERT(rc > 0); 336ce35b05cSAndrew Rybchenko return -rc; 337ce35b05cSAndrew Rybchenko } 338ce35b05cSAndrew Rybchenko 339ce35b05cSAndrew Rybchenko static void 340ce35b05cSAndrew Rybchenko sfc_rx_queue_release(void *queue) 341ce35b05cSAndrew Rybchenko { 342ce35b05cSAndrew Rybchenko struct sfc_rxq *rxq = queue; 343ce35b05cSAndrew Rybchenko struct sfc_adapter *sa; 344ce35b05cSAndrew Rybchenko unsigned int sw_index; 345ce35b05cSAndrew Rybchenko 346ce35b05cSAndrew Rybchenko if (rxq == NULL) 347ce35b05cSAndrew Rybchenko return; 348ce35b05cSAndrew Rybchenko 349ce35b05cSAndrew Rybchenko sa = rxq->evq->sa; 350ce35b05cSAndrew Rybchenko sfc_adapter_lock(sa); 351ce35b05cSAndrew Rybchenko 352ce35b05cSAndrew Rybchenko sw_index = sfc_rxq_sw_index(rxq); 353ce35b05cSAndrew Rybchenko 354ce35b05cSAndrew Rybchenko sfc_log_init(sa, "RxQ=%u", sw_index); 355ce35b05cSAndrew Rybchenko 356ce35b05cSAndrew Rybchenko sa->eth_dev->data->rx_queues[sw_index] = NULL; 357ce35b05cSAndrew Rybchenko 358ce35b05cSAndrew Rybchenko sfc_rx_qfini(sa, sw_index); 359ce35b05cSAndrew Rybchenko 360ce35b05cSAndrew Rybchenko sfc_adapter_unlock(sa); 361ce35b05cSAndrew Rybchenko } 362ce35b05cSAndrew Rybchenko 363b1b7ad93SIvan Malov static int 364b1b7ad93SIvan Malov sfc_tx_queue_setup(struct rte_eth_dev *dev, 
uint16_t tx_queue_id, 365b1b7ad93SIvan Malov uint16_t nb_tx_desc, unsigned int socket_id, 366b1b7ad93SIvan Malov const struct rte_eth_txconf *tx_conf) 367b1b7ad93SIvan Malov { 368b1b7ad93SIvan Malov struct sfc_adapter *sa = dev->data->dev_private; 369b1b7ad93SIvan Malov int rc; 370b1b7ad93SIvan Malov 371b1b7ad93SIvan Malov sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u", 372b1b7ad93SIvan Malov tx_queue_id, nb_tx_desc, socket_id); 373b1b7ad93SIvan Malov 374b1b7ad93SIvan Malov sfc_adapter_lock(sa); 375b1b7ad93SIvan Malov 376b1b7ad93SIvan Malov rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf); 377b1b7ad93SIvan Malov if (rc != 0) 378b1b7ad93SIvan Malov goto fail_tx_qinit; 379b1b7ad93SIvan Malov 380b1b7ad93SIvan Malov dev->data->tx_queues[tx_queue_id] = sa->txq_info[tx_queue_id].txq; 381b1b7ad93SIvan Malov 382b1b7ad93SIvan Malov sfc_adapter_unlock(sa); 383b1b7ad93SIvan Malov return 0; 384b1b7ad93SIvan Malov 385b1b7ad93SIvan Malov fail_tx_qinit: 386b1b7ad93SIvan Malov sfc_adapter_unlock(sa); 387b1b7ad93SIvan Malov SFC_ASSERT(rc > 0); 388b1b7ad93SIvan Malov return -rc; 389b1b7ad93SIvan Malov } 390b1b7ad93SIvan Malov 391b1b7ad93SIvan Malov static void 392b1b7ad93SIvan Malov sfc_tx_queue_release(void *queue) 393b1b7ad93SIvan Malov { 394b1b7ad93SIvan Malov struct sfc_txq *txq = queue; 395b1b7ad93SIvan Malov unsigned int sw_index; 396b1b7ad93SIvan Malov struct sfc_adapter *sa; 397b1b7ad93SIvan Malov 398b1b7ad93SIvan Malov if (txq == NULL) 399b1b7ad93SIvan Malov return; 400b1b7ad93SIvan Malov 401b1b7ad93SIvan Malov sw_index = sfc_txq_sw_index(txq); 402b1b7ad93SIvan Malov 403b1b7ad93SIvan Malov SFC_ASSERT(txq->evq != NULL); 404b1b7ad93SIvan Malov sa = txq->evq->sa; 405b1b7ad93SIvan Malov 406b1b7ad93SIvan Malov sfc_log_init(sa, "TxQ = %u", sw_index); 407b1b7ad93SIvan Malov 408b1b7ad93SIvan Malov sfc_adapter_lock(sa); 409b1b7ad93SIvan Malov 410b1b7ad93SIvan Malov SFC_ASSERT(sw_index < sa->eth_dev->data->nb_tx_queues); 411b1b7ad93SIvan Malov 
sa->eth_dev->data->tx_queues[sw_index] = NULL; 412b1b7ad93SIvan Malov 413b1b7ad93SIvan Malov sfc_tx_qfini(sa, sw_index); 414b1b7ad93SIvan Malov 415b1b7ad93SIvan Malov sfc_adapter_unlock(sa); 416b1b7ad93SIvan Malov } 417b1b7ad93SIvan Malov 4181caab2f1SAndrew Rybchenko static void 4191caab2f1SAndrew Rybchenko sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 4201caab2f1SAndrew Rybchenko { 4211caab2f1SAndrew Rybchenko struct sfc_adapter *sa = dev->data->dev_private; 4221caab2f1SAndrew Rybchenko struct sfc_port *port = &sa->port; 4231caab2f1SAndrew Rybchenko uint64_t *mac_stats; 4241caab2f1SAndrew Rybchenko 4251caab2f1SAndrew Rybchenko rte_spinlock_lock(&port->mac_stats_lock); 4261caab2f1SAndrew Rybchenko 4271caab2f1SAndrew Rybchenko if (sfc_port_update_mac_stats(sa) != 0) 4281caab2f1SAndrew Rybchenko goto unlock; 4291caab2f1SAndrew Rybchenko 4301caab2f1SAndrew Rybchenko mac_stats = port->mac_stats_buf; 4311caab2f1SAndrew Rybchenko 4321caab2f1SAndrew Rybchenko if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, 4331caab2f1SAndrew Rybchenko EFX_MAC_VADAPTER_RX_UNICAST_PACKETS)) { 4341caab2f1SAndrew Rybchenko stats->ipackets = 4351caab2f1SAndrew Rybchenko mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS] + 4361caab2f1SAndrew Rybchenko mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS] + 4371caab2f1SAndrew Rybchenko mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS]; 4381caab2f1SAndrew Rybchenko stats->opackets = 4391caab2f1SAndrew Rybchenko mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS] + 4401caab2f1SAndrew Rybchenko mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS] + 4411caab2f1SAndrew Rybchenko mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS]; 4421caab2f1SAndrew Rybchenko stats->ibytes = 4431caab2f1SAndrew Rybchenko mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_BYTES] + 4441caab2f1SAndrew Rybchenko mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES] + 4451caab2f1SAndrew Rybchenko mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES]; 4461caab2f1SAndrew Rybchenko stats->obytes = 
4471caab2f1SAndrew Rybchenko mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_BYTES] + 4481caab2f1SAndrew Rybchenko mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES] + 4491caab2f1SAndrew Rybchenko mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES]; 4501caab2f1SAndrew Rybchenko stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_OVERFLOW]; 4511caab2f1SAndrew Rybchenko stats->ierrors = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS]; 4521caab2f1SAndrew Rybchenko stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS]; 4531caab2f1SAndrew Rybchenko } else { 4541caab2f1SAndrew Rybchenko stats->ipackets = mac_stats[EFX_MAC_RX_PKTS]; 4551caab2f1SAndrew Rybchenko stats->opackets = mac_stats[EFX_MAC_TX_PKTS]; 4561caab2f1SAndrew Rybchenko stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS]; 4571caab2f1SAndrew Rybchenko stats->obytes = mac_stats[EFX_MAC_TX_OCTETS]; 4581caab2f1SAndrew Rybchenko /* 4591caab2f1SAndrew Rybchenko * Take into account stats which are whenever supported 4601caab2f1SAndrew Rybchenko * on EF10. If some stat is not supported by current 4611caab2f1SAndrew Rybchenko * firmware variant or HW revision, it is guaranteed 4621caab2f1SAndrew Rybchenko * to be zero in mac_stats. 
4631caab2f1SAndrew Rybchenko */ 4641caab2f1SAndrew Rybchenko stats->imissed = 4651caab2f1SAndrew Rybchenko mac_stats[EFX_MAC_RX_NODESC_DROP_CNT] + 4661caab2f1SAndrew Rybchenko mac_stats[EFX_MAC_PM_TRUNC_BB_OVERFLOW] + 4671caab2f1SAndrew Rybchenko mac_stats[EFX_MAC_PM_DISCARD_BB_OVERFLOW] + 4681caab2f1SAndrew Rybchenko mac_stats[EFX_MAC_PM_TRUNC_VFIFO_FULL] + 4691caab2f1SAndrew Rybchenko mac_stats[EFX_MAC_PM_DISCARD_VFIFO_FULL] + 4701caab2f1SAndrew Rybchenko mac_stats[EFX_MAC_PM_TRUNC_QBB] + 4711caab2f1SAndrew Rybchenko mac_stats[EFX_MAC_PM_DISCARD_QBB] + 4721caab2f1SAndrew Rybchenko mac_stats[EFX_MAC_PM_DISCARD_MAPPING] + 4731caab2f1SAndrew Rybchenko mac_stats[EFX_MAC_RXDP_Q_DISABLED_PKTS] + 4741caab2f1SAndrew Rybchenko mac_stats[EFX_MAC_RXDP_DI_DROPPED_PKTS]; 4751caab2f1SAndrew Rybchenko stats->ierrors = 4761caab2f1SAndrew Rybchenko mac_stats[EFX_MAC_RX_FCS_ERRORS] + 4771caab2f1SAndrew Rybchenko mac_stats[EFX_MAC_RX_ALIGN_ERRORS] + 4781caab2f1SAndrew Rybchenko mac_stats[EFX_MAC_RX_JABBER_PKTS]; 4791caab2f1SAndrew Rybchenko /* no oerrors counters supported on EF10 */ 4801caab2f1SAndrew Rybchenko } 4811caab2f1SAndrew Rybchenko 4821caab2f1SAndrew Rybchenko unlock: 4831caab2f1SAndrew Rybchenko rte_spinlock_unlock(&port->mac_stats_lock); 4841caab2f1SAndrew Rybchenko } 4851caab2f1SAndrew Rybchenko 4867b989176SAndrew Rybchenko static int 4877b989176SAndrew Rybchenko sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 4887b989176SAndrew Rybchenko unsigned int xstats_count) 4897b989176SAndrew Rybchenko { 4907b989176SAndrew Rybchenko struct sfc_adapter *sa = dev->data->dev_private; 4917b989176SAndrew Rybchenko struct sfc_port *port = &sa->port; 4927b989176SAndrew Rybchenko uint64_t *mac_stats; 4937b989176SAndrew Rybchenko int rc; 4947b989176SAndrew Rybchenko unsigned int i; 4957b989176SAndrew Rybchenko int nstats = 0; 4967b989176SAndrew Rybchenko 4977b989176SAndrew Rybchenko rte_spinlock_lock(&port->mac_stats_lock); 4987b989176SAndrew Rybchenko 
4997b989176SAndrew Rybchenko rc = sfc_port_update_mac_stats(sa); 5007b989176SAndrew Rybchenko if (rc != 0) { 5017b989176SAndrew Rybchenko SFC_ASSERT(rc > 0); 5027b989176SAndrew Rybchenko nstats = -rc; 5037b989176SAndrew Rybchenko goto unlock; 5047b989176SAndrew Rybchenko } 5057b989176SAndrew Rybchenko 5067b989176SAndrew Rybchenko mac_stats = port->mac_stats_buf; 5077b989176SAndrew Rybchenko 5087b989176SAndrew Rybchenko for (i = 0; i < EFX_MAC_NSTATS; ++i) { 5097b989176SAndrew Rybchenko if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) { 5107b989176SAndrew Rybchenko if (xstats != NULL && nstats < (int)xstats_count) { 5117b989176SAndrew Rybchenko xstats[nstats].id = nstats; 5127b989176SAndrew Rybchenko xstats[nstats].value = mac_stats[i]; 5137b989176SAndrew Rybchenko } 5147b989176SAndrew Rybchenko nstats++; 5157b989176SAndrew Rybchenko } 5167b989176SAndrew Rybchenko } 5177b989176SAndrew Rybchenko 5187b989176SAndrew Rybchenko unlock: 5197b989176SAndrew Rybchenko rte_spinlock_unlock(&port->mac_stats_lock); 5207b989176SAndrew Rybchenko 5217b989176SAndrew Rybchenko return nstats; 5227b989176SAndrew Rybchenko } 5237b989176SAndrew Rybchenko 5247b989176SAndrew Rybchenko static int 5257b989176SAndrew Rybchenko sfc_xstats_get_names(struct rte_eth_dev *dev, 5267b989176SAndrew Rybchenko struct rte_eth_xstat_name *xstats_names, 5277b989176SAndrew Rybchenko unsigned int xstats_count) 5287b989176SAndrew Rybchenko { 5297b989176SAndrew Rybchenko struct sfc_adapter *sa = dev->data->dev_private; 5307b989176SAndrew Rybchenko struct sfc_port *port = &sa->port; 5317b989176SAndrew Rybchenko unsigned int i; 5327b989176SAndrew Rybchenko unsigned int nstats = 0; 5337b989176SAndrew Rybchenko 5347b989176SAndrew Rybchenko for (i = 0; i < EFX_MAC_NSTATS; ++i) { 5357b989176SAndrew Rybchenko if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) { 5367b989176SAndrew Rybchenko if (xstats_names != NULL && nstats < xstats_count) 5377b989176SAndrew Rybchenko strncpy(xstats_names[nstats].name, 
5387b989176SAndrew Rybchenko efx_mac_stat_name(sa->nic, i), 5397b989176SAndrew Rybchenko sizeof(xstats_names[0].name)); 5407b989176SAndrew Rybchenko nstats++; 5417b989176SAndrew Rybchenko } 5427b989176SAndrew Rybchenko } 5437b989176SAndrew Rybchenko 5447b989176SAndrew Rybchenko return nstats; 5457b989176SAndrew Rybchenko } 5467b989176SAndrew Rybchenko 547cdbb29cfSAndrew Rybchenko static int 548cdbb29cfSAndrew Rybchenko sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 549cdbb29cfSAndrew Rybchenko { 550cdbb29cfSAndrew Rybchenko struct sfc_adapter *sa = dev->data->dev_private; 551cdbb29cfSAndrew Rybchenko unsigned int wanted_fc, link_fc; 552cdbb29cfSAndrew Rybchenko 553cdbb29cfSAndrew Rybchenko memset(fc_conf, 0, sizeof(*fc_conf)); 554cdbb29cfSAndrew Rybchenko 555cdbb29cfSAndrew Rybchenko sfc_adapter_lock(sa); 556cdbb29cfSAndrew Rybchenko 557cdbb29cfSAndrew Rybchenko if (sa->state == SFC_ADAPTER_STARTED) 558cdbb29cfSAndrew Rybchenko efx_mac_fcntl_get(sa->nic, &wanted_fc, &link_fc); 559cdbb29cfSAndrew Rybchenko else 560cdbb29cfSAndrew Rybchenko link_fc = sa->port.flow_ctrl; 561cdbb29cfSAndrew Rybchenko 562cdbb29cfSAndrew Rybchenko switch (link_fc) { 563cdbb29cfSAndrew Rybchenko case 0: 564cdbb29cfSAndrew Rybchenko fc_conf->mode = RTE_FC_NONE; 565cdbb29cfSAndrew Rybchenko break; 566cdbb29cfSAndrew Rybchenko case EFX_FCNTL_RESPOND: 567cdbb29cfSAndrew Rybchenko fc_conf->mode = RTE_FC_RX_PAUSE; 568cdbb29cfSAndrew Rybchenko break; 569cdbb29cfSAndrew Rybchenko case EFX_FCNTL_GENERATE: 570cdbb29cfSAndrew Rybchenko fc_conf->mode = RTE_FC_TX_PAUSE; 571cdbb29cfSAndrew Rybchenko break; 572cdbb29cfSAndrew Rybchenko case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE): 573cdbb29cfSAndrew Rybchenko fc_conf->mode = RTE_FC_FULL; 574cdbb29cfSAndrew Rybchenko break; 575cdbb29cfSAndrew Rybchenko default: 576cdbb29cfSAndrew Rybchenko sfc_err(sa, "%s: unexpected flow control value %#x", 577cdbb29cfSAndrew Rybchenko __func__, link_fc); 578cdbb29cfSAndrew Rybchenko } 
579cdbb29cfSAndrew Rybchenko 580cdbb29cfSAndrew Rybchenko fc_conf->autoneg = sa->port.flow_ctrl_autoneg; 581cdbb29cfSAndrew Rybchenko 582cdbb29cfSAndrew Rybchenko sfc_adapter_unlock(sa); 583cdbb29cfSAndrew Rybchenko 584cdbb29cfSAndrew Rybchenko return 0; 585cdbb29cfSAndrew Rybchenko } 586cdbb29cfSAndrew Rybchenko 587cdbb29cfSAndrew Rybchenko static int 588cdbb29cfSAndrew Rybchenko sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 589cdbb29cfSAndrew Rybchenko { 590cdbb29cfSAndrew Rybchenko struct sfc_adapter *sa = dev->data->dev_private; 591cdbb29cfSAndrew Rybchenko struct sfc_port *port = &sa->port; 592cdbb29cfSAndrew Rybchenko unsigned int fcntl; 593cdbb29cfSAndrew Rybchenko int rc; 594cdbb29cfSAndrew Rybchenko 595cdbb29cfSAndrew Rybchenko if (fc_conf->high_water != 0 || fc_conf->low_water != 0 || 596cdbb29cfSAndrew Rybchenko fc_conf->pause_time != 0 || fc_conf->send_xon != 0 || 597cdbb29cfSAndrew Rybchenko fc_conf->mac_ctrl_frame_fwd != 0) { 598cdbb29cfSAndrew Rybchenko sfc_err(sa, "unsupported flow control settings specified"); 599cdbb29cfSAndrew Rybchenko rc = EINVAL; 600cdbb29cfSAndrew Rybchenko goto fail_inval; 601cdbb29cfSAndrew Rybchenko } 602cdbb29cfSAndrew Rybchenko 603cdbb29cfSAndrew Rybchenko switch (fc_conf->mode) { 604cdbb29cfSAndrew Rybchenko case RTE_FC_NONE: 605cdbb29cfSAndrew Rybchenko fcntl = 0; 606cdbb29cfSAndrew Rybchenko break; 607cdbb29cfSAndrew Rybchenko case RTE_FC_RX_PAUSE: 608cdbb29cfSAndrew Rybchenko fcntl = EFX_FCNTL_RESPOND; 609cdbb29cfSAndrew Rybchenko break; 610cdbb29cfSAndrew Rybchenko case RTE_FC_TX_PAUSE: 611cdbb29cfSAndrew Rybchenko fcntl = EFX_FCNTL_GENERATE; 612cdbb29cfSAndrew Rybchenko break; 613cdbb29cfSAndrew Rybchenko case RTE_FC_FULL: 614cdbb29cfSAndrew Rybchenko fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE; 615cdbb29cfSAndrew Rybchenko break; 616cdbb29cfSAndrew Rybchenko default: 617cdbb29cfSAndrew Rybchenko rc = EINVAL; 618cdbb29cfSAndrew Rybchenko goto fail_inval; 619cdbb29cfSAndrew Rybchenko 
	}

	sfc_adapter_lock(sa);

	/*
	 * Tail of sfc_flow_ctrl_set() (function opens above this chunk).
	 * Push the new flow control mode to the MAC only if the adapter is
	 * started; the values remembered below are presumably applied on the
	 * next start otherwise -- confirm in sfc_start()/port start path.
	 */
	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = efx_mac_fcntl_set(sa->nic, fcntl, fc_conf->autoneg);
		if (rc != 0)
			goto fail_mac_fcntl_set;
	}

	/* Remember the requested settings in the per-port configuration */
	port->flow_ctrl = fcntl;
	port->flow_ctrl_autoneg = fc_conf->autoneg;

	sfc_adapter_unlock(sa);

	return 0;

fail_mac_fcntl_set:
	sfc_adapter_unlock(sa);
fail_inval:
	/* rc holds a positive errno; ethdev callbacks return negative errno */
	SFC_ASSERT(rc > 0);
	return -rc;
}

/*
 * rte_eth_dev .mtu_set callback.
 *
 * Validate that the MAC PDU derived from the requested MTU fits into
 * [EFX_MAC_PDU_MIN, EFX_MAC_PDU_MAX].  If the PDU actually changes while
 * the adapter is started, the adapter is stopped and restarted with the
 * new PDU; on restart failure the old PDU is restored, and if even that
 * restart fails the port is left stopped.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int
sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	/* PDU = MTU plus Ethernet framing overhead (see EFX_MAC_PDU) */
	size_t pdu = EFX_MAC_PDU(mtu);
	size_t old_pdu;
	int rc;

	sfc_log_init(sa, "mtu=%u", mtu);

	rc = EINVAL;
	if (pdu < EFX_MAC_PDU_MIN) {
		sfc_err(sa, "too small MTU %u (PDU size %u less than min %u)",
			(unsigned int)mtu, (unsigned int)pdu,
			EFX_MAC_PDU_MIN);
		goto fail_inval;
	}
	if (pdu > EFX_MAC_PDU_MAX) {
		sfc_err(sa, "too big MTU %u (PDU size %u greater than max %u)",
			(unsigned int)mtu, (unsigned int)pdu,
			EFX_MAC_PDU_MAX);
		goto fail_inval;
	}

	sfc_adapter_lock(sa);

	if (pdu != sa->port.pdu) {
		if (sa->state == SFC_ADAPTER_STARTED) {
			/* Changing PDU on a running port requires a restart */
			sfc_stop(sa);

			old_pdu = sa->port.pdu;
			sa->port.pdu = pdu;
			rc = sfc_start(sa);
			if (rc != 0)
				goto fail_start;
		} else {
			/* Not started: just remember it for the next start */
			sa->port.pdu = pdu;
		}
	}

	/*
	 * The driver does not use it, but other PMDs update jumbo_frame
	 * flag and max_rx_pkt_len when MTU is set.
	 */
	dev->data->dev_conf.rxmode.jumbo_frame = (mtu > ETHER_MAX_LEN);
	dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu;

	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
	return 0;

fail_start:
	/* Roll back to the previous PDU and try to bring the port back up */
	sa->port.pdu = old_pdu;
	if (sfc_start(sa) != 0)
		sfc_err(sa, "cannot start with neither new (%u) nor old (%u) "
			"PDU max size - port is stopped",
			(unsigned int)pdu, (unsigned int)old_pdu);
	sfc_adapter_unlock(sa);

fail_inval:
	sfc_log_init(sa, "failed %d", rc);
	SFC_ASSERT(rc > 0);
	return -rc;
}

/*
 * ethdev callback table registered via dev->dev_ops in sfc_eth_dev_init().
 * (Table continues on the next source line of this dump.)
 */
static const struct eth_dev_ops sfc_eth_dev_ops = {
	.dev_configure			= sfc_dev_configure,
	.dev_start			= sfc_dev_start,
	.dev_stop			= sfc_dev_stop,
	.dev_set_link_up		= sfc_dev_set_link_up,
	.dev_set_link_down		= sfc_dev_set_link_down,
	.dev_close			= sfc_dev_close,
	.promiscuous_enable		= sfc_dev_promisc_enable,
	.promiscuous_disable		= sfc_dev_promisc_disable,
	.allmulticast_enable		= sfc_dev_allmulti_enable,
	.allmulticast_disable		= sfc_dev_allmulti_disable,
	.link_update			= sfc_dev_link_update,
	.stats_get			=
sfc_stats_get, 7227b989176SAndrew Rybchenko .xstats_get = sfc_xstats_get, 7237b989176SAndrew Rybchenko .xstats_get_names = sfc_xstats_get_names, 72463d588ffSAndrew Rybchenko .dev_infos_get = sfc_dev_infos_get, 725e961cf42SAndrew Rybchenko .mtu_set = sfc_dev_set_mtu, 726ce35b05cSAndrew Rybchenko .rx_queue_setup = sfc_rx_queue_setup, 727ce35b05cSAndrew Rybchenko .rx_queue_release = sfc_rx_queue_release, 728b1b7ad93SIvan Malov .tx_queue_setup = sfc_tx_queue_setup, 729b1b7ad93SIvan Malov .tx_queue_release = sfc_tx_queue_release, 730cdbb29cfSAndrew Rybchenko .flow_ctrl_get = sfc_flow_ctrl_get, 731cdbb29cfSAndrew Rybchenko .flow_ctrl_set = sfc_flow_ctrl_set, 73263d588ffSAndrew Rybchenko }; 73363d588ffSAndrew Rybchenko 73463d588ffSAndrew Rybchenko static int 73563d588ffSAndrew Rybchenko sfc_eth_dev_init(struct rte_eth_dev *dev) 73663d588ffSAndrew Rybchenko { 73763d588ffSAndrew Rybchenko struct sfc_adapter *sa = dev->data->dev_private; 73863d588ffSAndrew Rybchenko struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(dev); 73963d588ffSAndrew Rybchenko int rc; 740ba641f20SAndrew Rybchenko const efx_nic_cfg_t *encp; 741ba641f20SAndrew Rybchenko const struct ether_addr *from; 74263d588ffSAndrew Rybchenko 74363d588ffSAndrew Rybchenko /* Required for logging */ 74463d588ffSAndrew Rybchenko sa->eth_dev = dev; 74563d588ffSAndrew Rybchenko 74663d588ffSAndrew Rybchenko /* Copy PCI device info to the dev->data */ 74763d588ffSAndrew Rybchenko rte_eth_copy_pci_info(dev, pci_dev); 74863d588ffSAndrew Rybchenko 74963d588ffSAndrew Rybchenko rc = sfc_kvargs_parse(sa); 75063d588ffSAndrew Rybchenko if (rc != 0) 75163d588ffSAndrew Rybchenko goto fail_kvargs_parse; 75263d588ffSAndrew Rybchenko 75363d588ffSAndrew Rybchenko rc = sfc_kvargs_process(sa, SFC_KVARG_DEBUG_INIT, 75463d588ffSAndrew Rybchenko sfc_kvarg_bool_handler, &sa->debug_init); 75563d588ffSAndrew Rybchenko if (rc != 0) 75663d588ffSAndrew Rybchenko goto fail_kvarg_debug_init; 75763d588ffSAndrew Rybchenko 75863d588ffSAndrew Rybchenko 
sfc_log_init(sa, "entry");

	/* Single MAC address slot; freed on the fail_attach path and in uninit */
	dev->data->mac_addrs = rte_zmalloc("sfc", ETHER_ADDR_LEN, 0);
	if (dev->data->mac_addrs == NULL) {
		rc = ENOMEM;
		goto fail_mac_addrs;
	}

	sfc_adapter_lock_init(sa);
	sfc_adapter_lock(sa);

	sfc_log_init(sa, "attaching");
	rc = sfc_attach(sa);
	if (rc != 0)
		goto fail_attach;

	encp = efx_nic_cfg_get(sa->nic);

	/*
	 * The arguments are really reverse order in comparison to
	 * Linux kernel. Copy from NIC config to Ethernet device data.
	 */
	from = (const struct ether_addr *)(encp->enc_mac_addr);
	ether_addr_copy(from, &dev->data->mac_addrs[0]);

	dev->dev_ops = &sfc_eth_dev_ops;
	dev->rx_pkt_burst = &sfc_recv_pkts;
	dev->tx_pkt_burst = &sfc_xmit_pkts;

	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
	return 0;

fail_attach:
	sfc_adapter_unlock(sa);
	sfc_adapter_lock_fini(sa);
	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

fail_mac_addrs:
fail_kvarg_debug_init:
	/* Shared cleanup for both kvargs-related failures */
	sfc_kvargs_cleanup(sa);

fail_kvargs_parse:
	sfc_log_init(sa, "failed %d", rc);
	SFC_ASSERT(rc > 0);
	return -rc;
}

/*
 * Per-device uninit callback (registered as sfc_efx_pmd.eth_dev_uninit).
 * Detaches the adapter and releases resources acquired in
 * sfc_eth_dev_init(), in reverse order of acquisition.
 */
static int
sfc_eth_dev_uninit(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);

	sfc_detach(sa);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;
	/* Disconnect callbacks so a stale device cannot be driven */
	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	sfc_kvargs_cleanup(sa);

	sfc_adapter_unlock(sa);
	sfc_adapter_lock_fini(sa);

	sfc_log_init(sa, "done");

	/* Required for logging, so cleanup last */
	sa->eth_dev = NULL;
	return 0;
}

/* PCI IDs of supported Solarflare controllers */
static const struct rte_pci_id pci_id_sfc_efx_map[] = {
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) },
	{ .vendor_id = 0 /* sentinel */ }
};

/*
 * PMD registration structure: binds the PCI ID table and the per-device
 * init/uninit callbacks to the generic rte_eth PCI probe/remove path.
 */
static struct eth_driver sfc_efx_pmd = {
	.pci_drv = {
		.id_table = pci_id_sfc_efx_map,
		.drv_flags =
			RTE_PCI_DRV_INTR_LSC |
			RTE_PCI_DRV_NEED_MAPPING,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = sfc_eth_dev_init,
	.eth_dev_uninit = sfc_eth_dev_uninit,
	.dev_private_size = sizeof(struct sfc_adapter),
};

/* Register the driver, its PCI ID table and the accepted devargs */
RTE_PMD_REGISTER_PCI(net_sfc_efx, sfc_efx_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map);
RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx,
	SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " "
	SFC_KVARG_MCDI_LOGGING "=" SFC_KVARG_VALUES_BOOL " "
	SFC_KVARG_DEBUG_INIT "=" SFC_KVARG_VALUES_BOOL);