/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2016-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

/* sysconf() */
#include <unistd.h>

#include <rte_errno.h>
#include <rte_alarm.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tx.h"
#include "sfc_kvargs.h"
#include "sfc_tweak.h"


int
sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
	      size_t len, int socket_id, efsys_mem_t *esmp)
{
	const struct rte_memzone *mz;

	sfc_log_init(sa, "name=%s id=%u len=%zu socket_id=%d",
		     name, id, len, socket_id);

	mz = rte_eth_dma_zone_reserve(sa->eth_dev, name, id, len,
				      sysconf(_SC_PAGESIZE), socket_id);
	if (mz == NULL) {
		sfc_err(sa, "cannot reserve DMA zone for %s:%u %#x@%d: %s",
			name, (unsigned int)id, (unsigned int)len, socket_id,
			rte_strerror(rte_errno));
		return ENOMEM;
	}

	esmp->esm_addr = mz->iova;
	if (esmp->esm_addr == RTE_BAD_IOVA) {
		(void)rte_memzone_free(mz);
		return EFAULT;
	}

	esmp->esm_mz = mz;
	esmp->esm_base = mz->addr;

	return 0;
}

void
sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp)
{
	int rc;

	sfc_log_init(sa, "name=%s", esmp->esm_mz->name);

	rc = rte_memzone_free(esmp->esm_mz);
	if (rc != 0)
		sfc_err(sa, "rte_memzone_free() failed: %d", rc);

	memset(esmp, 0, sizeof(*esmp));
}

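/*
 * Translate an ethdev link_speeds mask into libefx PHY capability bits.
 * If the ETH_LINK_SPEED_FIXED flag is not set, autonegotiation is
 * advertised as well.
 */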
static uint32_t
sfc_phy_cap_from_link_speeds(uint32_t speeds)
{
	uint32_t phy_caps = 0;

	if (~speeds & ETH_LINK_SPEED_FIXED) {
		phy_caps |= (1 << EFX_PHY_CAP_AN);
		/*
		 * If no speeds are specified in the mask, any supported
		 * speed may be negotiated
		 */
		if (speeds == ETH_LINK_SPEED_AUTONEG)
			phy_caps |=
				(1 << EFX_PHY_CAP_1000FDX) |
				(1 << EFX_PHY_CAP_10000FDX) |
				(1 << EFX_PHY_CAP_25000FDX) |
				(1 << EFX_PHY_CAP_40000FDX) |
				(1 << EFX_PHY_CAP_50000FDX) |
				(1 << EFX_PHY_CAP_100000FDX);
	}
	if (speeds & ETH_LINK_SPEED_1G)
		phy_caps |= (1 << EFX_PHY_CAP_1000FDX);
	if (speeds & ETH_LINK_SPEED_10G)
		phy_caps |= (1 << EFX_PHY_CAP_10000FDX);
	if (speeds & ETH_LINK_SPEED_25G)
		phy_caps |= (1 << EFX_PHY_CAP_25000FDX);
	if (speeds & ETH_LINK_SPEED_40G)
		phy_caps |= (1 << EFX_PHY_CAP_40000FDX);
	if (speeds & ETH_LINK_SPEED_50G)
		phy_caps |= (1 << EFX_PHY_CAP_50000FDX);
	if (speeds & ETH_LINK_SPEED_100G)
		phy_caps |= (1 << EFX_PHY_CAP_100000FDX);

	return phy_caps;
}

/*
 * Check requested device level configuration.
 * Receive and transmit configuration is checked in corresponding
 * modules.
 */
static int
sfc_check_conf(struct sfc_adapter *sa)
{
	const struct rte_eth_conf *conf = &sa->eth_dev->data->dev_conf;
	int rc = 0;

	sa->port.phy_adv_cap =
		sfc_phy_cap_from_link_speeds(conf->link_speeds) &
		sa->port.phy_adv_cap_mask;
	if ((sa->port.phy_adv_cap & ~(1 << EFX_PHY_CAP_AN)) == 0) {
		sfc_err(sa, "No link speeds from mask %#x are supported",
			conf->link_speeds);
		rc = EINVAL;
	}

#if !EFSYS_OPT_LOOPBACK
	if (conf->lpbk_mode != 0) {
		sfc_err(sa, "Loopback not supported");
		rc = EINVAL;
	}
#endif

	if (conf->dcb_capability_en != 0) {
		sfc_err(sa, "Priority-based flow control not supported");
		rc = EINVAL;
	}

	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		sfc_err(sa, "Flow Director not supported");
		rc = EINVAL;
	}

	if ((conf->intr_conf.lsc != 0) &&
	    (sa->intr.type != EFX_INTR_LINE) &&
	    (sa->intr.type != EFX_INTR_MESSAGE)) {
		sfc_err(sa, "Link status change interrupt not supported");
		rc = EINVAL;
	}

	if (conf->intr_conf.rxq != 0 &&
	    (sa->priv.dp_rx->features & SFC_DP_RX_FEAT_INTR) == 0) {
		sfc_err(sa, "Receive queue interrupt not supported");
		rc = EINVAL;
	}

	return rc;
}

/*
 * Find out maximum number of receive and transmit queues which could be
 * advertised.
 *
 * NIC is kept initialized on success to allow other modules to acquire
 * defaults and capabilities.
 */
static int
sfc_estimate_resource_limits(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	efx_drv_limits_t limits;
	int rc;
	uint32_t evq_allocated;
	uint32_t rxq_allocated;
	uint32_t txq_allocated;

	memset(&limits, 0, sizeof(limits));

	/* Request at least one Rx and Tx queue */
	limits.edl_min_rxq_count = 1;
	limits.edl_min_txq_count = 1;
	/* Management event queue plus event queue for each Tx and Rx queue */
	limits.edl_min_evq_count =
		1 + limits.edl_min_rxq_count + limits.edl_min_txq_count;

	/* Divide by number of functions to guarantee that all functions
	 * will get promised resources
	 */
	/* FIXME Divide by number of functions (not 2) below */
	limits.edl_max_evq_count = encp->enc_evq_limit / 2;
	SFC_ASSERT(limits.edl_max_evq_count >= limits.edl_min_rxq_count);

	/* Split equally between receive and transmit */
	limits.edl_max_rxq_count =
		MIN(encp->enc_rxq_limit, (limits.edl_max_evq_count - 1) / 2);
	SFC_ASSERT(limits.edl_max_rxq_count >= limits.edl_min_rxq_count);

	limits.edl_max_txq_count =
		MIN(encp->enc_txq_limit,
		    limits.edl_max_evq_count - 1 - limits.edl_max_rxq_count);

	if (sa->tso)
		limits.edl_max_txq_count =
			MIN(limits.edl_max_txq_count,
			    encp->enc_fw_assisted_tso_v2_n_contexts /
			    encp->enc_hw_pf_count);

	SFC_ASSERT(limits.edl_max_txq_count >= limits.edl_min_rxq_count);

	/* Configure the minimum required resources needed for the
	 * driver to operate, and the maximum desired resources that the
	 * driver is capable of using.
	 */
	efx_nic_set_drv_limits(sa->nic, &limits);

	sfc_log_init(sa, "init nic");
	rc = efx_nic_init(sa->nic);
	if (rc != 0)
		goto fail_nic_init;

	/* Find resource dimensions assigned by firmware to this function */
	rc = efx_nic_get_vi_pool(sa->nic, &evq_allocated, &rxq_allocated,
				 &txq_allocated);
	if (rc != 0)
		goto fail_get_vi_pool;

	/* It still may allocate more than maximum, ensure limit */
	evq_allocated = MIN(evq_allocated, limits.edl_max_evq_count);
	rxq_allocated = MIN(rxq_allocated, limits.edl_max_rxq_count);
	txq_allocated = MIN(txq_allocated, limits.edl_max_txq_count);

	/* Subtract management EVQ not used for traffic */
	SFC_ASSERT(evq_allocated > 0);
	evq_allocated--;

	/* Right now we use separate EVQ for Rx and Tx */
	sa->rxq_max = MIN(rxq_allocated, evq_allocated / 2);
	sa->txq_max = MIN(txq_allocated, evq_allocated - sa->rxq_max);

	/* Keep NIC initialized */
	return 0;

fail_get_vi_pool:
	efx_nic_fini(sa->nic);
fail_nic_init:
	return rc;
}

static int
sfc_set_drv_limits(struct sfc_adapter *sa)
{
	const struct rte_eth_dev_data *data = sa->eth_dev->data;
	efx_drv_limits_t lim;

	memset(&lim, 0, sizeof(lim));

	/* Limits are strict since they take the initial estimation into account */
	lim.edl_min_evq_count = lim.edl_max_evq_count =
		1 + data->nb_rx_queues + data->nb_tx_queues;
	lim.edl_min_rxq_count = lim.edl_max_rxq_count = data->nb_rx_queues;
	lim.edl_min_txq_count = lim.edl_max_txq_count = data->nb_tx_queues;

	return efx_nic_set_drv_limits(sa->nic, &lim);
}

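/*
 * Select the firmware subvariant to run: if neither the configured Tx mode
 * nor any initialized Tx queue requires a checksum offload, the
 * no-Tx-checksum subvariant is requested, otherwise the default one is kept.
 */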
static int
sfc_set_fw_subvariant(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	uint64_t tx_offloads = sa->eth_dev->data->dev_conf.txmode.offloads;
	unsigned int txq_index;
	efx_nic_fw_subvariant_t req_fw_subvariant;
	efx_nic_fw_subvariant_t cur_fw_subvariant;
	int rc;

	if (!encp->enc_fw_subvariant_no_tx_csum_supported) {
		sfc_info(sa, "no-Tx-checksum subvariant not supported");
		return 0;
	}

	for (txq_index = 0; txq_index < sas->txq_count; ++txq_index) {
		struct sfc_txq_info *txq_info = &sas->txq_info[txq_index];

		if (txq_info->state & SFC_TXQ_INITIALIZED)
			tx_offloads |= txq_info->offloads;
	}

	if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
			   DEV_TX_OFFLOAD_TCP_CKSUM |
			   DEV_TX_OFFLOAD_UDP_CKSUM |
			   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_DEFAULT;
	else
		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM;

	rc = efx_nic_get_fw_subvariant(sa->nic, &cur_fw_subvariant);
	if (rc != 0) {
		sfc_err(sa, "failed to get FW subvariant: %d", rc);
		return rc;
	}
	sfc_info(sa, "FW subvariant is %u vs required %u",
		 cur_fw_subvariant, req_fw_subvariant);

	if (cur_fw_subvariant == req_fw_subvariant)
		return 0;

	rc = efx_nic_set_fw_subvariant(sa->nic, req_fw_subvariant);
	if (rc != 0) {
		sfc_err(sa, "failed to set FW subvariant %u: %d",
			req_fw_subvariant, rc);
		return rc;
	}
	sfc_info(sa, "FW subvariant set to %u", req_fw_subvariant);

	return 0;
}

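/*
 * Single start attempt: set the firmware subvariant and driver limits,
 * initialize the NIC, apply tunnel configuration and start interrupts,
 * events, port, Rx, Tx and flow rules; unwound in reverse order on failure.
 */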
static int
sfc_try_start(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp;
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(sa->state == SFC_ADAPTER_STARTING);

	sfc_log_init(sa, "set FW subvariant");
	rc = sfc_set_fw_subvariant(sa);
	if (rc != 0)
		goto fail_set_fw_subvariant;

	sfc_log_init(sa, "set resource limits");
	rc = sfc_set_drv_limits(sa);
	if (rc != 0)
		goto fail_set_drv_limits;

	sfc_log_init(sa, "init nic");
	rc = efx_nic_init(sa->nic);
	if (rc != 0)
		goto fail_nic_init;

	encp = efx_nic_cfg_get(sa->nic);

	/*
	 * Refresh (since it may change on NIC reset/restart) a copy of
	 * supported tunnel encapsulations in shared memory, to be used
	 * when supported Rx packet type classes are retrieved.
	 */
	sa->priv.shared->tunnel_encaps =
		encp->enc_tunnel_encapsulations_supported;

	if (encp->enc_tunnel_encapsulations_supported != 0) {
		sfc_log_init(sa, "apply tunnel config");
		rc = efx_tunnel_reconfigure(sa->nic);
		if (rc != 0)
			goto fail_tunnel_reconfigure;
	}

	rc = sfc_intr_start(sa);
	if (rc != 0)
		goto fail_intr_start;

	rc = sfc_ev_start(sa);
	if (rc != 0)
		goto fail_ev_start;

	rc = sfc_port_start(sa);
	if (rc != 0)
		goto fail_port_start;

	rc = sfc_rx_start(sa);
	if (rc != 0)
		goto fail_rx_start;

	rc = sfc_tx_start(sa);
	if (rc != 0)
		goto fail_tx_start;

	rc = sfc_flow_start(sa);
	if (rc != 0)
		goto fail_flows_insert;

	sfc_log_init(sa, "done");
	return 0;

fail_flows_insert:
	sfc_tx_stop(sa);

fail_tx_start:
	sfc_rx_stop(sa);

fail_rx_start:
	sfc_port_stop(sa);

fail_port_start:
	sfc_ev_stop(sa);

fail_ev_start:
	sfc_intr_stop(sa);

fail_intr_start:
fail_tunnel_reconfigure:
	efx_nic_fini(sa->nic);

fail_nic_init:
fail_set_drv_limits:
fail_set_fw_subvariant:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

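/*
 * Start the adapter from the CONFIGURED state. The attempt is retried a
 * few times for error codes (EIO, EAGAIN, ENOENT, EINVAL) that are
 * presumed to indicate a transient condition.
 */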
int
sfc_start(struct sfc_adapter *sa)
{
	unsigned int start_tries = 3;
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	switch (sa->state) {
	case SFC_ADAPTER_CONFIGURED:
		break;
	case SFC_ADAPTER_STARTED:
		sfc_notice(sa, "already started");
		return 0;
	default:
		rc = EINVAL;
		goto fail_bad_state;
	}

	sa->state = SFC_ADAPTER_STARTING;

	do {
		rc = sfc_try_start(sa);
	} while ((--start_tries > 0) &&
		 (rc == EIO || rc == EAGAIN || rc == ENOENT || rc == EINVAL));

	if (rc != 0)
		goto fail_try_start;

	sa->state = SFC_ADAPTER_STARTED;
	sfc_log_init(sa, "done");
	return 0;

fail_try_start:
	sa->state = SFC_ADAPTER_CONFIGURED;
fail_bad_state:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_stop(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	switch (sa->state) {
	case SFC_ADAPTER_STARTED:
		break;
	case SFC_ADAPTER_CONFIGURED:
		sfc_notice(sa, "already stopped");
		return;
	default:
		sfc_err(sa, "stop in unexpected state %u", sa->state);
		SFC_ASSERT(B_FALSE);
		return;
	}

	sa->state = SFC_ADAPTER_STOPPING;

	sfc_flow_stop(sa);
	sfc_tx_stop(sa);
	sfc_rx_stop(sa);
	sfc_port_stop(sa);
	sfc_ev_stop(sa);
	sfc_intr_stop(sa);
	efx_nic_fini(sa->nic);

	sa->state = SFC_ADAPTER_CONFIGURED;
	sfc_log_init(sa, "done");
}

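/*
 * Restart a started adapter by stopping and starting it again; used by
 * the deferred restart machinery below.
 */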
static int
sfc_restart(struct sfc_adapter *sa)
{
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (sa->state != SFC_ADAPTER_STARTED)
		return EINVAL;

	sfc_stop(sa);

	rc = sfc_start(sa);
	if (rc != 0)
		sfc_err(sa, "restart failed");

	return rc;
}

static void
sfc_restart_if_required(void *arg)
{
	struct sfc_adapter *sa = arg;

	/* If restart is scheduled, clear the flag and do it */
	if (rte_atomic32_cmpset((volatile uint32_t *)&sa->restart_required,
				1, 0)) {
		sfc_adapter_lock(sa);
		if (sa->state == SFC_ADAPTER_STARTED)
			(void)sfc_restart(sa);
		sfc_adapter_unlock(sa);
	}
}

void
sfc_schedule_restart(struct sfc_adapter *sa)
{
	int rc;

	/* Schedule restart alarm if it is not scheduled yet */
	if (!rte_atomic32_test_and_set(&sa->restart_required))
		return;

	rc = rte_eal_alarm_set(1, sfc_restart_if_required, sa);
	if (rc == -ENOTSUP)
		sfc_warn(sa, "alarms are not supported, restart is pending");
	else if (rc != 0)
		sfc_err(sa, "cannot arm restart alarm (rc=%d)", rc);
	else
		sfc_notice(sa, "restart scheduled");
}

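/*
 * Apply device-level configuration: validate the requested configuration
 * and configure the interrupt, port, Rx and Tx modules.
 */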
int
sfc_configure(struct sfc_adapter *sa)
{
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED ||
		   sa->state == SFC_ADAPTER_CONFIGURED);
	sa->state = SFC_ADAPTER_CONFIGURING;

	rc = sfc_check_conf(sa);
	if (rc != 0)
		goto fail_check_conf;

	rc = sfc_intr_configure(sa);
	if (rc != 0)
		goto fail_intr_configure;

	rc = sfc_port_configure(sa);
	if (rc != 0)
		goto fail_port_configure;

	rc = sfc_rx_configure(sa);
	if (rc != 0)
		goto fail_rx_configure;

	rc = sfc_tx_configure(sa);
	if (rc != 0)
		goto fail_tx_configure;

	sa->state = SFC_ADAPTER_CONFIGURED;
	sfc_log_init(sa, "done");
	return 0;

fail_tx_configure:
	sfc_rx_close(sa);

fail_rx_configure:
	sfc_port_close(sa);

fail_port_configure:
	sfc_intr_close(sa);

fail_intr_configure:
fail_check_conf:
	sa->state = SFC_ADAPTER_INITIALIZED;
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_close(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
	sa->state = SFC_ADAPTER_CLOSING;

	sfc_tx_close(sa);
	sfc_rx_close(sa);
	sfc_port_close(sa);
	sfc_intr_close(sa);

	sa->state = SFC_ADAPTER_INITIALIZED;
	sfc_log_init(sa, "done");
}

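/*
 * Describe the PCI memory BAR selected by efx_family() in the efsys_bar_t
 * used by libefx for register access; the BAR itself is mapped by the
 * PCI bus driver.
 */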
static int
sfc_mem_bar_init(struct sfc_adapter *sa, unsigned int membar)
{
	struct rte_eth_dev *eth_dev = sa->eth_dev;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	efsys_bar_t *ebp = &sa->mem_bar;
	struct rte_mem_resource *res = &pci_dev->mem_resource[membar];

	SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name);
	ebp->esb_rid = membar;
	ebp->esb_dev = pci_dev;
	ebp->esb_base = res->addr;
	return 0;
}

static void
sfc_mem_bar_fini(struct sfc_adapter *sa)
{
	efsys_bar_t *ebp = &sa->mem_bar;

	SFC_BAR_LOCK_DESTROY(ebp);
	memset(ebp, 0, sizeof(*ebp));
}

/*
 * A fixed RSS key which has a property of being symmetric
 * (symmetrical flows are distributed to the same CPU)
 * and is also known to give a uniform distribution
 * (a good distribution of traffic between different CPUs)
 */
static const uint8_t default_rss_key[EFX_RSS_KEY_SIZE] = {
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
};

static int
sfc_rss_attach(struct sfc_adapter *sa)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	int rc;

	rc = efx_intr_init(sa->nic, sa->intr.type, NULL);
	if (rc != 0)
		goto fail_intr_init;

	rc = efx_ev_init(sa->nic);
	if (rc != 0)
		goto fail_ev_init;

	rc = efx_rx_init(sa->nic);
	if (rc != 0)
		goto fail_rx_init;

	rc = efx_rx_scale_default_support_get(sa->nic, &rss->context_type);
	if (rc != 0)
		goto fail_scale_support_get;

	rc = efx_rx_hash_default_support_get(sa->nic, &rss->hash_support);
	if (rc != 0)
		goto fail_hash_support_get;

	rc = sfc_rx_hash_init(sa);
	if (rc != 0)
		goto fail_rx_hash_init;

	efx_rx_fini(sa->nic);
	efx_ev_fini(sa->nic);
	efx_intr_fini(sa->nic);

	rte_memcpy(rss->key, default_rss_key, sizeof(rss->key));

	return 0;

fail_rx_hash_init:
fail_hash_support_get:
fail_scale_support_get:
	efx_rx_fini(sa->nic);

fail_rx_init:
	efx_ev_fini(sa->nic);

fail_ev_init:
	efx_intr_fini(sa->nic);

fail_intr_init:
	return rc;
}

static void
sfc_rss_detach(struct sfc_adapter *sa)
{
	sfc_rx_hash_fini(sa);
}

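/*
 * Attach the adapter: reset the NIC, initialize tunnel support, estimate
 * resource limits and attach the interrupt, event, port, RSS and filter
 * modules. The NIC is left finalized (efx_nic_fini()) on success and is
 * initialized again on start.
 */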
int
sfc_attach(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp;
	efx_nic_t *enp = sa->nic;
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	efx_mcdi_new_epoch(enp);

	sfc_log_init(sa, "reset nic");
	rc = efx_nic_reset(enp);
	if (rc != 0)
		goto fail_nic_reset;

	/*
	 * Probed NIC is sufficient for tunnel init.
	 * Initialize tunnel support to be able to use libefx
	 * efx_tunnel_config_udp_{add,remove}() in any state and
	 * efx_tunnel_reconfigure() on start up.
	 */
	rc = efx_tunnel_init(enp);
	if (rc != 0)
		goto fail_tunnel_init;

	encp = efx_nic_cfg_get(sa->nic);

	/*
	 * Make a copy of supported tunnel encapsulations in shared
	 * memory, to be used when supported Rx packet type classes
	 * are retrieved.
	 */
	sa->priv.shared->tunnel_encaps =
		encp->enc_tunnel_encapsulations_supported;

	if (sfc_dp_tx_offload_capa(sa->priv.dp_tx) & DEV_TX_OFFLOAD_TCP_TSO) {
		sa->tso = encp->enc_fw_assisted_tso_v2_enabled;
		if (!sa->tso)
			sfc_info(sa, "TSO support isn't available on this adapter");
	}

	if (sa->tso &&
	    (sfc_dp_tx_offload_capa(sa->priv.dp_tx) &
	     (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
	      DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) != 0) {
		sa->tso_encap = encp->enc_fw_assisted_tso_v2_encap_enabled;
		if (!sa->tso_encap)
			sfc_info(sa, "Encapsulated TSO support isn't available on this adapter");
	}

	sfc_log_init(sa, "estimate resource limits");
	rc = sfc_estimate_resource_limits(sa);
	if (rc != 0)
		goto fail_estimate_rsrc_limits;

	sa->evq_max_entries = encp->enc_evq_max_nevs;
	SFC_ASSERT(rte_is_power_of_2(sa->evq_max_entries));

	sa->evq_min_entries = encp->enc_evq_min_nevs;
	SFC_ASSERT(rte_is_power_of_2(sa->evq_min_entries));

	sa->rxq_max_entries = encp->enc_rxq_max_ndescs;
	SFC_ASSERT(rte_is_power_of_2(sa->rxq_max_entries));

	sa->rxq_min_entries = encp->enc_rxq_min_ndescs;
	SFC_ASSERT(rte_is_power_of_2(sa->rxq_min_entries));

	sa->txq_max_entries = encp->enc_txq_max_ndescs;
	SFC_ASSERT(rte_is_power_of_2(sa->txq_max_entries));

	sa->txq_min_entries = encp->enc_txq_min_ndescs;
	SFC_ASSERT(rte_is_power_of_2(sa->txq_min_entries));

	rc = sfc_intr_attach(sa);
	if (rc != 0)
		goto fail_intr_attach;

	rc = sfc_ev_attach(sa);
	if (rc != 0)
		goto fail_ev_attach;

	rc = sfc_port_attach(sa);
	if (rc != 0)
		goto fail_port_attach;

	rc = sfc_rss_attach(sa);
	if (rc != 0)
		goto fail_rss_attach;

	rc = sfc_filter_attach(sa);
	if (rc != 0)
		goto fail_filter_attach;

	sfc_log_init(sa, "fini nic");
	efx_nic_fini(enp);

	sfc_flow_init(sa);

	sa->state = SFC_ADAPTER_INITIALIZED;

	sfc_log_init(sa, "done");
	return 0;

fail_filter_attach:
	sfc_rss_detach(sa);

fail_rss_attach:
	sfc_port_detach(sa);

fail_port_attach:
	sfc_ev_detach(sa);

fail_ev_attach:
	sfc_intr_detach(sa);

fail_intr_attach:
	efx_nic_fini(sa->nic);

fail_estimate_rsrc_limits:
fail_tunnel_init:
	efx_tunnel_fini(sa->nic);

fail_nic_reset:

	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_detach(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sfc_flow_fini(sa);

	sfc_filter_detach(sa);
	sfc_rss_detach(sa);
	sfc_port_detach(sa);
	sfc_ev_detach(sa);
	sfc_intr_detach(sa);
	efx_tunnel_fini(sa->nic);

	sa->state = SFC_ADAPTER_UNINITIALIZED;
}

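/* Parse the SFC_KVARG_FW_VARIANT device argument value into an efx_fw_variant_t */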
static int
sfc_kvarg_fv_variant_handler(__rte_unused const char *key,
			     const char *value_str, void *opaque)
{
	uint32_t *value = opaque;

	if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DONT_CARE) == 0)
		*value = EFX_FW_VARIANT_DONT_CARE;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_FULL_FEATURED) == 0)
		*value = EFX_FW_VARIANT_FULL_FEATURED;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_LOW_LATENCY) == 0)
		*value = EFX_FW_VARIANT_LOW_LATENCY;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_PACKED_STREAM) == 0)
		*value = EFX_FW_VARIANT_PACKED_STREAM;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DPDK) == 0)
		*value = EFX_FW_VARIANT_DPDK;
	else
		return -EINVAL;

	return 0;
}

static int
sfc_get_fw_variant(struct sfc_adapter *sa, efx_fw_variant_t *efv)
{
	efx_nic_fw_info_t enfi;
	int rc;

	rc = efx_nic_get_fw_version(sa->nic, &enfi);
	if (rc != 0)
		return rc;
	else if (!enfi.enfi_dpcpu_fw_ids_valid)
		return ENOTSUP;

	/*
	 * Firmware variant can be uniquely identified by the RxDPCPU
	 * firmware id
	 */
	switch (enfi.enfi_rx_dpcpu_fw_id) {
	case EFX_RXDP_FULL_FEATURED_FW_ID:
		*efv = EFX_FW_VARIANT_FULL_FEATURED;
		break;

	case EFX_RXDP_LOW_LATENCY_FW_ID:
		*efv = EFX_FW_VARIANT_LOW_LATENCY;
		break;

	case EFX_RXDP_PACKED_STREAM_FW_ID:
		*efv = EFX_FW_VARIANT_PACKED_STREAM;
		break;

	case EFX_RXDP_DPDK_FW_ID:
		*efv = EFX_FW_VARIANT_DPDK;
		break;

	default:
		/*
		 * Other firmware variants are not considered, since they are
		 * not supported in the device parameters
		 */
		*efv = EFX_FW_VARIANT_DONT_CARE;
		break;
	}

	return 0;
}

static const char *
sfc_fw_variant2str(efx_fw_variant_t efv)
{
	switch (efv) {
	case EFX_RXDP_FULL_FEATURED_FW_ID:
		return SFC_KVARG_FW_VARIANT_FULL_FEATURED;
	case EFX_RXDP_LOW_LATENCY_FW_ID:
		return SFC_KVARG_FW_VARIANT_LOW_LATENCY;
	case EFX_RXDP_PACKED_STREAM_FW_ID:
		return SFC_KVARG_FW_VARIANT_PACKED_STREAM;
	case EFX_RXDP_DPDK_FW_ID:
		return SFC_KVARG_FW_VARIANT_DPDK;
	default:
		return "unknown";
	}
}

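/*
 * Parse the SFC_KVARG_RXD_WAIT_TIMEOUT_NS device argument and check it
 * against the equal stride super-buffer head-of-line blocking limit.
 */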
static int
sfc_kvarg_rxd_wait_timeout_ns(struct sfc_adapter *sa)
{
	int rc;
	long value;

	value = SFC_RXD_WAIT_TIMEOUT_NS_DEF;

	rc = sfc_kvargs_process(sa, SFC_KVARG_RXD_WAIT_TIMEOUT_NS,
				sfc_kvarg_long_handler, &value);
	if (rc != 0)
		return rc;

	if (value < 0 ||
	    (unsigned long)value > EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX) {
		sfc_err(sa, "wrong '" SFC_KVARG_RXD_WAIT_TIMEOUT_NS "' "
			    "was set (%ld);", value);
		sfc_err(sa, "it must not be less than 0 or greater than %u",
			    EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX);
		return EINVAL;
	}

	sa->rxd_wait_timeout_ns = value;
	return 0;
}

static int
sfc_nic_probe(struct sfc_adapter *sa)
{
	efx_nic_t *enp = sa->nic;
	efx_fw_variant_t preferred_efv;
	efx_fw_variant_t efv;
	int rc;

	preferred_efv = EFX_FW_VARIANT_DONT_CARE;
	rc = sfc_kvargs_process(sa, SFC_KVARG_FW_VARIANT,
				sfc_kvarg_fv_variant_handler,
				&preferred_efv);
	if (rc != 0) {
		sfc_err(sa, "invalid %s parameter value", SFC_KVARG_FW_VARIANT);
		return rc;
	}

	rc = sfc_kvarg_rxd_wait_timeout_ns(sa);
	if (rc != 0)
		return rc;

	rc = efx_nic_probe(enp, preferred_efv);
	if (rc == EACCES) {
		/* Unprivileged functions cannot set FW variant */
		rc = efx_nic_probe(enp, EFX_FW_VARIANT_DONT_CARE);
	}
	if (rc != 0)
		return rc;

	rc = sfc_get_fw_variant(sa, &efv);
	if (rc == ENOTSUP) {
		sfc_warn(sa, "FW variant can not be obtained");
		return 0;
	}
	if (rc != 0)
		return rc;

	/* Check that firmware variant was changed to the requested one */
	if (preferred_efv != EFX_FW_VARIANT_DONT_CARE && preferred_efv != efv) {
		sfc_warn(sa, "FW variant has not changed to the requested %s",
			 sfc_fw_variant2str(preferred_efv));
	}

	sfc_notice(sa, "running FW variant is %s", sfc_fw_variant2str(efv));

	return 0;
}

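/*
 * Probe the device: look up the NIC family, describe the memory BAR,
 * create the libefx NIC object, bring up MCDI and probe the NIC with the
 * preferred firmware variant taken from device arguments.
 */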
int
sfc_probe(struct sfc_adapter *sa)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);
	unsigned int membar;
	efx_nic_t *enp;
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sa->socket_id = rte_socket_id();
	rte_atomic32_init(&sa->restart_required);

	sfc_log_init(sa, "get family");
	rc = efx_family(pci_dev->id.vendor_id, pci_dev->id.device_id,
			&sa->family, &membar);
	if (rc != 0)
		goto fail_family;
	sfc_log_init(sa, "family is %u, membar is %u", sa->family, membar);

	sfc_log_init(sa, "init mem bar");
	rc = sfc_mem_bar_init(sa, membar);
	if (rc != 0)
		goto fail_mem_bar_init;

	sfc_log_init(sa, "create nic");
	rte_spinlock_init(&sa->nic_lock);
	rc = efx_nic_create(sa->family, (efsys_identifier_t *)sa,
			    &sa->mem_bar, &sa->nic_lock, &enp);
	if (rc != 0)
		goto fail_nic_create;
	sa->nic = enp;

	rc = sfc_mcdi_init(sa);
	if (rc != 0)
		goto fail_mcdi_init;

	sfc_log_init(sa, "probe nic");
	rc = sfc_nic_probe(sa);
	if (rc != 0)
		goto fail_nic_probe;

	sfc_log_init(sa, "done");
	return 0;

fail_nic_probe:
	sfc_mcdi_fini(sa);

fail_mcdi_init:
	sfc_log_init(sa, "destroy nic");
	sa->nic = NULL;
	efx_nic_destroy(enp);

fail_nic_create:
	sfc_mem_bar_fini(sa);

fail_mem_bar_init:
fail_family:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_unprobe(struct sfc_adapter *sa)
{
	efx_nic_t *enp = sa->nic;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sfc_log_init(sa, "unprobe nic");
	efx_nic_unprobe(enp);

	sfc_mcdi_fini(sa);

	/*
	 * Make sure there is no pending restart alarm since we are going
	 * to free the device private data which is passed as the callback
	 * opaque data. A new alarm cannot be scheduled since MCDI is
	 * shut down.
	 */
	rte_eal_alarm_cancel(sfc_restart_if_required, sa);

	sfc_log_init(sa, "destroy nic");
	sa->nic = NULL;
	efx_nic_destroy(enp);

	sfc_mem_bar_fini(sa);

	sfc_flow_fini(sa);
	sa->state = SFC_ADAPTER_UNINITIALIZED;
}

uint32_t
sfc_register_logtype(const struct rte_pci_addr *pci_addr,
		     const char *lt_prefix_str, uint32_t ll_default)
{
	size_t lt_prefix_str_size = strlen(lt_prefix_str);
	size_t lt_str_size_max;
	char *lt_str = NULL;
	int ret;

	if (SIZE_MAX - PCI_PRI_STR_SIZE - 1 > lt_prefix_str_size) {
		++lt_prefix_str_size; /* Reserve space for prefix separator */
		lt_str_size_max = lt_prefix_str_size + PCI_PRI_STR_SIZE + 1;
	} else {
		return sfc_logtype_driver;
	}

	lt_str = rte_zmalloc("logtype_str", lt_str_size_max, 0);
	if (lt_str == NULL)
		return sfc_logtype_driver;

	strncpy(lt_str, lt_prefix_str, lt_prefix_str_size);
	lt_str[lt_prefix_str_size - 1] = '.';
	rte_pci_device_name(pci_addr, lt_str + lt_prefix_str_size,
			    lt_str_size_max - lt_prefix_str_size);
	lt_str[lt_str_size_max - 1] = '\0';

	ret = rte_log_register_type_and_pick_level(lt_str, ll_default);
	rte_free(lt_str);

	if (ret < 0)
		return sfc_logtype_driver;

	return ret;
}