xref: /dpdk/drivers/net/sfc/sfc.c (revision 8cff0013d9a6e1d2425db5e569bb8093279069d6)
144c0947bSAndrew Rybchenko /* SPDX-License-Identifier: BSD-3-Clause
2244cfa79SAndrew Rybchenko  *
398d26ef7SAndrew Rybchenko  * Copyright(c) 2019-2021 Xilinx, Inc.
4a0147be5SAndrew Rybchenko  * Copyright(c) 2016-2019 Solarflare Communications Inc.
5ba641f20SAndrew Rybchenko  *
6ba641f20SAndrew Rybchenko  * This software was jointly developed between OKTET Labs (under contract
7ba641f20SAndrew Rybchenko  * for Solarflare) and Solarflare Communications, Inc.
8ba641f20SAndrew Rybchenko  */
9ba641f20SAndrew Rybchenko 
10ba641f20SAndrew Rybchenko /* sysconf() */
11ba641f20SAndrew Rybchenko #include <unistd.h>
12ba641f20SAndrew Rybchenko 
13ba641f20SAndrew Rybchenko #include <rte_errno.h>
14e77f9f19SAndrew Rybchenko #include <rte_alarm.h>
15ba641f20SAndrew Rybchenko 
16ba641f20SAndrew Rybchenko #include "efx.h"
17ba641f20SAndrew Rybchenko 
18ba641f20SAndrew Rybchenko #include "sfc.h"
191b0236e2SAndrew Rybchenko #include "sfc_debug.h"
20ba641f20SAndrew Rybchenko #include "sfc_log.h"
2158294ee6SAndrew Rybchenko #include "sfc_ev.h"
22a8e64c6bSAndrew Rybchenko #include "sfc_rx.h"
23983ce116SIgor Romanov #include "sfc_mae_counter.h"
24a8ad8cf8SIvan Malov #include "sfc_tx.h"
259e7fc8b8SRoman Zhukov #include "sfc_kvargs.h"
265a1ae82dSAndrew Rybchenko #include "sfc_tweak.h"
27fdd7719eSIvan Ilchenko #include "sfc_sw_stats.h"
2826706314SViacheslav Galaktionov #include "sfc_switch.h"
293037e6cfSViacheslav Galaktionov #include "sfc_nic_dma.h"
30ba641f20SAndrew Rybchenko 
3152e80b1bSIgor Romanov bool
sfc_repr_supported(const struct sfc_adapter * sa)3252e80b1bSIgor Romanov sfc_repr_supported(const struct sfc_adapter *sa)
3352e80b1bSIgor Romanov {
3452e80b1bSIgor Romanov 	if (!sa->switchdev)
3552e80b1bSIgor Romanov 		return false;
3652e80b1bSIgor Romanov 
3752e80b1bSIgor Romanov 	/*
3852e80b1bSIgor Romanov 	 * Representor proxy should use service lcore on PF's socket
3952e80b1bSIgor Romanov 	 * (sa->socket_id) to be efficient. But the proxy will fall back
4052e80b1bSIgor Romanov 	 * to any socket if it is not possible to get the service core
4152e80b1bSIgor Romanov 	 * on the same socket. Check that at least service core on any
4252e80b1bSIgor Romanov 	 * socket is available.
4352e80b1bSIgor Romanov 	 */
4452e80b1bSIgor Romanov 	if (sfc_get_service_lcore(SOCKET_ID_ANY) == RTE_MAX_LCORE)
4552e80b1bSIgor Romanov 		return false;
4652e80b1bSIgor Romanov 
4752e80b1bSIgor Romanov 	return true;
4852e80b1bSIgor Romanov }
4952e80b1bSIgor Romanov 
50689a5674SIgor Romanov bool
sfc_repr_available(const struct sfc_adapter_shared * sas)51689a5674SIgor Romanov sfc_repr_available(const struct sfc_adapter_shared *sas)
52689a5674SIgor Romanov {
53689a5674SIgor Romanov 	return sas->nb_repr_rxq > 0 && sas->nb_repr_txq > 0;
54689a5674SIgor Romanov }
55689a5674SIgor Romanov 
56ba641f20SAndrew Rybchenko int
sfc_dma_alloc(struct sfc_adapter * sa,const char * name,uint16_t id,efx_nic_dma_addr_type_t addr_type,size_t len,int socket_id,efsys_mem_t * esmp)573037e6cfSViacheslav Galaktionov sfc_dma_alloc(struct sfc_adapter *sa, const char *name, uint16_t id,
583037e6cfSViacheslav Galaktionov 	      efx_nic_dma_addr_type_t addr_type, size_t len, int socket_id,
593037e6cfSViacheslav Galaktionov 	      efsys_mem_t *esmp)
60ba641f20SAndrew Rybchenko {
61ba641f20SAndrew Rybchenko 	const struct rte_memzone *mz;
623037e6cfSViacheslav Galaktionov 	int rc;
63ba641f20SAndrew Rybchenko 
646b9a30d9SFerruh Yigit 	sfc_log_init(sa, "name=%s id=%u len=%zu socket_id=%d",
65ba641f20SAndrew Rybchenko 		     name, id, len, socket_id);
66ba641f20SAndrew Rybchenko 
67ba641f20SAndrew Rybchenko 	mz = rte_eth_dma_zone_reserve(sa->eth_dev, name, id, len,
68ba641f20SAndrew Rybchenko 				      sysconf(_SC_PAGESIZE), socket_id);
69ba641f20SAndrew Rybchenko 	if (mz == NULL) {
70ba641f20SAndrew Rybchenko 		sfc_err(sa, "cannot reserve DMA zone for %s:%u %#x@%d: %s",
71ba641f20SAndrew Rybchenko 			name, (unsigned int)id, (unsigned int)len, socket_id,
72ba641f20SAndrew Rybchenko 			rte_strerror(rte_errno));
73ba641f20SAndrew Rybchenko 		return ENOMEM;
74ba641f20SAndrew Rybchenko 	}
753037e6cfSViacheslav Galaktionov 	if (mz->iova == RTE_BAD_IOVA) {
76ba641f20SAndrew Rybchenko 		(void)rte_memzone_free(mz);
77ba641f20SAndrew Rybchenko 		return EFAULT;
78ba641f20SAndrew Rybchenko 	}
79ba641f20SAndrew Rybchenko 
803037e6cfSViacheslav Galaktionov 	rc = sfc_nic_dma_mz_map(sa, mz, addr_type, &esmp->esm_addr);
813037e6cfSViacheslav Galaktionov 	if (rc != 0) {
823037e6cfSViacheslav Galaktionov 		(void)rte_memzone_free(mz);
833037e6cfSViacheslav Galaktionov 		return rc;
843037e6cfSViacheslav Galaktionov 	}
853037e6cfSViacheslav Galaktionov 
86ba641f20SAndrew Rybchenko 	esmp->esm_mz = mz;
87ba641f20SAndrew Rybchenko 	esmp->esm_base = mz->addr;
88ba641f20SAndrew Rybchenko 
892d98a5a6SAndrew Rybchenko 	sfc_info(sa,
902d98a5a6SAndrew Rybchenko 		 "DMA name=%s id=%u len=%lu socket_id=%d => virt=%p iova=%lx",
912d98a5a6SAndrew Rybchenko 		 name, id, len, socket_id, esmp->esm_base,
922d98a5a6SAndrew Rybchenko 		 (unsigned long)esmp->esm_addr);
932d98a5a6SAndrew Rybchenko 
94ba641f20SAndrew Rybchenko 	return 0;
95ba641f20SAndrew Rybchenko }
96ba641f20SAndrew Rybchenko 
97ba641f20SAndrew Rybchenko void
sfc_dma_free(const struct sfc_adapter * sa,efsys_mem_t * esmp)98ba641f20SAndrew Rybchenko sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp)
99ba641f20SAndrew Rybchenko {
100ba641f20SAndrew Rybchenko 	int rc;
101ba641f20SAndrew Rybchenko 
102ba641f20SAndrew Rybchenko 	sfc_log_init(sa, "name=%s", esmp->esm_mz->name);
103ba641f20SAndrew Rybchenko 
104ba641f20SAndrew Rybchenko 	rc = rte_memzone_free(esmp->esm_mz);
105ba641f20SAndrew Rybchenko 	if (rc != 0)
106ba641f20SAndrew Rybchenko 		sfc_err(sa, "rte_memzone_free(() failed: %d", rc);
107ba641f20SAndrew Rybchenko 
108ba641f20SAndrew Rybchenko 	memset(esmp, 0, sizeof(*esmp));
109ba641f20SAndrew Rybchenko }
110ba641f20SAndrew Rybchenko 
111d23f3a89SAndrew Rybchenko static uint32_t
sfc_phy_cap_from_link_speeds(uint32_t speeds)112d23f3a89SAndrew Rybchenko sfc_phy_cap_from_link_speeds(uint32_t speeds)
113d23f3a89SAndrew Rybchenko {
114d23f3a89SAndrew Rybchenko 	uint32_t phy_caps = 0;
115d23f3a89SAndrew Rybchenko 
116295968d1SFerruh Yigit 	if (~speeds & RTE_ETH_LINK_SPEED_FIXED) {
117d23f3a89SAndrew Rybchenko 		phy_caps |= (1 << EFX_PHY_CAP_AN);
118d23f3a89SAndrew Rybchenko 		/*
119d23f3a89SAndrew Rybchenko 		 * If no speeds are specified in the mask, any supported
120d23f3a89SAndrew Rybchenko 		 * may be negotiated
121d23f3a89SAndrew Rybchenko 		 */
122295968d1SFerruh Yigit 		if (speeds == RTE_ETH_LINK_SPEED_AUTONEG)
123d23f3a89SAndrew Rybchenko 			phy_caps |=
124d23f3a89SAndrew Rybchenko 				(1 << EFX_PHY_CAP_1000FDX) |
125d23f3a89SAndrew Rybchenko 				(1 << EFX_PHY_CAP_10000FDX) |
126f82e33afSAndrew Rybchenko 				(1 << EFX_PHY_CAP_25000FDX) |
127f82e33afSAndrew Rybchenko 				(1 << EFX_PHY_CAP_40000FDX) |
128f82e33afSAndrew Rybchenko 				(1 << EFX_PHY_CAP_50000FDX) |
129f82e33afSAndrew Rybchenko 				(1 << EFX_PHY_CAP_100000FDX);
130d23f3a89SAndrew Rybchenko 	}
131295968d1SFerruh Yigit 	if (speeds & RTE_ETH_LINK_SPEED_1G)
132d23f3a89SAndrew Rybchenko 		phy_caps |= (1 << EFX_PHY_CAP_1000FDX);
133295968d1SFerruh Yigit 	if (speeds & RTE_ETH_LINK_SPEED_10G)
134d23f3a89SAndrew Rybchenko 		phy_caps |= (1 << EFX_PHY_CAP_10000FDX);
135295968d1SFerruh Yigit 	if (speeds & RTE_ETH_LINK_SPEED_25G)
136f82e33afSAndrew Rybchenko 		phy_caps |= (1 << EFX_PHY_CAP_25000FDX);
137295968d1SFerruh Yigit 	if (speeds & RTE_ETH_LINK_SPEED_40G)
138d23f3a89SAndrew Rybchenko 		phy_caps |= (1 << EFX_PHY_CAP_40000FDX);
139295968d1SFerruh Yigit 	if (speeds & RTE_ETH_LINK_SPEED_50G)
140f82e33afSAndrew Rybchenko 		phy_caps |= (1 << EFX_PHY_CAP_50000FDX);
141295968d1SFerruh Yigit 	if (speeds & RTE_ETH_LINK_SPEED_100G)
142f82e33afSAndrew Rybchenko 		phy_caps |= (1 << EFX_PHY_CAP_100000FDX);
143d23f3a89SAndrew Rybchenko 
144d23f3a89SAndrew Rybchenko 	return phy_caps;
145d23f3a89SAndrew Rybchenko }
146d23f3a89SAndrew Rybchenko 
147c7cb2d7aSAndrew Rybchenko /*
148c7cb2d7aSAndrew Rybchenko  * Check requested device level configuration.
149c7cb2d7aSAndrew Rybchenko  * Receive and transmit configuration is checked in corresponding
150c7cb2d7aSAndrew Rybchenko  * modules.
151c7cb2d7aSAndrew Rybchenko  */
152c7cb2d7aSAndrew Rybchenko static int
sfc_check_conf(struct sfc_adapter * sa)153c7cb2d7aSAndrew Rybchenko sfc_check_conf(struct sfc_adapter *sa)
154c7cb2d7aSAndrew Rybchenko {
155c7cb2d7aSAndrew Rybchenko 	const struct rte_eth_conf *conf = &sa->eth_dev->data->dev_conf;
156c7cb2d7aSAndrew Rybchenko 	int rc = 0;
157c7cb2d7aSAndrew Rybchenko 
158d23f3a89SAndrew Rybchenko 	sa->port.phy_adv_cap =
159d23f3a89SAndrew Rybchenko 		sfc_phy_cap_from_link_speeds(conf->link_speeds) &
160d23f3a89SAndrew Rybchenko 		sa->port.phy_adv_cap_mask;
161d23f3a89SAndrew Rybchenko 	if ((sa->port.phy_adv_cap & ~(1 << EFX_PHY_CAP_AN)) == 0) {
162d23f3a89SAndrew Rybchenko 		sfc_err(sa, "No link speeds from mask %#x are supported",
163d23f3a89SAndrew Rybchenko 			conf->link_speeds);
164c7cb2d7aSAndrew Rybchenko 		rc = EINVAL;
165c7cb2d7aSAndrew Rybchenko 	}
166c7cb2d7aSAndrew Rybchenko 
167b16cf4b2SAndrew Rybchenko #if !EFSYS_OPT_LOOPBACK
168c7cb2d7aSAndrew Rybchenko 	if (conf->lpbk_mode != 0) {
169c7cb2d7aSAndrew Rybchenko 		sfc_err(sa, "Loopback not supported");
170c7cb2d7aSAndrew Rybchenko 		rc = EINVAL;
171c7cb2d7aSAndrew Rybchenko 	}
172b16cf4b2SAndrew Rybchenko #endif
173c7cb2d7aSAndrew Rybchenko 
174c7cb2d7aSAndrew Rybchenko 	if (conf->dcb_capability_en != 0) {
175c7cb2d7aSAndrew Rybchenko 		sfc_err(sa, "Priority-based flow control not supported");
176c7cb2d7aSAndrew Rybchenko 		rc = EINVAL;
177c7cb2d7aSAndrew Rybchenko 	}
178c7cb2d7aSAndrew Rybchenko 
1793b809c27SAndrew Rybchenko 	if ((conf->intr_conf.lsc != 0) &&
1803b809c27SAndrew Rybchenko 	    (sa->intr.type != EFX_INTR_LINE) &&
1813b809c27SAndrew Rybchenko 	    (sa->intr.type != EFX_INTR_MESSAGE)) {
182c7cb2d7aSAndrew Rybchenko 		sfc_err(sa, "Link status change interrupt not supported");
183c7cb2d7aSAndrew Rybchenko 		rc = EINVAL;
184c7cb2d7aSAndrew Rybchenko 	}
185c7cb2d7aSAndrew Rybchenko 
1864279b54eSGeorgiy Levashov 	if (conf->intr_conf.rxq != 0 &&
1874279b54eSGeorgiy Levashov 	    (sa->priv.dp_rx->features & SFC_DP_RX_FEAT_INTR) == 0) {
188c7cb2d7aSAndrew Rybchenko 		sfc_err(sa, "Receive queue interrupt not supported");
189c7cb2d7aSAndrew Rybchenko 		rc = EINVAL;
190c7cb2d7aSAndrew Rybchenko 	}
191c7cb2d7aSAndrew Rybchenko 
192c7cb2d7aSAndrew Rybchenko 	return rc;
193c7cb2d7aSAndrew Rybchenko }
194c7cb2d7aSAndrew Rybchenko 
19591831d40SAndrew Rybchenko /*
19691831d40SAndrew Rybchenko  * Find out maximum number of receive and transmit queues which could be
19791831d40SAndrew Rybchenko  * advertised.
19891831d40SAndrew Rybchenko  *
19991831d40SAndrew Rybchenko  * NIC is kept initialized on success to allow other modules acquire
20091831d40SAndrew Rybchenko  * defaults and capabilities.
20191831d40SAndrew Rybchenko  */
20291831d40SAndrew Rybchenko static int
sfc_estimate_resource_limits(struct sfc_adapter * sa)20391831d40SAndrew Rybchenko sfc_estimate_resource_limits(struct sfc_adapter *sa)
20491831d40SAndrew Rybchenko {
20591831d40SAndrew Rybchenko 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
206983ce116SIgor Romanov 	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
20791831d40SAndrew Rybchenko 	efx_drv_limits_t limits;
20891831d40SAndrew Rybchenko 	int rc;
20991831d40SAndrew Rybchenko 	uint32_t evq_allocated;
21091831d40SAndrew Rybchenko 	uint32_t rxq_allocated;
21191831d40SAndrew Rybchenko 	uint32_t txq_allocated;
21291831d40SAndrew Rybchenko 
21391831d40SAndrew Rybchenko 	memset(&limits, 0, sizeof(limits));
21491831d40SAndrew Rybchenko 
21591831d40SAndrew Rybchenko 	/* Request at least one Rx and Tx queue */
21691831d40SAndrew Rybchenko 	limits.edl_min_rxq_count = 1;
21791831d40SAndrew Rybchenko 	limits.edl_min_txq_count = 1;
21891831d40SAndrew Rybchenko 	/* Management event queue plus event queue for each Tx and Rx queue */
21991831d40SAndrew Rybchenko 	limits.edl_min_evq_count =
22091831d40SAndrew Rybchenko 		1 + limits.edl_min_rxq_count + limits.edl_min_txq_count;
22191831d40SAndrew Rybchenko 
22291831d40SAndrew Rybchenko 	/* Divide by number of functions to guarantee that all functions
22391831d40SAndrew Rybchenko 	 * will get promised resources
22491831d40SAndrew Rybchenko 	 */
22591831d40SAndrew Rybchenko 	/* FIXME Divide by number of functions (not 2) below */
22691831d40SAndrew Rybchenko 	limits.edl_max_evq_count = encp->enc_evq_limit / 2;
22791831d40SAndrew Rybchenko 	SFC_ASSERT(limits.edl_max_evq_count >= limits.edl_min_rxq_count);
22891831d40SAndrew Rybchenko 
22991831d40SAndrew Rybchenko 	/* Split equally between receive and transmit */
23091831d40SAndrew Rybchenko 	limits.edl_max_rxq_count =
23191831d40SAndrew Rybchenko 		MIN(encp->enc_rxq_limit, (limits.edl_max_evq_count - 1) / 2);
23291831d40SAndrew Rybchenko 	SFC_ASSERT(limits.edl_max_rxq_count >= limits.edl_min_rxq_count);
23391831d40SAndrew Rybchenko 
23491831d40SAndrew Rybchenko 	limits.edl_max_txq_count =
23591831d40SAndrew Rybchenko 		MIN(encp->enc_txq_limit,
23691831d40SAndrew Rybchenko 		    limits.edl_max_evq_count - 1 - limits.edl_max_rxq_count);
2371f014258SIvan Malov 
2384f936666SIvan Malov 	if (sa->tso && encp->enc_fw_assisted_tso_v2_enabled)
2391f014258SIvan Malov 		limits.edl_max_txq_count =
2401f014258SIvan Malov 			MIN(limits.edl_max_txq_count,
2411f014258SIvan Malov 			    encp->enc_fw_assisted_tso_v2_n_contexts /
2421f014258SIvan Malov 			    encp->enc_hw_pf_count);
2431f014258SIvan Malov 
24491831d40SAndrew Rybchenko 	SFC_ASSERT(limits.edl_max_txq_count >= limits.edl_min_rxq_count);
24591831d40SAndrew Rybchenko 
24691831d40SAndrew Rybchenko 	/* Configure the minimum required resources needed for the
24791831d40SAndrew Rybchenko 	 * driver to operate, and the maximum desired resources that the
24891831d40SAndrew Rybchenko 	 * driver is capable of using.
24991831d40SAndrew Rybchenko 	 */
25091831d40SAndrew Rybchenko 	efx_nic_set_drv_limits(sa->nic, &limits);
25191831d40SAndrew Rybchenko 
25291831d40SAndrew Rybchenko 	sfc_log_init(sa, "init nic");
25391831d40SAndrew Rybchenko 	rc = efx_nic_init(sa->nic);
25491831d40SAndrew Rybchenko 	if (rc != 0)
25591831d40SAndrew Rybchenko 		goto fail_nic_init;
25691831d40SAndrew Rybchenko 
25791831d40SAndrew Rybchenko 	/* Find resource dimensions assigned by firmware to this function */
25891831d40SAndrew Rybchenko 	rc = efx_nic_get_vi_pool(sa->nic, &evq_allocated, &rxq_allocated,
25991831d40SAndrew Rybchenko 				 &txq_allocated);
26091831d40SAndrew Rybchenko 	if (rc != 0)
26191831d40SAndrew Rybchenko 		goto fail_get_vi_pool;
26291831d40SAndrew Rybchenko 
26391831d40SAndrew Rybchenko 	/* It still may allocate more than maximum, ensure limit */
26491831d40SAndrew Rybchenko 	evq_allocated = MIN(evq_allocated, limits.edl_max_evq_count);
26591831d40SAndrew Rybchenko 	rxq_allocated = MIN(rxq_allocated, limits.edl_max_rxq_count);
26691831d40SAndrew Rybchenko 	txq_allocated = MIN(txq_allocated, limits.edl_max_txq_count);
26791831d40SAndrew Rybchenko 
268983ce116SIgor Romanov 	/*
269983ce116SIgor Romanov 	 * Subtract management EVQ not used for traffic
270983ce116SIgor Romanov 	 * The resource allocation strategy is as follows:
271983ce116SIgor Romanov 	 * - one EVQ for management
272983ce116SIgor Romanov 	 * - one EVQ for each ethdev RXQ
273983ce116SIgor Romanov 	 * - one EVQ for each ethdev TXQ
274983ce116SIgor Romanov 	 * - one EVQ and one RXQ for optional MAE counters.
275983ce116SIgor Romanov 	 */
276983ce116SIgor Romanov 	if (evq_allocated == 0) {
277983ce116SIgor Romanov 		sfc_err(sa, "count of allocated EvQ is 0");
278983ce116SIgor Romanov 		rc = ENOMEM;
279983ce116SIgor Romanov 		goto fail_allocate_evq;
280983ce116SIgor Romanov 	}
28191831d40SAndrew Rybchenko 	evq_allocated--;
28291831d40SAndrew Rybchenko 
283983ce116SIgor Romanov 	/*
284983ce116SIgor Romanov 	 * Reserve absolutely required minimum.
285983ce116SIgor Romanov 	 * Right now we use separate EVQ for Rx and Tx.
286983ce116SIgor Romanov 	 */
287983ce116SIgor Romanov 	if (rxq_allocated > 0 && evq_allocated > 0) {
288983ce116SIgor Romanov 		sa->rxq_max = 1;
289983ce116SIgor Romanov 		rxq_allocated--;
290983ce116SIgor Romanov 		evq_allocated--;
291983ce116SIgor Romanov 	}
292983ce116SIgor Romanov 	if (txq_allocated > 0 && evq_allocated > 0) {
293983ce116SIgor Romanov 		sa->txq_max = 1;
294983ce116SIgor Romanov 		txq_allocated--;
295983ce116SIgor Romanov 		evq_allocated--;
296983ce116SIgor Romanov 	}
297983ce116SIgor Romanov 
298983ce116SIgor Romanov 	if (sfc_mae_counter_rxq_required(sa) &&
299983ce116SIgor Romanov 	    rxq_allocated > 0 && evq_allocated > 0) {
300983ce116SIgor Romanov 		rxq_allocated--;
301983ce116SIgor Romanov 		evq_allocated--;
302983ce116SIgor Romanov 		sas->counters_rxq_allocated = true;
303983ce116SIgor Romanov 	} else {
304983ce116SIgor Romanov 		sas->counters_rxq_allocated = false;
305983ce116SIgor Romanov 	}
306983ce116SIgor Romanov 
307689a5674SIgor Romanov 	if (sfc_repr_supported(sa) &&
308689a5674SIgor Romanov 	    evq_allocated >= SFC_REPR_PROXY_NB_RXQ_MIN +
309689a5674SIgor Romanov 	    SFC_REPR_PROXY_NB_TXQ_MIN &&
310689a5674SIgor Romanov 	    rxq_allocated >= SFC_REPR_PROXY_NB_RXQ_MIN &&
311689a5674SIgor Romanov 	    txq_allocated >= SFC_REPR_PROXY_NB_TXQ_MIN) {
312689a5674SIgor Romanov 		unsigned int extra;
313689a5674SIgor Romanov 
314689a5674SIgor Romanov 		txq_allocated -= SFC_REPR_PROXY_NB_TXQ_MIN;
315689a5674SIgor Romanov 		rxq_allocated -= SFC_REPR_PROXY_NB_RXQ_MIN;
316689a5674SIgor Romanov 		evq_allocated -= SFC_REPR_PROXY_NB_RXQ_MIN +
317689a5674SIgor Romanov 			SFC_REPR_PROXY_NB_TXQ_MIN;
318689a5674SIgor Romanov 
319689a5674SIgor Romanov 		sas->nb_repr_rxq = SFC_REPR_PROXY_NB_RXQ_MIN;
320689a5674SIgor Romanov 		sas->nb_repr_txq = SFC_REPR_PROXY_NB_TXQ_MIN;
321689a5674SIgor Romanov 
322689a5674SIgor Romanov 		/* Allocate extra representor RxQs up to the maximum */
323689a5674SIgor Romanov 		extra = MIN(evq_allocated, rxq_allocated);
324689a5674SIgor Romanov 		extra = MIN(extra,
325689a5674SIgor Romanov 			    SFC_REPR_PROXY_NB_RXQ_MAX - sas->nb_repr_rxq);
326689a5674SIgor Romanov 		evq_allocated -= extra;
327689a5674SIgor Romanov 		rxq_allocated -= extra;
328689a5674SIgor Romanov 		sas->nb_repr_rxq += extra;
329689a5674SIgor Romanov 
330689a5674SIgor Romanov 		/* Allocate extra representor TxQs up to the maximum */
331689a5674SIgor Romanov 		extra = MIN(evq_allocated, txq_allocated);
332689a5674SIgor Romanov 		extra = MIN(extra,
333689a5674SIgor Romanov 			    SFC_REPR_PROXY_NB_TXQ_MAX - sas->nb_repr_txq);
334689a5674SIgor Romanov 		evq_allocated -= extra;
335689a5674SIgor Romanov 		txq_allocated -= extra;
336689a5674SIgor Romanov 		sas->nb_repr_txq += extra;
337689a5674SIgor Romanov 	} else {
338689a5674SIgor Romanov 		sas->nb_repr_rxq = 0;
339689a5674SIgor Romanov 		sas->nb_repr_txq = 0;
340689a5674SIgor Romanov 	}
341689a5674SIgor Romanov 
342983ce116SIgor Romanov 	/* Add remaining allocated queues */
343983ce116SIgor Romanov 	sa->rxq_max += MIN(rxq_allocated, evq_allocated / 2);
344983ce116SIgor Romanov 	sa->txq_max += MIN(txq_allocated, evq_allocated - sa->rxq_max);
34591831d40SAndrew Rybchenko 
34691831d40SAndrew Rybchenko 	/* Keep NIC initialized */
34791831d40SAndrew Rybchenko 	return 0;
34891831d40SAndrew Rybchenko 
349983ce116SIgor Romanov fail_allocate_evq:
35091831d40SAndrew Rybchenko fail_get_vi_pool:
35191831d40SAndrew Rybchenko 	efx_nic_fini(sa->nic);
352ba77f3e1SIgor Romanov fail_nic_init:
35391831d40SAndrew Rybchenko 	return rc;
35491831d40SAndrew Rybchenko }
35591831d40SAndrew Rybchenko 
35691831d40SAndrew Rybchenko static int
sfc_set_drv_limits(struct sfc_adapter * sa)35791831d40SAndrew Rybchenko sfc_set_drv_limits(struct sfc_adapter *sa)
35891831d40SAndrew Rybchenko {
359689a5674SIgor Romanov 	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
36091831d40SAndrew Rybchenko 	const struct rte_eth_dev_data *data = sa->eth_dev->data;
361689a5674SIgor Romanov 	uint32_t rxq_reserved = sfc_nb_reserved_rxq(sas);
362689a5674SIgor Romanov 	uint32_t txq_reserved = sfc_nb_txq_reserved(sas);
36391831d40SAndrew Rybchenko 	efx_drv_limits_t lim;
36491831d40SAndrew Rybchenko 
36591831d40SAndrew Rybchenko 	memset(&lim, 0, sizeof(lim));
36691831d40SAndrew Rybchenko 
367983ce116SIgor Romanov 	/*
368983ce116SIgor Romanov 	 * Limits are strict since take into account initial estimation.
3697be78d02SJosh Soref 	 * Resource allocation strategy is described in
370983ce116SIgor Romanov 	 * sfc_estimate_resource_limits().
371983ce116SIgor Romanov 	 */
37291831d40SAndrew Rybchenko 	lim.edl_min_evq_count = lim.edl_max_evq_count =
373689a5674SIgor Romanov 		1 + data->nb_rx_queues + data->nb_tx_queues +
374689a5674SIgor Romanov 		rxq_reserved + txq_reserved;
375983ce116SIgor Romanov 	lim.edl_min_rxq_count = lim.edl_max_rxq_count =
376983ce116SIgor Romanov 		data->nb_rx_queues + rxq_reserved;
377689a5674SIgor Romanov 	lim.edl_min_txq_count = lim.edl_max_txq_count =
378689a5674SIgor Romanov 		data->nb_tx_queues + txq_reserved;
37991831d40SAndrew Rybchenko 
38091831d40SAndrew Rybchenko 	return efx_nic_set_drv_limits(sa->nic, &lim);
38191831d40SAndrew Rybchenko }
38291831d40SAndrew Rybchenko 
383b1ffa211SAndrew Rybchenko static int
sfc_set_fw_subvariant(struct sfc_adapter * sa)3842f44752cSAndrew Rybchenko sfc_set_fw_subvariant(struct sfc_adapter *sa)
3852f44752cSAndrew Rybchenko {
386113a14a6SAndrew Rybchenko 	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
3872f44752cSAndrew Rybchenko 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
3882f44752cSAndrew Rybchenko 	uint64_t tx_offloads = sa->eth_dev->data->dev_conf.txmode.offloads;
3892f44752cSAndrew Rybchenko 	unsigned int txq_index;
3902f44752cSAndrew Rybchenko 	efx_nic_fw_subvariant_t req_fw_subvariant;
3912f44752cSAndrew Rybchenko 	efx_nic_fw_subvariant_t cur_fw_subvariant;
3922f44752cSAndrew Rybchenko 	int rc;
3932f44752cSAndrew Rybchenko 
3942f44752cSAndrew Rybchenko 	if (!encp->enc_fw_subvariant_no_tx_csum_supported) {
3952f44752cSAndrew Rybchenko 		sfc_info(sa, "no-Tx-checksum subvariant not supported");
3962f44752cSAndrew Rybchenko 		return 0;
3972f44752cSAndrew Rybchenko 	}
3982f44752cSAndrew Rybchenko 
399113a14a6SAndrew Rybchenko 	for (txq_index = 0; txq_index < sas->txq_count; ++txq_index) {
400113a14a6SAndrew Rybchenko 		struct sfc_txq_info *txq_info = &sas->txq_info[txq_index];
4012f44752cSAndrew Rybchenko 
40229e4237dSAndrew Rybchenko 		if (txq_info->state & SFC_TXQ_INITIALIZED)
403b57870f2SAndrew Rybchenko 			tx_offloads |= txq_info->offloads;
4042f44752cSAndrew Rybchenko 	}
4052f44752cSAndrew Rybchenko 
406295968d1SFerruh Yigit 	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
407295968d1SFerruh Yigit 			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
408295968d1SFerruh Yigit 			   RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
409295968d1SFerruh Yigit 			   RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM))
4102f44752cSAndrew Rybchenko 		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_DEFAULT;
4112f44752cSAndrew Rybchenko 	else
4122f44752cSAndrew Rybchenko 		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM;
4132f44752cSAndrew Rybchenko 
4142f44752cSAndrew Rybchenko 	rc = efx_nic_get_fw_subvariant(sa->nic, &cur_fw_subvariant);
4152f44752cSAndrew Rybchenko 	if (rc != 0) {
4162f44752cSAndrew Rybchenko 		sfc_err(sa, "failed to get FW subvariant: %d", rc);
4172f44752cSAndrew Rybchenko 		return rc;
4182f44752cSAndrew Rybchenko 	}
4192f44752cSAndrew Rybchenko 	sfc_info(sa, "FW subvariant is %u vs required %u",
4202f44752cSAndrew Rybchenko 		 cur_fw_subvariant, req_fw_subvariant);
4212f44752cSAndrew Rybchenko 
4222f44752cSAndrew Rybchenko 	if (cur_fw_subvariant == req_fw_subvariant)
4232f44752cSAndrew Rybchenko 		return 0;
4242f44752cSAndrew Rybchenko 
4252f44752cSAndrew Rybchenko 	rc = efx_nic_set_fw_subvariant(sa->nic, req_fw_subvariant);
4262f44752cSAndrew Rybchenko 	if (rc != 0) {
4272f44752cSAndrew Rybchenko 		sfc_err(sa, "failed to set FW subvariant %u: %d",
4282f44752cSAndrew Rybchenko 			req_fw_subvariant, rc);
4292f44752cSAndrew Rybchenko 		return rc;
4302f44752cSAndrew Rybchenko 	}
4312f44752cSAndrew Rybchenko 	sfc_info(sa, "FW subvariant set to %u", req_fw_subvariant);
4322f44752cSAndrew Rybchenko 
4332f44752cSAndrew Rybchenko 	return 0;
4342f44752cSAndrew Rybchenko }
4352f44752cSAndrew Rybchenko 
4362f44752cSAndrew Rybchenko static int
sfc_try_start(struct sfc_adapter * sa)437b1ffa211SAndrew Rybchenko sfc_try_start(struct sfc_adapter *sa)
43893fcf09bSAndrew Rybchenko {
43936c35355SAndrew Rybchenko 	const efx_nic_cfg_t *encp;
44093fcf09bSAndrew Rybchenko 	int rc;
44193fcf09bSAndrew Rybchenko 
44293fcf09bSAndrew Rybchenko 	sfc_log_init(sa, "entry");
44393fcf09bSAndrew Rybchenko 
44493fcf09bSAndrew Rybchenko 	SFC_ASSERT(sfc_adapter_is_locked(sa));
445ac478689SIgor Romanov 	SFC_ASSERT(sa->state == SFC_ETHDEV_STARTING);
44693fcf09bSAndrew Rybchenko 
4472f44752cSAndrew Rybchenko 	sfc_log_init(sa, "set FW subvariant");
4482f44752cSAndrew Rybchenko 	rc = sfc_set_fw_subvariant(sa);
4492f44752cSAndrew Rybchenko 	if (rc != 0)
4502f44752cSAndrew Rybchenko 		goto fail_set_fw_subvariant;
4512f44752cSAndrew Rybchenko 
45291831d40SAndrew Rybchenko 	sfc_log_init(sa, "set resource limits");
45391831d40SAndrew Rybchenko 	rc = sfc_set_drv_limits(sa);
45491831d40SAndrew Rybchenko 	if (rc != 0)
45591831d40SAndrew Rybchenko 		goto fail_set_drv_limits;
45691831d40SAndrew Rybchenko 
45793fcf09bSAndrew Rybchenko 	sfc_log_init(sa, "init nic");
45893fcf09bSAndrew Rybchenko 	rc = efx_nic_init(sa->nic);
45993fcf09bSAndrew Rybchenko 	if (rc != 0)
46093fcf09bSAndrew Rybchenko 		goto fail_nic_init;
46193fcf09bSAndrew Rybchenko 
4623037e6cfSViacheslav Galaktionov 	sfc_log_init(sa, "reconfigure NIC DMA");
4633037e6cfSViacheslav Galaktionov 	rc = efx_nic_dma_reconfigure(sa->nic);
4643037e6cfSViacheslav Galaktionov 	if (rc != 0) {
4653037e6cfSViacheslav Galaktionov 		sfc_err(sa, "cannot reconfigure NIC DMA: %s", rte_strerror(rc));
4663037e6cfSViacheslav Galaktionov 		goto fail_nic_dma_reconfigure;
4673037e6cfSViacheslav Galaktionov 	}
4683037e6cfSViacheslav Galaktionov 
46936c35355SAndrew Rybchenko 	encp = efx_nic_cfg_get(sa->nic);
4702646d42fSAndrew Rybchenko 
4712646d42fSAndrew Rybchenko 	/*
4722646d42fSAndrew Rybchenko 	 * Refresh (since it may change on NIC reset/restart) a copy of
4732646d42fSAndrew Rybchenko 	 * supported tunnel encapsulations in shared memory to be used
4742646d42fSAndrew Rybchenko 	 * on supported Rx packet type classes get.
4752646d42fSAndrew Rybchenko 	 */
4762646d42fSAndrew Rybchenko 	sa->priv.shared->tunnel_encaps =
4772646d42fSAndrew Rybchenko 		encp->enc_tunnel_encapsulations_supported;
4782646d42fSAndrew Rybchenko 
47936c35355SAndrew Rybchenko 	if (encp->enc_tunnel_encapsulations_supported != 0) {
48036c35355SAndrew Rybchenko 		sfc_log_init(sa, "apply tunnel config");
48136c35355SAndrew Rybchenko 		rc = efx_tunnel_reconfigure(sa->nic);
48236c35355SAndrew Rybchenko 		if (rc != 0)
48336c35355SAndrew Rybchenko 			goto fail_tunnel_reconfigure;
48436c35355SAndrew Rybchenko 	}
48536c35355SAndrew Rybchenko 
48606bc1977SAndrew Rybchenko 	rc = sfc_intr_start(sa);
48706bc1977SAndrew Rybchenko 	if (rc != 0)
48806bc1977SAndrew Rybchenko 		goto fail_intr_start;
48906bc1977SAndrew Rybchenko 
49058294ee6SAndrew Rybchenko 	rc = sfc_ev_start(sa);
49158294ee6SAndrew Rybchenko 	if (rc != 0)
49258294ee6SAndrew Rybchenko 		goto fail_ev_start;
49358294ee6SAndrew Rybchenko 
494bc712f1cSDenis Pryazhennikov 	rc = sfc_tbls_start(sa);
495bc712f1cSDenis Pryazhennikov 	if (rc != 0)
496bc712f1cSDenis Pryazhennikov 		goto fail_tbls_start;
497bc712f1cSDenis Pryazhennikov 
49803ed2119SAndrew Rybchenko 	rc = sfc_port_start(sa);
49903ed2119SAndrew Rybchenko 	if (rc != 0)
50003ed2119SAndrew Rybchenko 		goto fail_port_start;
50103ed2119SAndrew Rybchenko 
50228944ac0SAndrew Rybchenko 	rc = sfc_rx_start(sa);
50328944ac0SAndrew Rybchenko 	if (rc != 0)
50428944ac0SAndrew Rybchenko 		goto fail_rx_start;
50528944ac0SAndrew Rybchenko 
506fed9aeb4SIvan Malov 	rc = sfc_tx_start(sa);
507fed9aeb4SIvan Malov 	if (rc != 0)
508fed9aeb4SIvan Malov 		goto fail_tx_start;
509fed9aeb4SIvan Malov 
510a9825ccfSRoman Zhukov 	rc = sfc_flow_start(sa);
511a9825ccfSRoman Zhukov 	if (rc != 0)
512a9825ccfSRoman Zhukov 		goto fail_flows_insert;
513a9825ccfSRoman Zhukov 
51452e80b1bSIgor Romanov 	rc = sfc_repr_proxy_start(sa);
51552e80b1bSIgor Romanov 	if (rc != 0)
51652e80b1bSIgor Romanov 		goto fail_repr_proxy_start;
51752e80b1bSIgor Romanov 
51893fcf09bSAndrew Rybchenko 	sfc_log_init(sa, "done");
51993fcf09bSAndrew Rybchenko 	return 0;
52093fcf09bSAndrew Rybchenko 
52152e80b1bSIgor Romanov fail_repr_proxy_start:
52252e80b1bSIgor Romanov 	sfc_flow_stop(sa);
52352e80b1bSIgor Romanov 
524a9825ccfSRoman Zhukov fail_flows_insert:
525a9825ccfSRoman Zhukov 	sfc_tx_stop(sa);
526a9825ccfSRoman Zhukov 
527fed9aeb4SIvan Malov fail_tx_start:
528fed9aeb4SIvan Malov 	sfc_rx_stop(sa);
529fed9aeb4SIvan Malov 
53028944ac0SAndrew Rybchenko fail_rx_start:
53128944ac0SAndrew Rybchenko 	sfc_port_stop(sa);
53228944ac0SAndrew Rybchenko 
533bc712f1cSDenis Pryazhennikov fail_tbls_start:
53403ed2119SAndrew Rybchenko 	sfc_ev_stop(sa);
53503ed2119SAndrew Rybchenko 
536bc712f1cSDenis Pryazhennikov fail_port_start:
537bc712f1cSDenis Pryazhennikov 	sfc_tbls_stop(sa);
538bc712f1cSDenis Pryazhennikov 
53958294ee6SAndrew Rybchenko fail_ev_start:
54058294ee6SAndrew Rybchenko 	sfc_intr_stop(sa);
54158294ee6SAndrew Rybchenko 
54206bc1977SAndrew Rybchenko fail_intr_start:
54336c35355SAndrew Rybchenko fail_tunnel_reconfigure:
5443037e6cfSViacheslav Galaktionov fail_nic_dma_reconfigure:
54506bc1977SAndrew Rybchenko 	efx_nic_fini(sa->nic);
54606bc1977SAndrew Rybchenko 
54793fcf09bSAndrew Rybchenko fail_nic_init:
54891831d40SAndrew Rybchenko fail_set_drv_limits:
5492f44752cSAndrew Rybchenko fail_set_fw_subvariant:
550b1ffa211SAndrew Rybchenko 	sfc_log_init(sa, "failed %d", rc);
551b1ffa211SAndrew Rybchenko 	return rc;
552b1ffa211SAndrew Rybchenko }
553b1ffa211SAndrew Rybchenko 
554b1ffa211SAndrew Rybchenko int
sfc_start(struct sfc_adapter * sa)555b1ffa211SAndrew Rybchenko sfc_start(struct sfc_adapter *sa)
556b1ffa211SAndrew Rybchenko {
557b1ffa211SAndrew Rybchenko 	unsigned int start_tries = 3;
558b1ffa211SAndrew Rybchenko 	int rc;
559b1ffa211SAndrew Rybchenko 
560b1ffa211SAndrew Rybchenko 	sfc_log_init(sa, "entry");
561b1ffa211SAndrew Rybchenko 
562b1ffa211SAndrew Rybchenko 	SFC_ASSERT(sfc_adapter_is_locked(sa));
563b1ffa211SAndrew Rybchenko 
564b1ffa211SAndrew Rybchenko 	switch (sa->state) {
565ac478689SIgor Romanov 	case SFC_ETHDEV_CONFIGURED:
566b1ffa211SAndrew Rybchenko 		break;
567ac478689SIgor Romanov 	case SFC_ETHDEV_STARTED:
56891d16276SIvan Malov 		sfc_notice(sa, "already started");
569b1ffa211SAndrew Rybchenko 		return 0;
570b1ffa211SAndrew Rybchenko 	default:
571b1ffa211SAndrew Rybchenko 		rc = EINVAL;
572b1ffa211SAndrew Rybchenko 		goto fail_bad_state;
573b1ffa211SAndrew Rybchenko 	}
574b1ffa211SAndrew Rybchenko 
575ac478689SIgor Romanov 	sa->state = SFC_ETHDEV_STARTING;
576b1ffa211SAndrew Rybchenko 
577dd45b880SAndrew Rybchenko 	rc = 0;
578b1ffa211SAndrew Rybchenko 	do {
579dd45b880SAndrew Rybchenko 		/*
580dd45b880SAndrew Rybchenko 		 * FIXME Try to recreate vSwitch on start retry.
581dd45b880SAndrew Rybchenko 		 * vSwitch is absent after MC reboot like events and
582dd45b880SAndrew Rybchenko 		 * we should recreate it. May be we need proper
583dd45b880SAndrew Rybchenko 		 * indication instead of guessing.
584dd45b880SAndrew Rybchenko 		 */
585dd45b880SAndrew Rybchenko 		if (rc != 0) {
586dd45b880SAndrew Rybchenko 			sfc_sriov_vswitch_destroy(sa);
587dd45b880SAndrew Rybchenko 			rc = sfc_sriov_vswitch_create(sa);
588dd45b880SAndrew Rybchenko 			if (rc != 0)
589dd45b880SAndrew Rybchenko 				goto fail_sriov_vswitch_create;
590dd45b880SAndrew Rybchenko 		}
591b1ffa211SAndrew Rybchenko 		rc = sfc_try_start(sa);
592b1ffa211SAndrew Rybchenko 	} while ((--start_tries > 0) &&
593b1ffa211SAndrew Rybchenko 		 (rc == EIO || rc == EAGAIN || rc == ENOENT || rc == EINVAL));
594b1ffa211SAndrew Rybchenko 
595b1ffa211SAndrew Rybchenko 	if (rc != 0)
596b1ffa211SAndrew Rybchenko 		goto fail_try_start;
597b1ffa211SAndrew Rybchenko 
598ac478689SIgor Romanov 	sa->state = SFC_ETHDEV_STARTED;
599b1ffa211SAndrew Rybchenko 	sfc_log_init(sa, "done");
600b1ffa211SAndrew Rybchenko 	return 0;
601b1ffa211SAndrew Rybchenko 
602b1ffa211SAndrew Rybchenko fail_try_start:
603dd45b880SAndrew Rybchenko fail_sriov_vswitch_create:
604ac478689SIgor Romanov 	sa->state = SFC_ETHDEV_CONFIGURED;
60593fcf09bSAndrew Rybchenko fail_bad_state:
60693fcf09bSAndrew Rybchenko 	sfc_log_init(sa, "failed %d", rc);
60793fcf09bSAndrew Rybchenko 	return rc;
60893fcf09bSAndrew Rybchenko }
60993fcf09bSAndrew Rybchenko 
61093fcf09bSAndrew Rybchenko void
sfc_stop(struct sfc_adapter * sa)61193fcf09bSAndrew Rybchenko sfc_stop(struct sfc_adapter *sa)
61293fcf09bSAndrew Rybchenko {
61393fcf09bSAndrew Rybchenko 	sfc_log_init(sa, "entry");
61493fcf09bSAndrew Rybchenko 
61593fcf09bSAndrew Rybchenko 	SFC_ASSERT(sfc_adapter_is_locked(sa));
61693fcf09bSAndrew Rybchenko 
61793fcf09bSAndrew Rybchenko 	switch (sa->state) {
618ac478689SIgor Romanov 	case SFC_ETHDEV_STARTED:
61993fcf09bSAndrew Rybchenko 		break;
620ac478689SIgor Romanov 	case SFC_ETHDEV_CONFIGURED:
62191d16276SIvan Malov 		sfc_notice(sa, "already stopped");
62293fcf09bSAndrew Rybchenko 		return;
62393fcf09bSAndrew Rybchenko 	default:
62493fcf09bSAndrew Rybchenko 		sfc_err(sa, "stop in unexpected state %u", sa->state);
62593fcf09bSAndrew Rybchenko 		SFC_ASSERT(B_FALSE);
62693fcf09bSAndrew Rybchenko 		return;
62793fcf09bSAndrew Rybchenko 	}
62893fcf09bSAndrew Rybchenko 
629ac478689SIgor Romanov 	sa->state = SFC_ETHDEV_STOPPING;
63093fcf09bSAndrew Rybchenko 
63152e80b1bSIgor Romanov 	sfc_repr_proxy_stop(sa);
632a9825ccfSRoman Zhukov 	sfc_flow_stop(sa);
633fed9aeb4SIvan Malov 	sfc_tx_stop(sa);
63428944ac0SAndrew Rybchenko 	sfc_rx_stop(sa);
63503ed2119SAndrew Rybchenko 	sfc_port_stop(sa);
636bc712f1cSDenis Pryazhennikov 	sfc_tbls_stop(sa);
63758294ee6SAndrew Rybchenko 	sfc_ev_stop(sa);
63806bc1977SAndrew Rybchenko 	sfc_intr_stop(sa);
63993fcf09bSAndrew Rybchenko 	efx_nic_fini(sa->nic);
64093fcf09bSAndrew Rybchenko 
641ac478689SIgor Romanov 	sa->state = SFC_ETHDEV_CONFIGURED;
64293fcf09bSAndrew Rybchenko 	sfc_log_init(sa, "done");
64393fcf09bSAndrew Rybchenko }
64493fcf09bSAndrew Rybchenko 
645e77f9f19SAndrew Rybchenko static int
sfc_restart(struct sfc_adapter * sa)646e77f9f19SAndrew Rybchenko sfc_restart(struct sfc_adapter *sa)
647e77f9f19SAndrew Rybchenko {
648e77f9f19SAndrew Rybchenko 	int rc;
649e77f9f19SAndrew Rybchenko 
650e77f9f19SAndrew Rybchenko 	SFC_ASSERT(sfc_adapter_is_locked(sa));
651e77f9f19SAndrew Rybchenko 
652ac478689SIgor Romanov 	if (sa->state != SFC_ETHDEV_STARTED)
653e77f9f19SAndrew Rybchenko 		return EINVAL;
654e77f9f19SAndrew Rybchenko 
655e77f9f19SAndrew Rybchenko 	sfc_stop(sa);
656e77f9f19SAndrew Rybchenko 
657e77f9f19SAndrew Rybchenko 	rc = sfc_start(sa);
658e77f9f19SAndrew Rybchenko 	if (rc != 0)
659e77f9f19SAndrew Rybchenko 		sfc_err(sa, "restart failed");
660e77f9f19SAndrew Rybchenko 
661e77f9f19SAndrew Rybchenko 	return rc;
662e77f9f19SAndrew Rybchenko }
663e77f9f19SAndrew Rybchenko 
664e77f9f19SAndrew Rybchenko static void
sfc_restart_if_required(void * arg)665e77f9f19SAndrew Rybchenko sfc_restart_if_required(void *arg)
666e77f9f19SAndrew Rybchenko {
667e77f9f19SAndrew Rybchenko 	struct sfc_adapter *sa = arg;
668e77f9f19SAndrew Rybchenko 
669e77f9f19SAndrew Rybchenko 	/* If restart is scheduled, clear the flag and do it */
670e77f9f19SAndrew Rybchenko 	if (rte_atomic32_cmpset((volatile uint32_t *)&sa->restart_required,
671e77f9f19SAndrew Rybchenko 				1, 0)) {
672e77f9f19SAndrew Rybchenko 		sfc_adapter_lock(sa);
673ac478689SIgor Romanov 		if (sa->state == SFC_ETHDEV_STARTED)
674e77f9f19SAndrew Rybchenko 			(void)sfc_restart(sa);
675e77f9f19SAndrew Rybchenko 		sfc_adapter_unlock(sa);
676e77f9f19SAndrew Rybchenko 	}
677e77f9f19SAndrew Rybchenko }
678e77f9f19SAndrew Rybchenko 
679e77f9f19SAndrew Rybchenko void
sfc_schedule_restart(struct sfc_adapter * sa)680e77f9f19SAndrew Rybchenko sfc_schedule_restart(struct sfc_adapter *sa)
681e77f9f19SAndrew Rybchenko {
682e77f9f19SAndrew Rybchenko 	int rc;
683e77f9f19SAndrew Rybchenko 
684e77f9f19SAndrew Rybchenko 	/* Schedule restart alarm if it is not scheduled yet */
685e77f9f19SAndrew Rybchenko 	if (!rte_atomic32_test_and_set(&sa->restart_required))
686e77f9f19SAndrew Rybchenko 		return;
687e77f9f19SAndrew Rybchenko 
688e77f9f19SAndrew Rybchenko 	rc = rte_eal_alarm_set(1, sfc_restart_if_required, sa);
689e77f9f19SAndrew Rybchenko 	if (rc == -ENOTSUP)
690e77f9f19SAndrew Rybchenko 		sfc_warn(sa, "alarms are not supported, restart is pending");
691e77f9f19SAndrew Rybchenko 	else if (rc != 0)
692e77f9f19SAndrew Rybchenko 		sfc_err(sa, "cannot arm restart alarm (rc=%d)", rc);
693e77f9f19SAndrew Rybchenko 	else
69491d16276SIvan Malov 		sfc_notice(sa, "restart scheduled");
695e77f9f19SAndrew Rybchenko }
696e77f9f19SAndrew Rybchenko 
69793fcf09bSAndrew Rybchenko int
sfc_configure(struct sfc_adapter * sa)698aaa3f5f0SAndrew Rybchenko sfc_configure(struct sfc_adapter *sa)
699aaa3f5f0SAndrew Rybchenko {
700c7cb2d7aSAndrew Rybchenko 	int rc;
701c7cb2d7aSAndrew Rybchenko 
702aaa3f5f0SAndrew Rybchenko 	sfc_log_init(sa, "entry");
703aaa3f5f0SAndrew Rybchenko 
704aaa3f5f0SAndrew Rybchenko 	SFC_ASSERT(sfc_adapter_is_locked(sa));
705aaa3f5f0SAndrew Rybchenko 
706ac478689SIgor Romanov 	SFC_ASSERT(sa->state == SFC_ETHDEV_INITIALIZED ||
707ac478689SIgor Romanov 		   sa->state == SFC_ETHDEV_CONFIGURED);
708ac478689SIgor Romanov 	sa->state = SFC_ETHDEV_CONFIGURING;
709aaa3f5f0SAndrew Rybchenko 
710c7cb2d7aSAndrew Rybchenko 	rc = sfc_check_conf(sa);
711c7cb2d7aSAndrew Rybchenko 	if (rc != 0)
712c7cb2d7aSAndrew Rybchenko 		goto fail_check_conf;
713c7cb2d7aSAndrew Rybchenko 
71452597396SAndrew Rybchenko 	rc = sfc_intr_configure(sa);
71506bc1977SAndrew Rybchenko 	if (rc != 0)
71652597396SAndrew Rybchenko 		goto fail_intr_configure;
71706bc1977SAndrew Rybchenko 
718c577a525SAndrew Rybchenko 	rc = sfc_port_configure(sa);
71903ed2119SAndrew Rybchenko 	if (rc != 0)
720c577a525SAndrew Rybchenko 		goto fail_port_configure;
72103ed2119SAndrew Rybchenko 
722f7637d4dSAndrew Rybchenko 	rc = sfc_rx_configure(sa);
723a8e64c6bSAndrew Rybchenko 	if (rc != 0)
724f7637d4dSAndrew Rybchenko 		goto fail_rx_configure;
725a8e64c6bSAndrew Rybchenko 
726df64eaddSAndrew Rybchenko 	rc = sfc_tx_configure(sa);
727a8ad8cf8SIvan Malov 	if (rc != 0)
728df64eaddSAndrew Rybchenko 		goto fail_tx_configure;
729a8ad8cf8SIvan Malov 
730fdd7719eSIvan Ilchenko 	rc = sfc_sw_xstats_configure(sa);
731fdd7719eSIvan Ilchenko 	if (rc != 0)
732fdd7719eSIvan Ilchenko 		goto fail_sw_xstats_configure;
733fdd7719eSIvan Ilchenko 
734ac478689SIgor Romanov 	sa->state = SFC_ETHDEV_CONFIGURED;
735aaa3f5f0SAndrew Rybchenko 	sfc_log_init(sa, "done");
736aaa3f5f0SAndrew Rybchenko 	return 0;
737c7cb2d7aSAndrew Rybchenko 
738fdd7719eSIvan Ilchenko fail_sw_xstats_configure:
739fdd7719eSIvan Ilchenko 	sfc_tx_close(sa);
740fdd7719eSIvan Ilchenko 
741df64eaddSAndrew Rybchenko fail_tx_configure:
742f7637d4dSAndrew Rybchenko 	sfc_rx_close(sa);
743a8ad8cf8SIvan Malov 
744f7637d4dSAndrew Rybchenko fail_rx_configure:
745c577a525SAndrew Rybchenko 	sfc_port_close(sa);
746a8e64c6bSAndrew Rybchenko 
747c577a525SAndrew Rybchenko fail_port_configure:
74852597396SAndrew Rybchenko 	sfc_intr_close(sa);
74958294ee6SAndrew Rybchenko 
75052597396SAndrew Rybchenko fail_intr_configure:
751c7cb2d7aSAndrew Rybchenko fail_check_conf:
752ac478689SIgor Romanov 	sa->state = SFC_ETHDEV_INITIALIZED;
753c7cb2d7aSAndrew Rybchenko 	sfc_log_init(sa, "failed %d", rc);
754c7cb2d7aSAndrew Rybchenko 	return rc;
755aaa3f5f0SAndrew Rybchenko }
756aaa3f5f0SAndrew Rybchenko 
757aaa3f5f0SAndrew Rybchenko void
sfc_close(struct sfc_adapter * sa)758aaa3f5f0SAndrew Rybchenko sfc_close(struct sfc_adapter *sa)
759aaa3f5f0SAndrew Rybchenko {
760aaa3f5f0SAndrew Rybchenko 	sfc_log_init(sa, "entry");
761aaa3f5f0SAndrew Rybchenko 
762aaa3f5f0SAndrew Rybchenko 	SFC_ASSERT(sfc_adapter_is_locked(sa));
763aaa3f5f0SAndrew Rybchenko 
764ac478689SIgor Romanov 	SFC_ASSERT(sa->state == SFC_ETHDEV_CONFIGURED);
765ac478689SIgor Romanov 	sa->state = SFC_ETHDEV_CLOSING;
766aaa3f5f0SAndrew Rybchenko 
767fdd7719eSIvan Ilchenko 	sfc_sw_xstats_close(sa);
768df64eaddSAndrew Rybchenko 	sfc_tx_close(sa);
769f7637d4dSAndrew Rybchenko 	sfc_rx_close(sa);
770c577a525SAndrew Rybchenko 	sfc_port_close(sa);
77152597396SAndrew Rybchenko 	sfc_intr_close(sa);
77206bc1977SAndrew Rybchenko 
773ac478689SIgor Romanov 	sa->state = SFC_ETHDEV_INITIALIZED;
774aaa3f5f0SAndrew Rybchenko 	sfc_log_init(sa, "done");
775aaa3f5f0SAndrew Rybchenko }
776aaa3f5f0SAndrew Rybchenko 
777ba641f20SAndrew Rybchenko static int
sfc_mem_bar_init(struct sfc_adapter * sa,const efx_bar_region_t * mem_ebrp)778fe4dad21SIgor Romanov sfc_mem_bar_init(struct sfc_adapter *sa, const efx_bar_region_t *mem_ebrp)
779ba641f20SAndrew Rybchenko {
780ba641f20SAndrew Rybchenko 	struct rte_eth_dev *eth_dev = sa->eth_dev;
781c0802544SFerruh Yigit 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
782ba641f20SAndrew Rybchenko 	efsys_bar_t *ebp = &sa->mem_bar;
783fe4dad21SIgor Romanov 	struct rte_mem_resource *res =
784fe4dad21SIgor Romanov 		&pci_dev->mem_resource[mem_ebrp->ebr_index];
785ba641f20SAndrew Rybchenko 
786ba641f20SAndrew Rybchenko 	SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name);
787fe4dad21SIgor Romanov 	ebp->esb_rid = mem_ebrp->ebr_index;
788ba641f20SAndrew Rybchenko 	ebp->esb_dev = pci_dev;
789ba641f20SAndrew Rybchenko 	ebp->esb_base = res->addr;
790e285f30dSIgor Romanov 
791e285f30dSIgor Romanov 	sa->fcw_offset = mem_ebrp->ebr_offset;
792e285f30dSIgor Romanov 
793ba641f20SAndrew Rybchenko 	return 0;
794ba641f20SAndrew Rybchenko }
795ba641f20SAndrew Rybchenko 
796ba641f20SAndrew Rybchenko static void
sfc_mem_bar_fini(struct sfc_adapter * sa)797ba641f20SAndrew Rybchenko sfc_mem_bar_fini(struct sfc_adapter *sa)
798ba641f20SAndrew Rybchenko {
799ba641f20SAndrew Rybchenko 	efsys_bar_t *ebp = &sa->mem_bar;
800ba641f20SAndrew Rybchenko 
801ba641f20SAndrew Rybchenko 	SFC_BAR_LOCK_DESTROY(ebp);
802ba641f20SAndrew Rybchenko 	memset(ebp, 0, sizeof(*ebp));
803ba641f20SAndrew Rybchenko }
804ba641f20SAndrew Rybchenko 
8054ec1fc3bSIvan Malov /*
8064ec1fc3bSIvan Malov  * A fixed RSS key which has a property of being symmetric
8074ec1fc3bSIvan Malov  * (symmetrical flows are distributed to the same CPU)
8084ec1fc3bSIvan Malov  * and also known to give a uniform distribution
8094ec1fc3bSIvan Malov  * (a good distribution of traffic between different CPUs)
8104ec1fc3bSIvan Malov  */
81137a42c61SAndrew Rybchenko static const uint8_t default_rss_key[EFX_RSS_KEY_SIZE] = {
8124ec1fc3bSIvan Malov 	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
8134ec1fc3bSIvan Malov 	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
8144ec1fc3bSIvan Malov 	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
8154ec1fc3bSIvan Malov 	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
8164ec1fc3bSIvan Malov 	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
8174ec1fc3bSIvan Malov };
8184ec1fc3bSIvan Malov 
8194ec1fc3bSIvan Malov static int
sfc_rss_attach(struct sfc_adapter * sa)82001764b20SIvan Malov sfc_rss_attach(struct sfc_adapter *sa)
8214ec1fc3bSIvan Malov {
822e295f175SAndrew Rybchenko 	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
8234ec1fc3bSIvan Malov 	int rc;
8244ec1fc3bSIvan Malov 
8254ec1fc3bSIvan Malov 	rc = efx_intr_init(sa->nic, sa->intr.type, NULL);
8264ec1fc3bSIvan Malov 	if (rc != 0)
8274ec1fc3bSIvan Malov 		goto fail_intr_init;
8284ec1fc3bSIvan Malov 
8294ec1fc3bSIvan Malov 	rc = efx_ev_init(sa->nic);
8304ec1fc3bSIvan Malov 	if (rc != 0)
8314ec1fc3bSIvan Malov 		goto fail_ev_init;
8324ec1fc3bSIvan Malov 
8334ec1fc3bSIvan Malov 	rc = efx_rx_init(sa->nic);
8344ec1fc3bSIvan Malov 	if (rc != 0)
8354ec1fc3bSIvan Malov 		goto fail_rx_init;
8364ec1fc3bSIvan Malov 
837d1482e21SIvan Malov 	rc = efx_rx_scale_default_support_get(sa->nic, &rss->context_type);
8384ec1fc3bSIvan Malov 	if (rc != 0)
8394ec1fc3bSIvan Malov 		goto fail_scale_support_get;
8404ec1fc3bSIvan Malov 
841d1482e21SIvan Malov 	rc = efx_rx_hash_default_support_get(sa->nic, &rss->hash_support);
8424ec1fc3bSIvan Malov 	if (rc != 0)
8434ec1fc3bSIvan Malov 		goto fail_hash_support_get;
8444ec1fc3bSIvan Malov 
84501764b20SIvan Malov 	rc = sfc_rx_hash_init(sa);
84601764b20SIvan Malov 	if (rc != 0)
84701764b20SIvan Malov 		goto fail_rx_hash_init;
84801764b20SIvan Malov 
8494ec1fc3bSIvan Malov 	efx_rx_fini(sa->nic);
8504ec1fc3bSIvan Malov 	efx_ev_fini(sa->nic);
8514ec1fc3bSIvan Malov 	efx_intr_fini(sa->nic);
8524ec1fc3bSIvan Malov 
853d1482e21SIvan Malov 	rte_memcpy(rss->key, default_rss_key, sizeof(rss->key));
8546da67e70SIvan Malov 	memset(&rss->dummy_ctx, 0, sizeof(rss->dummy_ctx));
8556da67e70SIvan Malov 	rss->dummy_ctx.conf.qid_span = 1;
8566da67e70SIvan Malov 	rss->dummy_ctx.dummy = true;
8574ec1fc3bSIvan Malov 
8584ec1fc3bSIvan Malov 	return 0;
8594ec1fc3bSIvan Malov 
86001764b20SIvan Malov fail_rx_hash_init:
8614ec1fc3bSIvan Malov fail_hash_support_get:
8624ec1fc3bSIvan Malov fail_scale_support_get:
86300b94c1cSIvan Malov 	efx_rx_fini(sa->nic);
86400b94c1cSIvan Malov 
8654ec1fc3bSIvan Malov fail_rx_init:
8664ec1fc3bSIvan Malov 	efx_ev_fini(sa->nic);
8674ec1fc3bSIvan Malov 
8684ec1fc3bSIvan Malov fail_ev_init:
8694ec1fc3bSIvan Malov 	efx_intr_fini(sa->nic);
8704ec1fc3bSIvan Malov 
8714ec1fc3bSIvan Malov fail_intr_init:
8724ec1fc3bSIvan Malov 	return rc;
8734ec1fc3bSIvan Malov }
8744ec1fc3bSIvan Malov 
87501764b20SIvan Malov static void
sfc_rss_detach(struct sfc_adapter * sa)87601764b20SIvan Malov sfc_rss_detach(struct sfc_adapter *sa)
87701764b20SIvan Malov {
87801764b20SIvan Malov 	sfc_rx_hash_fini(sa);
87901764b20SIvan Malov }
88001764b20SIvan Malov 
881ba641f20SAndrew Rybchenko int
sfc_attach(struct sfc_adapter * sa)882ba641f20SAndrew Rybchenko sfc_attach(struct sfc_adapter *sa)
883ba641f20SAndrew Rybchenko {
884a8ad8cf8SIvan Malov 	const efx_nic_cfg_t *encp;
885329472d4SAndrew Rybchenko 	efx_nic_t *enp = sa->nic;
886ba641f20SAndrew Rybchenko 	int rc;
887ba641f20SAndrew Rybchenko 
888ba641f20SAndrew Rybchenko 	sfc_log_init(sa, "entry");
889ba641f20SAndrew Rybchenko 
890ba641f20SAndrew Rybchenko 	SFC_ASSERT(sfc_adapter_is_locked(sa));
891ba641f20SAndrew Rybchenko 
892ba641f20SAndrew Rybchenko 	efx_mcdi_new_epoch(enp);
893ba641f20SAndrew Rybchenko 
894ba641f20SAndrew Rybchenko 	sfc_log_init(sa, "reset nic");
895ba641f20SAndrew Rybchenko 	rc = efx_nic_reset(enp);
896ba641f20SAndrew Rybchenko 	if (rc != 0)
897ba641f20SAndrew Rybchenko 		goto fail_nic_reset;
898ba641f20SAndrew Rybchenko 
899dd45b880SAndrew Rybchenko 	rc = sfc_sriov_attach(sa);
900dd45b880SAndrew Rybchenko 	if (rc != 0)
901dd45b880SAndrew Rybchenko 		goto fail_sriov_attach;
902dd45b880SAndrew Rybchenko 
90336c35355SAndrew Rybchenko 	/*
90436c35355SAndrew Rybchenko 	 * Probed NIC is sufficient for tunnel init.
90536c35355SAndrew Rybchenko 	 * Initialize tunnel support to be able to use libefx
90636c35355SAndrew Rybchenko 	 * efx_tunnel_config_udp_{add,remove}() in any state and
90736c35355SAndrew Rybchenko 	 * efx_tunnel_reconfigure() on start up.
90836c35355SAndrew Rybchenko 	 */
90936c35355SAndrew Rybchenko 	rc = efx_tunnel_init(enp);
91036c35355SAndrew Rybchenko 	if (rc != 0)
91136c35355SAndrew Rybchenko 		goto fail_tunnel_init;
91236c35355SAndrew Rybchenko 
9131f014258SIvan Malov 	encp = efx_nic_cfg_get(sa->nic);
9141f014258SIvan Malov 
9152646d42fSAndrew Rybchenko 	/*
9162646d42fSAndrew Rybchenko 	 * Make a copy of supported tunnel encapsulations in shared
9172646d42fSAndrew Rybchenko 	 * memory to be used on supported Rx packet type classes get.
9182646d42fSAndrew Rybchenko 	 */
9192646d42fSAndrew Rybchenko 	sa->priv.shared->tunnel_encaps =
9202646d42fSAndrew Rybchenko 		encp->enc_tunnel_encapsulations_supported;
9212646d42fSAndrew Rybchenko 
922295968d1SFerruh Yigit 	if (sfc_dp_tx_offload_capa(sa->priv.dp_tx) & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
9234f936666SIvan Malov 		sa->tso = encp->enc_fw_assisted_tso_v2_enabled ||
9244f936666SIvan Malov 			  encp->enc_tso_v3_enabled;
9251f014258SIvan Malov 		if (!sa->tso)
9269906cb29SIvan Malov 			sfc_info(sa, "TSO support isn't available on this adapter");
927b3b667c9SAndrew Rybchenko 	}
9281f014258SIvan Malov 
9299aa0afd1SAndrew Rybchenko 	if (sa->tso &&
9309aa0afd1SAndrew Rybchenko 	    (sfc_dp_tx_offload_capa(sa->priv.dp_tx) &
931295968d1SFerruh Yigit 	     (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
932295968d1SFerruh Yigit 	      RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)) != 0) {
93377cb0071SIvan Malov 		sa->tso_encap = encp->enc_fw_assisted_tso_v2_encap_enabled ||
93477cb0071SIvan Malov 				encp->enc_tso_v3_enabled;
935c1ce2ba2SIvan Malov 		if (!sa->tso_encap)
936c1ce2ba2SIvan Malov 			sfc_info(sa, "Encapsulated TSO support isn't available on this adapter");
937c1ce2ba2SIvan Malov 	}
938c1ce2ba2SIvan Malov 
93991831d40SAndrew Rybchenko 	sfc_log_init(sa, "estimate resource limits");
94091831d40SAndrew Rybchenko 	rc = sfc_estimate_resource_limits(sa);
941ba641f20SAndrew Rybchenko 	if (rc != 0)
94291831d40SAndrew Rybchenko 		goto fail_estimate_rsrc_limits;
943ba641f20SAndrew Rybchenko 
944d5371f3dSIgor Romanov 	sa->evq_max_entries = encp->enc_evq_max_nevs;
945d5371f3dSIgor Romanov 	SFC_ASSERT(rte_is_power_of_2(sa->evq_max_entries));
946d5371f3dSIgor Romanov 
947d5371f3dSIgor Romanov 	sa->evq_min_entries = encp->enc_evq_min_nevs;
948d5371f3dSIgor Romanov 	SFC_ASSERT(rte_is_power_of_2(sa->evq_min_entries));
949d5371f3dSIgor Romanov 
950048a0d1aSIgor Romanov 	sa->rxq_max_entries = encp->enc_rxq_max_ndescs;
951048a0d1aSIgor Romanov 	SFC_ASSERT(rte_is_power_of_2(sa->rxq_max_entries));
952048a0d1aSIgor Romanov 
953048a0d1aSIgor Romanov 	sa->rxq_min_entries = encp->enc_rxq_min_ndescs;
954048a0d1aSIgor Romanov 	SFC_ASSERT(rte_is_power_of_2(sa->rxq_min_entries));
955048a0d1aSIgor Romanov 
956a8ad8cf8SIvan Malov 	sa->txq_max_entries = encp->enc_txq_max_ndescs;
957a8ad8cf8SIvan Malov 	SFC_ASSERT(rte_is_power_of_2(sa->txq_max_entries));
958a8ad8cf8SIvan Malov 
9599dbd28dfSIgor Romanov 	sa->txq_min_entries = encp->enc_txq_min_ndescs;
9609dbd28dfSIgor Romanov 	SFC_ASSERT(rte_is_power_of_2(sa->txq_min_entries));
9619dbd28dfSIgor Romanov 
96206bc1977SAndrew Rybchenko 	rc = sfc_intr_attach(sa);
96306bc1977SAndrew Rybchenko 	if (rc != 0)
96406bc1977SAndrew Rybchenko 		goto fail_intr_attach;
96506bc1977SAndrew Rybchenko 
96647995190SAndrew Rybchenko 	rc = sfc_ev_attach(sa);
96747995190SAndrew Rybchenko 	if (rc != 0)
96847995190SAndrew Rybchenko 		goto fail_ev_attach;
96947995190SAndrew Rybchenko 
970c577a525SAndrew Rybchenko 	rc = sfc_port_attach(sa);
971c577a525SAndrew Rybchenko 	if (rc != 0)
972c577a525SAndrew Rybchenko 		goto fail_port_attach;
973d23f3a89SAndrew Rybchenko 
97401764b20SIvan Malov 	rc = sfc_rss_attach(sa);
9754ec1fc3bSIvan Malov 	if (rc != 0)
97601764b20SIvan Malov 		goto fail_rss_attach;
9774ec1fc3bSIvan Malov 
978*8cff0013SIvan Malov 	sfc_flow_init(sa);
979*8cff0013SIvan Malov 
9806da67e70SIvan Malov 	rc = sfc_flow_rss_attach(sa);
9816da67e70SIvan Malov 	if (rc != 0)
9826da67e70SIvan Malov 		goto fail_flow_rss_attach;
9836da67e70SIvan Malov 
984791f57acSAndrew Rybchenko 	rc = sfc_filter_attach(sa);
985791f57acSAndrew Rybchenko 	if (rc != 0)
986791f57acSAndrew Rybchenko 		goto fail_filter_attach;
987791f57acSAndrew Rybchenko 
988983ce116SIgor Romanov 	rc = sfc_mae_counter_rxq_attach(sa);
989983ce116SIgor Romanov 	if (rc != 0)
990983ce116SIgor Romanov 		goto fail_mae_counter_rxq_attach;
991983ce116SIgor Romanov 
99202b234adSIvan Malov 	rc = sfc_mae_attach(sa);
99302b234adSIvan Malov 	if (rc != 0)
99402b234adSIvan Malov 		goto fail_mae_attach;
99502b234adSIvan Malov 
996bc712f1cSDenis Pryazhennikov 	rc = sfc_tbls_attach(sa);
997bc712f1cSDenis Pryazhennikov 	if (rc != 0)
998bc712f1cSDenis Pryazhennikov 		goto fail_tables_attach;
999bc712f1cSDenis Pryazhennikov 
1000c8617ddaSIgor Romanov 	rc = sfc_mae_switchdev_init(sa);
1001c8617ddaSIgor Romanov 	if (rc != 0)
1002c8617ddaSIgor Romanov 		goto fail_mae_switchdev_init;
1003c8617ddaSIgor Romanov 
100452e80b1bSIgor Romanov 	rc = sfc_repr_proxy_attach(sa);
100552e80b1bSIgor Romanov 	if (rc != 0)
100652e80b1bSIgor Romanov 		goto fail_repr_proxy_attach;
100752e80b1bSIgor Romanov 
1008ba641f20SAndrew Rybchenko 	sfc_log_init(sa, "fini nic");
1009ba641f20SAndrew Rybchenko 	efx_nic_fini(enp);
1010ba641f20SAndrew Rybchenko 
1011fdd7719eSIvan Ilchenko 	rc = sfc_sw_xstats_init(sa);
1012fdd7719eSIvan Ilchenko 	if (rc != 0)
1013fdd7719eSIvan Ilchenko 		goto fail_sw_xstats_init;
1014fdd7719eSIvan Ilchenko 
1015dd45b880SAndrew Rybchenko 	/*
1016dd45b880SAndrew Rybchenko 	 * Create vSwitch to be able to use VFs when PF is not started yet
1017dd45b880SAndrew Rybchenko 	 * as DPDK port. VFs should be able to talk to each other even
1018dd45b880SAndrew Rybchenko 	 * if PF is down.
1019dd45b880SAndrew Rybchenko 	 */
1020dd45b880SAndrew Rybchenko 	rc = sfc_sriov_vswitch_create(sa);
1021dd45b880SAndrew Rybchenko 	if (rc != 0)
1022dd45b880SAndrew Rybchenko 		goto fail_sriov_vswitch_create;
1023dd45b880SAndrew Rybchenko 
1024ac478689SIgor Romanov 	sa->state = SFC_ETHDEV_INITIALIZED;
1025ba641f20SAndrew Rybchenko 
1026ba641f20SAndrew Rybchenko 	sfc_log_init(sa, "done");
1027ba641f20SAndrew Rybchenko 	return 0;
1028ba641f20SAndrew Rybchenko 
1029dd45b880SAndrew Rybchenko fail_sriov_vswitch_create:
1030fdd7719eSIvan Ilchenko 	sfc_sw_xstats_close(sa);
1031fdd7719eSIvan Ilchenko 
1032fdd7719eSIvan Ilchenko fail_sw_xstats_init:
103352e80b1bSIgor Romanov 	sfc_repr_proxy_detach(sa);
103452e80b1bSIgor Romanov 
103552e80b1bSIgor Romanov fail_repr_proxy_attach:
1036c8617ddaSIgor Romanov 	sfc_mae_switchdev_fini(sa);
1037c8617ddaSIgor Romanov 
1038c8617ddaSIgor Romanov fail_mae_switchdev_init:
1039bc712f1cSDenis Pryazhennikov 	sfc_tbls_detach(sa);
1040bc712f1cSDenis Pryazhennikov 
1041bc712f1cSDenis Pryazhennikov fail_tables_attach:
104202b234adSIvan Malov 	sfc_mae_detach(sa);
104302b234adSIvan Malov 
104402b234adSIvan Malov fail_mae_attach:
1045983ce116SIgor Romanov 	sfc_mae_counter_rxq_detach(sa);
1046983ce116SIgor Romanov 
1047983ce116SIgor Romanov fail_mae_counter_rxq_attach:
1048dd45b880SAndrew Rybchenko 	sfc_filter_detach(sa);
1049dd45b880SAndrew Rybchenko 
1050791f57acSAndrew Rybchenko fail_filter_attach:
10516da67e70SIvan Malov 	sfc_flow_rss_detach(sa);
10526da67e70SIvan Malov 
10536da67e70SIvan Malov fail_flow_rss_attach:
1054*8cff0013SIvan Malov 	sfc_flow_fini(sa);
105501764b20SIvan Malov 	sfc_rss_detach(sa);
105601764b20SIvan Malov 
105701764b20SIvan Malov fail_rss_attach:
1058c577a525SAndrew Rybchenko 	sfc_port_detach(sa);
1059c577a525SAndrew Rybchenko 
1060c577a525SAndrew Rybchenko fail_port_attach:
106147995190SAndrew Rybchenko 	sfc_ev_detach(sa);
106247995190SAndrew Rybchenko 
106347995190SAndrew Rybchenko fail_ev_attach:
10644ec1fc3bSIvan Malov 	sfc_intr_detach(sa);
10654ec1fc3bSIvan Malov 
106606bc1977SAndrew Rybchenko fail_intr_attach:
10674ec1fc3bSIvan Malov 	efx_nic_fini(sa->nic);
10684ec1fc3bSIvan Malov 
106991831d40SAndrew Rybchenko fail_estimate_rsrc_limits:
107036c35355SAndrew Rybchenko fail_tunnel_init:
107136c35355SAndrew Rybchenko 	efx_tunnel_fini(sa->nic);
1072dd45b880SAndrew Rybchenko 	sfc_sriov_detach(sa);
107336c35355SAndrew Rybchenko 
1074dd45b880SAndrew Rybchenko fail_sriov_attach:
1075ba641f20SAndrew Rybchenko fail_nic_reset:
1076329472d4SAndrew Rybchenko 
1077329472d4SAndrew Rybchenko 	sfc_log_init(sa, "failed %d", rc);
1078329472d4SAndrew Rybchenko 	return rc;
1079329472d4SAndrew Rybchenko }
1080329472d4SAndrew Rybchenko 
1081329472d4SAndrew Rybchenko void
sfc_pre_detach(struct sfc_adapter * sa)1082c377f1adSIgor Romanov sfc_pre_detach(struct sfc_adapter *sa)
1083c377f1adSIgor Romanov {
1084c377f1adSIgor Romanov 	sfc_log_init(sa, "entry");
1085c377f1adSIgor Romanov 
1086c377f1adSIgor Romanov 	SFC_ASSERT(!sfc_adapter_is_locked(sa));
1087c377f1adSIgor Romanov 
1088c377f1adSIgor Romanov 	sfc_repr_proxy_pre_detach(sa);
1089c377f1adSIgor Romanov 
1090c377f1adSIgor Romanov 	sfc_log_init(sa, "done");
1091c377f1adSIgor Romanov }
1092c377f1adSIgor Romanov 
1093c377f1adSIgor Romanov void
sfc_detach(struct sfc_adapter * sa)1094329472d4SAndrew Rybchenko sfc_detach(struct sfc_adapter *sa)
1095329472d4SAndrew Rybchenko {
1096329472d4SAndrew Rybchenko 	sfc_log_init(sa, "entry");
1097329472d4SAndrew Rybchenko 
1098329472d4SAndrew Rybchenko 	SFC_ASSERT(sfc_adapter_is_locked(sa));
1099329472d4SAndrew Rybchenko 
1100dd45b880SAndrew Rybchenko 	sfc_sriov_vswitch_destroy(sa);
1101dd45b880SAndrew Rybchenko 
110252e80b1bSIgor Romanov 	sfc_repr_proxy_detach(sa);
1103c8617ddaSIgor Romanov 	sfc_mae_switchdev_fini(sa);
1104bc712f1cSDenis Pryazhennikov 	sfc_tbls_detach(sa);
110502b234adSIvan Malov 	sfc_mae_detach(sa);
1106983ce116SIgor Romanov 	sfc_mae_counter_rxq_detach(sa);
1107329472d4SAndrew Rybchenko 	sfc_filter_detach(sa);
11086da67e70SIvan Malov 	sfc_flow_rss_detach(sa);
1109*8cff0013SIvan Malov 	sfc_flow_fini(sa);
111001764b20SIvan Malov 	sfc_rss_detach(sa);
1111c577a525SAndrew Rybchenko 	sfc_port_detach(sa);
111247995190SAndrew Rybchenko 	sfc_ev_detach(sa);
1113329472d4SAndrew Rybchenko 	sfc_intr_detach(sa);
111436c35355SAndrew Rybchenko 	efx_tunnel_fini(sa->nic);
1115dd45b880SAndrew Rybchenko 	sfc_sriov_detach(sa);
1116329472d4SAndrew Rybchenko 
1117ac478689SIgor Romanov 	sa->state = SFC_ETHDEV_UNINITIALIZED;
1118329472d4SAndrew Rybchenko }
1119329472d4SAndrew Rybchenko 
11209e7fc8b8SRoman Zhukov static int
sfc_kvarg_fv_variant_handler(__rte_unused const char * key,const char * value_str,void * opaque)11219e7fc8b8SRoman Zhukov sfc_kvarg_fv_variant_handler(__rte_unused const char *key,
11229e7fc8b8SRoman Zhukov 			     const char *value_str, void *opaque)
11239e7fc8b8SRoman Zhukov {
11249e7fc8b8SRoman Zhukov 	uint32_t *value = opaque;
11259e7fc8b8SRoman Zhukov 
11269e7fc8b8SRoman Zhukov 	if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DONT_CARE) == 0)
11279e7fc8b8SRoman Zhukov 		*value = EFX_FW_VARIANT_DONT_CARE;
11289e7fc8b8SRoman Zhukov 	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_FULL_FEATURED) == 0)
11299e7fc8b8SRoman Zhukov 		*value = EFX_FW_VARIANT_FULL_FEATURED;
11309e7fc8b8SRoman Zhukov 	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_LOW_LATENCY) == 0)
11319e7fc8b8SRoman Zhukov 		*value = EFX_FW_VARIANT_LOW_LATENCY;
11329e7fc8b8SRoman Zhukov 	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_PACKED_STREAM) == 0)
11339e7fc8b8SRoman Zhukov 		*value = EFX_FW_VARIANT_PACKED_STREAM;
11346e899accSAndrew Rybchenko 	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DPDK) == 0)
11356e899accSAndrew Rybchenko 		*value = EFX_FW_VARIANT_DPDK;
11369e7fc8b8SRoman Zhukov 	else
11379e7fc8b8SRoman Zhukov 		return -EINVAL;
11389e7fc8b8SRoman Zhukov 
11399e7fc8b8SRoman Zhukov 	return 0;
11409e7fc8b8SRoman Zhukov }
11419e7fc8b8SRoman Zhukov 
11429e7fc8b8SRoman Zhukov static int
sfc_get_fw_variant(struct sfc_adapter * sa,efx_fw_variant_t * efv)11439e7fc8b8SRoman Zhukov sfc_get_fw_variant(struct sfc_adapter *sa, efx_fw_variant_t *efv)
11449e7fc8b8SRoman Zhukov {
11459e7fc8b8SRoman Zhukov 	efx_nic_fw_info_t enfi;
11469e7fc8b8SRoman Zhukov 	int rc;
11479e7fc8b8SRoman Zhukov 
11489e7fc8b8SRoman Zhukov 	rc = efx_nic_get_fw_version(sa->nic, &enfi);
11499e7fc8b8SRoman Zhukov 	if (rc != 0)
11509e7fc8b8SRoman Zhukov 		return rc;
11519e7fc8b8SRoman Zhukov 	else if (!enfi.enfi_dpcpu_fw_ids_valid)
11529e7fc8b8SRoman Zhukov 		return ENOTSUP;
11539e7fc8b8SRoman Zhukov 
11549e7fc8b8SRoman Zhukov 	/*
11559e7fc8b8SRoman Zhukov 	 * Firmware variant can be uniquely identified by the RxDPCPU
11569e7fc8b8SRoman Zhukov 	 * firmware id
11579e7fc8b8SRoman Zhukov 	 */
11589e7fc8b8SRoman Zhukov 	switch (enfi.enfi_rx_dpcpu_fw_id) {
11599e7fc8b8SRoman Zhukov 	case EFX_RXDP_FULL_FEATURED_FW_ID:
11609e7fc8b8SRoman Zhukov 		*efv = EFX_FW_VARIANT_FULL_FEATURED;
11619e7fc8b8SRoman Zhukov 		break;
11629e7fc8b8SRoman Zhukov 
11639e7fc8b8SRoman Zhukov 	case EFX_RXDP_LOW_LATENCY_FW_ID:
11649e7fc8b8SRoman Zhukov 		*efv = EFX_FW_VARIANT_LOW_LATENCY;
11659e7fc8b8SRoman Zhukov 		break;
11669e7fc8b8SRoman Zhukov 
11679e7fc8b8SRoman Zhukov 	case EFX_RXDP_PACKED_STREAM_FW_ID:
11689e7fc8b8SRoman Zhukov 		*efv = EFX_FW_VARIANT_PACKED_STREAM;
11699e7fc8b8SRoman Zhukov 		break;
11709e7fc8b8SRoman Zhukov 
11716e899accSAndrew Rybchenko 	case EFX_RXDP_DPDK_FW_ID:
11726e899accSAndrew Rybchenko 		*efv = EFX_FW_VARIANT_DPDK;
11736e899accSAndrew Rybchenko 		break;
11746e899accSAndrew Rybchenko 
11759e7fc8b8SRoman Zhukov 	default:
11769e7fc8b8SRoman Zhukov 		/*
11779e7fc8b8SRoman Zhukov 		 * Other firmware variants are not considered, since they are
11789e7fc8b8SRoman Zhukov 		 * not supported in the device parameters
11799e7fc8b8SRoman Zhukov 		 */
11809e7fc8b8SRoman Zhukov 		*efv = EFX_FW_VARIANT_DONT_CARE;
11819e7fc8b8SRoman Zhukov 		break;
11829e7fc8b8SRoman Zhukov 	}
11839e7fc8b8SRoman Zhukov 
11849e7fc8b8SRoman Zhukov 	return 0;
11859e7fc8b8SRoman Zhukov }
11869e7fc8b8SRoman Zhukov 
11879e7fc8b8SRoman Zhukov static const char *
sfc_fw_variant2str(efx_fw_variant_t efv)11889e7fc8b8SRoman Zhukov sfc_fw_variant2str(efx_fw_variant_t efv)
11899e7fc8b8SRoman Zhukov {
11909e7fc8b8SRoman Zhukov 	switch (efv) {
11919e7fc8b8SRoman Zhukov 	case EFX_RXDP_FULL_FEATURED_FW_ID:
11929e7fc8b8SRoman Zhukov 		return SFC_KVARG_FW_VARIANT_FULL_FEATURED;
11939e7fc8b8SRoman Zhukov 	case EFX_RXDP_LOW_LATENCY_FW_ID:
11949e7fc8b8SRoman Zhukov 		return SFC_KVARG_FW_VARIANT_LOW_LATENCY;
11959e7fc8b8SRoman Zhukov 	case EFX_RXDP_PACKED_STREAM_FW_ID:
11969e7fc8b8SRoman Zhukov 		return SFC_KVARG_FW_VARIANT_PACKED_STREAM;
11976e899accSAndrew Rybchenko 	case EFX_RXDP_DPDK_FW_ID:
11986e899accSAndrew Rybchenko 		return SFC_KVARG_FW_VARIANT_DPDK;
11999e7fc8b8SRoman Zhukov 	default:
12009e7fc8b8SRoman Zhukov 		return "unknown";
12019e7fc8b8SRoman Zhukov 	}
12029e7fc8b8SRoman Zhukov }
12039e7fc8b8SRoman Zhukov 
12049e7fc8b8SRoman Zhukov static int
sfc_kvarg_rxd_wait_timeout_ns(struct sfc_adapter * sa)12055a1ae82dSAndrew Rybchenko sfc_kvarg_rxd_wait_timeout_ns(struct sfc_adapter *sa)
12065a1ae82dSAndrew Rybchenko {
12075a1ae82dSAndrew Rybchenko 	int rc;
12085a1ae82dSAndrew Rybchenko 	long value;
12095a1ae82dSAndrew Rybchenko 
12105a1ae82dSAndrew Rybchenko 	value = SFC_RXD_WAIT_TIMEOUT_NS_DEF;
12115a1ae82dSAndrew Rybchenko 
12125a1ae82dSAndrew Rybchenko 	rc = sfc_kvargs_process(sa, SFC_KVARG_RXD_WAIT_TIMEOUT_NS,
12135a1ae82dSAndrew Rybchenko 				sfc_kvarg_long_handler, &value);
12145a1ae82dSAndrew Rybchenko 	if (rc != 0)
12155a1ae82dSAndrew Rybchenko 		return rc;
12165a1ae82dSAndrew Rybchenko 
12175a1ae82dSAndrew Rybchenko 	if (value < 0 ||
12185a1ae82dSAndrew Rybchenko 	    (unsigned long)value > EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX) {
12195a1ae82dSAndrew Rybchenko 		sfc_err(sa, "wrong '" SFC_KVARG_RXD_WAIT_TIMEOUT_NS "' "
12205a1ae82dSAndrew Rybchenko 			    "was set (%ld);", value);
12215a1ae82dSAndrew Rybchenko 		sfc_err(sa, "it must not be less than 0 or greater than %u",
12225a1ae82dSAndrew Rybchenko 			    EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX);
12235a1ae82dSAndrew Rybchenko 		return EINVAL;
12245a1ae82dSAndrew Rybchenko 	}
12255a1ae82dSAndrew Rybchenko 
12265a1ae82dSAndrew Rybchenko 	sa->rxd_wait_timeout_ns = value;
12275a1ae82dSAndrew Rybchenko 	return 0;
12285a1ae82dSAndrew Rybchenko }
12295a1ae82dSAndrew Rybchenko 
12305a1ae82dSAndrew Rybchenko static int
sfc_nic_probe(struct sfc_adapter * sa)12319e7fc8b8SRoman Zhukov sfc_nic_probe(struct sfc_adapter *sa)
12329e7fc8b8SRoman Zhukov {
12339e7fc8b8SRoman Zhukov 	efx_nic_t *enp = sa->nic;
12349e7fc8b8SRoman Zhukov 	efx_fw_variant_t preferred_efv;
12359e7fc8b8SRoman Zhukov 	efx_fw_variant_t efv;
12369e7fc8b8SRoman Zhukov 	int rc;
12379e7fc8b8SRoman Zhukov 
12389e7fc8b8SRoman Zhukov 	preferred_efv = EFX_FW_VARIANT_DONT_CARE;
12399e7fc8b8SRoman Zhukov 	rc = sfc_kvargs_process(sa, SFC_KVARG_FW_VARIANT,
12409e7fc8b8SRoman Zhukov 				sfc_kvarg_fv_variant_handler,
12419e7fc8b8SRoman Zhukov 				&preferred_efv);
12429e7fc8b8SRoman Zhukov 	if (rc != 0) {
12439e7fc8b8SRoman Zhukov 		sfc_err(sa, "invalid %s parameter value", SFC_KVARG_FW_VARIANT);
12449e7fc8b8SRoman Zhukov 		return rc;
12459e7fc8b8SRoman Zhukov 	}
12469e7fc8b8SRoman Zhukov 
12475a1ae82dSAndrew Rybchenko 	rc = sfc_kvarg_rxd_wait_timeout_ns(sa);
12485a1ae82dSAndrew Rybchenko 	if (rc != 0)
12495a1ae82dSAndrew Rybchenko 		return rc;
12505a1ae82dSAndrew Rybchenko 
12519e7fc8b8SRoman Zhukov 	rc = efx_nic_probe(enp, preferred_efv);
12529e7fc8b8SRoman Zhukov 	if (rc == EACCES) {
12539e7fc8b8SRoman Zhukov 		/* Unprivileged functions cannot set FW variant */
12549e7fc8b8SRoman Zhukov 		rc = efx_nic_probe(enp, EFX_FW_VARIANT_DONT_CARE);
12559e7fc8b8SRoman Zhukov 	}
12569e7fc8b8SRoman Zhukov 	if (rc != 0)
12579e7fc8b8SRoman Zhukov 		return rc;
12589e7fc8b8SRoman Zhukov 
12599e7fc8b8SRoman Zhukov 	rc = sfc_get_fw_variant(sa, &efv);
12609e7fc8b8SRoman Zhukov 	if (rc == ENOTSUP) {
12619e7fc8b8SRoman Zhukov 		sfc_warn(sa, "FW variant can not be obtained");
12629e7fc8b8SRoman Zhukov 		return 0;
12639e7fc8b8SRoman Zhukov 	}
12649e7fc8b8SRoman Zhukov 	if (rc != 0)
12659e7fc8b8SRoman Zhukov 		return rc;
12669e7fc8b8SRoman Zhukov 
12679e7fc8b8SRoman Zhukov 	/* Check that firmware variant was changed to the requested one */
12689e7fc8b8SRoman Zhukov 	if (preferred_efv != EFX_FW_VARIANT_DONT_CARE && preferred_efv != efv) {
12699e7fc8b8SRoman Zhukov 		sfc_warn(sa, "FW variant has not changed to the requested %s",
12709e7fc8b8SRoman Zhukov 			 sfc_fw_variant2str(preferred_efv));
12719e7fc8b8SRoman Zhukov 	}
12729e7fc8b8SRoman Zhukov 
12739e7fc8b8SRoman Zhukov 	sfc_notice(sa, "running FW variant is %s", sfc_fw_variant2str(efv));
12749e7fc8b8SRoman Zhukov 
12759e7fc8b8SRoman Zhukov 	return 0;
12769e7fc8b8SRoman Zhukov }
12779e7fc8b8SRoman Zhukov 
1278329472d4SAndrew Rybchenko int
sfc_probe(struct sfc_adapter * sa)1279329472d4SAndrew Rybchenko sfc_probe(struct sfc_adapter *sa)
1280329472d4SAndrew Rybchenko {
1281fe4dad21SIgor Romanov 	efx_bar_region_t mem_ebrp;
12827178fbdcSVijay Kumar Srivastava 	struct rte_eth_dev *eth_dev = sa->eth_dev;
12837178fbdcSVijay Kumar Srivastava 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1284329472d4SAndrew Rybchenko 	efx_nic_t *enp;
1285329472d4SAndrew Rybchenko 	int rc;
1286329472d4SAndrew Rybchenko 
1287329472d4SAndrew Rybchenko 	sfc_log_init(sa, "entry");
1288329472d4SAndrew Rybchenko 
1289329472d4SAndrew Rybchenko 	SFC_ASSERT(sfc_adapter_is_locked(sa));
1290329472d4SAndrew Rybchenko 
1291329472d4SAndrew Rybchenko 	sa->socket_id = rte_socket_id();
1292e77f9f19SAndrew Rybchenko 	rte_atomic32_init(&sa->restart_required);
1293329472d4SAndrew Rybchenko 
1294329472d4SAndrew Rybchenko 	sfc_log_init(sa, "get family");
12957178fbdcSVijay Kumar Srivastava 	rc = sfc_efx_family(pci_dev, &mem_ebrp, &sa->family);
12967178fbdcSVijay Kumar Srivastava 
1297329472d4SAndrew Rybchenko 	if (rc != 0)
1298329472d4SAndrew Rybchenko 		goto fail_family;
1299fe4dad21SIgor Romanov 	sfc_log_init(sa,
1300fe4dad21SIgor Romanov 		     "family is %u, membar is %u, function control window offset is %lu",
1301fe4dad21SIgor Romanov 		     sa->family, mem_ebrp.ebr_index, mem_ebrp.ebr_offset);
1302e434de5dSAndy Moreton 
1303e434de5dSAndy Moreton 	sfc_log_init(sa, "init mem bar");
1304fe4dad21SIgor Romanov 	rc = sfc_mem_bar_init(sa, &mem_ebrp);
1305e434de5dSAndy Moreton 	if (rc != 0)
1306e434de5dSAndy Moreton 		goto fail_mem_bar_init;
1307329472d4SAndrew Rybchenko 
1308329472d4SAndrew Rybchenko 	sfc_log_init(sa, "create nic");
1309329472d4SAndrew Rybchenko 	rte_spinlock_init(&sa->nic_lock);
1310329472d4SAndrew Rybchenko 	rc = efx_nic_create(sa->family, (efsys_identifier_t *)sa,
1311fe4dad21SIgor Romanov 			    &sa->mem_bar, mem_ebrp.ebr_offset,
1312341bd4e0SIgor Romanov 			    &sa->nic_lock, &enp);
1313329472d4SAndrew Rybchenko 	if (rc != 0)
1314329472d4SAndrew Rybchenko 		goto fail_nic_create;
1315329472d4SAndrew Rybchenko 	sa->nic = enp;
1316329472d4SAndrew Rybchenko 
1317329472d4SAndrew Rybchenko 	rc = sfc_mcdi_init(sa);
1318329472d4SAndrew Rybchenko 	if (rc != 0)
1319329472d4SAndrew Rybchenko 		goto fail_mcdi_init;
1320329472d4SAndrew Rybchenko 
1321329472d4SAndrew Rybchenko 	sfc_log_init(sa, "probe nic");
13229e7fc8b8SRoman Zhukov 	rc = sfc_nic_probe(sa);
1323329472d4SAndrew Rybchenko 	if (rc != 0)
1324329472d4SAndrew Rybchenko 		goto fail_nic_probe;
1325329472d4SAndrew Rybchenko 
1326329472d4SAndrew Rybchenko 	sfc_log_init(sa, "done");
1327329472d4SAndrew Rybchenko 	return 0;
1328ba641f20SAndrew Rybchenko 
1329ba641f20SAndrew Rybchenko fail_nic_probe:
1330ba641f20SAndrew Rybchenko 	sfc_mcdi_fini(sa);
1331ba641f20SAndrew Rybchenko 
1332ba641f20SAndrew Rybchenko fail_mcdi_init:
1333ba641f20SAndrew Rybchenko 	sfc_log_init(sa, "destroy nic");
1334ba641f20SAndrew Rybchenko 	sa->nic = NULL;
1335ba641f20SAndrew Rybchenko 	efx_nic_destroy(enp);
1336ba641f20SAndrew Rybchenko 
1337ba641f20SAndrew Rybchenko fail_nic_create:
1338ba641f20SAndrew Rybchenko 	sfc_mem_bar_fini(sa);
1339ba641f20SAndrew Rybchenko 
1340ba641f20SAndrew Rybchenko fail_mem_bar_init:
1341e434de5dSAndy Moreton fail_family:
1342ba641f20SAndrew Rybchenko 	sfc_log_init(sa, "failed %d", rc);
1343ba641f20SAndrew Rybchenko 	return rc;
1344ba641f20SAndrew Rybchenko }
1345ba641f20SAndrew Rybchenko 
1346ba641f20SAndrew Rybchenko void
sfc_unprobe(struct sfc_adapter * sa)1347329472d4SAndrew Rybchenko sfc_unprobe(struct sfc_adapter *sa)
1348ba641f20SAndrew Rybchenko {
1349ba641f20SAndrew Rybchenko 	efx_nic_t *enp = sa->nic;
1350ba641f20SAndrew Rybchenko 
1351ba641f20SAndrew Rybchenko 	sfc_log_init(sa, "entry");
1352ba641f20SAndrew Rybchenko 
1353ba641f20SAndrew Rybchenko 	SFC_ASSERT(sfc_adapter_is_locked(sa));
1354ba641f20SAndrew Rybchenko 
1355ba641f20SAndrew Rybchenko 	sfc_log_init(sa, "unprobe nic");
1356ba641f20SAndrew Rybchenko 	efx_nic_unprobe(enp);
1357ba641f20SAndrew Rybchenko 
1358ba641f20SAndrew Rybchenko 	sfc_mcdi_fini(sa);
1359ba641f20SAndrew Rybchenko 
1360e77f9f19SAndrew Rybchenko 	/*
1361e77f9f19SAndrew Rybchenko 	 * Make sure there is no pending alarm to restart since we are
1362e77f9f19SAndrew Rybchenko 	 * going to free device private which is passed as the callback
1363e77f9f19SAndrew Rybchenko 	 * opaque data. A new alarm cannot be scheduled since MCDI is
1364e77f9f19SAndrew Rybchenko 	 * shut down.
1365e77f9f19SAndrew Rybchenko 	 */
1366e77f9f19SAndrew Rybchenko 	rte_eal_alarm_cancel(sfc_restart_if_required, sa);
1367e77f9f19SAndrew Rybchenko 
136826706314SViacheslav Galaktionov 	sfc_mae_clear_switch_port(sa->mae.switch_domain_id,
136926706314SViacheslav Galaktionov 				  sa->mae.switch_port_id);
137026706314SViacheslav Galaktionov 
1371ba641f20SAndrew Rybchenko 	sfc_log_init(sa, "destroy nic");
1372ba641f20SAndrew Rybchenko 	sa->nic = NULL;
1373ba641f20SAndrew Rybchenko 	efx_nic_destroy(enp);
1374ba641f20SAndrew Rybchenko 
1375ba641f20SAndrew Rybchenko 	sfc_mem_bar_fini(sa);
1376ba641f20SAndrew Rybchenko 
1377a9825ccfSRoman Zhukov 	sfc_flow_fini(sa);
1378ac478689SIgor Romanov 	sa->state = SFC_ETHDEV_UNINITIALIZED;
1379ba641f20SAndrew Rybchenko }
1380dad99d92SIvan Malov 
1381dad99d92SIvan Malov uint32_t
sfc_register_logtype(const struct rte_pci_addr * pci_addr,const char * lt_prefix_str,uint32_t ll_default)1382e2c3639aSAndrew Rybchenko sfc_register_logtype(const struct rte_pci_addr *pci_addr,
1383e2c3639aSAndrew Rybchenko 		     const char *lt_prefix_str, uint32_t ll_default)
1384dad99d92SIvan Malov {
1385dad99d92SIvan Malov 	size_t lt_prefix_str_size = strlen(lt_prefix_str);
1386dad99d92SIvan Malov 	size_t lt_str_size_max;
1387dad99d92SIvan Malov 	char *lt_str = NULL;
1388dad99d92SIvan Malov 	int ret;
1389dad99d92SIvan Malov 
1390dad99d92SIvan Malov 	if (SIZE_MAX - PCI_PRI_STR_SIZE - 1 > lt_prefix_str_size) {
1391dad99d92SIvan Malov 		++lt_prefix_str_size; /* Reserve space for prefix separator */
1392dad99d92SIvan Malov 		lt_str_size_max = lt_prefix_str_size + PCI_PRI_STR_SIZE + 1;
1393dad99d92SIvan Malov 	} else {
139442c807feSStephen Hemminger 		return sfc_logtype_driver;
1395dad99d92SIvan Malov 	}
1396dad99d92SIvan Malov 
1397dad99d92SIvan Malov 	lt_str = rte_zmalloc("logtype_str", lt_str_size_max, 0);
1398dad99d92SIvan Malov 	if (lt_str == NULL)
139942c807feSStephen Hemminger 		return sfc_logtype_driver;
1400dad99d92SIvan Malov 
1401dad99d92SIvan Malov 	strncpy(lt_str, lt_prefix_str, lt_prefix_str_size);
1402dad99d92SIvan Malov 	lt_str[lt_prefix_str_size - 1] = '.';
1403e2c3639aSAndrew Rybchenko 	rte_pci_device_name(pci_addr, lt_str + lt_prefix_str_size,
1404dad99d92SIvan Malov 			    lt_str_size_max - lt_prefix_str_size);
1405dad99d92SIvan Malov 	lt_str[lt_str_size_max - 1] = '\0';
1406dad99d92SIvan Malov 
1407dad99d92SIvan Malov 	ret = rte_log_register_type_and_pick_level(lt_str, ll_default);
1408dad99d92SIvan Malov 	rte_free(lt_str);
1409dad99d92SIvan Malov 
141042c807feSStephen Hemminger 	if (ret < 0)
141142c807feSStephen Hemminger 		return sfc_logtype_driver;
141242c807feSStephen Hemminger 
141342c807feSStephen Hemminger 	return ret;
1414dad99d92SIvan Malov }
1415e86b48aaSIvan Malov 
1416e86b48aaSIvan Malov struct sfc_hw_switch_id {
1417e86b48aaSIvan Malov 	char	board_sn[RTE_SIZEOF_FIELD(efx_nic_board_info_t, enbi_serial)];
1418e86b48aaSIvan Malov };
1419e86b48aaSIvan Malov 
1420e86b48aaSIvan Malov int
sfc_hw_switch_id_init(struct sfc_adapter * sa,struct sfc_hw_switch_id ** idp)1421e86b48aaSIvan Malov sfc_hw_switch_id_init(struct sfc_adapter *sa,
1422e86b48aaSIvan Malov 		      struct sfc_hw_switch_id **idp)
1423e86b48aaSIvan Malov {
1424e86b48aaSIvan Malov 	efx_nic_board_info_t board_info;
1425e86b48aaSIvan Malov 	struct sfc_hw_switch_id *id;
1426e86b48aaSIvan Malov 	int rc;
1427e86b48aaSIvan Malov 
1428e86b48aaSIvan Malov 	if (idp == NULL)
1429e86b48aaSIvan Malov 		return EINVAL;
1430e86b48aaSIvan Malov 
1431e86b48aaSIvan Malov 	id = rte_zmalloc("sfc_hw_switch_id", sizeof(*id), 0);
1432e86b48aaSIvan Malov 	if (id == NULL)
1433e86b48aaSIvan Malov 		return ENOMEM;
1434e86b48aaSIvan Malov 
1435e86b48aaSIvan Malov 	rc = efx_nic_get_board_info(sa->nic, &board_info);
1436e86b48aaSIvan Malov 	if (rc != 0)
1437e86b48aaSIvan Malov 		return rc;
1438e86b48aaSIvan Malov 
1439e86b48aaSIvan Malov 	memcpy(id->board_sn, board_info.enbi_serial, sizeof(id->board_sn));
1440e86b48aaSIvan Malov 
1441e86b48aaSIvan Malov 	*idp = id;
1442e86b48aaSIvan Malov 
1443e86b48aaSIvan Malov 	return 0;
1444e86b48aaSIvan Malov }
1445e86b48aaSIvan Malov 
1446e86b48aaSIvan Malov void
sfc_hw_switch_id_fini(__rte_unused struct sfc_adapter * sa,struct sfc_hw_switch_id * id)1447e86b48aaSIvan Malov sfc_hw_switch_id_fini(__rte_unused struct sfc_adapter *sa,
1448e86b48aaSIvan Malov 		      struct sfc_hw_switch_id *id)
1449e86b48aaSIvan Malov {
1450e86b48aaSIvan Malov 	rte_free(id);
1451e86b48aaSIvan Malov }
1452e86b48aaSIvan Malov 
1453e86b48aaSIvan Malov bool
sfc_hw_switch_ids_equal(const struct sfc_hw_switch_id * left,const struct sfc_hw_switch_id * right)1454e86b48aaSIvan Malov sfc_hw_switch_ids_equal(const struct sfc_hw_switch_id *left,
1455e86b48aaSIvan Malov 			const struct sfc_hw_switch_id *right)
1456e86b48aaSIvan Malov {
1457380f3552SIvan Malov 	return strncmp(left->board_sn, right->board_sn,
1458380f3552SIvan Malov 		       sizeof(left->board_sn)) == 0;
1459e86b48aaSIvan Malov }
1460