xref: /dpdk/drivers/net/sfc/sfc_ethdev.c (revision 6da67e706dc9aa053bf1ed60eb97bcb8f2384d99)
144c0947bSAndrew Rybchenko /* SPDX-License-Identifier: BSD-3-Clause
2244cfa79SAndrew Rybchenko  *
398d26ef7SAndrew Rybchenko  * Copyright(c) 2019-2021 Xilinx, Inc.
4a0147be5SAndrew Rybchenko  * Copyright(c) 2016-2019 Solarflare Communications Inc.
563d588ffSAndrew Rybchenko  *
663d588ffSAndrew Rybchenko  * This software was jointly developed between OKTET Labs (under contract
763d588ffSAndrew Rybchenko  * for Solarflare) and Solarflare Communications, Inc.
863d588ffSAndrew Rybchenko  */
963d588ffSAndrew Rybchenko 
1063d588ffSAndrew Rybchenko #include <rte_dev.h>
11df96fd0dSBruce Richardson #include <ethdev_driver.h>
12df96fd0dSBruce Richardson #include <ethdev_pci.h>
1363d588ffSAndrew Rybchenko #include <rte_pci.h>
14c752998bSGaetan Rivet #include <rte_bus_pci.h>
15df1bfde4SAndrew Rybchenko #include <rte_errno.h>
16ed5b9848SAndy Green #include <rte_string_fns.h>
17bac46c6fSAndrew Rybchenko #include <rte_ether.h>
1863d588ffSAndrew Rybchenko 
19ba641f20SAndrew Rybchenko #include "efx.h"
20ba641f20SAndrew Rybchenko 
2163d588ffSAndrew Rybchenko #include "sfc.h"
2263d588ffSAndrew Rybchenko #include "sfc_debug.h"
2363d588ffSAndrew Rybchenko #include "sfc_log.h"
2463d588ffSAndrew Rybchenko #include "sfc_kvargs.h"
25886f8d8aSArtem Andreev #include "sfc_ev.h"
26ce35b05cSAndrew Rybchenko #include "sfc_rx.h"
27b1b7ad93SIvan Malov #include "sfc_tx.h"
28a9825ccfSRoman Zhukov #include "sfc_flow.h"
2953a80512SIvan Malov #include "sfc_flow_tunnel.h"
30df1bfde4SAndrew Rybchenko #include "sfc_dp.h"
31df1bfde4SAndrew Rybchenko #include "sfc_dp_rx.h"
32a62ec905SIgor Romanov #include "sfc_repr.h"
33fdd7719eSIvan Ilchenko #include "sfc_sw_stats.h"
3444db08d5SViacheslav Galaktionov #include "sfc_switch.h"
353037e6cfSViacheslav Galaktionov #include "sfc_nic_dma.h"
36fdd7719eSIvan Ilchenko 
37fdd7719eSIvan Ilchenko #define SFC_XSTAT_ID_INVALID_VAL  UINT64_MAX
38fdd7719eSIvan Ilchenko #define SFC_XSTAT_ID_INVALID_NAME '\0'
39df1bfde4SAndrew Rybchenko 
40fdceb100SIvan Malov uint32_t sfc_logtype_driver;
41fdceb100SIvan Malov 
42df1bfde4SAndrew Rybchenko static struct sfc_dp_list sfc_dp_head =
43df1bfde4SAndrew Rybchenko 	TAILQ_HEAD_INITIALIZER(sfc_dp_head);
4463d588ffSAndrew Rybchenko 
458fce2224SAndrew Rybchenko 
468fce2224SAndrew Rybchenko static void sfc_eth_dev_clear_ops(struct rte_eth_dev *dev);
478fce2224SAndrew Rybchenko 
488fce2224SAndrew Rybchenko 
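/*
 * Firmware version reporting, exposed via rte_eth_dev_fw_version_get().
 * The callback follows a snprintf()-like contract: 0 on success, a negative
 * value on error, or the required length (including the terminating NUL)
 * when fw_size is too small.
 *
 * Illustrative caller-side sketch (hypothetical application code, not part
 * of this driver; port_id is a placeholder):
 *
 *	char fw[32];
 *	int len = rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw));
 *
 *	if (len > 0)
 *		... retry with a buffer of at least 'len' bytes ...
 *
 * The produced string looks like "a.b.c.d", optionally followed by
 * " rx<id> tx<id>" when the DPCPU firmware IDs are reported as valid.
 */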
4983fef46aSIvan Malov static int
5083fef46aSIvan Malov sfc_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
5183fef46aSIvan Malov {
525313b441SAndrew Rybchenko 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
5383fef46aSIvan Malov 	efx_nic_fw_info_t enfi;
5483fef46aSIvan Malov 	int ret;
5583fef46aSIvan Malov 	int rc;
5683fef46aSIvan Malov 
5783fef46aSIvan Malov 	rc = efx_nic_get_fw_version(sa->nic, &enfi);
5883fef46aSIvan Malov 	if (rc != 0)
5983fef46aSIvan Malov 		return -rc;
6083fef46aSIvan Malov 
6183fef46aSIvan Malov 	ret = snprintf(fw_version, fw_size,
6283fef46aSIvan Malov 		       "%" PRIu16 ".%" PRIu16 ".%" PRIu16 ".%" PRIu16,
6383fef46aSIvan Malov 		       enfi.enfi_mc_fw_version[0], enfi.enfi_mc_fw_version[1],
6483fef46aSIvan Malov 		       enfi.enfi_mc_fw_version[2], enfi.enfi_mc_fw_version[3]);
6583fef46aSIvan Malov 	if (ret < 0)
6683fef46aSIvan Malov 		return ret;
6783fef46aSIvan Malov 
6883fef46aSIvan Malov 	if (enfi.enfi_dpcpu_fw_ids_valid) {
6983fef46aSIvan Malov 		size_t dpcpu_fw_ids_offset = MIN(fw_size - 1, (size_t)ret);
7083fef46aSIvan Malov 		int ret_extra;
7183fef46aSIvan Malov 
7283fef46aSIvan Malov 		ret_extra = snprintf(fw_version + dpcpu_fw_ids_offset,
7383fef46aSIvan Malov 				     fw_size - dpcpu_fw_ids_offset,
7483fef46aSIvan Malov 				     " rx%" PRIx16 " tx%" PRIx16,
7583fef46aSIvan Malov 				     enfi.enfi_rx_dpcpu_fw_id,
7683fef46aSIvan Malov 				     enfi.enfi_tx_dpcpu_fw_id);
7783fef46aSIvan Malov 		if (ret_extra < 0)
7883fef46aSIvan Malov 			return ret_extra;
7983fef46aSIvan Malov 
8083fef46aSIvan Malov 		ret += ret_extra;
8183fef46aSIvan Malov 	}
8283fef46aSIvan Malov 
8383fef46aSIvan Malov 	if (fw_size < (size_t)(++ret))
8483fef46aSIvan Malov 		return ret;
8583fef46aSIvan Malov 	else
8683fef46aSIvan Malov 		return 0;
8783fef46aSIvan Malov }
8883fef46aSIvan Malov 
89bdad90d1SIvan Ilchenko static int
9063d588ffSAndrew Rybchenko sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
9163d588ffSAndrew Rybchenko {
925dec95e3SAndrew Rybchenko 	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
93e295f175SAndrew Rybchenko 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
945313b441SAndrew Rybchenko 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
95e295f175SAndrew Rybchenko 	struct sfc_rss *rss = &sas->rss;
961e7fbdf0SIvan Malov 	struct sfc_mae *mae = &sa->mae;
97c78d280eSIvan Malov 	uint64_t txq_offloads_def = 0;
9863d588ffSAndrew Rybchenko 
9963d588ffSAndrew Rybchenko 	sfc_log_init(sa, "entry");
10063d588ffSAndrew Rybchenko 
10135b2d13fSOlivier Matz 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
102bac46c6fSAndrew Rybchenko 	dev_info->max_mtu = EFX_MAC_SDU_MAX;
103bac46c6fSAndrew Rybchenko 
10403ed2119SAndrew Rybchenko 	dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;
105a8e64c6bSAndrew Rybchenko 
106dd45b880SAndrew Rybchenko 	dev_info->max_vfs = sa->sriov.num_vfs;
107dd45b880SAndrew Rybchenko 
108d23f3a89SAndrew Rybchenko 	/* Autonegotiation may be disabled */
109295968d1SFerruh Yigit 	dev_info->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
110cf6a73fcSAndrew Rybchenko 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_1000FDX))
111295968d1SFerruh Yigit 		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_1G;
112cf6a73fcSAndrew Rybchenko 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_10000FDX))
113295968d1SFerruh Yigit 		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_10G;
114cf6a73fcSAndrew Rybchenko 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_25000FDX))
115295968d1SFerruh Yigit 		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_25G;
116cf6a73fcSAndrew Rybchenko 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_40000FDX))
117295968d1SFerruh Yigit 		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_40G;
118cf6a73fcSAndrew Rybchenko 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_50000FDX))
119295968d1SFerruh Yigit 		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_50G;
120cf6a73fcSAndrew Rybchenko 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_100000FDX))
121295968d1SFerruh Yigit 		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100G;
122d23f3a89SAndrew Rybchenko 
123ce35b05cSAndrew Rybchenko 	dev_info->max_rx_queues = sa->rxq_max;
124a8ad8cf8SIvan Malov 	dev_info->max_tx_queues = sa->txq_max;
125ce35b05cSAndrew Rybchenko 
126a8e64c6bSAndrew Rybchenko 	/* By default packets are dropped if no descriptors are available */
127a8e64c6bSAndrew Rybchenko 	dev_info->default_rxconf.rx_drop_en = 1;
128a8e64c6bSAndrew Rybchenko 
129ff6a1197SIvan Malov 	dev_info->rx_queue_offload_capa = sfc_rx_get_queue_offload_caps(sa);
130ff6a1197SIvan Malov 
131ff6a1197SIvan Malov 	/*
132ff6a1197SIvan Malov 	 * rx_offload_capa includes both device and queue offloads since
133ff6a1197SIvan Malov 	 * the latter may be requested on a per-device basis, which makes
134ff6a1197SIvan Malov 	 * sense when some offloads need to be set on all queues.
135ff6a1197SIvan Malov 	 */
136ff6a1197SIvan Malov 	dev_info->rx_offload_capa = sfc_rx_get_dev_offload_caps(sa) |
137ff6a1197SIvan Malov 				    dev_info->rx_queue_offload_capa;
138591cbbb1SAndrew Rybchenko 
139c78d280eSIvan Malov 	dev_info->tx_queue_offload_capa = sfc_tx_get_queue_offload_caps(sa);
140c78d280eSIvan Malov 
141c78d280eSIvan Malov 	/*
142c78d280eSIvan Malov 	 * tx_offload_capa includes both device and queue offloads since
143c78d280eSIvan Malov 	 * the latter may be requested on a per-device basis, which makes
144c78d280eSIvan Malov 	 * sense when some offloads need to be set on all queues.
145c78d280eSIvan Malov 	 */
146c78d280eSIvan Malov 	dev_info->tx_offload_capa = sfc_tx_get_dev_offload_caps(sa) |
147c78d280eSIvan Malov 				    dev_info->tx_queue_offload_capa;
148c78d280eSIvan Malov 
149295968d1SFerruh Yigit 	if (dev_info->tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
150295968d1SFerruh Yigit 		txq_offloads_def |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
151c78d280eSIvan Malov 
152c78d280eSIvan Malov 	dev_info->default_txconf.offloads |= txq_offloads_def;
153c1767d93SAndrew Rybchenko 
154d1482e21SIvan Malov 	if (rss->context_type != EFX_RX_SCALE_UNAVAILABLE) {
15501764b20SIvan Malov 		uint64_t rte_hf = 0;
15601764b20SIvan Malov 		unsigned int i;
15701764b20SIvan Malov 
15801764b20SIvan Malov 		for (i = 0; i < rss->hf_map_nb_entries; ++i)
15901764b20SIvan Malov 			rte_hf |= rss->hf_map[i].rte;
16001764b20SIvan Malov 
1614ec1fc3bSIvan Malov 		dev_info->reta_size = EFX_RSS_TBL_SIZE;
16237a42c61SAndrew Rybchenko 		dev_info->hash_key_size = EFX_RSS_KEY_SIZE;
16301764b20SIvan Malov 		dev_info->flow_type_rss_offloads = rte_hf;
1644ec1fc3bSIvan Malov 	}
1654ec1fc3bSIvan Malov 
1663c335b7fSAndrew Rybchenko 	/* Initialize to hardware limits */
167048a0d1aSIgor Romanov 	dev_info->rx_desc_lim.nb_max = sa->rxq_max_entries;
168048a0d1aSIgor Romanov 	dev_info->rx_desc_lim.nb_min = sa->rxq_min_entries;
169a8e64c6bSAndrew Rybchenko 	/* The RXQ hardware requires that the descriptor count is a power
170a8e64c6bSAndrew Rybchenko 	 * of 2, but rx_desc_lim cannot properly describe that constraint.
171a8e64c6bSAndrew Rybchenko 	 */
172048a0d1aSIgor Romanov 	dev_info->rx_desc_lim.nb_align = sa->rxq_min_entries;
173a8ad8cf8SIvan Malov 
174c7dadc9fSAndrew Rybchenko 	/* Initialize to hardware limits */
175a8ad8cf8SIvan Malov 	dev_info->tx_desc_lim.nb_max = sa->txq_max_entries;
1769dbd28dfSIgor Romanov 	dev_info->tx_desc_lim.nb_min = sa->txq_min_entries;
177a8ad8cf8SIvan Malov 	/*
178a8ad8cf8SIvan Malov 	 * The TXQ hardware requires that the descriptor count is a power
179a8ad8cf8SIvan Malov 	 * of 2, but tx_desc_lim cannot properly describe that constraint
180a8ad8cf8SIvan Malov 	 */
1819dbd28dfSIgor Romanov 	dev_info->tx_desc_lim.nb_align = sa->txq_min_entries;
1823c335b7fSAndrew Rybchenko 
1835dec95e3SAndrew Rybchenko 	if (sap->dp_rx->get_dev_info != NULL)
1845dec95e3SAndrew Rybchenko 		sap->dp_rx->get_dev_info(dev_info);
1855dec95e3SAndrew Rybchenko 	if (sap->dp_tx->get_dev_info != NULL)
1865dec95e3SAndrew Rybchenko 		sap->dp_tx->get_dev_info(dev_info);
1870668a27aSIgor Romanov 
188862b35afSIgor Romanov 	dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
189862b35afSIgor Romanov 			     RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
1902fe6f1b7SDmitry Kozlyuk 	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
191bdad90d1SIvan Ilchenko 
1922f577f0eSViacheslav Galaktionov 	if (mae->status == SFC_MAE_STATUS_SUPPORTED ||
1932f577f0eSViacheslav Galaktionov 	    mae->status == SFC_MAE_STATUS_ADMIN) {
1941e7fbdf0SIvan Malov 		dev_info->switch_info.name = dev->device->driver->name;
1951e7fbdf0SIvan Malov 		dev_info->switch_info.domain_id = mae->switch_domain_id;
1961e7fbdf0SIvan Malov 		dev_info->switch_info.port_id = mae->switch_port_id;
1971e7fbdf0SIvan Malov 	}
1981e7fbdf0SIvan Malov 
199bdad90d1SIvan Ilchenko 	return 0;
20063d588ffSAndrew Rybchenko }
20163d588ffSAndrew Rybchenko 
20256349dc9SAndrew Rybchenko static const uint32_t *
20356349dc9SAndrew Rybchenko sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev)
20456349dc9SAndrew Rybchenko {
2055dec95e3SAndrew Rybchenko 	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
20656349dc9SAndrew Rybchenko 
2072646d42fSAndrew Rybchenko 	return sap->dp_rx->supported_ptypes_get(sap->shared->tunnel_encaps);
20856349dc9SAndrew Rybchenko }
20956349dc9SAndrew Rybchenko 
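/*
 * Device configuration entry point (reached via rte_eth_dev_configure()).
 * The adapter may be (re)configured only from the INITIALIZED and
 * CONFIGURED states; any other state is reported as an error. Ethdev
 * callbacks return negative errno values, whereas internal sfc_*() helpers
 * use positive ones, hence the final negation.
 */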
210aaa3f5f0SAndrew Rybchenko static int
211aaa3f5f0SAndrew Rybchenko sfc_dev_configure(struct rte_eth_dev *dev)
212aaa3f5f0SAndrew Rybchenko {
213aaa3f5f0SAndrew Rybchenko 	struct rte_eth_dev_data *dev_data = dev->data;
2145313b441SAndrew Rybchenko 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
215aaa3f5f0SAndrew Rybchenko 	int rc;
216aaa3f5f0SAndrew Rybchenko 
217aaa3f5f0SAndrew Rybchenko 	sfc_log_init(sa, "entry n_rxq=%u n_txq=%u",
218aaa3f5f0SAndrew Rybchenko 		     dev_data->nb_rx_queues, dev_data->nb_tx_queues);
219aaa3f5f0SAndrew Rybchenko 
220aaa3f5f0SAndrew Rybchenko 	sfc_adapter_lock(sa);
221aaa3f5f0SAndrew Rybchenko 	switch (sa->state) {
222ac478689SIgor Romanov 	case SFC_ETHDEV_CONFIGURED:
223aaa3f5f0SAndrew Rybchenko 		/* FALLTHROUGH */
224ac478689SIgor Romanov 	case SFC_ETHDEV_INITIALIZED:
225aaa3f5f0SAndrew Rybchenko 		rc = sfc_configure(sa);
226aaa3f5f0SAndrew Rybchenko 		break;
227aaa3f5f0SAndrew Rybchenko 	default:
228aaa3f5f0SAndrew Rybchenko 		sfc_err(sa, "unexpected adapter state %u to configure",
229aaa3f5f0SAndrew Rybchenko 			sa->state);
230aaa3f5f0SAndrew Rybchenko 		rc = EINVAL;
231aaa3f5f0SAndrew Rybchenko 		break;
232aaa3f5f0SAndrew Rybchenko 	}
233aaa3f5f0SAndrew Rybchenko 	sfc_adapter_unlock(sa);
234aaa3f5f0SAndrew Rybchenko 
235aaa3f5f0SAndrew Rybchenko 	sfc_log_init(sa, "done %d", rc);
236aaa3f5f0SAndrew Rybchenko 	SFC_ASSERT(rc >= 0);
237aaa3f5f0SAndrew Rybchenko 	return -rc;
238aaa3f5f0SAndrew Rybchenko }
239aaa3f5f0SAndrew Rybchenko 
24093fcf09bSAndrew Rybchenko static int
24193fcf09bSAndrew Rybchenko sfc_dev_start(struct rte_eth_dev *dev)
24293fcf09bSAndrew Rybchenko {
2435313b441SAndrew Rybchenko 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
24493fcf09bSAndrew Rybchenko 	int rc;
24593fcf09bSAndrew Rybchenko 
24693fcf09bSAndrew Rybchenko 	sfc_log_init(sa, "entry");
24793fcf09bSAndrew Rybchenko 
24893fcf09bSAndrew Rybchenko 	sfc_adapter_lock(sa);
24993fcf09bSAndrew Rybchenko 	rc = sfc_start(sa);
25093fcf09bSAndrew Rybchenko 	sfc_adapter_unlock(sa);
25193fcf09bSAndrew Rybchenko 
25293fcf09bSAndrew Rybchenko 	sfc_log_init(sa, "done %d", rc);
25393fcf09bSAndrew Rybchenko 	SFC_ASSERT(rc >= 0);
25493fcf09bSAndrew Rybchenko 	return -rc;
25593fcf09bSAndrew Rybchenko }
25693fcf09bSAndrew Rybchenko 
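/*
 * Link status reporting. Three cases are handled below:
 *  - the adapter is not started: the link is reported as unknown;
 *  - wait_to_complete is set: the port is polled synchronously;
 *  - otherwise the management event queue is polled and the link status
 *    cached by the event handlers is returned.
 * The status notice is logged only when rte_eth_linkstatus_set() reports
 * (by returning zero) that the stored value actually changed.
 */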
257886f8d8aSArtem Andreev static int
258886f8d8aSArtem Andreev sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
259886f8d8aSArtem Andreev {
2605313b441SAndrew Rybchenko 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
261886f8d8aSArtem Andreev 	struct rte_eth_link current_link;
2624abe903eSStephen Hemminger 	int ret;
263886f8d8aSArtem Andreev 
264886f8d8aSArtem Andreev 	sfc_log_init(sa, "entry");
265886f8d8aSArtem Andreev 
266ac478689SIgor Romanov 	if (sa->state != SFC_ETHDEV_STARTED) {
2679b098c52SAndrew Rybchenko 		sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &current_link);
2689b098c52SAndrew Rybchenko 	} else if (wait_to_complete) {
269886f8d8aSArtem Andreev 		efx_link_mode_t link_mode;
270886f8d8aSArtem Andreev 
271c7c915e1SAndrew Rybchenko 		if (efx_port_poll(sa->nic, &link_mode) != 0)
272c7c915e1SAndrew Rybchenko 			link_mode = EFX_LINK_UNKNOWN;
273886f8d8aSArtem Andreev 		sfc_port_link_mode_to_info(link_mode, &current_link);
274886f8d8aSArtem Andreev 
275886f8d8aSArtem Andreev 	} else {
276886f8d8aSArtem Andreev 		sfc_ev_mgmt_qpoll(sa);
2774abe903eSStephen Hemminger 		rte_eth_linkstatus_get(dev, &current_link);
278886f8d8aSArtem Andreev 	}
279886f8d8aSArtem Andreev 
2804abe903eSStephen Hemminger 	ret = rte_eth_linkstatus_set(dev, &current_link);
2814abe903eSStephen Hemminger 	if (ret == 0)
28291d16276SIvan Malov 		sfc_notice(sa, "Link status is %s",
283886f8d8aSArtem Andreev 			   current_link.link_status ? "UP" : "DOWN");
28491d16276SIvan Malov 
2854abe903eSStephen Hemminger 	return ret;
286886f8d8aSArtem Andreev }
287886f8d8aSArtem Andreev 
28862024eb8SIvan Ilchenko static int
28993fcf09bSAndrew Rybchenko sfc_dev_stop(struct rte_eth_dev *dev)
29093fcf09bSAndrew Rybchenko {
2915313b441SAndrew Rybchenko 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
29293fcf09bSAndrew Rybchenko 
29393fcf09bSAndrew Rybchenko 	sfc_log_init(sa, "entry");
29493fcf09bSAndrew Rybchenko 
29593fcf09bSAndrew Rybchenko 	sfc_adapter_lock(sa);
29693fcf09bSAndrew Rybchenko 	sfc_stop(sa);
29793fcf09bSAndrew Rybchenko 	sfc_adapter_unlock(sa);
29893fcf09bSAndrew Rybchenko 
29993fcf09bSAndrew Rybchenko 	sfc_log_init(sa, "done");
30062024eb8SIvan Ilchenko 
30162024eb8SIvan Ilchenko 	return 0;
30293fcf09bSAndrew Rybchenko }
30393fcf09bSAndrew Rybchenko 
3042a05f337SArtem Andreev static int
3052a05f337SArtem Andreev sfc_dev_set_link_up(struct rte_eth_dev *dev)
3062a05f337SArtem Andreev {
3075313b441SAndrew Rybchenko 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
3082a05f337SArtem Andreev 	int rc;
3092a05f337SArtem Andreev 
3102a05f337SArtem Andreev 	sfc_log_init(sa, "entry");
3112a05f337SArtem Andreev 
3122a05f337SArtem Andreev 	sfc_adapter_lock(sa);
3132a05f337SArtem Andreev 	rc = sfc_start(sa);
3142a05f337SArtem Andreev 	sfc_adapter_unlock(sa);
3152a05f337SArtem Andreev 
3162a05f337SArtem Andreev 	SFC_ASSERT(rc >= 0);
3172a05f337SArtem Andreev 	return -rc;
3182a05f337SArtem Andreev }
3192a05f337SArtem Andreev 
3202a05f337SArtem Andreev static int
3212a05f337SArtem Andreev sfc_dev_set_link_down(struct rte_eth_dev *dev)
3222a05f337SArtem Andreev {
3235313b441SAndrew Rybchenko 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
3242a05f337SArtem Andreev 
3252a05f337SArtem Andreev 	sfc_log_init(sa, "entry");
3262a05f337SArtem Andreev 
3272a05f337SArtem Andreev 	sfc_adapter_lock(sa);
3282a05f337SArtem Andreev 	sfc_stop(sa);
3292a05f337SArtem Andreev 	sfc_adapter_unlock(sa);
3302a05f337SArtem Andreev 
3312a05f337SArtem Andreev 	return 0;
3322a05f337SArtem Andreev }
3332a05f337SArtem Andreev 
33430410493SThomas Monjalon static void
33530410493SThomas Monjalon sfc_eth_dev_secondary_clear_ops(struct rte_eth_dev *dev)
33630410493SThomas Monjalon {
33730410493SThomas Monjalon 	free(dev->process_private);
3380607dadfSThomas Monjalon 	rte_eth_dev_release_port(dev);
33930410493SThomas Monjalon }
34030410493SThomas Monjalon 
341b142387bSThomas Monjalon static int
342aaa3f5f0SAndrew Rybchenko sfc_dev_close(struct rte_eth_dev *dev)
343aaa3f5f0SAndrew Rybchenko {
3445313b441SAndrew Rybchenko 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
345aaa3f5f0SAndrew Rybchenko 
346aaa3f5f0SAndrew Rybchenko 	sfc_log_init(sa, "entry");
347aaa3f5f0SAndrew Rybchenko 
34830410493SThomas Monjalon 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
34930410493SThomas Monjalon 		sfc_eth_dev_secondary_clear_ops(dev);
35030410493SThomas Monjalon 		return 0;
35130410493SThomas Monjalon 	}
35230410493SThomas Monjalon 
353c377f1adSIgor Romanov 	sfc_pre_detach(sa);
354c377f1adSIgor Romanov 
355aaa3f5f0SAndrew Rybchenko 	sfc_adapter_lock(sa);
356aaa3f5f0SAndrew Rybchenko 	switch (sa->state) {
357ac478689SIgor Romanov 	case SFC_ETHDEV_STARTED:
35893fcf09bSAndrew Rybchenko 		sfc_stop(sa);
359ac478689SIgor Romanov 		SFC_ASSERT(sa->state == SFC_ETHDEV_CONFIGURED);
36093fcf09bSAndrew Rybchenko 		/* FALLTHROUGH */
361ac478689SIgor Romanov 	case SFC_ETHDEV_CONFIGURED:
362aaa3f5f0SAndrew Rybchenko 		sfc_close(sa);
363ac478689SIgor Romanov 		SFC_ASSERT(sa->state == SFC_ETHDEV_INITIALIZED);
364aaa3f5f0SAndrew Rybchenko 		/* FALLTHROUGH */
365ac478689SIgor Romanov 	case SFC_ETHDEV_INITIALIZED:
366aaa3f5f0SAndrew Rybchenko 		break;
367aaa3f5f0SAndrew Rybchenko 	default:
368aaa3f5f0SAndrew Rybchenko 		sfc_err(sa, "unexpected adapter state %u on close", sa->state);
369aaa3f5f0SAndrew Rybchenko 		break;
370aaa3f5f0SAndrew Rybchenko 	}
3718fce2224SAndrew Rybchenko 
3728fce2224SAndrew Rybchenko 	/*
373fbd19135SThomas Monjalon 	 * Cleanup all resources.
3748fce2224SAndrew Rybchenko 	 * Rollback primary process sfc_eth_dev_init() below.
3758fce2224SAndrew Rybchenko 	 */
3768fce2224SAndrew Rybchenko 
3778fce2224SAndrew Rybchenko 	sfc_eth_dev_clear_ops(dev);
3788fce2224SAndrew Rybchenko 
3793037e6cfSViacheslav Galaktionov 	sfc_nic_dma_detach(sa);
3808fce2224SAndrew Rybchenko 	sfc_detach(sa);
3818fce2224SAndrew Rybchenko 	sfc_unprobe(sa);
3828fce2224SAndrew Rybchenko 
3838fce2224SAndrew Rybchenko 	sfc_kvargs_cleanup(sa);
3848fce2224SAndrew Rybchenko 
385aaa3f5f0SAndrew Rybchenko 	sfc_adapter_unlock(sa);
3868fce2224SAndrew Rybchenko 	sfc_adapter_lock_fini(sa);
387aaa3f5f0SAndrew Rybchenko 
388aaa3f5f0SAndrew Rybchenko 	sfc_log_init(sa, "done");
3898fce2224SAndrew Rybchenko 
3908fce2224SAndrew Rybchenko 	/* Required for logging, so cleanup last */
3918fce2224SAndrew Rybchenko 	sa->eth_dev = NULL;
3928fce2224SAndrew Rybchenko 
3938fce2224SAndrew Rybchenko 	free(sa);
394b142387bSThomas Monjalon 
395b142387bSThomas Monjalon 	return 0;
396aaa3f5f0SAndrew Rybchenko }
397aaa3f5f0SAndrew Rybchenko 
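/*
 * Common helper behind the promiscuous and all-multicast mode callbacks.
 * In isolated mode the requested value is only remembered and takes effect
 * on the next (re)start; on a started port the Rx mode is pushed to the NIC
 * immediately and rolled back on failure. A permission failure (EPERM) is
 * remapped to ENOTSUP so that applications see "unsupported" rather than
 * "not permitted" for these global filters.
 */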
3989039c812SAndrew Rybchenko static int
399f3de3840SIvan Malov sfc_dev_filter_set(struct rte_eth_dev *dev, enum sfc_dev_filter_mode mode,
400f3de3840SIvan Malov 		   boolean_t enabled)
401f3de3840SIvan Malov {
402f3de3840SIvan Malov 	struct sfc_port *port;
403f3de3840SIvan Malov 	boolean_t *toggle;
4045313b441SAndrew Rybchenko 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
405f3de3840SIvan Malov 	boolean_t allmulti = (mode == SFC_DEV_FILTER_MODE_ALLMULTI);
406f3de3840SIvan Malov 	const char *desc = (allmulti) ? "all-multi" : "promiscuous";
4079039c812SAndrew Rybchenko 	int rc = 0;
408f3de3840SIvan Malov 
409f3de3840SIvan Malov 	sfc_adapter_lock(sa);
410f3de3840SIvan Malov 
411f3de3840SIvan Malov 	port = &sa->port;
412f3de3840SIvan Malov 	toggle = (allmulti) ? (&port->allmulti) : (&port->promisc);
413f3de3840SIvan Malov 
414f3de3840SIvan Malov 	if (*toggle != enabled) {
415f3de3840SIvan Malov 		*toggle = enabled;
416f3de3840SIvan Malov 
417e0d5ba7eSAndrew Rybchenko 		if (sfc_sa2shared(sa)->isolated) {
41884a9b481SIvan Malov 			sfc_warn(sa, "isolated mode is active on the port");
41984a9b481SIvan Malov 			sfc_warn(sa, "the change is to be applied on the next "
42084a9b481SIvan Malov 				     "start provided that isolated mode is "
42184a9b481SIvan Malov 				     "disabled prior to the next start");
422ac478689SIgor Romanov 		} else if ((sa->state == SFC_ETHDEV_STARTED) &&
4239039c812SAndrew Rybchenko 			   ((rc = sfc_set_rx_mode(sa)) != 0)) {
424f3de3840SIvan Malov 			*toggle = !(enabled);
42598608e18SIgor Romanov 			sfc_warn(sa, "Failed to %s %s mode, rc = %d",
42698608e18SIgor Romanov 				 ((enabled) ? "enable" : "disable"), desc, rc);
42798608e18SIgor Romanov 
42898608e18SIgor Romanov 			/*
42998608e18SIgor Romanov 			 * For promiscuous and all-multicast filters a
43098608e18SIgor Romanov 			 * permission failure should be reported as an
43198608e18SIgor Romanov 			 * unsupported filter.
43298608e18SIgor Romanov 			 */
43398608e18SIgor Romanov 			if (rc == EPERM)
43498608e18SIgor Romanov 				rc = ENOTSUP;
435f3de3840SIvan Malov 		}
436f3de3840SIvan Malov 	}
437f3de3840SIvan Malov 
438f3de3840SIvan Malov 	sfc_adapter_unlock(sa);
4399039c812SAndrew Rybchenko 	return rc;
440f3de3840SIvan Malov }
441f3de3840SIvan Malov 
4429039c812SAndrew Rybchenko static int
443f3de3840SIvan Malov sfc_dev_promisc_enable(struct rte_eth_dev *dev)
444f3de3840SIvan Malov {
4457329b56fSIgor Romanov 	int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_TRUE);
4467329b56fSIgor Romanov 
4477329b56fSIgor Romanov 	SFC_ASSERT(rc >= 0);
4487329b56fSIgor Romanov 	return -rc;
449f3de3840SIvan Malov }
450f3de3840SIvan Malov 
4519039c812SAndrew Rybchenko static int
452f3de3840SIvan Malov sfc_dev_promisc_disable(struct rte_eth_dev *dev)
453f3de3840SIvan Malov {
4547329b56fSIgor Romanov 	int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_FALSE);
4557329b56fSIgor Romanov 
4567329b56fSIgor Romanov 	SFC_ASSERT(rc >= 0);
4577329b56fSIgor Romanov 	return -rc;
458f3de3840SIvan Malov }
459f3de3840SIvan Malov 
460ca041cd4SIvan Ilchenko static int
461f3de3840SIvan Malov sfc_dev_allmulti_enable(struct rte_eth_dev *dev)
462f3de3840SIvan Malov {
4637329b56fSIgor Romanov 	int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_TRUE);
4647329b56fSIgor Romanov 
4657329b56fSIgor Romanov 	SFC_ASSERT(rc >= 0);
4667329b56fSIgor Romanov 	return -rc;
467f3de3840SIvan Malov }
468f3de3840SIvan Malov 
469ca041cd4SIvan Ilchenko static int
470f3de3840SIvan Malov sfc_dev_allmulti_disable(struct rte_eth_dev *dev)
471f3de3840SIvan Malov {
4727329b56fSIgor Romanov 	int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_FALSE);
4737329b56fSIgor Romanov 
4747329b56fSIgor Romanov 	SFC_ASSERT(rc >= 0);
4757329b56fSIgor Romanov 	return -rc;
476f3de3840SIvan Malov }
477f3de3840SIvan Malov 
478ce35b05cSAndrew Rybchenko static int
47909cafbddSIgor Romanov sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t ethdev_qid,
480ce35b05cSAndrew Rybchenko 		   uint16_t nb_rx_desc, unsigned int socket_id,
481ce35b05cSAndrew Rybchenko 		   const struct rte_eth_rxconf *rx_conf,
482ce35b05cSAndrew Rybchenko 		   struct rte_mempool *mb_pool)
483ce35b05cSAndrew Rybchenko {
484dda791c2SAndrew Rybchenko 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
4855313b441SAndrew Rybchenko 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
48609cafbddSIgor Romanov 	sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
48709cafbddSIgor Romanov 	struct sfc_rxq_info *rxq_info;
48809cafbddSIgor Romanov 	sfc_sw_index_t sw_index;
489ce35b05cSAndrew Rybchenko 	int rc;
490ce35b05cSAndrew Rybchenko 
491ce35b05cSAndrew Rybchenko 	sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
49209cafbddSIgor Romanov 		     ethdev_qid, nb_rx_desc, socket_id);
493ce35b05cSAndrew Rybchenko 
494ce35b05cSAndrew Rybchenko 	sfc_adapter_lock(sa);
495ce35b05cSAndrew Rybchenko 
49609cafbddSIgor Romanov 	sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, sfc_ethdev_qid);
49709cafbddSIgor Romanov 	rc = sfc_rx_qinit(sa, sw_index, nb_rx_desc, socket_id,
498ce35b05cSAndrew Rybchenko 			  rx_conf, mb_pool);
499ce35b05cSAndrew Rybchenko 	if (rc != 0)
500ce35b05cSAndrew Rybchenko 		goto fail_rx_qinit;
501ce35b05cSAndrew Rybchenko 
50209cafbddSIgor Romanov 	rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
50309cafbddSIgor Romanov 	dev->data->rx_queues[ethdev_qid] = rxq_info->dp;
504ce35b05cSAndrew Rybchenko 
505ce35b05cSAndrew Rybchenko 	sfc_adapter_unlock(sa);
506ce35b05cSAndrew Rybchenko 
507ce35b05cSAndrew Rybchenko 	return 0;
508ce35b05cSAndrew Rybchenko 
509ce35b05cSAndrew Rybchenko fail_rx_qinit:
510ce35b05cSAndrew Rybchenko 	sfc_adapter_unlock(sa);
511ce35b05cSAndrew Rybchenko 	SFC_ASSERT(rc > 0);
512ce35b05cSAndrew Rybchenko 	return -rc;
513ce35b05cSAndrew Rybchenko }
514ce35b05cSAndrew Rybchenko 
515ce35b05cSAndrew Rybchenko static void
5167483341aSXueming Li sfc_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
517ce35b05cSAndrew Rybchenko {
5187483341aSXueming Li 	struct sfc_dp_rxq *dp_rxq = dev->data->rx_queues[qid];
519df1bfde4SAndrew Rybchenko 	struct sfc_rxq *rxq;
520ce35b05cSAndrew Rybchenko 	struct sfc_adapter *sa;
52109cafbddSIgor Romanov 	sfc_sw_index_t sw_index;
522ce35b05cSAndrew Rybchenko 
523df1bfde4SAndrew Rybchenko 	if (dp_rxq == NULL)
524ce35b05cSAndrew Rybchenko 		return;
525ce35b05cSAndrew Rybchenko 
526df1bfde4SAndrew Rybchenko 	rxq = sfc_rxq_by_dp_rxq(dp_rxq);
527ce35b05cSAndrew Rybchenko 	sa = rxq->evq->sa;
528ce35b05cSAndrew Rybchenko 	sfc_adapter_lock(sa);
529ce35b05cSAndrew Rybchenko 
5309f25da76SAndrew Rybchenko 	sw_index = dp_rxq->dpq.queue_id;
531ce35b05cSAndrew Rybchenko 
532ce35b05cSAndrew Rybchenko 	sfc_log_init(sa, "RxQ=%u", sw_index);
533ce35b05cSAndrew Rybchenko 
534ce35b05cSAndrew Rybchenko 	sfc_rx_qfini(sa, sw_index);
535ce35b05cSAndrew Rybchenko 
536ce35b05cSAndrew Rybchenko 	sfc_adapter_unlock(sa);
537ce35b05cSAndrew Rybchenko }
538ce35b05cSAndrew Rybchenko 
539b1b7ad93SIvan Malov static int
540db980d26SIgor Romanov sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t ethdev_qid,
541b1b7ad93SIvan Malov 		   uint16_t nb_tx_desc, unsigned int socket_id,
542b1b7ad93SIvan Malov 		   const struct rte_eth_txconf *tx_conf)
543b1b7ad93SIvan Malov {
544113a14a6SAndrew Rybchenko 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
5455313b441SAndrew Rybchenko 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
546db980d26SIgor Romanov 	struct sfc_txq_info *txq_info;
547db980d26SIgor Romanov 	sfc_sw_index_t sw_index;
548b1b7ad93SIvan Malov 	int rc;
549b1b7ad93SIvan Malov 
550b1b7ad93SIvan Malov 	sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u",
551db980d26SIgor Romanov 		     ethdev_qid, nb_tx_desc, socket_id);
552b1b7ad93SIvan Malov 
553b1b7ad93SIvan Malov 	sfc_adapter_lock(sa);
554b1b7ad93SIvan Malov 
555db980d26SIgor Romanov 	sw_index = sfc_txq_sw_index_by_ethdev_tx_qid(sas, ethdev_qid);
556db980d26SIgor Romanov 	rc = sfc_tx_qinit(sa, sw_index, nb_tx_desc, socket_id, tx_conf);
557b1b7ad93SIvan Malov 	if (rc != 0)
558b1b7ad93SIvan Malov 		goto fail_tx_qinit;
559b1b7ad93SIvan Malov 
560db980d26SIgor Romanov 	txq_info = sfc_txq_info_by_ethdev_qid(sas, ethdev_qid);
561db980d26SIgor Romanov 	dev->data->tx_queues[ethdev_qid] = txq_info->dp;
562b1b7ad93SIvan Malov 
563b1b7ad93SIvan Malov 	sfc_adapter_unlock(sa);
564b1b7ad93SIvan Malov 	return 0;
565b1b7ad93SIvan Malov 
566b1b7ad93SIvan Malov fail_tx_qinit:
567b1b7ad93SIvan Malov 	sfc_adapter_unlock(sa);
568b1b7ad93SIvan Malov 	SFC_ASSERT(rc > 0);
569b1b7ad93SIvan Malov 	return -rc;
570b1b7ad93SIvan Malov }
571b1b7ad93SIvan Malov 
572b1b7ad93SIvan Malov static void
5737483341aSXueming Li sfc_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
574b1b7ad93SIvan Malov {
5757483341aSXueming Li 	struct sfc_dp_txq *dp_txq = dev->data->tx_queues[qid];
576dbdc8241SAndrew Rybchenko 	struct sfc_txq *txq;
577db980d26SIgor Romanov 	sfc_sw_index_t sw_index;
578b1b7ad93SIvan Malov 	struct sfc_adapter *sa;
579b1b7ad93SIvan Malov 
580dbdc8241SAndrew Rybchenko 	if (dp_txq == NULL)
581b1b7ad93SIvan Malov 		return;
582b1b7ad93SIvan Malov 
583dbdc8241SAndrew Rybchenko 	txq = sfc_txq_by_dp_txq(dp_txq);
584dbe26517SAndrew Rybchenko 	sw_index = dp_txq->dpq.queue_id;
585b1b7ad93SIvan Malov 
586b1b7ad93SIvan Malov 	SFC_ASSERT(txq->evq != NULL);
587b1b7ad93SIvan Malov 	sa = txq->evq->sa;
588b1b7ad93SIvan Malov 
589b1b7ad93SIvan Malov 	sfc_log_init(sa, "TxQ = %u", sw_index);
590b1b7ad93SIvan Malov 
591b1b7ad93SIvan Malov 	sfc_adapter_lock(sa);
592b1b7ad93SIvan Malov 
593b1b7ad93SIvan Malov 	sfc_tx_qfini(sa, sw_index);
594b1b7ad93SIvan Malov 
595b1b7ad93SIvan Malov 	sfc_adapter_unlock(sa);
596b1b7ad93SIvan Malov }
597b1b7ad93SIvan Malov 
598395ffcb4SIvan Ilchenko static void
599395ffcb4SIvan Ilchenko sfc_stats_get_dp_rx(struct sfc_adapter *sa, uint64_t *pkts, uint64_t *bytes)
600395ffcb4SIvan Ilchenko {
601395ffcb4SIvan Ilchenko 	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
602395ffcb4SIvan Ilchenko 	uint64_t pkts_sum = 0;
603395ffcb4SIvan Ilchenko 	uint64_t bytes_sum = 0;
604395ffcb4SIvan Ilchenko 	unsigned int i;
605395ffcb4SIvan Ilchenko 
606395ffcb4SIvan Ilchenko 	for (i = 0; i < sas->ethdev_rxq_count; ++i) {
607395ffcb4SIvan Ilchenko 		struct sfc_rxq_info *rxq_info;
608395ffcb4SIvan Ilchenko 
609395ffcb4SIvan Ilchenko 		rxq_info = sfc_rxq_info_by_ethdev_qid(sas, i);
610395ffcb4SIvan Ilchenko 		if (rxq_info->state & SFC_RXQ_INITIALIZED) {
611395ffcb4SIvan Ilchenko 			union sfc_pkts_bytes qstats;
612395ffcb4SIvan Ilchenko 
613395ffcb4SIvan Ilchenko 			sfc_pkts_bytes_get(&rxq_info->dp->dpq.stats, &qstats);
614395ffcb4SIvan Ilchenko 			pkts_sum += qstats.pkts -
615395ffcb4SIvan Ilchenko 					sa->sw_stats.reset_rx_pkts[i];
616395ffcb4SIvan Ilchenko 			bytes_sum += qstats.bytes -
617395ffcb4SIvan Ilchenko 					sa->sw_stats.reset_rx_bytes[i];
618395ffcb4SIvan Ilchenko 		}
619395ffcb4SIvan Ilchenko 	}
620395ffcb4SIvan Ilchenko 
621395ffcb4SIvan Ilchenko 	*pkts = pkts_sum;
622395ffcb4SIvan Ilchenko 	*bytes = bytes_sum;
623395ffcb4SIvan Ilchenko }
624395ffcb4SIvan Ilchenko 
625acc47448SIvan Ilchenko static void
626acc47448SIvan Ilchenko sfc_stats_get_dp_tx(struct sfc_adapter *sa, uint64_t *pkts, uint64_t *bytes)
627acc47448SIvan Ilchenko {
628acc47448SIvan Ilchenko 	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
629acc47448SIvan Ilchenko 	uint64_t pkts_sum = 0;
630acc47448SIvan Ilchenko 	uint64_t bytes_sum = 0;
631acc47448SIvan Ilchenko 	unsigned int i;
632acc47448SIvan Ilchenko 
633acc47448SIvan Ilchenko 	for (i = 0; i < sas->ethdev_txq_count; ++i) {
634acc47448SIvan Ilchenko 		struct sfc_txq_info *txq_info;
635acc47448SIvan Ilchenko 
636acc47448SIvan Ilchenko 		txq_info = sfc_txq_info_by_ethdev_qid(sas, i);
637acc47448SIvan Ilchenko 		if (txq_info->state & SFC_TXQ_INITIALIZED) {
638acc47448SIvan Ilchenko 			union sfc_pkts_bytes qstats;
639acc47448SIvan Ilchenko 
640acc47448SIvan Ilchenko 			sfc_pkts_bytes_get(&txq_info->dp->dpq.stats, &qstats);
641acc47448SIvan Ilchenko 			pkts_sum += qstats.pkts -
642acc47448SIvan Ilchenko 					sa->sw_stats.reset_tx_pkts[i];
643acc47448SIvan Ilchenko 			bytes_sum += qstats.bytes -
644acc47448SIvan Ilchenko 					sa->sw_stats.reset_tx_bytes[i];
645acc47448SIvan Ilchenko 		}
646acc47448SIvan Ilchenko 	}
647acc47448SIvan Ilchenko 
648acc47448SIvan Ilchenko 	*pkts = pkts_sum;
649acc47448SIvan Ilchenko 	*bytes = bytes_sum;
650acc47448SIvan Ilchenko }
651acc47448SIvan Ilchenko 
652ab77a001SAndrew Rybchenko /*
653ab77a001SAndrew Rybchenko  * Some statistics are computed as A - B where A and B each increase
654ab77a001SAndrew Rybchenko  * monotonically with some hardware counter(s) and the counters are read
655ab77a001SAndrew Rybchenko  * asynchronously.
656ab77a001SAndrew Rybchenko  *
657ab77a001SAndrew Rybchenko  * If packet X is counted in A, but not counted in B yet, computed value is
658ab77a001SAndrew Rybchenko  * greater than real.
659ab77a001SAndrew Rybchenko  *
660ab77a001SAndrew Rybchenko  * If packet X is not counted in A at the moment of reading the counter,
661ab77a001SAndrew Rybchenko  * but counted in B at the moment of reading the counter, computed value
662ab77a001SAndrew Rybchenko  * is less than real.
663ab77a001SAndrew Rybchenko  *
664ab77a001SAndrew Rybchenko  * However, a counter which grows backward is a worse evil than a slightly
665ab77a001SAndrew Rybchenko  * wrong value. So, let's try to guarantee that it never happens, except
666ab77a001SAndrew Rybchenko  * perhaps when the MAC stats are zeroed as a result of a NIC reset.
667ab77a001SAndrew Rybchenko  */
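/*
 * Illustrative example (made-up numbers): with a stored value of 1000, a
 * racy read which computes 998 is ignored and 1000 is kept; a later read of
 * 1005 updates the stored value; a read of 0 right after a NIC reset is
 * accepted so that the statistic restarts from zero.
 */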
668ab77a001SAndrew Rybchenko static void
669ab77a001SAndrew Rybchenko sfc_update_diff_stat(uint64_t *stat, uint64_t newval)
670ab77a001SAndrew Rybchenko {
671ab77a001SAndrew Rybchenko 	if ((int64_t)(newval - *stat) > 0 || newval == 0)
672ab77a001SAndrew Rybchenko 		*stat = newval;
673ab77a001SAndrew Rybchenko }
674ab77a001SAndrew Rybchenko 
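/*
 * Basic statistics (rte_eth_stats_get()). Per-queue packet/byte counters
 * are taken from the datapath software stats when the Rx/Tx datapath
 * implementation provides them (SFC_DP_*_FEAT_STATS); otherwise they are
 * derived from the MAC statistics. Two MAC statistics layouts are handled:
 * vadapter counters when available, and port-level counters otherwise.
 * CRC bytes are excluded to keep the byte counters consistent across the
 * variants.
 */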
675d5b0924bSMatan Azrad static int
6761caab2f1SAndrew Rybchenko sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
6771caab2f1SAndrew Rybchenko {
678395ffcb4SIvan Ilchenko 	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
679395ffcb4SIvan Ilchenko 	bool have_dp_rx_stats = sap->dp_rx->features & SFC_DP_RX_FEAT_STATS;
680acc47448SIvan Ilchenko 	bool have_dp_tx_stats = sap->dp_tx->features & SFC_DP_TX_FEAT_STATS;
6815313b441SAndrew Rybchenko 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
6821caab2f1SAndrew Rybchenko 	struct sfc_port *port = &sa->port;
6831caab2f1SAndrew Rybchenko 	uint64_t *mac_stats;
684d5b0924bSMatan Azrad 	int ret;
6851caab2f1SAndrew Rybchenko 
68617b0d7b3SIvan Ilchenko 	sfc_adapter_lock(sa);
6871caab2f1SAndrew Rybchenko 
688395ffcb4SIvan Ilchenko 	if (have_dp_rx_stats)
689395ffcb4SIvan Ilchenko 		sfc_stats_get_dp_rx(sa, &stats->ipackets, &stats->ibytes);
690acc47448SIvan Ilchenko 	if (have_dp_tx_stats)
691acc47448SIvan Ilchenko 		sfc_stats_get_dp_tx(sa, &stats->opackets, &stats->obytes);
692395ffcb4SIvan Ilchenko 
6931827b073SIvan Ilchenko 	ret = sfc_port_update_mac_stats(sa, B_FALSE);
694d5b0924bSMatan Azrad 	if (ret != 0)
6951caab2f1SAndrew Rybchenko 		goto unlock;
6961caab2f1SAndrew Rybchenko 
6971caab2f1SAndrew Rybchenko 	mac_stats = port->mac_stats_buf;
6981caab2f1SAndrew Rybchenko 
6991caab2f1SAndrew Rybchenko 	if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask,
7001caab2f1SAndrew Rybchenko 				   EFX_MAC_VADAPTER_RX_UNICAST_PACKETS)) {
701395ffcb4SIvan Ilchenko 		if (!have_dp_rx_stats) {
7021caab2f1SAndrew Rybchenko 			stats->ipackets =
7031caab2f1SAndrew Rybchenko 				mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS] +
7041caab2f1SAndrew Rybchenko 				mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS] +
7051caab2f1SAndrew Rybchenko 				mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS];
7061caab2f1SAndrew Rybchenko 			stats->ibytes =
7071caab2f1SAndrew Rybchenko 				mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_BYTES] +
7081caab2f1SAndrew Rybchenko 				mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES] +
7091caab2f1SAndrew Rybchenko 				mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES];
710395ffcb4SIvan Ilchenko 
711395ffcb4SIvan Ilchenko 			/* CRC is included in these stats, but shouldn't be */
712395ffcb4SIvan Ilchenko 			stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
713395ffcb4SIvan Ilchenko 		}
714acc47448SIvan Ilchenko 		if (!have_dp_tx_stats) {
715395ffcb4SIvan Ilchenko 			stats->opackets =
716395ffcb4SIvan Ilchenko 				mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS] +
717395ffcb4SIvan Ilchenko 				mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS] +
718395ffcb4SIvan Ilchenko 				mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS];
7191caab2f1SAndrew Rybchenko 			stats->obytes =
7201caab2f1SAndrew Rybchenko 				mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_BYTES] +
7211caab2f1SAndrew Rybchenko 				mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES] +
7221caab2f1SAndrew Rybchenko 				mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES];
7238a693036SViacheslav Galaktionov 
7248a693036SViacheslav Galaktionov 			/* CRC is included in these stats, but shouldn't be */
7258a693036SViacheslav Galaktionov 			stats->obytes -= stats->opackets * RTE_ETHER_CRC_LEN;
726acc47448SIvan Ilchenko 		}
727acc47448SIvan Ilchenko 		stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS];
728acc47448SIvan Ilchenko 		stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS];
7291caab2f1SAndrew Rybchenko 	} else {
730acc47448SIvan Ilchenko 		if (!have_dp_tx_stats) {
7311caab2f1SAndrew Rybchenko 			stats->opackets = mac_stats[EFX_MAC_TX_PKTS];
732acc47448SIvan Ilchenko 			stats->obytes = mac_stats[EFX_MAC_TX_OCTETS] -
733acc47448SIvan Ilchenko 				mac_stats[EFX_MAC_TX_PKTS] * RTE_ETHER_CRC_LEN;
734acc47448SIvan Ilchenko 		}
7358a693036SViacheslav Galaktionov 
7361caab2f1SAndrew Rybchenko 		/*
7371caab2f1SAndrew Rybchenko 		 * Take into account all stats that can be supported on
7381caab2f1SAndrew Rybchenko 		 * EF10. If some stat is not supported by the current
7391caab2f1SAndrew Rybchenko 		 * firmware variant or HW revision, it is guaranteed
7401caab2f1SAndrew Rybchenko 		 * to be zero in mac_stats.
7411caab2f1SAndrew Rybchenko 		 */
7421caab2f1SAndrew Rybchenko 		stats->imissed =
7431caab2f1SAndrew Rybchenko 			mac_stats[EFX_MAC_RX_NODESC_DROP_CNT] +
7441caab2f1SAndrew Rybchenko 			mac_stats[EFX_MAC_PM_TRUNC_BB_OVERFLOW] +
7451caab2f1SAndrew Rybchenko 			mac_stats[EFX_MAC_PM_DISCARD_BB_OVERFLOW] +
7461caab2f1SAndrew Rybchenko 			mac_stats[EFX_MAC_PM_TRUNC_VFIFO_FULL] +
7471caab2f1SAndrew Rybchenko 			mac_stats[EFX_MAC_PM_DISCARD_VFIFO_FULL] +
7481caab2f1SAndrew Rybchenko 			mac_stats[EFX_MAC_PM_TRUNC_QBB] +
7491caab2f1SAndrew Rybchenko 			mac_stats[EFX_MAC_PM_DISCARD_QBB] +
7501caab2f1SAndrew Rybchenko 			mac_stats[EFX_MAC_PM_DISCARD_MAPPING] +
7511caab2f1SAndrew Rybchenko 			mac_stats[EFX_MAC_RXDP_Q_DISABLED_PKTS] +
7521caab2f1SAndrew Rybchenko 			mac_stats[EFX_MAC_RXDP_DI_DROPPED_PKTS];
7531caab2f1SAndrew Rybchenko 		stats->ierrors =
7541caab2f1SAndrew Rybchenko 			mac_stats[EFX_MAC_RX_FCS_ERRORS] +
7551caab2f1SAndrew Rybchenko 			mac_stats[EFX_MAC_RX_ALIGN_ERRORS] +
7561caab2f1SAndrew Rybchenko 			mac_stats[EFX_MAC_RX_JABBER_PKTS];
7571caab2f1SAndrew Rybchenko 		/* no oerrors counters supported on EF10 */
758ab77a001SAndrew Rybchenko 
759395ffcb4SIvan Ilchenko 		if (!have_dp_rx_stats) {
760ab77a001SAndrew Rybchenko 			/* Exclude missed, errors and pauses from Rx packets */
761ab77a001SAndrew Rybchenko 			sfc_update_diff_stat(&port->ipackets,
762ab77a001SAndrew Rybchenko 				mac_stats[EFX_MAC_RX_PKTS] -
763ab77a001SAndrew Rybchenko 				mac_stats[EFX_MAC_RX_PAUSE_PKTS] -
764ab77a001SAndrew Rybchenko 				stats->imissed - stats->ierrors);
765ab77a001SAndrew Rybchenko 			stats->ipackets = port->ipackets;
766395ffcb4SIvan Ilchenko 			stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS] -
767395ffcb4SIvan Ilchenko 				mac_stats[EFX_MAC_RX_PKTS] * RTE_ETHER_CRC_LEN;
768395ffcb4SIvan Ilchenko 		}
7691caab2f1SAndrew Rybchenko 	}
7701caab2f1SAndrew Rybchenko 
7711caab2f1SAndrew Rybchenko unlock:
77217b0d7b3SIvan Ilchenko 	sfc_adapter_unlock(sa);
773d5b0924bSMatan Azrad 	SFC_ASSERT(ret >= 0);
774d5b0924bSMatan Azrad 	return -ret;
7751caab2f1SAndrew Rybchenko }
7761caab2f1SAndrew Rybchenko 
7779970a9adSIgor Romanov static int
778e8acb329SIvan Malov sfc_stats_reset(struct rte_eth_dev *dev)
779e8acb329SIvan Malov {
7805313b441SAndrew Rybchenko 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
781e8acb329SIvan Malov 	struct sfc_port *port = &sa->port;
782e8acb329SIvan Malov 	int rc;
783e8acb329SIvan Malov 
78417b0d7b3SIvan Ilchenko 	sfc_adapter_lock(sa);
78517b0d7b3SIvan Ilchenko 
786ac478689SIgor Romanov 	if (sa->state != SFC_ETHDEV_STARTED) {
787e8acb329SIvan Malov 		/*
788e8acb329SIvan Malov 		 * The operation cannot be done if port is not started; it
789e8acb329SIvan Malov 		 * will be scheduled to be done during the next port start
790e8acb329SIvan Malov 		 */
791e8acb329SIvan Malov 		port->mac_stats_reset_pending = B_TRUE;
79217b0d7b3SIvan Ilchenko 		sfc_adapter_unlock(sa);
7939970a9adSIgor Romanov 		return 0;
794e8acb329SIvan Malov 	}
795e8acb329SIvan Malov 
796e8acb329SIvan Malov 	rc = sfc_port_reset_mac_stats(sa);
797e8acb329SIvan Malov 	if (rc != 0)
798e8acb329SIvan Malov 		sfc_err(sa, "failed to reset statistics (rc = %d)", rc);
7999970a9adSIgor Romanov 
800fdd7719eSIvan Ilchenko 	sfc_sw_xstats_reset(sa);
801fdd7719eSIvan Ilchenko 
80217b0d7b3SIvan Ilchenko 	sfc_adapter_unlock(sa);
80317b0d7b3SIvan Ilchenko 
8049970a9adSIgor Romanov 	SFC_ASSERT(rc >= 0);
8059970a9adSIgor Romanov 	return -rc;
806e8acb329SIvan Malov }
807e8acb329SIvan Malov 
808fdd7719eSIvan Ilchenko static unsigned int
809fdd7719eSIvan Ilchenko sfc_xstats_get_nb_supported(struct sfc_adapter *sa)
810fdd7719eSIvan Ilchenko {
811fdd7719eSIvan Ilchenko 	struct sfc_port *port = &sa->port;
812fdd7719eSIvan Ilchenko 	unsigned int nb_supported;
813fdd7719eSIvan Ilchenko 
814fdd7719eSIvan Ilchenko 	sfc_adapter_lock(sa);
815fdd7719eSIvan Ilchenko 	nb_supported = port->mac_stats_nb_supported +
816fdd7719eSIvan Ilchenko 		       sfc_sw_xstats_get_nb_supported(sa);
817fdd7719eSIvan Ilchenko 	sfc_adapter_unlock(sa);
818fdd7719eSIvan Ilchenko 
819fdd7719eSIvan Ilchenko 	return nb_supported;
820fdd7719eSIvan Ilchenko }
821fdd7719eSIvan Ilchenko 
8227b989176SAndrew Rybchenko static int
8237b989176SAndrew Rybchenko sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
8247b989176SAndrew Rybchenko 	       unsigned int xstats_count)
8257b989176SAndrew Rybchenko {
8265313b441SAndrew Rybchenko 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
82721ca2629SIvan Ilchenko 	unsigned int nb_written = 0;
828fdd7719eSIvan Ilchenko 	unsigned int nb_supported = 0;
829fdd7719eSIvan Ilchenko 	int rc;
8307b989176SAndrew Rybchenko 
831fdd7719eSIvan Ilchenko 	if (unlikely(xstats == NULL))
832fdd7719eSIvan Ilchenko 		return sfc_xstats_get_nb_supported(sa);
8337b989176SAndrew Rybchenko 
834fdd7719eSIvan Ilchenko 	rc = sfc_port_get_mac_stats(sa, xstats, xstats_count, &nb_written);
835fdd7719eSIvan Ilchenko 	if (rc < 0)
836fdd7719eSIvan Ilchenko 		return rc;
837fdd7719eSIvan Ilchenko 
838fdd7719eSIvan Ilchenko 	nb_supported = rc;
839fdd7719eSIvan Ilchenko 	sfc_sw_xstats_get_vals(sa, xstats, xstats_count, &nb_written,
840fdd7719eSIvan Ilchenko 			       &nb_supported);
841fdd7719eSIvan Ilchenko 
842fdd7719eSIvan Ilchenko 	return nb_supported;
8437b989176SAndrew Rybchenko }
8447b989176SAndrew Rybchenko 
8457b989176SAndrew Rybchenko static int
8467b989176SAndrew Rybchenko sfc_xstats_get_names(struct rte_eth_dev *dev,
8477b989176SAndrew Rybchenko 		     struct rte_eth_xstat_name *xstats_names,
8487b989176SAndrew Rybchenko 		     unsigned int xstats_count)
8497b989176SAndrew Rybchenko {
8505313b441SAndrew Rybchenko 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
8517b989176SAndrew Rybchenko 	struct sfc_port *port = &sa->port;
8527b989176SAndrew Rybchenko 	unsigned int i;
8537b989176SAndrew Rybchenko 	unsigned int nstats = 0;
854fdd7719eSIvan Ilchenko 	unsigned int nb_written = 0;
855fdd7719eSIvan Ilchenko 	int ret;
8567b989176SAndrew Rybchenko 
857fdd7719eSIvan Ilchenko 	if (unlikely(xstats_names == NULL))
858fdd7719eSIvan Ilchenko 		return sfc_xstats_get_nb_supported(sa);
859c78c2224SIvan Ilchenko 
8607b989176SAndrew Rybchenko 	for (i = 0; i < EFX_MAC_NSTATS; ++i) {
8617b989176SAndrew Rybchenko 		if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
862fdd7719eSIvan Ilchenko 			if (nstats < xstats_count) {
863ed5b9848SAndy Green 				strlcpy(xstats_names[nstats].name,
8647b989176SAndrew Rybchenko 					efx_mac_stat_name(sa->nic, i),
8657b989176SAndrew Rybchenko 					sizeof(xstats_names[0].name));
866fdd7719eSIvan Ilchenko 				nb_written++;
867fdd7719eSIvan Ilchenko 			}
8687b989176SAndrew Rybchenko 			nstats++;
8697b989176SAndrew Rybchenko 		}
8707b989176SAndrew Rybchenko 	}
8717b989176SAndrew Rybchenko 
872fdd7719eSIvan Ilchenko 	ret = sfc_sw_xstats_get_names(sa, xstats_names, xstats_count,
873fdd7719eSIvan Ilchenko 				      &nb_written, &nstats);
874fdd7719eSIvan Ilchenko 	if (ret != 0) {
875fdd7719eSIvan Ilchenko 		SFC_ASSERT(ret < 0);
876fdd7719eSIvan Ilchenko 		return ret;
877fdd7719eSIvan Ilchenko 	}
878fdd7719eSIvan Ilchenko 
8797b989176SAndrew Rybchenko 	return nstats;
8807b989176SAndrew Rybchenko }
8817b989176SAndrew Rybchenko 
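/*
 * Retrieval of extended statistics by ID. The values array is first seeded
 * with SFC_XSTAT_ID_INVALID_VAL, then MAC and software xstats fill the
 * entries they recognise; the number of entries written before the first
 * untouched one is returned, so an invalid ID shows up as a short count.
 */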
882cdbb29cfSAndrew Rybchenko static int
88373280c1eSIvan Malov sfc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
88473280c1eSIvan Malov 		     uint64_t *values, unsigned int n)
88573280c1eSIvan Malov {
8865313b441SAndrew Rybchenko 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
887fdd7719eSIvan Ilchenko 	struct sfc_port *port = &sa->port;
888fdd7719eSIvan Ilchenko 	unsigned int nb_supported;
889fdd7719eSIvan Ilchenko 	unsigned int i;
890fdd7719eSIvan Ilchenko 	int rc;
89173280c1eSIvan Malov 
8928232cc2dSIvan Ilchenko 	if (unlikely(ids == NULL || values == NULL))
8938232cc2dSIvan Ilchenko 		return -EINVAL;
89473280c1eSIvan Malov 
895fdd7719eSIvan Ilchenko 	/*
896fdd7719eSIvan Ilchenko 	 * The values array could be filled in nonsequential order. Fill it
897fdd7719eSIvan Ilchenko 	 * with a constant indicating an invalid ID first.
898fdd7719eSIvan Ilchenko 	 */
899fdd7719eSIvan Ilchenko 	for (i = 0; i < n; i++)
900fdd7719eSIvan Ilchenko 		values[i] = SFC_XSTAT_ID_INVALID_VAL;
901fdd7719eSIvan Ilchenko 
902fdd7719eSIvan Ilchenko 	rc = sfc_port_get_mac_stats_by_id(sa, ids, values, n);
903fdd7719eSIvan Ilchenko 	if (rc != 0)
904fdd7719eSIvan Ilchenko 		return rc;
905fdd7719eSIvan Ilchenko 
906fdd7719eSIvan Ilchenko 	nb_supported = port->mac_stats_nb_supported;
907fdd7719eSIvan Ilchenko 	sfc_sw_xstats_get_vals_by_id(sa, ids, values, n, &nb_supported);
908fdd7719eSIvan Ilchenko 
909fdd7719eSIvan Ilchenko 	/* Return number of written stats before invalid ID is encountered. */
910fdd7719eSIvan Ilchenko 	for (i = 0; i < n; i++) {
911fdd7719eSIvan Ilchenko 		if (values[i] == SFC_XSTAT_ID_INVALID_VAL)
912fdd7719eSIvan Ilchenko 			return i;
913fdd7719eSIvan Ilchenko 	}
914fdd7719eSIvan Ilchenko 
915fdd7719eSIvan Ilchenko 	return n;
91673280c1eSIvan Malov }
91773280c1eSIvan Malov 
91873280c1eSIvan Malov static int
91973280c1eSIvan Malov sfc_xstats_get_names_by_id(struct rte_eth_dev *dev,
9208c9f976fSAndrew Rybchenko 			   const uint64_t *ids,
92173280c1eSIvan Malov 			   struct rte_eth_xstat_name *xstats_names,
9228c9f976fSAndrew Rybchenko 			   unsigned int size)
92373280c1eSIvan Malov {
9245313b441SAndrew Rybchenko 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
92573280c1eSIvan Malov 	struct sfc_port *port = &sa->port;
9267d466e5fSIvan Ilchenko 	unsigned int nb_supported;
92773280c1eSIvan Malov 	unsigned int i;
928fdd7719eSIvan Ilchenko 	int ret;
92973280c1eSIvan Malov 
9308232cc2dSIvan Ilchenko 	if (unlikely(xstats_names == NULL && ids != NULL) ||
9318232cc2dSIvan Ilchenko 	    unlikely(xstats_names != NULL && ids == NULL))
9328232cc2dSIvan Ilchenko 		return -EINVAL;
9338232cc2dSIvan Ilchenko 
934fdd7719eSIvan Ilchenko 	if (unlikely(xstats_names == NULL && ids == NULL))
935fdd7719eSIvan Ilchenko 		return sfc_xstats_get_nb_supported(sa);
93617b0d7b3SIvan Ilchenko 
937fdd7719eSIvan Ilchenko 	/*
938fdd7719eSIvan Ilchenko 	 * The names array could be filled in nonsequential order. Fill it
939fdd7719eSIvan Ilchenko 	 * with a string indicating an invalid ID first.
940fdd7719eSIvan Ilchenko 	 */
941fdd7719eSIvan Ilchenko 	for (i = 0; i < size; i++)
942fdd7719eSIvan Ilchenko 		xstats_names[i].name[0] = SFC_XSTAT_ID_INVALID_NAME;
943fdd7719eSIvan Ilchenko 
944fdd7719eSIvan Ilchenko 	sfc_adapter_lock(sa);
94573280c1eSIvan Malov 
9467d466e5fSIvan Ilchenko 	SFC_ASSERT(port->mac_stats_nb_supported <=
9477d466e5fSIvan Ilchenko 		   RTE_DIM(port->mac_stats_by_id));
94873280c1eSIvan Malov 
9497d466e5fSIvan Ilchenko 	for (i = 0; i < size; i++) {
9507d466e5fSIvan Ilchenko 		if (ids[i] < port->mac_stats_nb_supported) {
9517d466e5fSIvan Ilchenko 			strlcpy(xstats_names[i].name,
9527d466e5fSIvan Ilchenko 				efx_mac_stat_name(sa->nic,
9537d466e5fSIvan Ilchenko 						 port->mac_stats_by_id[ids[i]]),
95473280c1eSIvan Malov 				sizeof(xstats_names[0].name));
95573280c1eSIvan Malov 		}
95673280c1eSIvan Malov 	}
95773280c1eSIvan Malov 
958fdd7719eSIvan Ilchenko 	nb_supported = port->mac_stats_nb_supported;
959fdd7719eSIvan Ilchenko 
96017b0d7b3SIvan Ilchenko 	sfc_adapter_unlock(sa);
96117b0d7b3SIvan Ilchenko 
962fdd7719eSIvan Ilchenko 	ret = sfc_sw_xstats_get_names_by_id(sa, ids, xstats_names, size,
963fdd7719eSIvan Ilchenko 					    &nb_supported);
964fdd7719eSIvan Ilchenko 	if (ret != 0) {
965fdd7719eSIvan Ilchenko 		SFC_ASSERT(ret < 0);
966fdd7719eSIvan Ilchenko 		return ret;
967fdd7719eSIvan Ilchenko 	}
968fdd7719eSIvan Ilchenko 
969fdd7719eSIvan Ilchenko 	/* Return number of written names before invalid ID is encountered. */
970fdd7719eSIvan Ilchenko 	for (i = 0; i < size; i++) {
971fdd7719eSIvan Ilchenko 		if (xstats_names[i].name[0] == SFC_XSTAT_ID_INVALID_NAME)
972fdd7719eSIvan Ilchenko 			return i;
973fdd7719eSIvan Ilchenko 	}
974fdd7719eSIvan Ilchenko 
9757d466e5fSIvan Ilchenko 	return size;
97673280c1eSIvan Malov }
97773280c1eSIvan Malov 
97873280c1eSIvan Malov static int
979cdbb29cfSAndrew Rybchenko sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
980cdbb29cfSAndrew Rybchenko {
9815313b441SAndrew Rybchenko 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
982cdbb29cfSAndrew Rybchenko 	unsigned int wanted_fc, link_fc;
983cdbb29cfSAndrew Rybchenko 
984cdbb29cfSAndrew Rybchenko 	memset(fc_conf, 0, sizeof(*fc_conf));
985cdbb29cfSAndrew Rybchenko 
986cdbb29cfSAndrew Rybchenko 	sfc_adapter_lock(sa);
987cdbb29cfSAndrew Rybchenko 
988ac478689SIgor Romanov 	if (sa->state == SFC_ETHDEV_STARTED)
989cdbb29cfSAndrew Rybchenko 		efx_mac_fcntl_get(sa->nic, &wanted_fc, &link_fc);
990cdbb29cfSAndrew Rybchenko 	else
991cdbb29cfSAndrew Rybchenko 		link_fc = sa->port.flow_ctrl;
992cdbb29cfSAndrew Rybchenko 
993cdbb29cfSAndrew Rybchenko 	switch (link_fc) {
994cdbb29cfSAndrew Rybchenko 	case 0:
995295968d1SFerruh Yigit 		fc_conf->mode = RTE_ETH_FC_NONE;
996cdbb29cfSAndrew Rybchenko 		break;
997cdbb29cfSAndrew Rybchenko 	case EFX_FCNTL_RESPOND:
998295968d1SFerruh Yigit 		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
999cdbb29cfSAndrew Rybchenko 		break;
1000cdbb29cfSAndrew Rybchenko 	case EFX_FCNTL_GENERATE:
1001295968d1SFerruh Yigit 		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
1002cdbb29cfSAndrew Rybchenko 		break;
1003cdbb29cfSAndrew Rybchenko 	case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE):
1004295968d1SFerruh Yigit 		fc_conf->mode = RTE_ETH_FC_FULL;
1005cdbb29cfSAndrew Rybchenko 		break;
1006cdbb29cfSAndrew Rybchenko 	default:
1007cdbb29cfSAndrew Rybchenko 		sfc_err(sa, "%s: unexpected flow control value %#x",
1008cdbb29cfSAndrew Rybchenko 			__func__, link_fc);
1009cdbb29cfSAndrew Rybchenko 	}
1010cdbb29cfSAndrew Rybchenko 
1011cdbb29cfSAndrew Rybchenko 	fc_conf->autoneg = sa->port.flow_ctrl_autoneg;
1012cdbb29cfSAndrew Rybchenko 
1013cdbb29cfSAndrew Rybchenko 	sfc_adapter_unlock(sa);
1014cdbb29cfSAndrew Rybchenko 
1015cdbb29cfSAndrew Rybchenko 	return 0;
1016cdbb29cfSAndrew Rybchenko }
1017cdbb29cfSAndrew Rybchenko 
1018cdbb29cfSAndrew Rybchenko static int
1019cdbb29cfSAndrew Rybchenko sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1020cdbb29cfSAndrew Rybchenko {
10215313b441SAndrew Rybchenko 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
1022cdbb29cfSAndrew Rybchenko 	struct sfc_port *port = &sa->port;
1023cdbb29cfSAndrew Rybchenko 	unsigned int fcntl;
1024cdbb29cfSAndrew Rybchenko 	int rc;
1025cdbb29cfSAndrew Rybchenko 
1026cdbb29cfSAndrew Rybchenko 	if (fc_conf->high_water != 0 || fc_conf->low_water != 0 ||
1027cdbb29cfSAndrew Rybchenko 	    fc_conf->pause_time != 0 || fc_conf->send_xon != 0 ||
1028cdbb29cfSAndrew Rybchenko 	    fc_conf->mac_ctrl_frame_fwd != 0) {
1029cdbb29cfSAndrew Rybchenko 		sfc_err(sa, "unsupported flow control settings specified");
1030cdbb29cfSAndrew Rybchenko 		rc = EINVAL;
1031cdbb29cfSAndrew Rybchenko 		goto fail_inval;
1032cdbb29cfSAndrew Rybchenko 	}
1033cdbb29cfSAndrew Rybchenko 
1034cdbb29cfSAndrew Rybchenko 	switch (fc_conf->mode) {
1035295968d1SFerruh Yigit 	case RTE_ETH_FC_NONE:
1036cdbb29cfSAndrew Rybchenko 		fcntl = 0;
1037cdbb29cfSAndrew Rybchenko 		break;
1038295968d1SFerruh Yigit 	case RTE_ETH_FC_RX_PAUSE:
1039cdbb29cfSAndrew Rybchenko 		fcntl = EFX_FCNTL_RESPOND;
1040cdbb29cfSAndrew Rybchenko 		break;
1041295968d1SFerruh Yigit 	case RTE_ETH_FC_TX_PAUSE:
1042cdbb29cfSAndrew Rybchenko 		fcntl = EFX_FCNTL_GENERATE;
1043cdbb29cfSAndrew Rybchenko 		break;
1044295968d1SFerruh Yigit 	case RTE_ETH_FC_FULL:
1045cdbb29cfSAndrew Rybchenko 		fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
1046cdbb29cfSAndrew Rybchenko 		break;
1047cdbb29cfSAndrew Rybchenko 	default:
1048cdbb29cfSAndrew Rybchenko 		rc = EINVAL;
1049cdbb29cfSAndrew Rybchenko 		goto fail_inval;
1050cdbb29cfSAndrew Rybchenko 	}
1051cdbb29cfSAndrew Rybchenko 
1052cdbb29cfSAndrew Rybchenko 	sfc_adapter_lock(sa);
1053cdbb29cfSAndrew Rybchenko 
1054ac478689SIgor Romanov 	if (sa->state == SFC_ETHDEV_STARTED) {
1055cdbb29cfSAndrew Rybchenko 		rc = efx_mac_fcntl_set(sa->nic, fcntl, fc_conf->autoneg);
1056cdbb29cfSAndrew Rybchenko 		if (rc != 0)
1057cdbb29cfSAndrew Rybchenko 			goto fail_mac_fcntl_set;
1058cdbb29cfSAndrew Rybchenko 	}
1059cdbb29cfSAndrew Rybchenko 
1060cdbb29cfSAndrew Rybchenko 	port->flow_ctrl = fcntl;
1061cdbb29cfSAndrew Rybchenko 	port->flow_ctrl_autoneg = fc_conf->autoneg;
1062cdbb29cfSAndrew Rybchenko 
1063cdbb29cfSAndrew Rybchenko 	sfc_adapter_unlock(sa);
1064cdbb29cfSAndrew Rybchenko 
1065cdbb29cfSAndrew Rybchenko 	return 0;
1066cdbb29cfSAndrew Rybchenko 
1067cdbb29cfSAndrew Rybchenko fail_mac_fcntl_set:
1068cdbb29cfSAndrew Rybchenko 	sfc_adapter_unlock(sa);
1069cdbb29cfSAndrew Rybchenko fail_inval:
1070cdbb29cfSAndrew Rybchenko 	SFC_ASSERT(rc > 0);
1071cdbb29cfSAndrew Rybchenko 	return -rc;
1072cdbb29cfSAndrew Rybchenko }
1073cdbb29cfSAndrew Rybchenko 
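/*
 * Check that a new PDU (MTU plus L2 overhead) can be handled by every
 * initialized Rx queue given its buffer size, the Rx prefix size and
 * whether Rx scatter is enabled on the queue. Used to validate an MTU
 * change before it is applied.
 */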
1074e961cf42SAndrew Rybchenko static int
10756c0cc77aSIgor Romanov sfc_check_scatter_on_all_rx_queues(struct sfc_adapter *sa, size_t pdu)
10766c0cc77aSIgor Romanov {
10776c0cc77aSIgor Romanov 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
10786c0cc77aSIgor Romanov 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
10796c0cc77aSIgor Romanov 	boolean_t scatter_enabled;
10806c0cc77aSIgor Romanov 	const char *error;
10816c0cc77aSIgor Romanov 	unsigned int i;
10826c0cc77aSIgor Romanov 
10836c0cc77aSIgor Romanov 	for (i = 0; i < sas->rxq_count; i++) {
10846c0cc77aSIgor Romanov 		if ((sas->rxq_info[i].state & SFC_RXQ_INITIALIZED) == 0)
10856c0cc77aSIgor Romanov 			continue;
10866c0cc77aSIgor Romanov 
10876c0cc77aSIgor Romanov 		scatter_enabled = (sas->rxq_info[i].type_flags &
10886c0cc77aSIgor Romanov 				   EFX_RXQ_FLAG_SCATTER);
10896c0cc77aSIgor Romanov 
10906c0cc77aSIgor Romanov 		if (!sfc_rx_check_scatter(pdu, sa->rxq_ctrl[i].buf_size,
10916c0cc77aSIgor Romanov 					  encp->enc_rx_prefix_size,
1092d41a6268SIgor Romanov 					  scatter_enabled,
1093d41a6268SIgor Romanov 					  encp->enc_rx_scatter_max, &error)) {
10946c0cc77aSIgor Romanov 			sfc_err(sa, "MTU check for RxQ %u failed: %s", i,
10956c0cc77aSIgor Romanov 				error);
10966c0cc77aSIgor Romanov 			return EINVAL;
10976c0cc77aSIgor Romanov 		}
10986c0cc77aSIgor Romanov 	}
10996c0cc77aSIgor Romanov 
11006c0cc77aSIgor Romanov 	return 0;
11016c0cc77aSIgor Romanov }
11026c0cc77aSIgor Romanov 
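/*
 * Set a new MTU. If the PDU size changes while the adapter is started,
 * the adapter is restarted with the new size; should the restart fail,
 * an attempt is made to bring it back up with the old PDU size.
 */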
11036c0cc77aSIgor Romanov static int
1104e961cf42SAndrew Rybchenko sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
1105e961cf42SAndrew Rybchenko {
11065313b441SAndrew Rybchenko 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
1107e961cf42SAndrew Rybchenko 	size_t pdu = EFX_MAC_PDU(mtu);
1108e961cf42SAndrew Rybchenko 	size_t old_pdu;
1109e961cf42SAndrew Rybchenko 	int rc;
1110e961cf42SAndrew Rybchenko 
1111e961cf42SAndrew Rybchenko 	sfc_log_init(sa, "mtu=%u", mtu);
1112e961cf42SAndrew Rybchenko 
1113e961cf42SAndrew Rybchenko 	rc = EINVAL;
1114e961cf42SAndrew Rybchenko 	if (pdu < EFX_MAC_PDU_MIN) {
1115e961cf42SAndrew Rybchenko 		sfc_err(sa, "too small MTU %u (PDU size %u less than min %u)",
1116e961cf42SAndrew Rybchenko 			(unsigned int)mtu, (unsigned int)pdu,
1117e961cf42SAndrew Rybchenko 			EFX_MAC_PDU_MIN);
1118e961cf42SAndrew Rybchenko 		goto fail_inval;
1119e961cf42SAndrew Rybchenko 	}
1120e961cf42SAndrew Rybchenko 	if (pdu > EFX_MAC_PDU_MAX) {
1121e961cf42SAndrew Rybchenko 		sfc_err(sa, "too big MTU %u (PDU size %u greater than max %u)",
1122e961cf42SAndrew Rybchenko 			(unsigned int)mtu, (unsigned int)pdu,
1123441717b9SAndrew Rybchenko 			(unsigned int)EFX_MAC_PDU_MAX);
1124e961cf42SAndrew Rybchenko 		goto fail_inval;
1125e961cf42SAndrew Rybchenko 	}
1126e961cf42SAndrew Rybchenko 
1127e961cf42SAndrew Rybchenko 	sfc_adapter_lock(sa);
1128e961cf42SAndrew Rybchenko 
11296c0cc77aSIgor Romanov 	rc = sfc_check_scatter_on_all_rx_queues(sa, pdu);
11306c0cc77aSIgor Romanov 	if (rc != 0)
11316c0cc77aSIgor Romanov 		goto fail_check_scatter;
11326c0cc77aSIgor Romanov 
1133e961cf42SAndrew Rybchenko 	if (pdu != sa->port.pdu) {
1134ac478689SIgor Romanov 		if (sa->state == SFC_ETHDEV_STARTED) {
1135e961cf42SAndrew Rybchenko 			sfc_stop(sa);
1136e961cf42SAndrew Rybchenko 
1137e961cf42SAndrew Rybchenko 			old_pdu = sa->port.pdu;
1138e961cf42SAndrew Rybchenko 			sa->port.pdu = pdu;
1139e961cf42SAndrew Rybchenko 			rc = sfc_start(sa);
1140e961cf42SAndrew Rybchenko 			if (rc != 0)
1141e961cf42SAndrew Rybchenko 				goto fail_start;
1142e961cf42SAndrew Rybchenko 		} else {
1143e961cf42SAndrew Rybchenko 			sa->port.pdu = pdu;
1144e961cf42SAndrew Rybchenko 		}
1145e961cf42SAndrew Rybchenko 	}
1146e961cf42SAndrew Rybchenko 
1147e961cf42SAndrew Rybchenko 	sfc_adapter_unlock(sa);
1148e961cf42SAndrew Rybchenko 
1149e961cf42SAndrew Rybchenko 	sfc_log_init(sa, "done");
1150e961cf42SAndrew Rybchenko 	return 0;
1151e961cf42SAndrew Rybchenko 
1152e961cf42SAndrew Rybchenko fail_start:
1153e961cf42SAndrew Rybchenko 	sa->port.pdu = old_pdu;
1154e961cf42SAndrew Rybchenko 	if (sfc_start(sa) != 0)
1155e961cf42SAndrew Rybchenko 		sfc_err(sa, "cannot start with either new (%u) or old (%u) "
1156e961cf42SAndrew Rybchenko 			"PDU max size - port is stopped",
1157e961cf42SAndrew Rybchenko 			(unsigned int)pdu, (unsigned int)old_pdu);
11586c0cc77aSIgor Romanov 
11596c0cc77aSIgor Romanov fail_check_scatter:
1160e961cf42SAndrew Rybchenko 	sfc_adapter_unlock(sa);
1161e961cf42SAndrew Rybchenko 
1162e961cf42SAndrew Rybchenko fail_inval:
1163e961cf42SAndrew Rybchenko 	sfc_log_init(sa, "failed %d", rc);
1164e961cf42SAndrew Rybchenko 	SFC_ASSERT(rc > 0);
1165e961cf42SAndrew Rybchenko 	return -rc;
1166e961cf42SAndrew Rybchenko }
1167caccf8b3SOlivier Matz static int
11686d13ea8eSOlivier Matz sfc_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
1169c100fd46SIvan Malov {
11705313b441SAndrew Rybchenko 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
1171c100fd46SIvan Malov 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
117284a9b481SIvan Malov 	struct sfc_port *port = &sa->port;
11736d13ea8eSOlivier Matz 	struct rte_ether_addr *old_addr = &dev->data->mac_addrs[0];
1174caccf8b3SOlivier Matz 	int rc = 0;
1175c100fd46SIvan Malov 
1176c100fd46SIvan Malov 	sfc_adapter_lock(sa);
1177c100fd46SIvan Malov 
117898e2783aSAndrew Rybchenko 	if (rte_is_same_ether_addr(mac_addr, &port->default_mac_addr))
117998e2783aSAndrew Rybchenko 		goto unlock;
118098e2783aSAndrew Rybchenko 
1181642088ddSIvan Malov 	/*
1182642088ddSIvan Malov 	 * Copy the address to the device private data so that
1183642088ddSIvan Malov 	 * it can be restored in the case of adapter restart.
1184642088ddSIvan Malov 	 */
1185538da7a1SOlivier Matz 	rte_ether_addr_copy(mac_addr, &port->default_mac_addr);
1186642088ddSIvan Malov 
1187caccf8b3SOlivier Matz 	/*
1188caccf8b3SOlivier Matz 	 * Neither of the two checks below returns an error.
1189caccf8b3SOlivier Matz 	 * The new MAC address is preserved in the device
1190caccf8b3SOlivier Matz 	 * private data and will be activated on the next
1191caccf8b3SOlivier Matz 	 * port start, provided that isolated mode is not
1192caccf8b3SOlivier Matz 	 * enabled at that point.
1193caccf8b3SOlivier Matz 	 */
1194e0d5ba7eSAndrew Rybchenko 	if (sfc_sa2shared(sa)->isolated) {
1195caccf8b3SOlivier Matz 		sfc_warn(sa, "isolated mode is active on the port");
1196caccf8b3SOlivier Matz 		sfc_warn(sa, "will not set MAC address");
119784a9b481SIvan Malov 		goto unlock;
119884a9b481SIvan Malov 	}
119984a9b481SIvan Malov 
1200ac478689SIgor Romanov 	if (sa->state != SFC_ETHDEV_STARTED) {
120191d16276SIvan Malov 		sfc_notice(sa, "the port is not started");
120291d16276SIvan Malov 		sfc_notice(sa, "the new MAC address will be set on port start");
1203c100fd46SIvan Malov 
1204c100fd46SIvan Malov 		goto unlock;
1205c100fd46SIvan Malov 	}
1206c100fd46SIvan Malov 
1207c100fd46SIvan Malov 	if (encp->enc_allow_set_mac_with_installed_filters) {
1208c100fd46SIvan Malov 		rc = efx_mac_addr_set(sa->nic, mac_addr->addr_bytes);
1209c100fd46SIvan Malov 		if (rc != 0) {
1210c100fd46SIvan Malov 			sfc_err(sa, "cannot set MAC address (rc = %u)", rc);
1211c100fd46SIvan Malov 			goto unlock;
1212c100fd46SIvan Malov 		}
1213c100fd46SIvan Malov 
1214c100fd46SIvan Malov 		/*
1215c100fd46SIvan Malov 		 * Changing the MAC address by means of MCDI request
1216c100fd46SIvan Malov 		 * has no effect on received traffic, therefore
1217c100fd46SIvan Malov 		 * we also need to update unicast filters
1218c100fd46SIvan Malov 		 */
121998608e18SIgor Romanov 		rc = sfc_set_rx_mode_unchecked(sa);
1220caccf8b3SOlivier Matz 		if (rc != 0) {
1221c100fd46SIvan Malov 			sfc_err(sa, "cannot set filter (rc = %u)", rc);
1222caccf8b3SOlivier Matz 			/* Rollback the old address */
1223caccf8b3SOlivier Matz 			(void)efx_mac_addr_set(sa->nic, old_addr->addr_bytes);
122498608e18SIgor Romanov 			(void)sfc_set_rx_mode_unchecked(sa);
1225caccf8b3SOlivier Matz 		}
1226c100fd46SIvan Malov 	} else {
1227c100fd46SIvan Malov 		sfc_warn(sa, "cannot set MAC address with filters installed");
1228c100fd46SIvan Malov 		sfc_warn(sa, "adapter will be restarted to pick the new MAC");
1229c100fd46SIvan Malov 		sfc_warn(sa, "(some traffic may be dropped)");
1230c100fd46SIvan Malov 
1231c100fd46SIvan Malov 		/*
1232c100fd46SIvan Malov 		 * Since setting MAC address with filters installed is not
1233642088ddSIvan Malov 		 * allowed on the adapter, the new MAC address will be set
1234642088ddSIvan Malov 		 * by means of adapter restart. sfc_start() shall retrieve
1235642088ddSIvan Malov 		 * the new address from the device private data and set it.
1236c100fd46SIvan Malov 		 */
1237c100fd46SIvan Malov 		sfc_stop(sa);
1238c100fd46SIvan Malov 		rc = sfc_start(sa);
1239c100fd46SIvan Malov 		if (rc != 0)
1240c100fd46SIvan Malov 			sfc_err(sa, "cannot restart adapter (rc = %u)", rc);
1241c100fd46SIvan Malov 	}
1242c100fd46SIvan Malov 
1243c100fd46SIvan Malov unlock:
1244caccf8b3SOlivier Matz 	if (rc != 0)
1245538da7a1SOlivier Matz 		rte_ether_addr_copy(old_addr, &port->default_mac_addr);
1246caccf8b3SOlivier Matz 
1247c100fd46SIvan Malov 	sfc_adapter_unlock(sa);
1248caccf8b3SOlivier Matz 
1249caccf8b3SOlivier Matz 	SFC_ASSERT(rc >= 0);
1250caccf8b3SOlivier Matz 	return -rc;
1251c100fd46SIvan Malov }
1252c100fd46SIvan Malov 
1253e961cf42SAndrew Rybchenko 
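/*
 * Replace the multicast address list. The list is always stored in the
 * port private data; the hardware filter is only reprogrammed when the
 * adapter is started.
 */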
12540fa0070eSIvan Malov static int
12556d13ea8eSOlivier Matz sfc_set_mc_addr_list(struct rte_eth_dev *dev,
12566d13ea8eSOlivier Matz 		struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
12570fa0070eSIvan Malov {
12585313b441SAndrew Rybchenko 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
1259295f647aSIvan Malov 	struct sfc_port *port = &sa->port;
1260295f647aSIvan Malov 	uint8_t *mc_addrs = port->mcast_addrs;
12610fa0070eSIvan Malov 	int rc;
12620fa0070eSIvan Malov 	unsigned int i;
12630fa0070eSIvan Malov 
1264e0d5ba7eSAndrew Rybchenko 	if (sfc_sa2shared(sa)->isolated) {
126584a9b481SIvan Malov 		sfc_err(sa, "isolated mode is active on the port");
126684a9b481SIvan Malov 		sfc_err(sa, "will not set multicast address list");
126784a9b481SIvan Malov 		return -ENOTSUP;
126884a9b481SIvan Malov 	}
126984a9b481SIvan Malov 
1270295f647aSIvan Malov 	if (mc_addrs == NULL)
1271295f647aSIvan Malov 		return -ENOBUFS;
1272295f647aSIvan Malov 
1273295f647aSIvan Malov 	if (nb_mc_addr > port->max_mcast_addrs) {
12740fa0070eSIvan Malov 		sfc_err(sa, "too many multicast addresses: %u > %u",
1275295f647aSIvan Malov 			 nb_mc_addr, port->max_mcast_addrs);
12760fa0070eSIvan Malov 		return -EINVAL;
12770fa0070eSIvan Malov 	}
12780fa0070eSIvan Malov 
12790fa0070eSIvan Malov 	for (i = 0; i < nb_mc_addr; ++i) {
1280b171936cSStephen Hemminger 		rte_memcpy(mc_addrs, mc_addr_set[i].addr_bytes,
12810fa0070eSIvan Malov 				 EFX_MAC_ADDR_LEN);
12820fa0070eSIvan Malov 		mc_addrs += EFX_MAC_ADDR_LEN;
12830fa0070eSIvan Malov 	}
12840fa0070eSIvan Malov 
1285295f647aSIvan Malov 	port->nb_mcast_addrs = nb_mc_addr;
12860fa0070eSIvan Malov 
1287ac478689SIgor Romanov 	if (sa->state != SFC_ETHDEV_STARTED)
1288295f647aSIvan Malov 		return 0;
12890fa0070eSIvan Malov 
1290295f647aSIvan Malov 	rc = efx_mac_multicast_list_set(sa->nic, port->mcast_addrs,
1291295f647aSIvan Malov 					port->nb_mcast_addrs);
12920fa0070eSIvan Malov 	if (rc != 0)
12930fa0070eSIvan Malov 		sfc_err(sa, "cannot set multicast address list (rc = %u)", rc);
12940fa0070eSIvan Malov 
1295d0dcfe98SAndrew Rybchenko 	SFC_ASSERT(rc >= 0);
12960fa0070eSIvan Malov 	return -rc;
12970fa0070eSIvan Malov }
12980fa0070eSIvan Malov 
1299f28ede50SAndrew Rybchenko /*
1300f28ede50SAndrew Rybchenko  * The function is used by the secondary process as well. It must not
1301f28ede50SAndrew Rybchenko  * use any process-local pointers from the adapter data.
1302f28ede50SAndrew Rybchenko  */
13035502e397SAndrew Rybchenko static void
130409cafbddSIgor Romanov sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t ethdev_qid,
13055502e397SAndrew Rybchenko 		      struct rte_eth_rxq_info *qinfo)
13065502e397SAndrew Rybchenko {
1307dda791c2SAndrew Rybchenko 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
130809cafbddSIgor Romanov 	sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
13095502e397SAndrew Rybchenko 	struct sfc_rxq_info *rxq_info;
13105502e397SAndrew Rybchenko 
131109cafbddSIgor Romanov 	rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
13125502e397SAndrew Rybchenko 
13135befcecbSAndrew Rybchenko 	qinfo->mp = rxq_info->refill_mb_pool;
13145befcecbSAndrew Rybchenko 	qinfo->conf.rx_free_thresh = rxq_info->refill_threshold;
13155502e397SAndrew Rybchenko 	qinfo->conf.rx_drop_en = 1;
1316ac7af396SAndrew Rybchenko 	qinfo->conf.rx_deferred_start = rxq_info->deferred_start;
1317b7294d88SAndrew Rybchenko 	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
1318ff6a1197SIvan Malov 	if (rxq_info->type_flags & EFX_RXQ_FLAG_SCATTER) {
1319295968d1SFerruh Yigit 		qinfo->conf.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
1320ff6a1197SIvan Malov 		qinfo->scattered_rx = 1;
1321ff6a1197SIvan Malov 	}
13225502e397SAndrew Rybchenko 	qinfo->nb_desc = rxq_info->entries;
13235502e397SAndrew Rybchenko }
13245502e397SAndrew Rybchenko 
1325f28ede50SAndrew Rybchenko /*
1326f28ede50SAndrew Rybchenko  * The function is used by the secondary process as well. It must not
1327f28ede50SAndrew Rybchenko  * use any process-local pointers from the adapter data.
1328f28ede50SAndrew Rybchenko  */
1329c5938838SIvan Malov static void
1330db980d26SIgor Romanov sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t ethdev_qid,
1331c5938838SIvan Malov 		      struct rte_eth_txq_info *qinfo)
1332c5938838SIvan Malov {
1333113a14a6SAndrew Rybchenko 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1334c5938838SIvan Malov 	struct sfc_txq_info *txq_info;
1335c5938838SIvan Malov 
1336db980d26SIgor Romanov 	SFC_ASSERT(ethdev_qid < sas->ethdev_txq_count);
1337c5938838SIvan Malov 
1338db980d26SIgor Romanov 	txq_info = sfc_txq_info_by_ethdev_qid(sas, ethdev_qid);
1339c5938838SIvan Malov 
1340c5938838SIvan Malov 	memset(qinfo, 0, sizeof(*qinfo));
1341c5938838SIvan Malov 
1342b57870f2SAndrew Rybchenko 	qinfo->conf.offloads = txq_info->offloads;
1343b57870f2SAndrew Rybchenko 	qinfo->conf.tx_free_thresh = txq_info->free_thresh;
1344c6a1d9b5SIvan Malov 	qinfo->conf.tx_deferred_start = txq_info->deferred_start;
1345c5938838SIvan Malov 	qinfo->nb_desc = txq_info->entries;
1346c5938838SIvan Malov }
1347c5938838SIvan Malov 
1348b76e1b2cSAndrew Rybchenko /*
1349b76e1b2cSAndrew Rybchenko  * The function is used by the secondary process as well. It must not
1350b76e1b2cSAndrew Rybchenko  * use any process-local pointers from the adapter data.
1351b76e1b2cSAndrew Rybchenko  */
135204aa6b9cSAndrew Rybchenko static uint32_t
13538d7d4fcdSKonstantin Ananyev sfc_rx_queue_count(void *rx_queue)
135404aa6b9cSAndrew Rybchenko {
13558d7d4fcdSKonstantin Ananyev 	struct sfc_dp_rxq *dp_rxq = rx_queue;
13568d7d4fcdSKonstantin Ananyev 	const struct sfc_dp_rx *dp_rx;
13572e42d78dSAndrew Rybchenko 	struct sfc_rxq_info *rxq_info;
135804aa6b9cSAndrew Rybchenko 
13598d7d4fcdSKonstantin Ananyev 	dp_rx = sfc_dp_rx_by_dp_rxq(dp_rxq);
13608d7d4fcdSKonstantin Ananyev 	rxq_info = sfc_rxq_info_by_dp_rxq(dp_rxq);
1361768629c6SAndrew Rybchenko 
1362bfea01bcSAndrew Rybchenko 	if ((rxq_info->state & SFC_RXQ_STARTED) == 0)
1363768629c6SAndrew Rybchenko 		return 0;
1364768629c6SAndrew Rybchenko 
13658d7d4fcdSKonstantin Ananyev 	return dp_rx->qdesc_npending(dp_rxq);
136604aa6b9cSAndrew Rybchenko }
136704aa6b9cSAndrew Rybchenko 
1368b76e1b2cSAndrew Rybchenko /*
1369b76e1b2cSAndrew Rybchenko  * The function is used by the secondary process as well. It must not
1370b76e1b2cSAndrew Rybchenko  * use any process-local pointers from the adapter data.
1371b76e1b2cSAndrew Rybchenko  */
137236d84f87SAndrew Rybchenko static int
13731d8f3a80SIvan Malov sfc_rx_descriptor_status(void *queue, uint16_t offset)
13741d8f3a80SIvan Malov {
13751d8f3a80SIvan Malov 	struct sfc_dp_rxq *dp_rxq = queue;
1376b76e1b2cSAndrew Rybchenko 	const struct sfc_dp_rx *dp_rx;
13771d8f3a80SIvan Malov 
1378b76e1b2cSAndrew Rybchenko 	dp_rx = sfc_dp_rx_by_dp_rxq(dp_rxq);
1379b76e1b2cSAndrew Rybchenko 
1380b76e1b2cSAndrew Rybchenko 	return dp_rx->qdesc_status(dp_rxq, offset);
13811d8f3a80SIvan Malov }
13821d8f3a80SIvan Malov 
13833cf4b9c2SAndrew Rybchenko /*
13843cf4b9c2SAndrew Rybchenko  * The function is used by the secondary process as well. It must not
13853cf4b9c2SAndrew Rybchenko  * use any process-local pointers from the adapter data.
13863cf4b9c2SAndrew Rybchenko  */
13871d8f3a80SIvan Malov static int
13887df6f854SIvan Malov sfc_tx_descriptor_status(void *queue, uint16_t offset)
13897df6f854SIvan Malov {
13907df6f854SIvan Malov 	struct sfc_dp_txq *dp_txq = queue;
13913cf4b9c2SAndrew Rybchenko 	const struct sfc_dp_tx *dp_tx;
13927df6f854SIvan Malov 
13933cf4b9c2SAndrew Rybchenko 	dp_tx = sfc_dp_tx_by_dp_txq(dp_txq);
13943cf4b9c2SAndrew Rybchenko 
13953cf4b9c2SAndrew Rybchenko 	return dp_tx->qdesc_status(dp_txq, offset);
13967df6f854SIvan Malov }
13977df6f854SIvan Malov 
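/*
 * Deferred start/stop handlers for individual Rx and Tx queues. Starting
 * a queue requires the adapter to be started and the queue to be set up.
 */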
13987df6f854SIvan Malov static int
139909cafbddSIgor Romanov sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t ethdev_qid)
1400ac7af396SAndrew Rybchenko {
1401dda791c2SAndrew Rybchenko 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
14025313b441SAndrew Rybchenko 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
140309cafbddSIgor Romanov 	sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
140409cafbddSIgor Romanov 	struct sfc_rxq_info *rxq_info;
140509cafbddSIgor Romanov 	sfc_sw_index_t sw_index;
1406ac7af396SAndrew Rybchenko 	int rc;
1407ac7af396SAndrew Rybchenko 
140809cafbddSIgor Romanov 	sfc_log_init(sa, "RxQ=%u", ethdev_qid);
1409ac7af396SAndrew Rybchenko 
1410ac7af396SAndrew Rybchenko 	sfc_adapter_lock(sa);
1411ac7af396SAndrew Rybchenko 
1412ac7af396SAndrew Rybchenko 	rc = EINVAL;
1413ac478689SIgor Romanov 	if (sa->state != SFC_ETHDEV_STARTED)
1414ac7af396SAndrew Rybchenko 		goto fail_not_started;
1415ac7af396SAndrew Rybchenko 
141609cafbddSIgor Romanov 	rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
141709cafbddSIgor Romanov 	if (rxq_info->state != SFC_RXQ_INITIALIZED)
14180668a27aSIgor Romanov 		goto fail_not_setup;
14190668a27aSIgor Romanov 
142009cafbddSIgor Romanov 	sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, sfc_ethdev_qid);
142109cafbddSIgor Romanov 	rc = sfc_rx_qstart(sa, sw_index);
1422ac7af396SAndrew Rybchenko 	if (rc != 0)
1423ac7af396SAndrew Rybchenko 		goto fail_rx_qstart;
1424ac7af396SAndrew Rybchenko 
142509cafbddSIgor Romanov 	rxq_info->deferred_started = B_TRUE;
1426ac7af396SAndrew Rybchenko 
1427ac7af396SAndrew Rybchenko 	sfc_adapter_unlock(sa);
1428ac7af396SAndrew Rybchenko 
1429ac7af396SAndrew Rybchenko 	return 0;
1430ac7af396SAndrew Rybchenko 
1431ac7af396SAndrew Rybchenko fail_rx_qstart:
14320668a27aSIgor Romanov fail_not_setup:
1433ac7af396SAndrew Rybchenko fail_not_started:
1434ac7af396SAndrew Rybchenko 	sfc_adapter_unlock(sa);
1435ac7af396SAndrew Rybchenko 	SFC_ASSERT(rc > 0);
1436ac7af396SAndrew Rybchenko 	return -rc;
1437ac7af396SAndrew Rybchenko }
1438ac7af396SAndrew Rybchenko 
1439ac7af396SAndrew Rybchenko static int
144009cafbddSIgor Romanov sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t ethdev_qid)
1441ac7af396SAndrew Rybchenko {
1442dda791c2SAndrew Rybchenko 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
14435313b441SAndrew Rybchenko 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
144409cafbddSIgor Romanov 	sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
144509cafbddSIgor Romanov 	struct sfc_rxq_info *rxq_info;
144609cafbddSIgor Romanov 	sfc_sw_index_t sw_index;
1447ac7af396SAndrew Rybchenko 
144809cafbddSIgor Romanov 	sfc_log_init(sa, "RxQ=%u", ethdev_qid);
1449ac7af396SAndrew Rybchenko 
1450ac7af396SAndrew Rybchenko 	sfc_adapter_lock(sa);
1451ac7af396SAndrew Rybchenko 
145209cafbddSIgor Romanov 	sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, sfc_ethdev_qid);
145309cafbddSIgor Romanov 	sfc_rx_qstop(sa, sw_index);
145409cafbddSIgor Romanov 
145509cafbddSIgor Romanov 	rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
145609cafbddSIgor Romanov 	rxq_info->deferred_started = B_FALSE;
1457ac7af396SAndrew Rybchenko 
1458ac7af396SAndrew Rybchenko 	sfc_adapter_unlock(sa);
1459ac7af396SAndrew Rybchenko 
1460ac7af396SAndrew Rybchenko 	return 0;
1461ac7af396SAndrew Rybchenko }
1462ac7af396SAndrew Rybchenko 
1463c6a1d9b5SIvan Malov static int
1464db980d26SIgor Romanov sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t ethdev_qid)
1465c6a1d9b5SIvan Malov {
1466113a14a6SAndrew Rybchenko 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
14675313b441SAndrew Rybchenko 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
1468db980d26SIgor Romanov 	struct sfc_txq_info *txq_info;
1469db980d26SIgor Romanov 	sfc_sw_index_t sw_index;
1470c6a1d9b5SIvan Malov 	int rc;
1471c6a1d9b5SIvan Malov 
1472db980d26SIgor Romanov 	sfc_log_init(sa, "TxQ = %u", ethdev_qid);
1473c6a1d9b5SIvan Malov 
1474c6a1d9b5SIvan Malov 	sfc_adapter_lock(sa);
1475c6a1d9b5SIvan Malov 
1476c6a1d9b5SIvan Malov 	rc = EINVAL;
1477ac478689SIgor Romanov 	if (sa->state != SFC_ETHDEV_STARTED)
1478c6a1d9b5SIvan Malov 		goto fail_not_started;
1479c6a1d9b5SIvan Malov 
1480db980d26SIgor Romanov 	txq_info = sfc_txq_info_by_ethdev_qid(sas, ethdev_qid);
1481db980d26SIgor Romanov 	if (txq_info->state != SFC_TXQ_INITIALIZED)
1482862b35afSIgor Romanov 		goto fail_not_setup;
1483862b35afSIgor Romanov 
1484db980d26SIgor Romanov 	sw_index = sfc_txq_sw_index_by_ethdev_tx_qid(sas, ethdev_qid);
1485db980d26SIgor Romanov 	rc = sfc_tx_qstart(sa, sw_index);
1486c6a1d9b5SIvan Malov 	if (rc != 0)
1487c6a1d9b5SIvan Malov 		goto fail_tx_qstart;
1488c6a1d9b5SIvan Malov 
1489db980d26SIgor Romanov 	txq_info->deferred_started = B_TRUE;
1490c6a1d9b5SIvan Malov 
1491c6a1d9b5SIvan Malov 	sfc_adapter_unlock(sa);
1492c6a1d9b5SIvan Malov 	return 0;
1493c6a1d9b5SIvan Malov 
1494c6a1d9b5SIvan Malov fail_tx_qstart:
1495c6a1d9b5SIvan Malov 
1496862b35afSIgor Romanov fail_not_setup:
1497c6a1d9b5SIvan Malov fail_not_started:
1498c6a1d9b5SIvan Malov 	sfc_adapter_unlock(sa);
1499c6a1d9b5SIvan Malov 	SFC_ASSERT(rc > 0);
1500c6a1d9b5SIvan Malov 	return -rc;
1501c6a1d9b5SIvan Malov }
1502c6a1d9b5SIvan Malov 
1503c6a1d9b5SIvan Malov static int
1504db980d26SIgor Romanov sfc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t ethdev_qid)
1505c6a1d9b5SIvan Malov {
1506113a14a6SAndrew Rybchenko 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
15075313b441SAndrew Rybchenko 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
1508db980d26SIgor Romanov 	struct sfc_txq_info *txq_info;
1509db980d26SIgor Romanov 	sfc_sw_index_t sw_index;
1510c6a1d9b5SIvan Malov 
1511db980d26SIgor Romanov 	sfc_log_init(sa, "TxQ = %u", ethdev_qid);
1512c6a1d9b5SIvan Malov 
1513c6a1d9b5SIvan Malov 	sfc_adapter_lock(sa);
1514c6a1d9b5SIvan Malov 
1515db980d26SIgor Romanov 	sw_index = sfc_txq_sw_index_by_ethdev_tx_qid(sas, ethdev_qid);
1516db980d26SIgor Romanov 	sfc_tx_qstop(sa, sw_index);
1517c6a1d9b5SIvan Malov 
1518db980d26SIgor Romanov 	txq_info = sfc_txq_info_by_ethdev_qid(sas, ethdev_qid);
1519db980d26SIgor Romanov 	txq_info->deferred_started = B_FALSE;
1520c6a1d9b5SIvan Malov 
1521c6a1d9b5SIvan Malov 	sfc_adapter_unlock(sa);
1522c6a1d9b5SIvan Malov 	return 0;
1523c6a1d9b5SIvan Malov }
1524c6a1d9b5SIvan Malov 
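/*
 * Map an ethdev UDP tunnel type to the libefx tunnel protocol;
 * EFX_TUNNEL_NPROTOS is returned for unsupported types.
 */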
152536c35355SAndrew Rybchenko static efx_tunnel_protocol_t
152636c35355SAndrew Rybchenko sfc_tunnel_rte_type_to_efx_udp_proto(enum rte_eth_tunnel_type rte_type)
152736c35355SAndrew Rybchenko {
152836c35355SAndrew Rybchenko 	switch (rte_type) {
1529295968d1SFerruh Yigit 	case RTE_ETH_TUNNEL_TYPE_VXLAN:
153036c35355SAndrew Rybchenko 		return EFX_TUNNEL_PROTOCOL_VXLAN;
1531295968d1SFerruh Yigit 	case RTE_ETH_TUNNEL_TYPE_GENEVE:
153236c35355SAndrew Rybchenko 		return EFX_TUNNEL_PROTOCOL_GENEVE;
153336c35355SAndrew Rybchenko 	default:
153436c35355SAndrew Rybchenko 		return EFX_TUNNEL_NPROTOS;
153536c35355SAndrew Rybchenko 	}
153636c35355SAndrew Rybchenko }
153736c35355SAndrew Rybchenko 
153836c35355SAndrew Rybchenko enum sfc_udp_tunnel_op_e {
153936c35355SAndrew Rybchenko 	SFC_UDP_TUNNEL_ADD_PORT,
154036c35355SAndrew Rybchenko 	SFC_UDP_TUNNEL_DEL_PORT,
154136c35355SAndrew Rybchenko };
154236c35355SAndrew Rybchenko 
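/*
 * Common handler for UDP tunnel port add/delete requests. The NIC
 * configuration is updated first; if the adapter is started, a tunnel
 * reconfiguration is requested and the change is rolled back on failure.
 */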
154336c35355SAndrew Rybchenko static int
154436c35355SAndrew Rybchenko sfc_dev_udp_tunnel_op(struct rte_eth_dev *dev,
154536c35355SAndrew Rybchenko 		      struct rte_eth_udp_tunnel *tunnel_udp,
154636c35355SAndrew Rybchenko 		      enum sfc_udp_tunnel_op_e op)
154736c35355SAndrew Rybchenko {
15485313b441SAndrew Rybchenko 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
154936c35355SAndrew Rybchenko 	efx_tunnel_protocol_t tunnel_proto;
155036c35355SAndrew Rybchenko 	int rc;
155136c35355SAndrew Rybchenko 
155236c35355SAndrew Rybchenko 	sfc_log_init(sa, "%s udp_port=%u prot_type=%u",
155336c35355SAndrew Rybchenko 		     (op == SFC_UDP_TUNNEL_ADD_PORT) ? "add" :
155436c35355SAndrew Rybchenko 		     (op == SFC_UDP_TUNNEL_DEL_PORT) ? "delete" : "unknown",
155536c35355SAndrew Rybchenko 		     tunnel_udp->udp_port, tunnel_udp->prot_type);
155636c35355SAndrew Rybchenko 
155736c35355SAndrew Rybchenko 	tunnel_proto =
155836c35355SAndrew Rybchenko 		sfc_tunnel_rte_type_to_efx_udp_proto(tunnel_udp->prot_type);
155936c35355SAndrew Rybchenko 	if (tunnel_proto >= EFX_TUNNEL_NPROTOS) {
156036c35355SAndrew Rybchenko 		rc = ENOTSUP;
156136c35355SAndrew Rybchenko 		goto fail_bad_proto;
156236c35355SAndrew Rybchenko 	}
156336c35355SAndrew Rybchenko 
156436c35355SAndrew Rybchenko 	sfc_adapter_lock(sa);
156536c35355SAndrew Rybchenko 
156636c35355SAndrew Rybchenko 	switch (op) {
156736c35355SAndrew Rybchenko 	case SFC_UDP_TUNNEL_ADD_PORT:
156836c35355SAndrew Rybchenko 		rc = efx_tunnel_config_udp_add(sa->nic,
156936c35355SAndrew Rybchenko 					       tunnel_udp->udp_port,
157036c35355SAndrew Rybchenko 					       tunnel_proto);
157136c35355SAndrew Rybchenko 		break;
157236c35355SAndrew Rybchenko 	case SFC_UDP_TUNNEL_DEL_PORT:
157336c35355SAndrew Rybchenko 		rc = efx_tunnel_config_udp_remove(sa->nic,
157436c35355SAndrew Rybchenko 						  tunnel_udp->udp_port,
157536c35355SAndrew Rybchenko 						  tunnel_proto);
157636c35355SAndrew Rybchenko 		break;
157736c35355SAndrew Rybchenko 	default:
157836c35355SAndrew Rybchenko 		rc = EINVAL;
157936c35355SAndrew Rybchenko 		goto fail_bad_op;
158036c35355SAndrew Rybchenko 	}
158136c35355SAndrew Rybchenko 
158236c35355SAndrew Rybchenko 	if (rc != 0)
158336c35355SAndrew Rybchenko 		goto fail_op;
158436c35355SAndrew Rybchenko 
1585ac478689SIgor Romanov 	if (sa->state == SFC_ETHDEV_STARTED) {
158636c35355SAndrew Rybchenko 		rc = efx_tunnel_reconfigure(sa->nic);
158736c35355SAndrew Rybchenko 		if (rc == EAGAIN) {
158836c35355SAndrew Rybchenko 			/*
158936c35355SAndrew Rybchenko 			 * Configuration is accepted by FW and MC reboot
159036c35355SAndrew Rybchenko 			 * is initiated to apply the changes. MC reboot
159136c35355SAndrew Rybchenko 			 * will be handled in the usual way (MC reboot
159236c35355SAndrew Rybchenko 			 * event on management event queue and adapter
159336c35355SAndrew Rybchenko 			 * restart).
159436c35355SAndrew Rybchenko 			 */
159536c35355SAndrew Rybchenko 			rc = 0;
159636c35355SAndrew Rybchenko 		} else if (rc != 0) {
159736c35355SAndrew Rybchenko 			goto fail_reconfigure;
159836c35355SAndrew Rybchenko 		}
159936c35355SAndrew Rybchenko 	}
160036c35355SAndrew Rybchenko 
160136c35355SAndrew Rybchenko 	sfc_adapter_unlock(sa);
160236c35355SAndrew Rybchenko 	return 0;
160336c35355SAndrew Rybchenko 
160436c35355SAndrew Rybchenko fail_reconfigure:
160536c35355SAndrew Rybchenko 	/* Remove/restore the entry since the change caused the trouble */
160636c35355SAndrew Rybchenko 	switch (op) {
160736c35355SAndrew Rybchenko 	case SFC_UDP_TUNNEL_ADD_PORT:
160836c35355SAndrew Rybchenko 		(void)efx_tunnel_config_udp_remove(sa->nic,
160936c35355SAndrew Rybchenko 						   tunnel_udp->udp_port,
161036c35355SAndrew Rybchenko 						   tunnel_proto);
161136c35355SAndrew Rybchenko 		break;
161236c35355SAndrew Rybchenko 	case SFC_UDP_TUNNEL_DEL_PORT:
161336c35355SAndrew Rybchenko 		(void)efx_tunnel_config_udp_add(sa->nic,
161436c35355SAndrew Rybchenko 						tunnel_udp->udp_port,
161536c35355SAndrew Rybchenko 						tunnel_proto);
161636c35355SAndrew Rybchenko 		break;
161736c35355SAndrew Rybchenko 	}
161836c35355SAndrew Rybchenko 
161936c35355SAndrew Rybchenko fail_op:
162036c35355SAndrew Rybchenko fail_bad_op:
162136c35355SAndrew Rybchenko 	sfc_adapter_unlock(sa);
162236c35355SAndrew Rybchenko 
162336c35355SAndrew Rybchenko fail_bad_proto:
162436c35355SAndrew Rybchenko 	SFC_ASSERT(rc > 0);
162536c35355SAndrew Rybchenko 	return -rc;
162636c35355SAndrew Rybchenko }
162736c35355SAndrew Rybchenko 
162836c35355SAndrew Rybchenko static int
162936c35355SAndrew Rybchenko sfc_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
163036c35355SAndrew Rybchenko 			    struct rte_eth_udp_tunnel *tunnel_udp)
163136c35355SAndrew Rybchenko {
163236c35355SAndrew Rybchenko 	return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_ADD_PORT);
163336c35355SAndrew Rybchenko }
163436c35355SAndrew Rybchenko 
163536c35355SAndrew Rybchenko static int
163636c35355SAndrew Rybchenko sfc_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
163736c35355SAndrew Rybchenko 			    struct rte_eth_udp_tunnel *tunnel_udp)
163836c35355SAndrew Rybchenko {
163936c35355SAndrew Rybchenko 	return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_DEL_PORT);
164036c35355SAndrew Rybchenko }
164136c35355SAndrew Rybchenko 
1642128da692SAndrew Rybchenko /*
1643128da692SAndrew Rybchenko  * The function is used by the secondary process as well. It must not
1644128da692SAndrew Rybchenko  * use any process-local pointers from the adapter data.
1645128da692SAndrew Rybchenko  */
1646088e1721SIvan Malov static int
1647088e1721SIvan Malov sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1648088e1721SIvan Malov 			  struct rte_eth_rss_conf *rss_conf)
1649088e1721SIvan Malov {
1650e295f175SAndrew Rybchenko 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1651e295f175SAndrew Rybchenko 	struct sfc_rss *rss = &sas->rss;
1652088e1721SIvan Malov 
1653453e1d4bSIvan Malov 	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE)
1654088e1721SIvan Malov 		return -ENOTSUP;
1655088e1721SIvan Malov 
1656088e1721SIvan Malov 	/*
1657088e1721SIvan Malov 	 * Mapping of hash configuration between RTE and EFX is not one-to-one;
1658295968d1SFerruh Yigit 	 * hence, conversion is done here to derive the correct set of RTE_ETH_RSS
1659088e1721SIvan Malov 	 * flags that corresponds to the active EFX configuration stored
1660088e1721SIvan Malov 	 * locally in 'sfc_adapter' and kept up-to-date.
1661088e1721SIvan Malov 	 */
1662e295f175SAndrew Rybchenko 	rss_conf->rss_hf = sfc_rx_hf_efx_to_rte(rss, rss->hash_types);
166337a42c61SAndrew Rybchenko 	rss_conf->rss_key_len = EFX_RSS_KEY_SIZE;
1664088e1721SIvan Malov 	if (rss_conf->rss_key != NULL)
1665d1482e21SIvan Malov 		rte_memcpy(rss_conf->rss_key, rss->key, EFX_RSS_KEY_SIZE);
1666088e1721SIvan Malov 
1667088e1721SIvan Malov 	return 0;
1668088e1721SIvan Malov }
166982faef50SIvan Malov 
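/*
 * Update the RSS hash key and/or hash types. The new mode is applied to
 * the default RSS context and, if it is referenced, to the driver's dummy
 * RSS context as well; the key is programmed only when the adapter is
 * started. On failure the previous settings are restored for any context
 * that has already been updated.
 */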
167082faef50SIvan Malov static int
167182faef50SIvan Malov sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
167282faef50SIvan Malov 			struct rte_eth_rss_conf *rss_conf)
167382faef50SIvan Malov {
16745313b441SAndrew Rybchenko 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
1675e295f175SAndrew Rybchenko 	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
167682faef50SIvan Malov 	unsigned int efx_hash_types;
167792a15fc5SIgor Romanov 	unsigned int n_contexts;
167892a15fc5SIgor Romanov 	unsigned int mode_i = 0;
167992a15fc5SIgor Romanov 	unsigned int key_i = 0;
1680*6da67e70SIvan Malov 	uint32_t contexts[2];
168192a15fc5SIgor Romanov 	unsigned int i = 0;
168282faef50SIvan Malov 	int rc = 0;
168382faef50SIvan Malov 
1684e0d5ba7eSAndrew Rybchenko 	if (sfc_sa2shared(sa)->isolated)
168584a9b481SIvan Malov 		return -ENOTSUP;
168684a9b481SIvan Malov 
1687d1482e21SIvan Malov 	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
168882faef50SIvan Malov 		sfc_err(sa, "RSS is not available");
168982faef50SIvan Malov 		return -ENOTSUP;
169082faef50SIvan Malov 	}
169182faef50SIvan Malov 
1692d1482e21SIvan Malov 	if (rss->channels == 0) {
169363ab5e0cSAndrew Rybchenko 		sfc_err(sa, "RSS is not configured");
169463ab5e0cSAndrew Rybchenko 		return -EINVAL;
169563ab5e0cSAndrew Rybchenko 	}
169663ab5e0cSAndrew Rybchenko 
169782faef50SIvan Malov 	if ((rss_conf->rss_key != NULL) &&
1698d1482e21SIvan Malov 	    (rss_conf->rss_key_len != sizeof(rss->key))) {
16996b9a30d9SFerruh Yigit 		sfc_err(sa, "RSS key size is wrong (should be %zu)",
1700d1482e21SIvan Malov 			sizeof(rss->key));
170182faef50SIvan Malov 		return -EINVAL;
170282faef50SIvan Malov 	}
170382faef50SIvan Malov 
170482faef50SIvan Malov 	sfc_adapter_lock(sa);
170582faef50SIvan Malov 
170601764b20SIvan Malov 	rc = sfc_rx_hf_rte_to_efx(sa, rss_conf->rss_hf, &efx_hash_types);
170701764b20SIvan Malov 	if (rc != 0)
170801764b20SIvan Malov 		goto fail_rx_hf_rte_to_efx;
170982faef50SIvan Malov 
1710*6da67e70SIvan Malov 	contexts[0] = EFX_RSS_CONTEXT_DEFAULT;
1711*6da67e70SIvan Malov 	contexts[1] = rss->dummy_ctx.nic_handle;
1712*6da67e70SIvan Malov 	n_contexts = (rss->dummy_ctx.nic_handle_refcnt == 0) ? 1 : 2;
1713*6da67e70SIvan Malov 
171492a15fc5SIgor Romanov 	for (mode_i = 0; mode_i < n_contexts; mode_i++) {
171592a15fc5SIgor Romanov 		rc = efx_rx_scale_mode_set(sa->nic, contexts[mode_i],
171692a15fc5SIgor Romanov 					   rss->hash_alg, efx_hash_types,
171792a15fc5SIgor Romanov 					   B_TRUE);
171882faef50SIvan Malov 		if (rc != 0)
171982faef50SIvan Malov 			goto fail_scale_mode_set;
172092a15fc5SIgor Romanov 	}
172182faef50SIvan Malov 
172282faef50SIvan Malov 	if (rss_conf->rss_key != NULL) {
1723ac478689SIgor Romanov 		if (sa->state == SFC_ETHDEV_STARTED) {
172492a15fc5SIgor Romanov 			for (key_i = 0; key_i < n_contexts; key_i++) {
172503081632SMark Spender 				rc = efx_rx_scale_key_set(sa->nic,
172692a15fc5SIgor Romanov 							  contexts[key_i],
172703081632SMark Spender 							  rss_conf->rss_key,
1728d1482e21SIvan Malov 							  sizeof(rss->key));
172982faef50SIvan Malov 				if (rc != 0)
173082faef50SIvan Malov 					goto fail_scale_key_set;
173182faef50SIvan Malov 			}
173292a15fc5SIgor Romanov 		}
173382faef50SIvan Malov 
1734d1482e21SIvan Malov 		rte_memcpy(rss->key, rss_conf->rss_key, sizeof(rss->key));
173582faef50SIvan Malov 	}
173682faef50SIvan Malov 
1737d1482e21SIvan Malov 	rss->hash_types = efx_hash_types;
173882faef50SIvan Malov 
173982faef50SIvan Malov 	sfc_adapter_unlock(sa);
174082faef50SIvan Malov 
174182faef50SIvan Malov 	return 0;
174282faef50SIvan Malov 
174382faef50SIvan Malov fail_scale_key_set:
174492a15fc5SIgor Romanov 	for (i = 0; i < key_i; i++) {
174592a15fc5SIgor Romanov 		if (efx_rx_scale_key_set(sa->nic, contexts[i], rss->key,
174692a15fc5SIgor Romanov 					 sizeof(rss->key)) != 0)
174792a15fc5SIgor Romanov 			sfc_err(sa, "failed to restore RSS key");
174892a15fc5SIgor Romanov 	}
174992a15fc5SIgor Romanov 
175092a15fc5SIgor Romanov fail_scale_mode_set:
175192a15fc5SIgor Romanov 	for (i = 0; i < mode_i; i++) {
175292a15fc5SIgor Romanov 		if (efx_rx_scale_mode_set(sa->nic, contexts[i],
175303081632SMark Spender 					  EFX_RX_HASHALG_TOEPLITZ,
1754d1482e21SIvan Malov 					  rss->hash_types, B_TRUE) != 0)
175582faef50SIvan Malov 			sfc_err(sa, "failed to restore RSS mode");
175692a15fc5SIgor Romanov 	}
175782faef50SIvan Malov 
175801764b20SIvan Malov fail_rx_hf_rte_to_efx:
175982faef50SIvan Malov 	sfc_adapter_unlock(sa);
176082faef50SIvan Malov 	return -rc;
176182faef50SIvan Malov }
1762af0d9317SIvan Malov 
1763128da692SAndrew Rybchenko /*
1764128da692SAndrew Rybchenko  * The function is used by the secondary process as well. It must not
1765128da692SAndrew Rybchenko  * use any process-local pointers from the adapter data.
1766128da692SAndrew Rybchenko  */
1767af0d9317SIvan Malov static int
1768af0d9317SIvan Malov sfc_dev_rss_reta_query(struct rte_eth_dev *dev,
1769af0d9317SIvan Malov 		       struct rte_eth_rss_reta_entry64 *reta_conf,
1770af0d9317SIvan Malov 		       uint16_t reta_size)
1771af0d9317SIvan Malov {
1772e295f175SAndrew Rybchenko 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1773e295f175SAndrew Rybchenko 	struct sfc_rss *rss = &sas->rss;
1774af0d9317SIvan Malov 	int entry;
1775af0d9317SIvan Malov 
1776e0d5ba7eSAndrew Rybchenko 	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE || sas->isolated)
1777af0d9317SIvan Malov 		return -ENOTSUP;
1778af0d9317SIvan Malov 
1779d1482e21SIvan Malov 	if (rss->channels == 0)
178063ab5e0cSAndrew Rybchenko 		return -EINVAL;
178163ab5e0cSAndrew Rybchenko 
1782af0d9317SIvan Malov 	if (reta_size != EFX_RSS_TBL_SIZE)
1783af0d9317SIvan Malov 		return -EINVAL;
1784af0d9317SIvan Malov 
1785af0d9317SIvan Malov 	for (entry = 0; entry < reta_size; entry++) {
1786295968d1SFerruh Yigit 		int grp = entry / RTE_ETH_RETA_GROUP_SIZE;
1787295968d1SFerruh Yigit 		int grp_idx = entry % RTE_ETH_RETA_GROUP_SIZE;
1788af0d9317SIvan Malov 
1789af0d9317SIvan Malov 		if ((reta_conf[grp].mask >> grp_idx) & 1)
1790d1482e21SIvan Malov 			reta_conf[grp].reta[grp_idx] = rss->tbl[entry];
1791af0d9317SIvan Malov 	}
1792af0d9317SIvan Malov 
1793af0d9317SIvan Malov 	return 0;
1794af0d9317SIvan Malov }
179532bcfb0aSIvan Malov 
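/*
 * Update the RSS redirection table. reta_conf is split into 64-entry
 * groups, each with its own validity mask: table entry N belongs to group
 * N / RTE_ETH_RETA_GROUP_SIZE, bit N % RTE_ETH_RETA_GROUP_SIZE (e.g. with
 * 64-entry groups, entry 130 is group 2, bit 2). The table is applied to
 * the hardware only when the adapter is started.
 */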
179632bcfb0aSIvan Malov static int
179732bcfb0aSIvan Malov sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
179832bcfb0aSIvan Malov 			struct rte_eth_rss_reta_entry64 *reta_conf,
179932bcfb0aSIvan Malov 			uint16_t reta_size)
180032bcfb0aSIvan Malov {
18015313b441SAndrew Rybchenko 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
1802e295f175SAndrew Rybchenko 	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
180332bcfb0aSIvan Malov 	unsigned int *rss_tbl_new;
180432bcfb0aSIvan Malov 	uint16_t entry;
180573332ab8SIvan Malov 	int rc = 0;
180632bcfb0aSIvan Malov 
180732bcfb0aSIvan Malov 
1808e0d5ba7eSAndrew Rybchenko 	if (sfc_sa2shared(sa)->isolated)
180984a9b481SIvan Malov 		return -ENOTSUP;
181084a9b481SIvan Malov 
1811d1482e21SIvan Malov 	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
181232bcfb0aSIvan Malov 		sfc_err(sa, "RSS is not available");
181332bcfb0aSIvan Malov 		return -ENOTSUP;
181432bcfb0aSIvan Malov 	}
181532bcfb0aSIvan Malov 
1816d1482e21SIvan Malov 	if (rss->channels == 0) {
181763ab5e0cSAndrew Rybchenko 		sfc_err(sa, "RSS is not configured");
181863ab5e0cSAndrew Rybchenko 		return -EINVAL;
181963ab5e0cSAndrew Rybchenko 	}
182063ab5e0cSAndrew Rybchenko 
182132bcfb0aSIvan Malov 	if (reta_size != EFX_RSS_TBL_SIZE) {
182232bcfb0aSIvan Malov 		sfc_err(sa, "RETA size is wrong (should be %u)",
182332bcfb0aSIvan Malov 			EFX_RSS_TBL_SIZE);
182432bcfb0aSIvan Malov 		return -EINVAL;
182532bcfb0aSIvan Malov 	}
182632bcfb0aSIvan Malov 
1827d1482e21SIvan Malov 	rss_tbl_new = rte_zmalloc("rss_tbl_new", sizeof(rss->tbl), 0);
182832bcfb0aSIvan Malov 	if (rss_tbl_new == NULL)
182932bcfb0aSIvan Malov 		return -ENOMEM;
183032bcfb0aSIvan Malov 
183132bcfb0aSIvan Malov 	sfc_adapter_lock(sa);
183232bcfb0aSIvan Malov 
1833d1482e21SIvan Malov 	rte_memcpy(rss_tbl_new, rss->tbl, sizeof(rss->tbl));
183432bcfb0aSIvan Malov 
183532bcfb0aSIvan Malov 	for (entry = 0; entry < reta_size; entry++) {
1836295968d1SFerruh Yigit 		int grp_idx = entry % RTE_ETH_RETA_GROUP_SIZE;
183732bcfb0aSIvan Malov 		struct rte_eth_rss_reta_entry64 *grp;
183832bcfb0aSIvan Malov 
1839295968d1SFerruh Yigit 		grp = &reta_conf[entry / RTE_ETH_RETA_GROUP_SIZE];
184032bcfb0aSIvan Malov 
184132bcfb0aSIvan Malov 		if (grp->mask & (1ull << grp_idx)) {
1842d1482e21SIvan Malov 			if (grp->reta[grp_idx] >= rss->channels) {
184332bcfb0aSIvan Malov 				rc = EINVAL;
184432bcfb0aSIvan Malov 				goto bad_reta_entry;
184532bcfb0aSIvan Malov 			}
184632bcfb0aSIvan Malov 			rss_tbl_new[entry] = grp->reta[grp_idx];
184732bcfb0aSIvan Malov 		}
184832bcfb0aSIvan Malov 	}
184932bcfb0aSIvan Malov 
1850ac478689SIgor Romanov 	if (sa->state == SFC_ETHDEV_STARTED) {
185103081632SMark Spender 		rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
185203081632SMark Spender 					  rss_tbl_new, EFX_RSS_TBL_SIZE);
185373332ab8SIvan Malov 		if (rc != 0)
185473332ab8SIvan Malov 			goto fail_scale_tbl_set;
185573332ab8SIvan Malov 	}
185673332ab8SIvan Malov 
1857d1482e21SIvan Malov 	rte_memcpy(rss->tbl, rss_tbl_new, sizeof(rss->tbl));
185832bcfb0aSIvan Malov 
185973332ab8SIvan Malov fail_scale_tbl_set:
186032bcfb0aSIvan Malov bad_reta_entry:
186132bcfb0aSIvan Malov 	sfc_adapter_unlock(sa);
186232bcfb0aSIvan Malov 
186332bcfb0aSIvan Malov 	rte_free(rss_tbl_new);
186432bcfb0aSIvan Malov 
186532bcfb0aSIvan Malov 	SFC_ASSERT(rc >= 0);
186632bcfb0aSIvan Malov 	return -rc;
186732bcfb0aSIvan Malov }
1868088e1721SIvan Malov 
1869403030f5SAndrew Rybchenko static int
1870fb7ad441SThomas Monjalon sfc_dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
1871fb7ad441SThomas Monjalon 		     const struct rte_flow_ops **ops)
1872403030f5SAndrew Rybchenko {
1873fb7ad441SThomas Monjalon 	*ops = &sfc_flow_ops;
1874fb7ad441SThomas Monjalon 	return 0;
1875403030f5SAndrew Rybchenko }
1876403030f5SAndrew Rybchenko 
187708d23c67SAndrew Rybchenko static int
187808d23c67SAndrew Rybchenko sfc_pool_ops_supported(struct rte_eth_dev *dev, const char *pool)
187908d23c67SAndrew Rybchenko {
18805dec95e3SAndrew Rybchenko 	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
188108d23c67SAndrew Rybchenko 
188208d23c67SAndrew Rybchenko 	/*
188308d23c67SAndrew Rybchenko 	 * If Rx datapath does not provide callback to check mempool,
188408d23c67SAndrew Rybchenko 	 * all pools are supported.
188508d23c67SAndrew Rybchenko 	 */
18865dec95e3SAndrew Rybchenko 	if (sap->dp_rx->pool_ops_supported == NULL)
188708d23c67SAndrew Rybchenko 		return 1;
188808d23c67SAndrew Rybchenko 
18895dec95e3SAndrew Rybchenko 	return sap->dp_rx->pool_ops_supported(pool);
189008d23c67SAndrew Rybchenko }
189108d23c67SAndrew Rybchenko 
18924279b54eSGeorgiy Levashov static int
189309cafbddSIgor Romanov sfc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t ethdev_qid)
18944279b54eSGeorgiy Levashov {
18954279b54eSGeorgiy Levashov 	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
18964279b54eSGeorgiy Levashov 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
189709cafbddSIgor Romanov 	sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
18984279b54eSGeorgiy Levashov 	struct sfc_rxq_info *rxq_info;
18994279b54eSGeorgiy Levashov 
190009cafbddSIgor Romanov 	rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
19014279b54eSGeorgiy Levashov 
19024279b54eSGeorgiy Levashov 	return sap->dp_rx->intr_enable(rxq_info->dp);
19034279b54eSGeorgiy Levashov }
19044279b54eSGeorgiy Levashov 
19054279b54eSGeorgiy Levashov static int
190609cafbddSIgor Romanov sfc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t ethdev_qid)
19074279b54eSGeorgiy Levashov {
19084279b54eSGeorgiy Levashov 	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
19094279b54eSGeorgiy Levashov 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
191009cafbddSIgor Romanov 	sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
19114279b54eSGeorgiy Levashov 	struct sfc_rxq_info *rxq_info;
19124279b54eSGeorgiy Levashov 
191309cafbddSIgor Romanov 	rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
19144279b54eSGeorgiy Levashov 
19154279b54eSGeorgiy Levashov 	return sap->dp_rx->intr_disable(rxq_info->dp);
19164279b54eSGeorgiy Levashov }
19174279b54eSGeorgiy Levashov 
191844db08d5SViacheslav Galaktionov struct sfc_mport_journal_ctx {
191944db08d5SViacheslav Galaktionov 	struct sfc_adapter		*sa;
192044db08d5SViacheslav Galaktionov 	uint16_t			switch_domain_id;
192144db08d5SViacheslav Galaktionov 	uint32_t			mcdi_handle;
192244db08d5SViacheslav Galaktionov 	bool				controllers_assigned;
192344db08d5SViacheslav Galaktionov 	efx_pcie_interface_t		*controllers;
192444db08d5SViacheslav Galaktionov 	size_t				nb_controllers;
192544db08d5SViacheslav Galaktionov };
192644db08d5SViacheslav Galaktionov 
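/*
 * Remember a PCIe interface (controller) seen in the m-port journal.
 * The list is kept sorted and duplicate-free; entries after the insertion
 * point are shifted starting from the end so that earlier values are
 * preserved.
 */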
192744db08d5SViacheslav Galaktionov static int
192844db08d5SViacheslav Galaktionov sfc_journal_ctx_add_controller(struct sfc_mport_journal_ctx *ctx,
192944db08d5SViacheslav Galaktionov 			       efx_pcie_interface_t intf)
193044db08d5SViacheslav Galaktionov {
193144db08d5SViacheslav Galaktionov 	efx_pcie_interface_t *new_controllers;
193244db08d5SViacheslav Galaktionov 	size_t i, target;
193344db08d5SViacheslav Galaktionov 	size_t new_size;
193444db08d5SViacheslav Galaktionov 
193544db08d5SViacheslav Galaktionov 	if (ctx->controllers == NULL) {
193644db08d5SViacheslav Galaktionov 		ctx->controllers = rte_malloc("sfc_controller_mapping",
193744db08d5SViacheslav Galaktionov 					      sizeof(ctx->controllers[0]), 0);
193844db08d5SViacheslav Galaktionov 		if (ctx->controllers == NULL)
193944db08d5SViacheslav Galaktionov 			return ENOMEM;
194044db08d5SViacheslav Galaktionov 
194144db08d5SViacheslav Galaktionov 		ctx->controllers[0] = intf;
194244db08d5SViacheslav Galaktionov 		ctx->nb_controllers = 1;
194344db08d5SViacheslav Galaktionov 
194444db08d5SViacheslav Galaktionov 		return 0;
194544db08d5SViacheslav Galaktionov 	}
194644db08d5SViacheslav Galaktionov 
194744db08d5SViacheslav Galaktionov 	for (i = 0; i < ctx->nb_controllers; i++) {
194844db08d5SViacheslav Galaktionov 		if (ctx->controllers[i] == intf)
194944db08d5SViacheslav Galaktionov 			return 0;
195044db08d5SViacheslav Galaktionov 		if (ctx->controllers[i] > intf)
195144db08d5SViacheslav Galaktionov 			break;
195244db08d5SViacheslav Galaktionov 	}
195344db08d5SViacheslav Galaktionov 	target = i;
195444db08d5SViacheslav Galaktionov 
195544db08d5SViacheslav Galaktionov 	ctx->nb_controllers += 1;
195644db08d5SViacheslav Galaktionov 	new_size = ctx->nb_controllers * sizeof(ctx->controllers[0]);
195744db08d5SViacheslav Galaktionov 
195844db08d5SViacheslav Galaktionov 	new_controllers = rte_realloc(ctx->controllers, new_size, 0);
195944db08d5SViacheslav Galaktionov 	if (new_controllers == NULL) {
196044db08d5SViacheslav Galaktionov 		rte_free(ctx->controllers);
196144db08d5SViacheslav Galaktionov 		return ENOMEM;
196244db08d5SViacheslav Galaktionov 	}
196344db08d5SViacheslav Galaktionov 	ctx->controllers = new_controllers;
196444db08d5SViacheslav Galaktionov 
196544db08d5SViacheslav Galaktionov 	for (i = ctx->nb_controllers - 1; i > target; i--)
196644db08d5SViacheslav Galaktionov 		ctx->controllers[i] = ctx->controllers[i - 1];
196744db08d5SViacheslav Galaktionov 
196844db08d5SViacheslav Galaktionov 	ctx->controllers[target] = intf;
196944db08d5SViacheslav Galaktionov 
197044db08d5SViacheslav Galaktionov 	return 0;
197144db08d5SViacheslav Galaktionov }
197244db08d5SViacheslav Galaktionov 
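/*
 * Handle a single VNIC m-port from the journal: look up the corresponding
 * MAE switch port and, if no representor has been registered for the
 * entity yet, create a placeholder entry with an invalid ethdev m-port
 * selector to be filled in when the representor is created.
 */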
197344db08d5SViacheslav Galaktionov static efx_rc_t
197444db08d5SViacheslav Galaktionov sfc_process_mport_journal_entry(struct sfc_mport_journal_ctx *ctx,
197544db08d5SViacheslav Galaktionov 				efx_mport_desc_t *mport)
197644db08d5SViacheslav Galaktionov {
1977599e4e9aSViacheslav Galaktionov 	struct sfc_mae_switch_port_request req;
1978599e4e9aSViacheslav Galaktionov 	efx_mport_sel_t entity_selector;
197944db08d5SViacheslav Galaktionov 	efx_mport_sel_t ethdev_mport;
1980599e4e9aSViacheslav Galaktionov 	uint16_t switch_port_id;
1981599e4e9aSViacheslav Galaktionov 	efx_rc_t efx_rc;
198244db08d5SViacheslav Galaktionov 	int rc;
198344db08d5SViacheslav Galaktionov 
198444db08d5SViacheslav Galaktionov 	sfc_dbg(ctx->sa,
198544db08d5SViacheslav Galaktionov 		"processing mport id %u (controller %u pf %u vf %u)",
198644db08d5SViacheslav Galaktionov 		mport->emd_id.id, mport->emd_vnic.ev_intf,
198744db08d5SViacheslav Galaktionov 		mport->emd_vnic.ev_pf, mport->emd_vnic.ev_vf);
198844db08d5SViacheslav Galaktionov 	efx_mae_mport_invalid(&ethdev_mport);
198944db08d5SViacheslav Galaktionov 
199044db08d5SViacheslav Galaktionov 	if (!ctx->controllers_assigned) {
199144db08d5SViacheslav Galaktionov 		rc = sfc_journal_ctx_add_controller(ctx,
199244db08d5SViacheslav Galaktionov 						    mport->emd_vnic.ev_intf);
199344db08d5SViacheslav Galaktionov 		if (rc != 0)
199444db08d5SViacheslav Galaktionov 			return rc;
199544db08d5SViacheslav Galaktionov 	}
199644db08d5SViacheslav Galaktionov 
1997599e4e9aSViacheslav Galaktionov 	/* Build Mport selector */
1998599e4e9aSViacheslav Galaktionov 	efx_rc = efx_mae_mport_by_pcie_mh_function(mport->emd_vnic.ev_intf,
1999599e4e9aSViacheslav Galaktionov 						mport->emd_vnic.ev_pf,
2000599e4e9aSViacheslav Galaktionov 						mport->emd_vnic.ev_vf,
2001599e4e9aSViacheslav Galaktionov 						&entity_selector);
2002599e4e9aSViacheslav Galaktionov 	if (efx_rc != 0) {
2003599e4e9aSViacheslav Galaktionov 		sfc_err(ctx->sa, "failed to build entity mport selector for c%upf%uvf%u",
2004599e4e9aSViacheslav Galaktionov 			mport->emd_vnic.ev_intf,
2005599e4e9aSViacheslav Galaktionov 			mport->emd_vnic.ev_pf,
2006599e4e9aSViacheslav Galaktionov 			mport->emd_vnic.ev_vf);
2007599e4e9aSViacheslav Galaktionov 		return efx_rc;
2008599e4e9aSViacheslav Galaktionov 	}
2009599e4e9aSViacheslav Galaktionov 
2010599e4e9aSViacheslav Galaktionov 	rc = sfc_mae_switch_port_id_by_entity(ctx->switch_domain_id,
2011599e4e9aSViacheslav Galaktionov 					      &entity_selector,
2012599e4e9aSViacheslav Galaktionov 					      SFC_MAE_SWITCH_PORT_REPRESENTOR,
2013599e4e9aSViacheslav Galaktionov 					      &switch_port_id);
2014599e4e9aSViacheslav Galaktionov 	switch (rc) {
2015599e4e9aSViacheslav Galaktionov 	case 0:
2016599e4e9aSViacheslav Galaktionov 		/* Already registered */
2017599e4e9aSViacheslav Galaktionov 		break;
2018599e4e9aSViacheslav Galaktionov 	case ENOENT:
2019599e4e9aSViacheslav Galaktionov 		/*
2020599e4e9aSViacheslav Galaktionov 		 * No representor has been created for this entity.
2021599e4e9aSViacheslav Galaktionov 		 * Create a dummy switch registry entry with an invalid ethdev
2022599e4e9aSViacheslav Galaktionov 		 * mport selector. When a corresponding representor is created,
2023599e4e9aSViacheslav Galaktionov 		 * this entry will be updated.
2024599e4e9aSViacheslav Galaktionov 		 */
2025599e4e9aSViacheslav Galaktionov 		req.type = SFC_MAE_SWITCH_PORT_REPRESENTOR;
2026599e4e9aSViacheslav Galaktionov 		req.entity_mportp = &entity_selector;
2027599e4e9aSViacheslav Galaktionov 		req.ethdev_mportp = &ethdev_mport;
2028599e4e9aSViacheslav Galaktionov 		req.ethdev_port_id = RTE_MAX_ETHPORTS;
2029599e4e9aSViacheslav Galaktionov 		req.port_data.repr.intf = mport->emd_vnic.ev_intf;
2030599e4e9aSViacheslav Galaktionov 		req.port_data.repr.pf = mport->emd_vnic.ev_pf;
2031599e4e9aSViacheslav Galaktionov 		req.port_data.repr.vf = mport->emd_vnic.ev_vf;
2032599e4e9aSViacheslav Galaktionov 
2033599e4e9aSViacheslav Galaktionov 		rc = sfc_mae_assign_switch_port(ctx->switch_domain_id,
2034599e4e9aSViacheslav Galaktionov 						&req, &switch_port_id);
2035599e4e9aSViacheslav Galaktionov 		if (rc != 0) {
2036599e4e9aSViacheslav Galaktionov 			sfc_err(ctx->sa,
2037599e4e9aSViacheslav Galaktionov 				"failed to assign MAE switch port for c%upf%uvf%u: %s",
2038599e4e9aSViacheslav Galaktionov 				mport->emd_vnic.ev_intf,
2039599e4e9aSViacheslav Galaktionov 				mport->emd_vnic.ev_pf,
2040599e4e9aSViacheslav Galaktionov 				mport->emd_vnic.ev_vf,
2041599e4e9aSViacheslav Galaktionov 				rte_strerror(rc));
2042599e4e9aSViacheslav Galaktionov 			return rc;
2043599e4e9aSViacheslav Galaktionov 		}
2044599e4e9aSViacheslav Galaktionov 		break;
2045599e4e9aSViacheslav Galaktionov 	default:
2046599e4e9aSViacheslav Galaktionov 		sfc_err(ctx->sa, "failed to find MAE switch port for c%upf%uvf%u: %s",
2047599e4e9aSViacheslav Galaktionov 			mport->emd_vnic.ev_intf,
2048599e4e9aSViacheslav Galaktionov 			mport->emd_vnic.ev_pf,
2049599e4e9aSViacheslav Galaktionov 			mport->emd_vnic.ev_vf,
2050599e4e9aSViacheslav Galaktionov 			rte_strerror(rc));
2051599e4e9aSViacheslav Galaktionov 		return rc;
2052599e4e9aSViacheslav Galaktionov 	}
2053599e4e9aSViacheslav Galaktionov 
205444db08d5SViacheslav Galaktionov 	return 0;
205544db08d5SViacheslav Galaktionov }
205644db08d5SViacheslav Galaktionov 
205744db08d5SViacheslav Galaktionov static efx_rc_t
205844db08d5SViacheslav Galaktionov sfc_process_mport_journal_cb(void *data, efx_mport_desc_t *mport,
205944db08d5SViacheslav Galaktionov 			     size_t mport_len)
206044db08d5SViacheslav Galaktionov {
206144db08d5SViacheslav Galaktionov 	struct sfc_mport_journal_ctx *ctx = data;
206244db08d5SViacheslav Galaktionov 
206344db08d5SViacheslav Galaktionov 	if (ctx == NULL || ctx->sa == NULL) {
206444db08d5SViacheslav Galaktionov 		SFC_GENERIC_LOG(ERR, "received NULL context or SFC adapter");
206544db08d5SViacheslav Galaktionov 		return EINVAL;
206644db08d5SViacheslav Galaktionov 	}
206744db08d5SViacheslav Galaktionov 
206844db08d5SViacheslav Galaktionov 	if (mport_len != sizeof(*mport)) {
206944db08d5SViacheslav Galaktionov 		sfc_err(ctx->sa, "actual and expected mport buffer sizes differ");
207044db08d5SViacheslav Galaktionov 		return EINVAL;
207144db08d5SViacheslav Galaktionov 	}
207244db08d5SViacheslav Galaktionov 
207344db08d5SViacheslav Galaktionov 	SFC_ASSERT(sfc_adapter_is_locked(ctx->sa));
207444db08d5SViacheslav Galaktionov 
207544db08d5SViacheslav Galaktionov 	/*
207644db08d5SViacheslav Galaktionov 	 * If a zombie flag is set, it means the mport has been marked for
207744db08d5SViacheslav Galaktionov 	 * deletion and cannot be used for any new operations. The mport will
207844db08d5SViacheslav Galaktionov 	 * be destroyed completely once all references to it are released.
207944db08d5SViacheslav Galaktionov 	 */
208044db08d5SViacheslav Galaktionov 	if (mport->emd_zombie) {
208144db08d5SViacheslav Galaktionov 		sfc_dbg(ctx->sa, "mport is a zombie, skipping");
208244db08d5SViacheslav Galaktionov 		return 0;
208344db08d5SViacheslav Galaktionov 	}
208444db08d5SViacheslav Galaktionov 	if (mport->emd_type != EFX_MPORT_TYPE_VNIC) {
208544db08d5SViacheslav Galaktionov 		sfc_dbg(ctx->sa, "mport is not a VNIC, skipping");
208644db08d5SViacheslav Galaktionov 		return 0;
208744db08d5SViacheslav Galaktionov 	}
208844db08d5SViacheslav Galaktionov 	if (mport->emd_vnic.ev_client_type != EFX_MPORT_VNIC_CLIENT_FUNCTION) {
208944db08d5SViacheslav Galaktionov 		sfc_dbg(ctx->sa, "mport is not a function, skipping");
209044db08d5SViacheslav Galaktionov 		return 0;
209144db08d5SViacheslav Galaktionov 	}
209244db08d5SViacheslav Galaktionov 	if (mport->emd_vnic.ev_handle == ctx->mcdi_handle) {
209344db08d5SViacheslav Galaktionov 		sfc_dbg(ctx->sa, "mport is this driver instance, skipping");
209444db08d5SViacheslav Galaktionov 		return 0;
209544db08d5SViacheslav Galaktionov 	}
209644db08d5SViacheslav Galaktionov 
209744db08d5SViacheslav Galaktionov 	return sfc_process_mport_journal_entry(ctx, mport);
209844db08d5SViacheslav Galaktionov }
209944db08d5SViacheslav Galaktionov 
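/*
 * Read the MAE m-port journal and register every foreign VNIC function as
 * a potential representor. If the controller mapping for the switch domain
 * is not known yet, it is built from the controllers discovered here.
 */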
210044db08d5SViacheslav Galaktionov static int
210144db08d5SViacheslav Galaktionov sfc_process_mport_journal(struct sfc_adapter *sa)
210244db08d5SViacheslav Galaktionov {
210344db08d5SViacheslav Galaktionov 	struct sfc_mport_journal_ctx ctx;
210444db08d5SViacheslav Galaktionov 	const efx_pcie_interface_t *controllers;
210544db08d5SViacheslav Galaktionov 	size_t nb_controllers;
210644db08d5SViacheslav Galaktionov 	efx_rc_t efx_rc;
210744db08d5SViacheslav Galaktionov 	int rc;
210844db08d5SViacheslav Galaktionov 
210944db08d5SViacheslav Galaktionov 	memset(&ctx, 0, sizeof(ctx));
211044db08d5SViacheslav Galaktionov 	ctx.sa = sa;
211144db08d5SViacheslav Galaktionov 	ctx.switch_domain_id = sa->mae.switch_domain_id;
211244db08d5SViacheslav Galaktionov 
211344db08d5SViacheslav Galaktionov 	efx_rc = efx_mcdi_get_own_client_handle(sa->nic, &ctx.mcdi_handle);
211444db08d5SViacheslav Galaktionov 	if (efx_rc != 0) {
211544db08d5SViacheslav Galaktionov 		sfc_err(sa, "failed to get own MCDI handle");
211644db08d5SViacheslav Galaktionov 		SFC_ASSERT(efx_rc > 0);
211744db08d5SViacheslav Galaktionov 		return efx_rc;
211844db08d5SViacheslav Galaktionov 	}
211944db08d5SViacheslav Galaktionov 
212044db08d5SViacheslav Galaktionov 	rc = sfc_mae_switch_domain_controllers(ctx.switch_domain_id,
212144db08d5SViacheslav Galaktionov 					       &controllers, &nb_controllers);
212244db08d5SViacheslav Galaktionov 	if (rc != 0) {
212344db08d5SViacheslav Galaktionov 		sfc_err(sa, "failed to get controller mapping");
212444db08d5SViacheslav Galaktionov 		return rc;
212544db08d5SViacheslav Galaktionov 	}
212644db08d5SViacheslav Galaktionov 
212744db08d5SViacheslav Galaktionov 	ctx.controllers_assigned = controllers != NULL;
212844db08d5SViacheslav Galaktionov 	ctx.controllers = NULL;
212944db08d5SViacheslav Galaktionov 	ctx.nb_controllers = 0;
213044db08d5SViacheslav Galaktionov 
213144db08d5SViacheslav Galaktionov 	efx_rc = efx_mae_read_mport_journal(sa->nic,
213244db08d5SViacheslav Galaktionov 					    sfc_process_mport_journal_cb, &ctx);
213344db08d5SViacheslav Galaktionov 	if (efx_rc != 0) {
213444db08d5SViacheslav Galaktionov 		sfc_err(sa, "failed to process MAE mport journal");
213544db08d5SViacheslav Galaktionov 		SFC_ASSERT(efx_rc > 0);
213644db08d5SViacheslav Galaktionov 		return efx_rc;
213744db08d5SViacheslav Galaktionov 	}
213844db08d5SViacheslav Galaktionov 
213944db08d5SViacheslav Galaktionov 	if (controllers == NULL) {
214044db08d5SViacheslav Galaktionov 		rc = sfc_mae_switch_domain_map_controllers(ctx.switch_domain_id,
214144db08d5SViacheslav Galaktionov 							   ctx.controllers,
214244db08d5SViacheslav Galaktionov 							   ctx.nb_controllers);
214344db08d5SViacheslav Galaktionov 		if (rc != 0)
214444db08d5SViacheslav Galaktionov 			return rc;
214544db08d5SViacheslav Galaktionov 	}
214644db08d5SViacheslav Galaktionov 
214744db08d5SViacheslav Galaktionov 	return 0;
214844db08d5SViacheslav Galaktionov }
214944db08d5SViacheslav Galaktionov 
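/* MAE switch port walk callback: count representor entries only. */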
2150599e4e9aSViacheslav Galaktionov static void
2151599e4e9aSViacheslav Galaktionov sfc_count_representors_cb(enum sfc_mae_switch_port_type type,
2152599e4e9aSViacheslav Galaktionov 			  const efx_mport_sel_t *ethdev_mportp __rte_unused,
2153599e4e9aSViacheslav Galaktionov 			  uint16_t ethdev_port_id __rte_unused,
2154599e4e9aSViacheslav Galaktionov 			  const efx_mport_sel_t *entity_mportp __rte_unused,
2155599e4e9aSViacheslav Galaktionov 			  uint16_t switch_port_id __rte_unused,
2156599e4e9aSViacheslav Galaktionov 			  union sfc_mae_switch_port_data *port_datap
2157599e4e9aSViacheslav Galaktionov 				__rte_unused,
2158599e4e9aSViacheslav Galaktionov 			  void *user_datap)
2159599e4e9aSViacheslav Galaktionov {
2160599e4e9aSViacheslav Galaktionov 	int *counter = user_datap;
2161599e4e9aSViacheslav Galaktionov 
2162599e4e9aSViacheslav Galaktionov 	SFC_ASSERT(counter != NULL);
2163599e4e9aSViacheslav Galaktionov 
2164599e4e9aSViacheslav Galaktionov 	if (type == SFC_MAE_SWITCH_PORT_REPRESENTOR)
2165599e4e9aSViacheslav Galaktionov 		(*counter)++;
2166599e4e9aSViacheslav Galaktionov }
2167599e4e9aSViacheslav Galaktionov 
2168599e4e9aSViacheslav Galaktionov struct sfc_get_representors_ctx {
2169599e4e9aSViacheslav Galaktionov 	struct rte_eth_representor_info	*info;
2170599e4e9aSViacheslav Galaktionov 	struct sfc_adapter		*sa;
2171599e4e9aSViacheslav Galaktionov 	uint16_t			switch_domain_id;
2172599e4e9aSViacheslav Galaktionov 	const efx_pcie_interface_t	*controllers;
2173599e4e9aSViacheslav Galaktionov 	size_t				nb_controllers;
2174599e4e9aSViacheslav Galaktionov };
2175599e4e9aSViacheslav Galaktionov 
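/*
 * Switch port iteration callback which fills in one representor range
 * per representor switch port: the controller index is derived from
 * the port's interface using the controller mapping, and the range
 * name is formatted as c<controller>pf<pf>[vf<vf>].
 */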
2176599e4e9aSViacheslav Galaktionov static void
2177599e4e9aSViacheslav Galaktionov sfc_get_representors_cb(enum sfc_mae_switch_port_type type,
2178599e4e9aSViacheslav Galaktionov 			const efx_mport_sel_t *ethdev_mportp __rte_unused,
2179599e4e9aSViacheslav Galaktionov 			uint16_t ethdev_port_id __rte_unused,
2180599e4e9aSViacheslav Galaktionov 			const efx_mport_sel_t *entity_mportp __rte_unused,
2181599e4e9aSViacheslav Galaktionov 			uint16_t switch_port_id,
2182599e4e9aSViacheslav Galaktionov 			union sfc_mae_switch_port_data *port_datap,
2183599e4e9aSViacheslav Galaktionov 			void *user_datap)
2184599e4e9aSViacheslav Galaktionov {
2185599e4e9aSViacheslav Galaktionov 	struct sfc_get_representors_ctx *ctx = user_datap;
2186599e4e9aSViacheslav Galaktionov 	struct rte_eth_representor_range *range;
2187599e4e9aSViacheslav Galaktionov 	int ret;
2188599e4e9aSViacheslav Galaktionov 	int rc;
2189599e4e9aSViacheslav Galaktionov 
2190599e4e9aSViacheslav Galaktionov 	SFC_ASSERT(ctx != NULL);
2191599e4e9aSViacheslav Galaktionov 	SFC_ASSERT(ctx->info != NULL);
2192599e4e9aSViacheslav Galaktionov 	SFC_ASSERT(ctx->sa != NULL);
2193599e4e9aSViacheslav Galaktionov 
2194599e4e9aSViacheslav Galaktionov 	if (type != SFC_MAE_SWITCH_PORT_REPRESENTOR) {
2195599e4e9aSViacheslav Galaktionov 		sfc_dbg(ctx->sa, "not a representor, skipping");
2196599e4e9aSViacheslav Galaktionov 		return;
2197599e4e9aSViacheslav Galaktionov 	}
2198599e4e9aSViacheslav Galaktionov 	if (ctx->info->nb_ranges >= ctx->info->nb_ranges_alloc) {
2199599e4e9aSViacheslav Galaktionov 		sfc_dbg(ctx->sa, "info structure is full already");
2200599e4e9aSViacheslav Galaktionov 		return;
2201599e4e9aSViacheslav Galaktionov 	}
2202599e4e9aSViacheslav Galaktionov 
2203599e4e9aSViacheslav Galaktionov 	range = &ctx->info->ranges[ctx->info->nb_ranges];
2204599e4e9aSViacheslav Galaktionov 	rc = sfc_mae_switch_controller_from_mapping(ctx->controllers,
2205599e4e9aSViacheslav Galaktionov 						    ctx->nb_controllers,
2206599e4e9aSViacheslav Galaktionov 						    port_datap->repr.intf,
2207599e4e9aSViacheslav Galaktionov 						    &range->controller);
2208599e4e9aSViacheslav Galaktionov 	if (rc != 0) {
2209599e4e9aSViacheslav Galaktionov 		sfc_err(ctx->sa, "invalid representor controller: %d",
2210599e4e9aSViacheslav Galaktionov 			port_datap->repr.intf);
2211599e4e9aSViacheslav Galaktionov 		range->controller = -1;
2212599e4e9aSViacheslav Galaktionov 	}
2213599e4e9aSViacheslav Galaktionov 	range->pf = port_datap->repr.pf;
2214599e4e9aSViacheslav Galaktionov 	range->id_base = switch_port_id;
2215599e4e9aSViacheslav Galaktionov 	range->id_end = switch_port_id;
2216599e4e9aSViacheslav Galaktionov 
2217599e4e9aSViacheslav Galaktionov 	if (port_datap->repr.vf != EFX_PCI_VF_INVALID) {
2218599e4e9aSViacheslav Galaktionov 		range->type = RTE_ETH_REPRESENTOR_VF;
2219599e4e9aSViacheslav Galaktionov 		range->vf = port_datap->repr.vf;
2220599e4e9aSViacheslav Galaktionov 		ret = snprintf(range->name, RTE_DEV_NAME_MAX_LEN,
2221599e4e9aSViacheslav Galaktionov 			       "c%dpf%dvf%d", range->controller, range->pf,
2222599e4e9aSViacheslav Galaktionov 			       range->vf);
2223599e4e9aSViacheslav Galaktionov 	} else {
2224599e4e9aSViacheslav Galaktionov 		range->type = RTE_ETH_REPRESENTOR_PF;
2225599e4e9aSViacheslav Galaktionov 		ret = snprintf(range->name, RTE_DEV_NAME_MAX_LEN,
2226599e4e9aSViacheslav Galaktionov 			 "c%dpf%d", range->controller, range->pf);
2227599e4e9aSViacheslav Galaktionov 	}
2228599e4e9aSViacheslav Galaktionov 	if (ret >= RTE_DEV_NAME_MAX_LEN) {
2229599e4e9aSViacheslav Galaktionov 		sfc_err(ctx->sa, "representor name has been truncated: %s",
2230599e4e9aSViacheslav Galaktionov 			range->name);
2231599e4e9aSViacheslav Galaktionov 	}
2232599e4e9aSViacheslav Galaktionov 
2233599e4e9aSViacheslav Galaktionov 	ctx->info->nb_ranges++;
2234599e4e9aSViacheslav Galaktionov }
2235599e4e9aSViacheslav Galaktionov 
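/*
 * ethdev representor_info_get callback. The m-port journal is
 * refreshed first, then representor switch ports are counted; if the
 * caller provided an info structure, it is filled in with the local
 * controller/PF and one range per representor. The return value is
 * the total number of representors.
 */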
2236599e4e9aSViacheslav Galaktionov static int
2237599e4e9aSViacheslav Galaktionov sfc_representor_info_get(struct rte_eth_dev *dev,
2238599e4e9aSViacheslav Galaktionov 			 struct rte_eth_representor_info *info)
2239599e4e9aSViacheslav Galaktionov {
2240599e4e9aSViacheslav Galaktionov 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2241599e4e9aSViacheslav Galaktionov 	struct sfc_get_representors_ctx get_repr_ctx;
2242599e4e9aSViacheslav Galaktionov 	const efx_nic_cfg_t *nic_cfg;
2243599e4e9aSViacheslav Galaktionov 	uint16_t switch_domain_id;
2244599e4e9aSViacheslav Galaktionov 	uint32_t nb_repr;
2245599e4e9aSViacheslav Galaktionov 	int controller;
2246599e4e9aSViacheslav Galaktionov 	int rc;
2247599e4e9aSViacheslav Galaktionov 
2248599e4e9aSViacheslav Galaktionov 	sfc_adapter_lock(sa);
2249599e4e9aSViacheslav Galaktionov 
22502f577f0eSViacheslav Galaktionov 	if (sa->mae.status != SFC_MAE_STATUS_ADMIN) {
2251599e4e9aSViacheslav Galaktionov 		sfc_adapter_unlock(sa);
2252599e4e9aSViacheslav Galaktionov 		return -ENOTSUP;
2253599e4e9aSViacheslav Galaktionov 	}
2254599e4e9aSViacheslav Galaktionov 
2255599e4e9aSViacheslav Galaktionov 	rc = sfc_process_mport_journal(sa);
2256599e4e9aSViacheslav Galaktionov 	if (rc != 0) {
2257599e4e9aSViacheslav Galaktionov 		sfc_adapter_unlock(sa);
2258599e4e9aSViacheslav Galaktionov 		SFC_ASSERT(rc > 0);
2259599e4e9aSViacheslav Galaktionov 		return -rc;
2260599e4e9aSViacheslav Galaktionov 	}
2261599e4e9aSViacheslav Galaktionov 
2262599e4e9aSViacheslav Galaktionov 	switch_domain_id = sa->mae.switch_domain_id;
2263599e4e9aSViacheslav Galaktionov 
2264599e4e9aSViacheslav Galaktionov 	nb_repr = 0;
2265599e4e9aSViacheslav Galaktionov 	rc = sfc_mae_switch_ports_iterate(switch_domain_id,
2266599e4e9aSViacheslav Galaktionov 					  sfc_count_representors_cb,
2267599e4e9aSViacheslav Galaktionov 					  &nb_repr);
2268599e4e9aSViacheslav Galaktionov 	if (rc != 0) {
2269599e4e9aSViacheslav Galaktionov 		sfc_adapter_unlock(sa);
2270599e4e9aSViacheslav Galaktionov 		SFC_ASSERT(rc > 0);
2271599e4e9aSViacheslav Galaktionov 		return -rc;
2272599e4e9aSViacheslav Galaktionov 	}
2273599e4e9aSViacheslav Galaktionov 
2274599e4e9aSViacheslav Galaktionov 	if (info == NULL) {
2275599e4e9aSViacheslav Galaktionov 		sfc_adapter_unlock(sa);
2276599e4e9aSViacheslav Galaktionov 		return nb_repr;
2277599e4e9aSViacheslav Galaktionov 	}
2278599e4e9aSViacheslav Galaktionov 
2279599e4e9aSViacheslav Galaktionov 	rc = sfc_mae_switch_domain_controllers(switch_domain_id,
2280599e4e9aSViacheslav Galaktionov 					       &get_repr_ctx.controllers,
2281599e4e9aSViacheslav Galaktionov 					       &get_repr_ctx.nb_controllers);
2282599e4e9aSViacheslav Galaktionov 	if (rc != 0) {
2283599e4e9aSViacheslav Galaktionov 		sfc_adapter_unlock(sa);
2284599e4e9aSViacheslav Galaktionov 		SFC_ASSERT(rc > 0);
2285599e4e9aSViacheslav Galaktionov 		return -rc;
2286599e4e9aSViacheslav Galaktionov 	}
2287599e4e9aSViacheslav Galaktionov 
2288599e4e9aSViacheslav Galaktionov 	nic_cfg = efx_nic_cfg_get(sa->nic);
2289599e4e9aSViacheslav Galaktionov 
2290599e4e9aSViacheslav Galaktionov 	rc = sfc_mae_switch_domain_get_controller(switch_domain_id,
2291599e4e9aSViacheslav Galaktionov 						  nic_cfg->enc_intf,
2292599e4e9aSViacheslav Galaktionov 						  &controller);
2293599e4e9aSViacheslav Galaktionov 	if (rc != 0) {
2294599e4e9aSViacheslav Galaktionov 		sfc_err(sa, "invalid controller: %d", nic_cfg->enc_intf);
2295599e4e9aSViacheslav Galaktionov 		controller = -1;
2296599e4e9aSViacheslav Galaktionov 	}
2297599e4e9aSViacheslav Galaktionov 
2298599e4e9aSViacheslav Galaktionov 	info->controller = controller;
2299599e4e9aSViacheslav Galaktionov 	info->pf = nic_cfg->enc_pf;
2300599e4e9aSViacheslav Galaktionov 
2301599e4e9aSViacheslav Galaktionov 	get_repr_ctx.info = info;
2302599e4e9aSViacheslav Galaktionov 	get_repr_ctx.sa = sa;
2303599e4e9aSViacheslav Galaktionov 	get_repr_ctx.switch_domain_id = switch_domain_id;
2304599e4e9aSViacheslav Galaktionov 	rc = sfc_mae_switch_ports_iterate(switch_domain_id,
2305599e4e9aSViacheslav Galaktionov 					  sfc_get_representors_cb,
2306599e4e9aSViacheslav Galaktionov 					  &get_repr_ctx);
2307599e4e9aSViacheslav Galaktionov 	if (rc != 0) {
2308599e4e9aSViacheslav Galaktionov 		sfc_adapter_unlock(sa);
2309599e4e9aSViacheslav Galaktionov 		SFC_ASSERT(rc > 0);
2310599e4e9aSViacheslav Galaktionov 		return -rc;
2311599e4e9aSViacheslav Galaktionov 	}
2312599e4e9aSViacheslav Galaktionov 
2313599e4e9aSViacheslav Galaktionov 	sfc_adapter_unlock(sa);
2314599e4e9aSViacheslav Galaktionov 	return nb_repr;
2315599e4e9aSViacheslav Galaktionov }
2316599e4e9aSViacheslav Galaktionov 
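/*
 * ethdev rx_metadata_negotiate callback: the requested feature set is
 * intersected with what the chosen Rx datapath can deliver (user flag
 * and user mark) plus tunnel ID when flow tunnel offload is supported,
 * and the result is stored as the negotiated Rx metadata set.
 */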
23179b14dc74SIvan Malov static int
23189b14dc74SIvan Malov sfc_rx_metadata_negotiate(struct rte_eth_dev *dev, uint64_t *features)
23199b14dc74SIvan Malov {
23209b14dc74SIvan Malov 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
23219b14dc74SIvan Malov 	uint64_t supported = 0;
23229b14dc74SIvan Malov 
23239b14dc74SIvan Malov 	sfc_adapter_lock(sa);
23249b14dc74SIvan Malov 
23259b14dc74SIvan Malov 	if ((sa->priv.dp_rx->features & SFC_DP_RX_FEAT_FLOW_FLAG) != 0)
23269b14dc74SIvan Malov 		supported |= RTE_ETH_RX_METADATA_USER_FLAG;
23279b14dc74SIvan Malov 
23289b14dc74SIvan Malov 	if ((sa->priv.dp_rx->features & SFC_DP_RX_FEAT_FLOW_MARK) != 0)
23299b14dc74SIvan Malov 		supported |= RTE_ETH_RX_METADATA_USER_MARK;
23309b14dc74SIvan Malov 
233153a80512SIvan Malov 	if (sfc_flow_tunnel_is_supported(sa))
233253a80512SIvan Malov 		supported |= RTE_ETH_RX_METADATA_TUNNEL_ID;
233353a80512SIvan Malov 
23349b14dc74SIvan Malov 	sa->negotiated_rx_metadata = supported & *features;
23359b14dc74SIvan Malov 	*features = sa->negotiated_rx_metadata;
23369b14dc74SIvan Malov 
23379b14dc74SIvan Malov 	sfc_adapter_unlock(sa);
23389b14dc74SIvan Malov 
23399b14dc74SIvan Malov 	return 0;
23409b14dc74SIvan Malov }
23419b14dc74SIvan Malov 
234263d588ffSAndrew Rybchenko static const struct eth_dev_ops sfc_eth_dev_ops = {
2343aaa3f5f0SAndrew Rybchenko 	.dev_configure			= sfc_dev_configure,
234493fcf09bSAndrew Rybchenko 	.dev_start			= sfc_dev_start,
234593fcf09bSAndrew Rybchenko 	.dev_stop			= sfc_dev_stop,
23462a05f337SArtem Andreev 	.dev_set_link_up		= sfc_dev_set_link_up,
23472a05f337SArtem Andreev 	.dev_set_link_down		= sfc_dev_set_link_down,
2348aaa3f5f0SAndrew Rybchenko 	.dev_close			= sfc_dev_close,
2349f3de3840SIvan Malov 	.promiscuous_enable		= sfc_dev_promisc_enable,
2350f3de3840SIvan Malov 	.promiscuous_disable		= sfc_dev_promisc_disable,
2351f3de3840SIvan Malov 	.allmulticast_enable		= sfc_dev_allmulti_enable,
2352f3de3840SIvan Malov 	.allmulticast_disable		= sfc_dev_allmulti_disable,
2353886f8d8aSArtem Andreev 	.link_update			= sfc_dev_link_update,
23541caab2f1SAndrew Rybchenko 	.stats_get			= sfc_stats_get,
2355e8acb329SIvan Malov 	.stats_reset			= sfc_stats_reset,
23567b989176SAndrew Rybchenko 	.xstats_get			= sfc_xstats_get,
2357e8acb329SIvan Malov 	.xstats_reset			= sfc_stats_reset,
23587b989176SAndrew Rybchenko 	.xstats_get_names		= sfc_xstats_get_names,
235963d588ffSAndrew Rybchenko 	.dev_infos_get			= sfc_dev_infos_get,
236056349dc9SAndrew Rybchenko 	.dev_supported_ptypes_get	= sfc_dev_supported_ptypes_get,
2361e961cf42SAndrew Rybchenko 	.mtu_set			= sfc_dev_set_mtu,
2362ac7af396SAndrew Rybchenko 	.rx_queue_start			= sfc_rx_queue_start,
2363ac7af396SAndrew Rybchenko 	.rx_queue_stop			= sfc_rx_queue_stop,
2364c6a1d9b5SIvan Malov 	.tx_queue_start			= sfc_tx_queue_start,
2365c6a1d9b5SIvan Malov 	.tx_queue_stop			= sfc_tx_queue_stop,
2366ce35b05cSAndrew Rybchenko 	.rx_queue_setup			= sfc_rx_queue_setup,
2367ce35b05cSAndrew Rybchenko 	.rx_queue_release		= sfc_rx_queue_release,
23684279b54eSGeorgiy Levashov 	.rx_queue_intr_enable		= sfc_rx_queue_intr_enable,
23694279b54eSGeorgiy Levashov 	.rx_queue_intr_disable		= sfc_rx_queue_intr_disable,
2370b1b7ad93SIvan Malov 	.tx_queue_setup			= sfc_tx_queue_setup,
2371b1b7ad93SIvan Malov 	.tx_queue_release		= sfc_tx_queue_release,
2372cdbb29cfSAndrew Rybchenko 	.flow_ctrl_get			= sfc_flow_ctrl_get,
2373cdbb29cfSAndrew Rybchenko 	.flow_ctrl_set			= sfc_flow_ctrl_set,
2374c100fd46SIvan Malov 	.mac_addr_set			= sfc_mac_addr_set,
237536c35355SAndrew Rybchenko 	.udp_tunnel_port_add		= sfc_dev_udp_tunnel_port_add,
237636c35355SAndrew Rybchenko 	.udp_tunnel_port_del		= sfc_dev_udp_tunnel_port_del,
237732bcfb0aSIvan Malov 	.reta_update			= sfc_dev_rss_reta_update,
2378af0d9317SIvan Malov 	.reta_query			= sfc_dev_rss_reta_query,
237982faef50SIvan Malov 	.rss_hash_update		= sfc_dev_rss_hash_update,
2380088e1721SIvan Malov 	.rss_hash_conf_get		= sfc_dev_rss_hash_conf_get,
2381fb7ad441SThomas Monjalon 	.flow_ops_get			= sfc_dev_flow_ops_get,
23820fa0070eSIvan Malov 	.set_mc_addr_list		= sfc_set_mc_addr_list,
23835502e397SAndrew Rybchenko 	.rxq_info_get			= sfc_rx_queue_info_get,
2384c5938838SIvan Malov 	.txq_info_get			= sfc_tx_queue_info_get,
238583fef46aSIvan Malov 	.fw_version_get			= sfc_fw_version_get,
238673280c1eSIvan Malov 	.xstats_get_by_id		= sfc_xstats_get_by_id,
238773280c1eSIvan Malov 	.xstats_get_names_by_id		= sfc_xstats_get_names_by_id,
238808d23c67SAndrew Rybchenko 	.pool_ops_supported		= sfc_pool_ops_supported,
2389599e4e9aSViacheslav Galaktionov 	.representor_info_get		= sfc_representor_info_get,
23909b14dc74SIvan Malov 	.rx_metadata_negotiate		= sfc_rx_metadata_negotiate,
239163d588ffSAndrew Rybchenko };
239263d588ffSAndrew Rybchenko 
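/* Parameters passed from the PCI probe path to sfc_eth_dev_init(). */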
2393a62ec905SIgor Romanov struct sfc_ethdev_init_data {
2394a62ec905SIgor Romanov 	uint16_t		nb_representors;
2395a62ec905SIgor Romanov };
2396a62ec905SIgor Romanov 
2397f28ede50SAndrew Rybchenko /**
2398f28ede50SAndrew Rybchenko  * Duplicate a string in potentially shared memory required for
2399f28ede50SAndrew Rybchenko  * multi-process support.
2400f28ede50SAndrew Rybchenko  *
2401f28ede50SAndrew Rybchenko  * strdup() allocates from process-local heap/memory.
2402f28ede50SAndrew Rybchenko  */
2403f28ede50SAndrew Rybchenko static char *
2404f28ede50SAndrew Rybchenko sfc_strdup(const char *str)
2405f28ede50SAndrew Rybchenko {
2406f28ede50SAndrew Rybchenko 	size_t size;
2407f28ede50SAndrew Rybchenko 	char *copy;
2408f28ede50SAndrew Rybchenko 
2409f28ede50SAndrew Rybchenko 	if (str == NULL)
2410f28ede50SAndrew Rybchenko 		return NULL;
2411f28ede50SAndrew Rybchenko 
2412f28ede50SAndrew Rybchenko 	size = strlen(str) + 1;
2413f28ede50SAndrew Rybchenko 	copy = rte_malloc(__func__, size, 0);
2414f28ede50SAndrew Rybchenko 	if (copy != NULL)
2415f28ede50SAndrew Rybchenko 		rte_memcpy(copy, str, size);
2416f28ede50SAndrew Rybchenko 
2417f28ede50SAndrew Rybchenko 	return copy;
2418f28ede50SAndrew Rybchenko }
2419f28ede50SAndrew Rybchenko 
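/*
 * Select Rx and Tx datapath implementations which match the NIC family
 * and firmware capabilities, honouring the SFC_KVARG_RX_DATAPATH and
 * SFC_KVARG_TX_DATAPATH device arguments if given. The chosen names
 * are duplicated into shared adapter data for secondary processes, and
 * the burst/prepare callbacks and ethdev ops are wired up.
 */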
242063d588ffSAndrew Rybchenko static int
2421df1bfde4SAndrew Rybchenko sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
2422df1bfde4SAndrew Rybchenko {
24235313b441SAndrew Rybchenko 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
242450f34eedSAndrew Rybchenko 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
2425c7b57f00SAndrew Rybchenko 	const struct sfc_dp_rx *dp_rx;
2426c7b57f00SAndrew Rybchenko 	const struct sfc_dp_tx *dp_tx;
2427390f9b8dSAndrew Rybchenko 	const efx_nic_cfg_t *encp;
2428df1bfde4SAndrew Rybchenko 	unsigned int avail_caps = 0;
2429df1bfde4SAndrew Rybchenko 	const char *rx_name = NULL;
2430dbdc8241SAndrew Rybchenko 	const char *tx_name = NULL;
2431df1bfde4SAndrew Rybchenko 	int rc;
2432df1bfde4SAndrew Rybchenko 
2433638bddc9SAndrew Rybchenko 	switch (sa->family) {
2434638bddc9SAndrew Rybchenko 	case EFX_FAMILY_HUNTINGTON:
2435638bddc9SAndrew Rybchenko 	case EFX_FAMILY_MEDFORD:
2436f3129efdSAndrew Rybchenko 	case EFX_FAMILY_MEDFORD2:
2437638bddc9SAndrew Rybchenko 		avail_caps |= SFC_DP_HW_FW_CAP_EF10;
2438849c2d91SAndrew Rybchenko 		avail_caps |= SFC_DP_HW_FW_CAP_RX_EFX;
2439849c2d91SAndrew Rybchenko 		avail_caps |= SFC_DP_HW_FW_CAP_TX_EFX;
2440638bddc9SAndrew Rybchenko 		break;
2441714f508eSAndrew Rybchenko 	case EFX_FAMILY_RIVERHEAD:
2442714f508eSAndrew Rybchenko 		avail_caps |= SFC_DP_HW_FW_CAP_EF100;
2443714f508eSAndrew Rybchenko 		break;
2444638bddc9SAndrew Rybchenko 	default:
2445638bddc9SAndrew Rybchenko 		break;
2446638bddc9SAndrew Rybchenko 	}
2447638bddc9SAndrew Rybchenko 
2448390f9b8dSAndrew Rybchenko 	encp = efx_nic_cfg_get(sa->nic);
2449390f9b8dSAndrew Rybchenko 	if (encp->enc_rx_es_super_buffer_supported)
2450390f9b8dSAndrew Rybchenko 		avail_caps |= SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER;
2451390f9b8dSAndrew Rybchenko 
2452df1bfde4SAndrew Rybchenko 	rc = sfc_kvargs_process(sa, SFC_KVARG_RX_DATAPATH,
2453df1bfde4SAndrew Rybchenko 				sfc_kvarg_string_handler, &rx_name);
2454df1bfde4SAndrew Rybchenko 	if (rc != 0)
2455df1bfde4SAndrew Rybchenko 		goto fail_kvarg_rx_datapath;
2456df1bfde4SAndrew Rybchenko 
2457df1bfde4SAndrew Rybchenko 	if (rx_name != NULL) {
2458c7b57f00SAndrew Rybchenko 		dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, rx_name);
2459c7b57f00SAndrew Rybchenko 		if (dp_rx == NULL) {
2460df1bfde4SAndrew Rybchenko 			sfc_err(sa, "Rx datapath %s not found", rx_name);
2461df1bfde4SAndrew Rybchenko 			rc = ENOENT;
2462df1bfde4SAndrew Rybchenko 			goto fail_dp_rx;
2463df1bfde4SAndrew Rybchenko 		}
2464c7b57f00SAndrew Rybchenko 		if (!sfc_dp_match_hw_fw_caps(&dp_rx->dp, avail_caps)) {
2465df1bfde4SAndrew Rybchenko 			sfc_err(sa,
2466df1bfde4SAndrew Rybchenko 				"Insufficient Hw/FW capabilities to use Rx datapath %s",
2467df1bfde4SAndrew Rybchenko 				rx_name);
2468df1bfde4SAndrew Rybchenko 			rc = EINVAL;
246935870e50SAndrew Rybchenko 			goto fail_dp_rx_caps;
2470df1bfde4SAndrew Rybchenko 		}
2471df1bfde4SAndrew Rybchenko 	} else {
2472c7b57f00SAndrew Rybchenko 		dp_rx = sfc_dp_find_rx_by_caps(&sfc_dp_head, avail_caps);
2473c7b57f00SAndrew Rybchenko 		if (dp_rx == NULL) {
2474df1bfde4SAndrew Rybchenko 			sfc_err(sa, "Rx datapath by caps %#x not found",
2475df1bfde4SAndrew Rybchenko 				avail_caps);
2476df1bfde4SAndrew Rybchenko 			rc = ENOENT;
2477df1bfde4SAndrew Rybchenko 			goto fail_dp_rx;
2478df1bfde4SAndrew Rybchenko 		}
2479df1bfde4SAndrew Rybchenko 	}
2480df1bfde4SAndrew Rybchenko 
248150f34eedSAndrew Rybchenko 	sas->dp_rx_name = sfc_strdup(dp_rx->dp.name);
248250f34eedSAndrew Rybchenko 	if (sas->dp_rx_name == NULL) {
2483f28ede50SAndrew Rybchenko 		rc = ENOMEM;
2484f28ede50SAndrew Rybchenko 		goto fail_dp_rx_name;
2485f28ede50SAndrew Rybchenko 	}
2486f28ede50SAndrew Rybchenko 
24879b14dc74SIvan Malov 	if (strcmp(dp_rx->dp.name, SFC_KVARG_DATAPATH_EF10_ESSB) == 0) {
24889b14dc74SIvan Malov 		/* FLAG and MARK are always available from Rx prefix. */
24899b14dc74SIvan Malov 		sa->negotiated_rx_metadata |= RTE_ETH_RX_METADATA_USER_FLAG;
24909b14dc74SIvan Malov 		sa->negotiated_rx_metadata |= RTE_ETH_RX_METADATA_USER_MARK;
24919b14dc74SIvan Malov 	}
24929b14dc74SIvan Malov 
249350f34eedSAndrew Rybchenko 	sfc_notice(sa, "use %s Rx datapath", sas->dp_rx_name);
2494df1bfde4SAndrew Rybchenko 
2495dbdc8241SAndrew Rybchenko 	rc = sfc_kvargs_process(sa, SFC_KVARG_TX_DATAPATH,
2496dbdc8241SAndrew Rybchenko 				sfc_kvarg_string_handler, &tx_name);
2497dbdc8241SAndrew Rybchenko 	if (rc != 0)
2498dbdc8241SAndrew Rybchenko 		goto fail_kvarg_tx_datapath;
2499dbdc8241SAndrew Rybchenko 
2500dbdc8241SAndrew Rybchenko 	if (tx_name != NULL) {
2501c7b57f00SAndrew Rybchenko 		dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, tx_name);
2502c7b57f00SAndrew Rybchenko 		if (dp_tx == NULL) {
2503dbdc8241SAndrew Rybchenko 			sfc_err(sa, "Tx datapath %s not found", tx_name);
2504dbdc8241SAndrew Rybchenko 			rc = ENOENT;
2505dbdc8241SAndrew Rybchenko 			goto fail_dp_tx;
2506dbdc8241SAndrew Rybchenko 		}
2507c7b57f00SAndrew Rybchenko 		if (!sfc_dp_match_hw_fw_caps(&dp_tx->dp, avail_caps)) {
2508dbdc8241SAndrew Rybchenko 			sfc_err(sa,
2509dbdc8241SAndrew Rybchenko 				"Insufficient Hw/FW capabilities to use Tx datapath %s",
2510dbdc8241SAndrew Rybchenko 				tx_name);
2511dbdc8241SAndrew Rybchenko 			rc = EINVAL;
251235870e50SAndrew Rybchenko 			goto fail_dp_tx_caps;
2513dbdc8241SAndrew Rybchenko 		}
2514dbdc8241SAndrew Rybchenko 	} else {
2515c7b57f00SAndrew Rybchenko 		dp_tx = sfc_dp_find_tx_by_caps(&sfc_dp_head, avail_caps);
2516c7b57f00SAndrew Rybchenko 		if (dp_tx == NULL) {
2517dbdc8241SAndrew Rybchenko 			sfc_err(sa, "Tx datapath by caps %#x not found",
2518dbdc8241SAndrew Rybchenko 				avail_caps);
2519dbdc8241SAndrew Rybchenko 			rc = ENOENT;
2520dbdc8241SAndrew Rybchenko 			goto fail_dp_tx;
2521dbdc8241SAndrew Rybchenko 		}
2522dbdc8241SAndrew Rybchenko 	}
2523dbdc8241SAndrew Rybchenko 
252450f34eedSAndrew Rybchenko 	sas->dp_tx_name = sfc_strdup(dp_tx->dp.name);
252550f34eedSAndrew Rybchenko 	if (sas->dp_tx_name == NULL) {
2526f28ede50SAndrew Rybchenko 		rc = ENOMEM;
2527f28ede50SAndrew Rybchenko 		goto fail_dp_tx_name;
2528f28ede50SAndrew Rybchenko 	}
2529f28ede50SAndrew Rybchenko 
253050f34eedSAndrew Rybchenko 	sfc_notice(sa, "use %s Tx datapath", sas->dp_tx_name);
2531dbdc8241SAndrew Rybchenko 
25325dec95e3SAndrew Rybchenko 	sa->priv.dp_rx = dp_rx;
25335dec95e3SAndrew Rybchenko 	sa->priv.dp_tx = dp_tx;
2534c7b57f00SAndrew Rybchenko 
2535c7b57f00SAndrew Rybchenko 	dev->rx_pkt_burst = dp_rx->pkt_burst;
253607685524SIgor Romanov 	dev->tx_pkt_prepare = dp_tx->pkt_prepare;
2537c7b57f00SAndrew Rybchenko 	dev->tx_pkt_burst = dp_tx->pkt_burst;
2538df1bfde4SAndrew Rybchenko 
2539cbfc6111SFerruh Yigit 	dev->rx_queue_count = sfc_rx_queue_count;
2540cbfc6111SFerruh Yigit 	dev->rx_descriptor_status = sfc_rx_descriptor_status;
2541cbfc6111SFerruh Yigit 	dev->tx_descriptor_status = sfc_tx_descriptor_status;
2542df1bfde4SAndrew Rybchenko 	dev->dev_ops = &sfc_eth_dev_ops;
2543df1bfde4SAndrew Rybchenko 
2544df1bfde4SAndrew Rybchenko 	return 0;
2545df1bfde4SAndrew Rybchenko 
2546f28ede50SAndrew Rybchenko fail_dp_tx_name:
254735870e50SAndrew Rybchenko fail_dp_tx_caps:
2548dbdc8241SAndrew Rybchenko fail_dp_tx:
2549dbdc8241SAndrew Rybchenko fail_kvarg_tx_datapath:
255050f34eedSAndrew Rybchenko 	rte_free(sas->dp_rx_name);
255150f34eedSAndrew Rybchenko 	sas->dp_rx_name = NULL;
2552f28ede50SAndrew Rybchenko 
2553f28ede50SAndrew Rybchenko fail_dp_rx_name:
255435870e50SAndrew Rybchenko fail_dp_rx_caps:
2555df1bfde4SAndrew Rybchenko fail_dp_rx:
2556df1bfde4SAndrew Rybchenko fail_kvarg_rx_datapath:
2557df1bfde4SAndrew Rybchenko 	return rc;
2558df1bfde4SAndrew Rybchenko }
2559df1bfde4SAndrew Rybchenko 
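/*
 * Undo sfc_eth_dev_set_ops(): detach the ethdev callbacks and release
 * the datapath names held in shared adapter data.
 */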
2560df1bfde4SAndrew Rybchenko static void
256135870e50SAndrew Rybchenko sfc_eth_dev_clear_ops(struct rte_eth_dev *dev)
256235870e50SAndrew Rybchenko {
25635313b441SAndrew Rybchenko 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
256450f34eedSAndrew Rybchenko 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
256535870e50SAndrew Rybchenko 
256635870e50SAndrew Rybchenko 	dev->dev_ops = NULL;
256707685524SIgor Romanov 	dev->tx_pkt_prepare = NULL;
256835870e50SAndrew Rybchenko 	dev->rx_pkt_burst = NULL;
256935870e50SAndrew Rybchenko 	dev->tx_pkt_burst = NULL;
257035870e50SAndrew Rybchenko 
257150f34eedSAndrew Rybchenko 	rte_free(sas->dp_tx_name);
257250f34eedSAndrew Rybchenko 	sas->dp_tx_name = NULL;
25735dec95e3SAndrew Rybchenko 	sa->priv.dp_tx = NULL;
2574f28ede50SAndrew Rybchenko 
257550f34eedSAndrew Rybchenko 	rte_free(sas->dp_rx_name);
257650f34eedSAndrew Rybchenko 	sas->dp_rx_name = NULL;
25775dec95e3SAndrew Rybchenko 	sa->priv.dp_rx = NULL;
257835870e50SAndrew Rybchenko }
257935870e50SAndrew Rybchenko 
2580f28ede50SAndrew Rybchenko static const struct eth_dev_ops sfc_eth_dev_secondary_ops = {
25812646d42fSAndrew Rybchenko 	.dev_supported_ptypes_get	= sfc_dev_supported_ptypes_get,
2582128da692SAndrew Rybchenko 	.reta_query			= sfc_dev_rss_reta_query,
2583128da692SAndrew Rybchenko 	.rss_hash_conf_get		= sfc_dev_rss_hash_conf_get,
2584f28ede50SAndrew Rybchenko 	.rxq_info_get			= sfc_rx_queue_info_get,
2585f28ede50SAndrew Rybchenko 	.txq_info_get			= sfc_tx_queue_info_get,
2586f28ede50SAndrew Rybchenko };
2587f28ede50SAndrew Rybchenko 
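/*
 * Secondary process initialisation: look up the datapaths chosen by
 * the primary process by name, require multi-process support from both
 * of them, allocate process-private data from the libc heap and
 * install the reduced set of ethdev callbacks.
 */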
2588f28ede50SAndrew Rybchenko static int
25895dec95e3SAndrew Rybchenko sfc_eth_dev_secondary_init(struct rte_eth_dev *dev, uint32_t logtype_main)
2590f28ede50SAndrew Rybchenko {
259150f34eedSAndrew Rybchenko 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
25925dec95e3SAndrew Rybchenko 	struct sfc_adapter_priv *sap;
2593f28ede50SAndrew Rybchenko 	const struct sfc_dp_rx *dp_rx;
2594f28ede50SAndrew Rybchenko 	const struct sfc_dp_tx *dp_tx;
2595f28ede50SAndrew Rybchenko 	int rc;
2596f28ede50SAndrew Rybchenko 
25975dec95e3SAndrew Rybchenko 	/*
25985dec95e3SAndrew Rybchenko 	 * Allocate process private data from the heap, since it should not
25995dec95e3SAndrew Rybchenko 	 * be located in shared memory allocated using the rte_malloc() API.
26005dec95e3SAndrew Rybchenko 	 */
26015dec95e3SAndrew Rybchenko 	sap = calloc(1, sizeof(*sap));
26025dec95e3SAndrew Rybchenko 	if (sap == NULL) {
26035dec95e3SAndrew Rybchenko 		rc = ENOMEM;
26045dec95e3SAndrew Rybchenko 		goto fail_alloc_priv;
26055dec95e3SAndrew Rybchenko 	}
26065dec95e3SAndrew Rybchenko 
2607f52fa243SAndrew Rybchenko 	sap->logtype_main = logtype_main;
2608f52fa243SAndrew Rybchenko 
260950f34eedSAndrew Rybchenko 	dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, sas->dp_rx_name);
2610f28ede50SAndrew Rybchenko 	if (dp_rx == NULL) {
261136653b7dSAndrew Rybchenko 		SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
261250f34eedSAndrew Rybchenko 			"cannot find %s Rx datapath", sas->dp_rx_name);
2613f28ede50SAndrew Rybchenko 		rc = ENOENT;
2614f28ede50SAndrew Rybchenko 		goto fail_dp_rx;
2615f28ede50SAndrew Rybchenko 	}
2616f28ede50SAndrew Rybchenko 	if (~dp_rx->features & SFC_DP_RX_FEAT_MULTI_PROCESS) {
261736653b7dSAndrew Rybchenko 		SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
2618e2c3639aSAndrew Rybchenko 			"%s Rx datapath does not support multi-process",
261950f34eedSAndrew Rybchenko 			sas->dp_rx_name);
2620f28ede50SAndrew Rybchenko 		rc = EINVAL;
2621f28ede50SAndrew Rybchenko 		goto fail_dp_rx_multi_process;
2622f28ede50SAndrew Rybchenko 	}
2623f28ede50SAndrew Rybchenko 
262450f34eedSAndrew Rybchenko 	dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, sas->dp_tx_name);
2625f28ede50SAndrew Rybchenko 	if (dp_tx == NULL) {
262636653b7dSAndrew Rybchenko 		SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
262750f34eedSAndrew Rybchenko 			"cannot find %s Tx datapath", sas->dp_tx_name);
2628f28ede50SAndrew Rybchenko 		rc = ENOENT;
2629f28ede50SAndrew Rybchenko 		goto fail_dp_tx;
2630f28ede50SAndrew Rybchenko 	}
2631f28ede50SAndrew Rybchenko 	if (~dp_tx->features & SFC_DP_TX_FEAT_MULTI_PROCESS) {
263236653b7dSAndrew Rybchenko 		SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
2633e2c3639aSAndrew Rybchenko 			"%s Tx datapath does not support multi-process",
263450f34eedSAndrew Rybchenko 			sas->dp_tx_name);
2635f28ede50SAndrew Rybchenko 		rc = EINVAL;
2636f28ede50SAndrew Rybchenko 		goto fail_dp_tx_multi_process;
2637f28ede50SAndrew Rybchenko 	}
2638f28ede50SAndrew Rybchenko 
26395dec95e3SAndrew Rybchenko 	sap->dp_rx = dp_rx;
26405dec95e3SAndrew Rybchenko 	sap->dp_tx = dp_tx;
26415dec95e3SAndrew Rybchenko 
26425dec95e3SAndrew Rybchenko 	dev->process_private = sap;
2643f28ede50SAndrew Rybchenko 	dev->rx_pkt_burst = dp_rx->pkt_burst;
264407685524SIgor Romanov 	dev->tx_pkt_prepare = dp_tx->pkt_prepare;
2645f28ede50SAndrew Rybchenko 	dev->tx_pkt_burst = dp_tx->pkt_burst;
2646cbfc6111SFerruh Yigit 	dev->rx_queue_count = sfc_rx_queue_count;
2647cbfc6111SFerruh Yigit 	dev->rx_descriptor_status = sfc_rx_descriptor_status;
2648cbfc6111SFerruh Yigit 	dev->tx_descriptor_status = sfc_tx_descriptor_status;
2649f28ede50SAndrew Rybchenko 	dev->dev_ops = &sfc_eth_dev_secondary_ops;
2650f28ede50SAndrew Rybchenko 
2651f28ede50SAndrew Rybchenko 	return 0;
2652f28ede50SAndrew Rybchenko 
2653f28ede50SAndrew Rybchenko fail_dp_tx_multi_process:
2654f28ede50SAndrew Rybchenko fail_dp_tx:
2655f28ede50SAndrew Rybchenko fail_dp_rx_multi_process:
2656f28ede50SAndrew Rybchenko fail_dp_rx:
26575dec95e3SAndrew Rybchenko 	free(sap);
26585dec95e3SAndrew Rybchenko 
26595dec95e3SAndrew Rybchenko fail_alloc_priv:
2660f28ede50SAndrew Rybchenko 	return rc;
2661f28ede50SAndrew Rybchenko }
2662f28ede50SAndrew Rybchenko 
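/*
 * Register all Rx and Tx datapath implementations on the global list
 * on first use; the order reflects preference when a datapath is
 * selected by capabilities rather than by name.
 */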
2663f28ede50SAndrew Rybchenko static void
2664df1bfde4SAndrew Rybchenko sfc_register_dp(void)
2665df1bfde4SAndrew Rybchenko {
2666df1bfde4SAndrew Rybchenko 	/* Register once */
2667638bddc9SAndrew Rybchenko 	if (TAILQ_EMPTY(&sfc_dp_head)) {
2668638bddc9SAndrew Rybchenko 		/* Prefer EF10 datapath */
2669554644e3SAndrew Rybchenko 		sfc_dp_register(&sfc_dp_head, &sfc_ef100_rx.dp);
2670390f9b8dSAndrew Rybchenko 		sfc_dp_register(&sfc_dp_head, &sfc_ef10_essb_rx.dp);
2671638bddc9SAndrew Rybchenko 		sfc_dp_register(&sfc_dp_head, &sfc_ef10_rx.dp);
2672df1bfde4SAndrew Rybchenko 		sfc_dp_register(&sfc_dp_head, &sfc_efx_rx.dp);
2673dbdc8241SAndrew Rybchenko 
26740cb551b6SAndrew Rybchenko 		sfc_dp_register(&sfc_dp_head, &sfc_ef100_tx.dp);
26758b00f426SAndrew Rybchenko 		sfc_dp_register(&sfc_dp_head, &sfc_ef10_tx.dp);
2676dbdc8241SAndrew Rybchenko 		sfc_dp_register(&sfc_dp_head, &sfc_efx_tx.dp);
267756885200SAndrew Rybchenko 		sfc_dp_register(&sfc_dp_head, &sfc_ef10_simple_tx.dp);
2678df1bfde4SAndrew Rybchenko 	}
2679638bddc9SAndrew Rybchenko }
2680df1bfde4SAndrew Rybchenko 
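/*
 * Parse the SFC_KVARG_SWITCH_MODE device argument. When it is absent,
 * switchdev mode is chosen automatically if the function has admin MAE
 * privilege and either the EVB datapath capability is missing or
 * representors have been requested.
 */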
2681df1bfde4SAndrew Rybchenko static int
2682a62ec905SIgor Romanov sfc_parse_switch_mode(struct sfc_adapter *sa, bool has_representors)
2683f8778b16SIgor Romanov {
2684f8778b16SIgor Romanov 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
2685f8778b16SIgor Romanov 	const char *switch_mode = NULL;
2686f8778b16SIgor Romanov 	int rc;
2687f8778b16SIgor Romanov 
2688f8778b16SIgor Romanov 	sfc_log_init(sa, "entry");
2689f8778b16SIgor Romanov 
2690f8778b16SIgor Romanov 	rc = sfc_kvargs_process(sa, SFC_KVARG_SWITCH_MODE,
2691f8778b16SIgor Romanov 				sfc_kvarg_string_handler, &switch_mode);
2692f8778b16SIgor Romanov 	if (rc != 0)
2693f8778b16SIgor Romanov 		goto fail_kvargs;
2694f8778b16SIgor Romanov 
2695f8778b16SIgor Romanov 	if (switch_mode == NULL) {
26962f577f0eSViacheslav Galaktionov 		sa->switchdev = encp->enc_mae_admin &&
2697a62ec905SIgor Romanov 				(!encp->enc_datapath_cap_evb ||
2698a62ec905SIgor Romanov 				 has_representors);
2699f8778b16SIgor Romanov 	} else if (strcasecmp(switch_mode, SFC_KVARG_SWITCH_MODE_LEGACY) == 0) {
2700f8778b16SIgor Romanov 		sa->switchdev = false;
2701f8778b16SIgor Romanov 	} else if (strcasecmp(switch_mode,
2702f8778b16SIgor Romanov 			      SFC_KVARG_SWITCH_MODE_SWITCHDEV) == 0) {
2703f8778b16SIgor Romanov 		sa->switchdev = true;
2704f8778b16SIgor Romanov 	} else {
2705f8778b16SIgor Romanov 		sfc_err(sa, "invalid switch mode device argument '%s'",
2706f8778b16SIgor Romanov 			switch_mode);
2707f8778b16SIgor Romanov 		rc = EINVAL;
2708f8778b16SIgor Romanov 		goto fail_mode;
2709f8778b16SIgor Romanov 	}
2710f8778b16SIgor Romanov 
2711f8778b16SIgor Romanov 	sfc_log_init(sa, "done");
2712f8778b16SIgor Romanov 
2713f8778b16SIgor Romanov 	return 0;
2714f8778b16SIgor Romanov 
2715f8778b16SIgor Romanov fail_mode:
2716f8778b16SIgor Romanov fail_kvargs:
2717f8778b16SIgor Romanov 	sfc_log_init(sa, "failed: %s", rte_strerror(rc));
2718f8778b16SIgor Romanov 
2719f8778b16SIgor Romanov 	return rc;
2720f8778b16SIgor Romanov }
2721f8778b16SIgor Romanov 
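/*
 * Primary ethdev init path: check the device class, register the
 * datapaths and the log type, hand over to the secondary init in
 * non-primary processes, then allocate process-private adapter state,
 * parse device arguments, probe and attach the NIC, select the switch
 * mode and datapath ops, copy the MAC address from NIC config and set
 * up the NIC DMA mapping before releasing the adapter lock.
 */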
2722f8778b16SIgor Romanov static int
2723a62ec905SIgor Romanov sfc_eth_dev_init(struct rte_eth_dev *dev, void *init_params)
272463d588ffSAndrew Rybchenko {
27255313b441SAndrew Rybchenko 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
2726c0802544SFerruh Yigit 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2727a62ec905SIgor Romanov 	struct sfc_ethdev_init_data *init_data = init_params;
2728e2c3639aSAndrew Rybchenko 	uint32_t logtype_main;
27295313b441SAndrew Rybchenko 	struct sfc_adapter *sa;
273063d588ffSAndrew Rybchenko 	int rc;
2731ba641f20SAndrew Rybchenko 	const efx_nic_cfg_t *encp;
27326d13ea8eSOlivier Matz 	const struct rte_ether_addr *from;
2733671eb37cSAndrew Rybchenko 	int ret;
273463d588ffSAndrew Rybchenko 
2735dd461e81SVijay Kumar Srivastava 	if (sfc_efx_dev_class_get(pci_dev->device.devargs) !=
2736dd461e81SVijay Kumar Srivastava 			SFC_EFX_DEV_CLASS_NET) {
2737dd461e81SVijay Kumar Srivastava 		SFC_GENERIC_LOG(DEBUG,
2738dd461e81SVijay Kumar Srivastava 			"Incompatible device class: skip probing, should be probed by another sfc driver.");
2739dd461e81SVijay Kumar Srivastava 		return 1;
2740dd461e81SVijay Kumar Srivastava 	}
2741dd461e81SVijay Kumar Srivastava 
27423f95dfb9SIgor Romanov 	rc = sfc_dp_mport_register();
27433f95dfb9SIgor Romanov 	if (rc != 0)
27443f95dfb9SIgor Romanov 		return rc;
27453f95dfb9SIgor Romanov 
2746df1bfde4SAndrew Rybchenko 	sfc_register_dp();
2747df1bfde4SAndrew Rybchenko 
2748e2c3639aSAndrew Rybchenko 	logtype_main = sfc_register_logtype(&pci_dev->addr,
2749e2c3639aSAndrew Rybchenko 					    SFC_LOGTYPE_MAIN_STR,
2750e2c3639aSAndrew Rybchenko 					    RTE_LOG_NOTICE);
2751e2c3639aSAndrew Rybchenko 
2752f28ede50SAndrew Rybchenko 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
27535dec95e3SAndrew Rybchenko 		return -sfc_eth_dev_secondary_init(dev, logtype_main);
27545dec95e3SAndrew Rybchenko 
275563d588ffSAndrew Rybchenko 	/* Required for logging */
2756671eb37cSAndrew Rybchenko 	ret = snprintf(sas->log_prefix, sizeof(sas->log_prefix),
2757671eb37cSAndrew Rybchenko 			"PMD: sfc_efx " PCI_PRI_FMT " #%" PRIu16 ": ",
2758671eb37cSAndrew Rybchenko 			pci_dev->addr.domain, pci_dev->addr.bus,
2759671eb37cSAndrew Rybchenko 			pci_dev->addr.devid, pci_dev->addr.function,
2760671eb37cSAndrew Rybchenko 			dev->data->port_id);
2761671eb37cSAndrew Rybchenko 	if (ret < 0 || ret >= (int)sizeof(sas->log_prefix)) {
2762671eb37cSAndrew Rybchenko 		SFC_GENERIC_LOG(ERR,
2763671eb37cSAndrew Rybchenko 			"reserved log prefix is too short for " PCI_PRI_FMT,
2764671eb37cSAndrew Rybchenko 			pci_dev->addr.domain, pci_dev->addr.bus,
2765671eb37cSAndrew Rybchenko 			pci_dev->addr.devid, pci_dev->addr.function);
2766671eb37cSAndrew Rybchenko 		return -EINVAL;
2767671eb37cSAndrew Rybchenko 	}
27681d3c7f9cSAndrew Rybchenko 	sas->pci_addr = pci_dev->addr;
27691d3c7f9cSAndrew Rybchenko 	sas->port_id = dev->data->port_id;
27705313b441SAndrew Rybchenko 
27715313b441SAndrew Rybchenko 	/*
27725313b441SAndrew Rybchenko 	 * Allocate process private data from the heap, since it should not
27735313b441SAndrew Rybchenko 	 * be located in shared memory allocated using the rte_malloc() API.
27745313b441SAndrew Rybchenko 	 */
27755313b441SAndrew Rybchenko 	sa = calloc(1, sizeof(*sa));
27765313b441SAndrew Rybchenko 	if (sa == NULL) {
27775313b441SAndrew Rybchenko 		rc = ENOMEM;
27785313b441SAndrew Rybchenko 		goto fail_alloc_sa;
27795313b441SAndrew Rybchenko 	}
27805313b441SAndrew Rybchenko 
27815313b441SAndrew Rybchenko 	dev->process_private = sa;
27825313b441SAndrew Rybchenko 
27835313b441SAndrew Rybchenko 	/* Required for logging */
27845313b441SAndrew Rybchenko 	sa->priv.shared = sas;
2785f52fa243SAndrew Rybchenko 	sa->priv.logtype_main = logtype_main;
2786b7be9f43SAndrew Rybchenko 
278763d588ffSAndrew Rybchenko 	sa->eth_dev = dev;
278863d588ffSAndrew Rybchenko 
278963d588ffSAndrew Rybchenko 	/* Copy PCI device info to the dev->data */
279063d588ffSAndrew Rybchenko 	rte_eth_copy_pci_info(dev, pci_dev);
2791e4f72c9eSIvan Malov 	dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
279263d588ffSAndrew Rybchenko 
279363d588ffSAndrew Rybchenko 	rc = sfc_kvargs_parse(sa);
279463d588ffSAndrew Rybchenko 	if (rc != 0)
279563d588ffSAndrew Rybchenko 		goto fail_kvargs_parse;
279663d588ffSAndrew Rybchenko 
279763d588ffSAndrew Rybchenko 	sfc_log_init(sa, "entry");
279863d588ffSAndrew Rybchenko 
279935b2d13fSOlivier Matz 	dev->data->mac_addrs = rte_zmalloc("sfc", RTE_ETHER_ADDR_LEN, 0);
2800ba641f20SAndrew Rybchenko 	if (dev->data->mac_addrs == NULL) {
2801ba641f20SAndrew Rybchenko 		rc = ENOMEM;
2802ba641f20SAndrew Rybchenko 		goto fail_mac_addrs;
2803ba641f20SAndrew Rybchenko 	}
2804ba641f20SAndrew Rybchenko 
2805ba641f20SAndrew Rybchenko 	sfc_adapter_lock_init(sa);
2806ba641f20SAndrew Rybchenko 	sfc_adapter_lock(sa);
2807ba641f20SAndrew Rybchenko 
2808329472d4SAndrew Rybchenko 	sfc_log_init(sa, "probing");
2809329472d4SAndrew Rybchenko 	rc = sfc_probe(sa);
2810329472d4SAndrew Rybchenko 	if (rc != 0)
2811329472d4SAndrew Rybchenko 		goto fail_probe;
2812329472d4SAndrew Rybchenko 
2813f8778b16SIgor Romanov 	/*
2814f8778b16SIgor Romanov 	 * Selecting a default switch mode requires the NIC to be probed and
2815f8778b16SIgor Romanov 	 * to have its capabilities filled in.
2816f8778b16SIgor Romanov 	 */
2817a62ec905SIgor Romanov 	rc = sfc_parse_switch_mode(sa, init_data->nb_representors > 0);
2818f8778b16SIgor Romanov 	if (rc != 0)
2819f8778b16SIgor Romanov 		goto fail_switch_mode;
2820f8778b16SIgor Romanov 
2821329472d4SAndrew Rybchenko 	sfc_log_init(sa, "set device ops");
2822329472d4SAndrew Rybchenko 	rc = sfc_eth_dev_set_ops(dev);
2823329472d4SAndrew Rybchenko 	if (rc != 0)
2824329472d4SAndrew Rybchenko 		goto fail_set_ops;
2825329472d4SAndrew Rybchenko 
2826ba641f20SAndrew Rybchenko 	sfc_log_init(sa, "attaching");
2827ba641f20SAndrew Rybchenko 	rc = sfc_attach(sa);
2828ba641f20SAndrew Rybchenko 	if (rc != 0)
2829ba641f20SAndrew Rybchenko 		goto fail_attach;
2830ba641f20SAndrew Rybchenko 
28312f577f0eSViacheslav Galaktionov 	if (sa->switchdev && sa->mae.status != SFC_MAE_STATUS_ADMIN) {
2832f8778b16SIgor Romanov 		sfc_err(sa,
28332f577f0eSViacheslav Galaktionov 			"failed to enable switchdev mode without admin MAE privilege");
2834f8778b16SIgor Romanov 		rc = ENOTSUP;
2835f8778b16SIgor Romanov 		goto fail_switchdev_no_mae;
2836f8778b16SIgor Romanov 	}
2837f8778b16SIgor Romanov 
2838ba641f20SAndrew Rybchenko 	encp = efx_nic_cfg_get(sa->nic);
2839ba641f20SAndrew Rybchenko 
2840ba641f20SAndrew Rybchenko 	/*
2841ba641f20SAndrew Rybchenko 	 * The arguments are really in reverse order compared to the
2842ba641f20SAndrew Rybchenko 	 * Linux kernel. Copy from NIC config to Ethernet device data.
2843ba641f20SAndrew Rybchenko 	 */
28446d13ea8eSOlivier Matz 	from = (const struct rte_ether_addr *)(encp->enc_mac_addr);
2845538da7a1SOlivier Matz 	rte_ether_addr_copy(from, &dev->data->mac_addrs[0]);
2846ba641f20SAndrew Rybchenko 
28473037e6cfSViacheslav Galaktionov 	/*
28483037e6cfSViacheslav Galaktionov 	 * Set up the NIC DMA mapping handler. All internal mempools
28493037e6cfSViacheslav Galaktionov 	 * MUST be created on attach before this point, and the
28503037e6cfSViacheslav Galaktionov 	 * adapter MUST NOT create mempools with the adapter lock
28513037e6cfSViacheslav Galaktionov 	 * held after this point.
28523037e6cfSViacheslav Galaktionov 	 */
28533037e6cfSViacheslav Galaktionov 	rc = sfc_nic_dma_attach(sa);
28543037e6cfSViacheslav Galaktionov 	if (rc != 0)
28553037e6cfSViacheslav Galaktionov 		goto fail_nic_dma_attach;
28563037e6cfSViacheslav Galaktionov 
2857ba641f20SAndrew Rybchenko 	sfc_adapter_unlock(sa);
2858ba641f20SAndrew Rybchenko 
285963d588ffSAndrew Rybchenko 	sfc_log_init(sa, "done");
286063d588ffSAndrew Rybchenko 	return 0;
286163d588ffSAndrew Rybchenko 
28623037e6cfSViacheslav Galaktionov fail_nic_dma_attach:
2863f8778b16SIgor Romanov fail_switchdev_no_mae:
2864f8778b16SIgor Romanov 	sfc_detach(sa);
2865f8778b16SIgor Romanov 
2866ba641f20SAndrew Rybchenko fail_attach:
286735870e50SAndrew Rybchenko 	sfc_eth_dev_clear_ops(dev);
286835870e50SAndrew Rybchenko 
2869329472d4SAndrew Rybchenko fail_set_ops:
2870f8778b16SIgor Romanov fail_switch_mode:
2871329472d4SAndrew Rybchenko 	sfc_unprobe(sa);
2872329472d4SAndrew Rybchenko 
2873329472d4SAndrew Rybchenko fail_probe:
2874ba641f20SAndrew Rybchenko 	sfc_adapter_unlock(sa);
2875ba641f20SAndrew Rybchenko 	sfc_adapter_lock_fini(sa);
2876ba641f20SAndrew Rybchenko 	rte_free(dev->data->mac_addrs);
2877ba641f20SAndrew Rybchenko 	dev->data->mac_addrs = NULL;
2878ba641f20SAndrew Rybchenko 
2879ba641f20SAndrew Rybchenko fail_mac_addrs:
288063d588ffSAndrew Rybchenko 	sfc_kvargs_cleanup(sa);
288163d588ffSAndrew Rybchenko 
288263d588ffSAndrew Rybchenko fail_kvargs_parse:
288363d588ffSAndrew Rybchenko 	sfc_log_init(sa, "failed %d", rc);
28845dec95e3SAndrew Rybchenko 	dev->process_private = NULL;
28855313b441SAndrew Rybchenko 	free(sa);
28865313b441SAndrew Rybchenko 
28875313b441SAndrew Rybchenko fail_alloc_sa:
288863d588ffSAndrew Rybchenko 	SFC_ASSERT(rc > 0);
288963d588ffSAndrew Rybchenko 	return -rc;
289063d588ffSAndrew Rybchenko }
289163d588ffSAndrew Rybchenko 
289263d588ffSAndrew Rybchenko static int
289363d588ffSAndrew Rybchenko sfc_eth_dev_uninit(struct rte_eth_dev *dev)
289463d588ffSAndrew Rybchenko {
289593f993cbSAndrew Rybchenko 	sfc_dev_close(dev);
289693f993cbSAndrew Rybchenko 
289763d588ffSAndrew Rybchenko 	return 0;
289863d588ffSAndrew Rybchenko }
289963d588ffSAndrew Rybchenko 
290063d588ffSAndrew Rybchenko static const struct rte_pci_id pci_id_sfc_efx_map[] = {
2901ba641f20SAndrew Rybchenko 	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE) },
2902223a29a2SIvan Malov 	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE_VF) },
2903ba641f20SAndrew Rybchenko 	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT) },
2904223a29a2SIvan Malov 	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT_VF) },
2905ba641f20SAndrew Rybchenko 	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) },
2906223a29a2SIvan Malov 	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD_VF) },
2907f3129efdSAndrew Rybchenko 	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2) },
2908f3129efdSAndrew Rybchenko 	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2_VF) },
2909714f508eSAndrew Rybchenko 	{ RTE_PCI_DEVICE(EFX_PCI_VENID_XILINX, EFX_PCI_DEVID_RIVERHEAD) },
2910b75d85b7SIgor Romanov 	{ RTE_PCI_DEVICE(EFX_PCI_VENID_XILINX, EFX_PCI_DEVID_RIVERHEAD_VF) },
291163d588ffSAndrew Rybchenko 	{ .vendor_id = 0 /* sentinel */ }
291263d588ffSAndrew Rybchenko };
291363d588ffSAndrew Rybchenko 
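/*
 * Parse the generic ethdev part of the device arguments (most notably
 * the representor specification) into an rte_eth_devargs structure.
 * For illustration only: an argument string along the lines of
 * "representor=[0-3]" is expected to yield nb_representor_ports == 4
 * here, subject to the syntax accepted by rte_eth_devargs_parse().
 */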
2914a62ec905SIgor Romanov static int
2915a62ec905SIgor Romanov sfc_parse_rte_devargs(const char *args, struct rte_eth_devargs *devargs)
2916a62ec905SIgor Romanov {
2917a62ec905SIgor Romanov 	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
2918a62ec905SIgor Romanov 	int rc;
2919a62ec905SIgor Romanov 
2920a62ec905SIgor Romanov 	if (args != NULL) {
2921a62ec905SIgor Romanov 		rc = rte_eth_devargs_parse(args, &eth_da);
2922a62ec905SIgor Romanov 		if (rc != 0) {
2923a62ec905SIgor Romanov 			SFC_GENERIC_LOG(ERR,
2924a62ec905SIgor Romanov 					"Failed to parse generic devargs '%s'",
2925a62ec905SIgor Romanov 					args);
2926a62ec905SIgor Romanov 			return rc;
2927a62ec905SIgor Romanov 		}
2928a62ec905SIgor Romanov 	}
2929a62ec905SIgor Romanov 
2930a62ec905SIgor Romanov 	*devargs = eth_da;
2931a62ec905SIgor Romanov 
2932a62ec905SIgor Romanov 	return 0;
2933a62ec905SIgor Romanov }
2934a62ec905SIgor Romanov 
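/*
 * Find an already allocated ethdev for the PCI device or create a new
 * one via rte_eth_dev_create(). The caller is told whether the device
 * has just been created so that it can be destroyed again if
 * representor creation fails. Re-use of an existing device supports
 * RTE_PCI_DRV_PROBE_AGAIN.
 */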
2935a62ec905SIgor Romanov static int
2936472fa1b1SIgor Romanov sfc_eth_dev_find_or_create(struct rte_pci_device *pci_dev,
2937a62ec905SIgor Romanov 			   struct sfc_ethdev_init_data *init_data,
2938472fa1b1SIgor Romanov 			   struct rte_eth_dev **devp,
2939472fa1b1SIgor Romanov 			   bool *dev_created)
2940a62ec905SIgor Romanov {
2941a62ec905SIgor Romanov 	struct rte_eth_dev *dev;
2942472fa1b1SIgor Romanov 	bool created = false;
2943a62ec905SIgor Romanov 	int rc;
2944a62ec905SIgor Romanov 
2945472fa1b1SIgor Romanov 	dev = rte_eth_dev_allocated(pci_dev->device.name);
2946472fa1b1SIgor Romanov 	if (dev == NULL) {
2947a62ec905SIgor Romanov 		rc = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
2948a62ec905SIgor Romanov 					sizeof(struct sfc_adapter_shared),
2949a62ec905SIgor Romanov 					eth_dev_pci_specific_init, pci_dev,
2950a62ec905SIgor Romanov 					sfc_eth_dev_init, init_data);
2951a62ec905SIgor Romanov 		if (rc != 0) {
2952a62ec905SIgor Romanov 			SFC_GENERIC_LOG(ERR, "Failed to create sfc ethdev '%s'",
2953a62ec905SIgor Romanov 					pci_dev->device.name);
2954a62ec905SIgor Romanov 			return rc;
2955a62ec905SIgor Romanov 		}
2956a62ec905SIgor Romanov 
2957472fa1b1SIgor Romanov 		created = true;
2958472fa1b1SIgor Romanov 
2959a62ec905SIgor Romanov 		dev = rte_eth_dev_allocated(pci_dev->device.name);
2960a62ec905SIgor Romanov 		if (dev == NULL) {
2961472fa1b1SIgor Romanov 			SFC_GENERIC_LOG(ERR,
2962472fa1b1SIgor Romanov 				"Failed to find allocated sfc ethdev '%s'",
2963a62ec905SIgor Romanov 				pci_dev->device.name);
2964a62ec905SIgor Romanov 			return -ENODEV;
2965a62ec905SIgor Romanov 		}
2966472fa1b1SIgor Romanov 	}
2967a62ec905SIgor Romanov 
2968a62ec905SIgor Romanov 	*devp = dev;
2969472fa1b1SIgor Romanov 	*dev_created = created;
2970a62ec905SIgor Romanov 
2971a62ec905SIgor Romanov 	return 0;
2972a62ec905SIgor Romanov }
2973a62ec905SIgor Romanov 
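/*
 * Create a single port representor: build the m-port selector for the
 * given controller/PF/VF triplet and hand it over to sfc_repr_create()
 * together with the entity description. PF representors are requested
 * with repr_port set to EFX_PCI_VF_INVALID.
 */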
2974a62ec905SIgor Romanov static int
29756ded2e01SViacheslav Galaktionov sfc_eth_dev_create_repr(struct sfc_adapter *sa,
29766ded2e01SViacheslav Galaktionov 			efx_pcie_interface_t controller,
29776ded2e01SViacheslav Galaktionov 			uint16_t port,
29786ded2e01SViacheslav Galaktionov 			uint16_t repr_port,
29796ded2e01SViacheslav Galaktionov 			enum rte_eth_representor_type type)
29806ded2e01SViacheslav Galaktionov {
29816ded2e01SViacheslav Galaktionov 	struct sfc_repr_entity_info entity;
29826ded2e01SViacheslav Galaktionov 	efx_mport_sel_t mport_sel;
29836ded2e01SViacheslav Galaktionov 	int rc;
29846ded2e01SViacheslav Galaktionov 
29856ded2e01SViacheslav Galaktionov 	switch (type) {
29866ded2e01SViacheslav Galaktionov 	case RTE_ETH_REPRESENTOR_NONE:
29876ded2e01SViacheslav Galaktionov 		return 0;
29886ded2e01SViacheslav Galaktionov 	case RTE_ETH_REPRESENTOR_VF:
29896ded2e01SViacheslav Galaktionov 	case RTE_ETH_REPRESENTOR_PF:
29906ded2e01SViacheslav Galaktionov 		break;
29916ded2e01SViacheslav Galaktionov 	case RTE_ETH_REPRESENTOR_SF:
29926ded2e01SViacheslav Galaktionov 		sfc_err(sa, "SF representors are not supported");
29936ded2e01SViacheslav Galaktionov 		return ENOTSUP;
29946ded2e01SViacheslav Galaktionov 	default:
29956ded2e01SViacheslav Galaktionov 		sfc_err(sa, "unknown representor type: %d", type);
29966ded2e01SViacheslav Galaktionov 		return ENOTSUP;
29976ded2e01SViacheslav Galaktionov 	}
29986ded2e01SViacheslav Galaktionov 
29996ded2e01SViacheslav Galaktionov 	rc = efx_mae_mport_by_pcie_mh_function(controller,
30006ded2e01SViacheslav Galaktionov 					       port,
30016ded2e01SViacheslav Galaktionov 					       repr_port,
30026ded2e01SViacheslav Galaktionov 					       &mport_sel);
30036ded2e01SViacheslav Galaktionov 	if (rc != 0) {
30046ded2e01SViacheslav Galaktionov 		sfc_err(sa,
30056ded2e01SViacheslav Galaktionov 			"failed to get m-port selector for controller %u port %u repr_port %u: %s",
30066ded2e01SViacheslav Galaktionov 			controller, port, repr_port, rte_strerror(-rc));
30076ded2e01SViacheslav Galaktionov 		return rc;
30086ded2e01SViacheslav Galaktionov 	}
30096ded2e01SViacheslav Galaktionov 
30106ded2e01SViacheslav Galaktionov 	memset(&entity, 0, sizeof(entity));
30116ded2e01SViacheslav Galaktionov 	entity.type = type;
30126ded2e01SViacheslav Galaktionov 	entity.intf = controller;
30136ded2e01SViacheslav Galaktionov 	entity.pf = port;
30146ded2e01SViacheslav Galaktionov 	entity.vf = repr_port;
30156ded2e01SViacheslav Galaktionov 
30166ded2e01SViacheslav Galaktionov 	rc = sfc_repr_create(sa->eth_dev, &entity, sa->mae.switch_domain_id,
30176ded2e01SViacheslav Galaktionov 			     &mport_sel);
30186ded2e01SViacheslav Galaktionov 	if (rc != 0) {
30196ded2e01SViacheslav Galaktionov 		sfc_err(sa,
30206ded2e01SViacheslav Galaktionov 			"failed to create representor for controller %u port %u repr_port %u: %s",
30216ded2e01SViacheslav Galaktionov 			controller, port, repr_port, rte_strerror(-rc));
30226ded2e01SViacheslav Galaktionov 		return rc;
30236ded2e01SViacheslav Galaktionov 	}
30246ded2e01SViacheslav Galaktionov 
30256ded2e01SViacheslav Galaktionov 	return 0;
30266ded2e01SViacheslav Galaktionov }
30276ded2e01SViacheslav Galaktionov 
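/*
 * Create the representors requested for one (controller, PF) pair:
 * either a single PF representor or one representor per requested VF.
 * All ports are attempted and the first error, if any, is returned.
 */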
30286ded2e01SViacheslav Galaktionov static int
30296ded2e01SViacheslav Galaktionov sfc_eth_dev_create_repr_port(struct sfc_adapter *sa,
30306ded2e01SViacheslav Galaktionov 			     const struct rte_eth_devargs *eth_da,
30316ded2e01SViacheslav Galaktionov 			     efx_pcie_interface_t controller,
30326ded2e01SViacheslav Galaktionov 			     uint16_t port)
30336ded2e01SViacheslav Galaktionov {
30346ded2e01SViacheslav Galaktionov 	int first_error = 0;
30356ded2e01SViacheslav Galaktionov 	uint16_t i;
30366ded2e01SViacheslav Galaktionov 	int rc;
30376ded2e01SViacheslav Galaktionov 
30386ded2e01SViacheslav Galaktionov 	if (eth_da->type == RTE_ETH_REPRESENTOR_PF) {
30396ded2e01SViacheslav Galaktionov 		return sfc_eth_dev_create_repr(sa, controller, port,
30406ded2e01SViacheslav Galaktionov 					       EFX_PCI_VF_INVALID,
30416ded2e01SViacheslav Galaktionov 					       eth_da->type);
30426ded2e01SViacheslav Galaktionov 	}
30436ded2e01SViacheslav Galaktionov 
30446ded2e01SViacheslav Galaktionov 	for (i = 0; i < eth_da->nb_representor_ports; i++) {
30456ded2e01SViacheslav Galaktionov 		rc = sfc_eth_dev_create_repr(sa, controller, port,
30466ded2e01SViacheslav Galaktionov 					     eth_da->representor_ports[i],
30476ded2e01SViacheslav Galaktionov 					     eth_da->type);
30486ded2e01SViacheslav Galaktionov 		if (rc != 0 && first_error == 0)
30496ded2e01SViacheslav Galaktionov 			first_error = rc;
30506ded2e01SViacheslav Galaktionov 	}
30516ded2e01SViacheslav Galaktionov 
30526ded2e01SViacheslav Galaktionov 	return first_error;
30536ded2e01SViacheslav Galaktionov }
30546ded2e01SViacheslav Galaktionov 
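/*
 * Create representors for one controller. If no PFs are listed in the
 * device arguments, the local PF is used when the controller is the
 * local one (otherwise PF 0); all listed PFs are attempted and the
 * first error is returned.
 */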
30556ded2e01SViacheslav Galaktionov static int
30566ded2e01SViacheslav Galaktionov sfc_eth_dev_create_repr_controller(struct sfc_adapter *sa,
30576ded2e01SViacheslav Galaktionov 				   const struct rte_eth_devargs *eth_da,
30586ded2e01SViacheslav Galaktionov 				   efx_pcie_interface_t controller)
30596ded2e01SViacheslav Galaktionov {
30606ded2e01SViacheslav Galaktionov 	const efx_nic_cfg_t *encp;
30616ded2e01SViacheslav Galaktionov 	int first_error = 0;
30626ded2e01SViacheslav Galaktionov 	uint16_t default_port;
30636ded2e01SViacheslav Galaktionov 	uint16_t i;
30646ded2e01SViacheslav Galaktionov 	int rc;
30656ded2e01SViacheslav Galaktionov 
30666ded2e01SViacheslav Galaktionov 	if (eth_da->nb_ports == 0) {
30676ded2e01SViacheslav Galaktionov 		encp = efx_nic_cfg_get(sa->nic);
30686ded2e01SViacheslav Galaktionov 		default_port = encp->enc_intf == controller ? encp->enc_pf : 0;
30696ded2e01SViacheslav Galaktionov 		return sfc_eth_dev_create_repr_port(sa, eth_da, controller,
30706ded2e01SViacheslav Galaktionov 						    default_port);
30716ded2e01SViacheslav Galaktionov 	}
30726ded2e01SViacheslav Galaktionov 
30736ded2e01SViacheslav Galaktionov 	for (i = 0; i < eth_da->nb_ports; i++) {
30746ded2e01SViacheslav Galaktionov 		rc = sfc_eth_dev_create_repr_port(sa, eth_da, controller,
30756ded2e01SViacheslav Galaktionov 						  eth_da->ports[i]);
30766ded2e01SViacheslav Galaktionov 		if (rc != 0 && first_error == 0)
30776ded2e01SViacheslav Galaktionov 			first_error = rc;
30786ded2e01SViacheslav Galaktionov 	}
30796ded2e01SViacheslav Galaktionov 
30806ded2e01SViacheslav Galaktionov 	return first_error;
30816ded2e01SViacheslav Galaktionov }
30826ded2e01SViacheslav Galaktionov 
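/*
 * Top level of representor creation: validate the representor type,
 * require switchdev mode and representor support, refresh the m-port
 * journal under the adapter lock, and then create representors either
 * for every requested multi-host controller or for the local one.
 */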
30836ded2e01SViacheslav Galaktionov static int
3084a62ec905SIgor Romanov sfc_eth_dev_create_representors(struct rte_eth_dev *dev,
3085a62ec905SIgor Romanov 				const struct rte_eth_devargs *eth_da)
3086a62ec905SIgor Romanov {
30876ded2e01SViacheslav Galaktionov 	efx_pcie_interface_t intf;
30886ded2e01SViacheslav Galaktionov 	const efx_nic_cfg_t *encp;
3089a62ec905SIgor Romanov 	struct sfc_adapter *sa;
30906ded2e01SViacheslav Galaktionov 	uint16_t switch_domain_id;
30916ded2e01SViacheslav Galaktionov 	uint16_t i;
3092a62ec905SIgor Romanov 	int rc;
3093a62ec905SIgor Romanov 
3094a62ec905SIgor Romanov 	sa = sfc_adapter_by_eth_dev(dev);
30956ded2e01SViacheslav Galaktionov 	switch_domain_id = sa->mae.switch_domain_id;
30966ded2e01SViacheslav Galaktionov 
30976ded2e01SViacheslav Galaktionov 	switch (eth_da->type) {
30986ded2e01SViacheslav Galaktionov 	case RTE_ETH_REPRESENTOR_NONE:
30996ded2e01SViacheslav Galaktionov 		return 0;
31006ded2e01SViacheslav Galaktionov 	case RTE_ETH_REPRESENTOR_PF:
31016ded2e01SViacheslav Galaktionov 	case RTE_ETH_REPRESENTOR_VF:
31026ded2e01SViacheslav Galaktionov 		break;
31036ded2e01SViacheslav Galaktionov 	case RTE_ETH_REPRESENTOR_SF:
31046ded2e01SViacheslav Galaktionov 		sfc_err(sa, "SF representors are not supported");
31056ded2e01SViacheslav Galaktionov 		return -ENOTSUP;
31066ded2e01SViacheslav Galaktionov 	default:
31076ded2e01SViacheslav Galaktionov 		sfc_err(sa, "unknown representor type: %d",
31086ded2e01SViacheslav Galaktionov 			eth_da->type);
31096ded2e01SViacheslav Galaktionov 		return -ENOTSUP;
31106ded2e01SViacheslav Galaktionov 	}
3111a62ec905SIgor Romanov 
3112a62ec905SIgor Romanov 	if (!sa->switchdev) {
3113a62ec905SIgor Romanov 		sfc_err(sa, "cannot create representors in non-switchdev mode");
3114a62ec905SIgor Romanov 		return -EINVAL;
3115a62ec905SIgor Romanov 	}
3116a62ec905SIgor Romanov 
3117a62ec905SIgor Romanov 	if (!sfc_repr_available(sfc_sa2shared(sa))) {
3118a62ec905SIgor Romanov 		sfc_err(sa, "cannot create representors: unsupported");
3119a62ec905SIgor Romanov 
3120a62ec905SIgor Romanov 		return -ENOTSUP;
3121a62ec905SIgor Romanov 	}
3122a62ec905SIgor Romanov 
312344db08d5SViacheslav Galaktionov 	/*
312444db08d5SViacheslav Galaktionov 	 * This is needed to construct the DPDK controller -> EFX interface
312544db08d5SViacheslav Galaktionov 	 * mapping.
312644db08d5SViacheslav Galaktionov 	 */
312744db08d5SViacheslav Galaktionov 	sfc_adapter_lock(sa);
312844db08d5SViacheslav Galaktionov 	rc = sfc_process_mport_journal(sa);
312944db08d5SViacheslav Galaktionov 	sfc_adapter_unlock(sa);
313044db08d5SViacheslav Galaktionov 	if (rc != 0) {
313144db08d5SViacheslav Galaktionov 		SFC_ASSERT(rc > 0);
313244db08d5SViacheslav Galaktionov 		return -rc;
313344db08d5SViacheslav Galaktionov 	}
313444db08d5SViacheslav Galaktionov 
31356ded2e01SViacheslav Galaktionov 	if (eth_da->nb_mh_controllers > 0) {
31366ded2e01SViacheslav Galaktionov 		for (i = 0; i < eth_da->nb_mh_controllers; i++) {
31376ded2e01SViacheslav Galaktionov 			rc = sfc_mae_switch_domain_get_intf(switch_domain_id,
31386ded2e01SViacheslav Galaktionov 						eth_da->mh_controllers[i],
31396ded2e01SViacheslav Galaktionov 						&intf);
3140a62ec905SIgor Romanov 			if (rc != 0) {
31416ded2e01SViacheslav Galaktionov 				sfc_err(sa, "failed to get the interface for the controller");
3142a62ec905SIgor Romanov 				continue;
3143a62ec905SIgor Romanov 			}
31446ded2e01SViacheslav Galaktionov 			sfc_eth_dev_create_repr_controller(sa, eth_da, intf);
3145a62ec905SIgor Romanov 		}
31466ded2e01SViacheslav Galaktionov 	} else {
31476ded2e01SViacheslav Galaktionov 		encp = efx_nic_cfg_get(sa->nic);
31486ded2e01SViacheslav Galaktionov 		sfc_eth_dev_create_repr_controller(sa, eth_da, encp->enc_intf);
3149a62ec905SIgor Romanov 	}
3150a62ec905SIgor Romanov 
3151a62ec905SIgor Romanov 	return 0;
3152a62ec905SIgor Romanov }
3153a62ec905SIgor Romanov 
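/*
 * PCI probe entry point: parse device arguments, refuse representor
 * creation from secondary processes, find or create the ethdev (the
 * driver supports RTE_PCI_DRV_PROBE_AGAIN) and create the requested
 * representors; a freshly created ethdev is destroyed if representor
 * creation fails.
 */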
3154fdf91e0fSJan Blunck static int sfc_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3155fdf91e0fSJan Blunck 	struct rte_pci_device *pci_dev)
3156fdf91e0fSJan Blunck {
3157a62ec905SIgor Romanov 	struct sfc_ethdev_init_data init_data;
3158a62ec905SIgor Romanov 	struct rte_eth_devargs eth_da;
3159a62ec905SIgor Romanov 	struct rte_eth_dev *dev;
3160472fa1b1SIgor Romanov 	bool dev_created;
3161a62ec905SIgor Romanov 	int rc;
3162a62ec905SIgor Romanov 
3163a62ec905SIgor Romanov 	if (pci_dev->device.devargs != NULL) {
3164a62ec905SIgor Romanov 		rc = sfc_parse_rte_devargs(pci_dev->device.devargs->args,
3165a62ec905SIgor Romanov 					   &eth_da);
3166a62ec905SIgor Romanov 		if (rc != 0)
3167a62ec905SIgor Romanov 			return rc;
3168a62ec905SIgor Romanov 	} else {
3169a62ec905SIgor Romanov 		memset(&eth_da, 0, sizeof(eth_da));
3170a62ec905SIgor Romanov 	}
3171a62ec905SIgor Romanov 
31726ded2e01SViacheslav Galaktionov 	/* If no VF representors specified, check for PF ones */
31736ded2e01SViacheslav Galaktionov 	if (eth_da.nb_representor_ports > 0)
3174a62ec905SIgor Romanov 		init_data.nb_representors = eth_da.nb_representor_ports;
31756ded2e01SViacheslav Galaktionov 	else
31766ded2e01SViacheslav Galaktionov 		init_data.nb_representors = eth_da.nb_ports;
3177a62ec905SIgor Romanov 
31786ded2e01SViacheslav Galaktionov 	if (init_data.nb_representors > 0 &&
3179a62ec905SIgor Romanov 	    rte_eal_process_type() != RTE_PROC_PRIMARY) {
3180a62ec905SIgor Romanov 		SFC_GENERIC_LOG(ERR,
3181a62ec905SIgor Romanov 			"Creating representors from a secondary process is not supported, dev '%s'",
3182a62ec905SIgor Romanov 			pci_dev->device.name);
3183a62ec905SIgor Romanov 		return -ENOTSUP;
3184a62ec905SIgor Romanov 	}
3185a62ec905SIgor Romanov 
3186472fa1b1SIgor Romanov 	/*
3187472fa1b1SIgor Romanov 	 * The driver supports RTE_PCI_DRV_PROBE_AGAIN, so create the device only
3188472fa1b1SIgor Romanov 	 * if it does not already exist. Re-probing an existing device is expected
3189472fa1b1SIgor Romanov 	 * to allow additional representors to be configured (see the sketch below).
3190472fa1b1SIgor Romanov 	 */
3191472fa1b1SIgor Romanov 	rc = sfc_eth_dev_find_or_create(pci_dev, &init_data, &dev,
3192472fa1b1SIgor Romanov 					&dev_created);
3193a62ec905SIgor Romanov 	if (rc != 0)
3194a62ec905SIgor Romanov 		return rc;
3195a62ec905SIgor Romanov 
3196a62ec905SIgor Romanov 	rc = sfc_eth_dev_create_representors(dev, &eth_da);
3197a62ec905SIgor Romanov 	if (rc != 0) {
3198472fa1b1SIgor Romanov 		if (dev_created)
3199a62ec905SIgor Romanov 			(void)rte_eth_dev_destroy(dev, sfc_eth_dev_uninit);
3200472fa1b1SIgor Romanov 
3201a62ec905SIgor Romanov 		return rc;
3202a62ec905SIgor Romanov 	}
3203a62ec905SIgor Romanov 
3204a62ec905SIgor Romanov 	return 0;
3205fdf91e0fSJan Blunck }
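
/*
 * Illustrative sketch only, not part of the driver: since sfc_efx_pmd
 * declares RTE_PCI_DRV_PROBE_AGAIN (see the driver registration below),
 * an application may re-probe an already initialised adapter with a
 * representor devargs string to have additional representors configured.
 * The PCI address and representor range are hypothetical.
 */
static int __rte_unused
sfc_example_hotplug_representors(void)
{
	/* Re-probe the existing device; the PF ethdev itself is reused. */
	return rte_dev_probe("0000:01:00.0,representor=vf[0-3]");
}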
3206fdf91e0fSJan Blunck 
3207fdf91e0fSJan Blunck static int sfc_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
3208fdf91e0fSJan Blunck {
3209fdf91e0fSJan Blunck 	return rte_eth_dev_pci_generic_remove(pci_dev, sfc_eth_dev_uninit);
3210fdf91e0fSJan Blunck }
3211fdf91e0fSJan Blunck 
3212fdf91e0fSJan Blunck static struct rte_pci_driver sfc_efx_pmd = {
321363d588ffSAndrew Rybchenko 	.id_table = pci_id_sfc_efx_map,
3214ba641f20SAndrew Rybchenko 	.drv_flags =
32153b809c27SAndrew Rybchenko 		RTE_PCI_DRV_INTR_LSC |
3216472fa1b1SIgor Romanov 		RTE_PCI_DRV_NEED_MAPPING |
3217472fa1b1SIgor Romanov 		RTE_PCI_DRV_PROBE_AGAIN,
3218fdf91e0fSJan Blunck 	.probe = sfc_eth_dev_pci_probe,
3219fdf91e0fSJan Blunck 	.remove = sfc_eth_dev_pci_remove,
322063d588ffSAndrew Rybchenko };
322163d588ffSAndrew Rybchenko 
3222fdf91e0fSJan Blunck RTE_PMD_REGISTER_PCI(net_sfc_efx, sfc_efx_pmd);
322363d588ffSAndrew Rybchenko RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map);
322406e81dc9SDavid Marchand RTE_PMD_REGISTER_KMOD_DEP(net_sfc_efx, "* igb_uio | uio_pci_generic | vfio-pci");
322563d588ffSAndrew Rybchenko RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx,
3226f8778b16SIgor Romanov 	SFC_KVARG_SWITCH_MODE "=" SFC_KVARG_VALUES_SWITCH_MODE " "
3227df1bfde4SAndrew Rybchenko 	SFC_KVARG_RX_DATAPATH "=" SFC_KVARG_VALUES_RX_DATAPATH " "
3228dbdc8241SAndrew Rybchenko 	SFC_KVARG_TX_DATAPATH "=" SFC_KVARG_VALUES_TX_DATAPATH " "
3229c22d3c50SAndrew Rybchenko 	SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " "
32309e7fc8b8SRoman Zhukov 	SFC_KVARG_FW_VARIANT "=" SFC_KVARG_VALUES_FW_VARIANT " "
32315a1ae82dSAndrew Rybchenko 	SFC_KVARG_RXD_WAIT_TIMEOUT_NS "=<long> "
3232a6fae8f9SIvan Malov 	SFC_KVARG_STATS_UPDATE_PERIOD_MS "=<long>");
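
/*
 * Illustrative usage, not part of the driver: the parameters registered
 * above are passed as per-device arguments, e.g. on the EAL command line
 * (the PCI address and values are hypothetical, and the kvarg names are
 * assumed to expand as defined in sfc_kvargs.h):
 *   -a 0000:01:00.0,rx_datapath=ef100,perf_profile=throughput
 */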
3233fdceb100SIvan Malov 
3234f8e99896SThomas Monjalon RTE_INIT(sfc_driver_register_logtype)
3235fdceb100SIvan Malov {
3236fdceb100SIvan Malov 	int ret;
3237fdceb100SIvan Malov 
3238fdceb100SIvan Malov 	ret = rte_log_register_type_and_pick_level(SFC_LOGTYPE_PREFIX "driver",
3239fdceb100SIvan Malov 						   RTE_LOG_NOTICE);
3240fdceb100SIvan Malov 	sfc_logtype_driver = (ret < 0) ? RTE_LOGTYPE_PMD : ret;
3241fdceb100SIvan Malov }
3242