xref: /dpdk/drivers/net/sfc/sfc_ethdev.c (revision cdbb29cf4bc4e2c8b6bf0683aa7519edea951b1a)
163d588ffSAndrew Rybchenko /*-
263d588ffSAndrew Rybchenko  * Copyright (c) 2016 Solarflare Communications Inc.
363d588ffSAndrew Rybchenko  * All rights reserved.
463d588ffSAndrew Rybchenko  *
563d588ffSAndrew Rybchenko  * This software was jointly developed between OKTET Labs (under contract
663d588ffSAndrew Rybchenko  * for Solarflare) and Solarflare Communications, Inc.
763d588ffSAndrew Rybchenko  *
863d588ffSAndrew Rybchenko  * Redistribution and use in source and binary forms, with or without
963d588ffSAndrew Rybchenko  * modification, are permitted provided that the following conditions are met:
1063d588ffSAndrew Rybchenko  *
1163d588ffSAndrew Rybchenko  * 1. Redistributions of source code must retain the above copyright notice,
1263d588ffSAndrew Rybchenko  *    this list of conditions and the following disclaimer.
1363d588ffSAndrew Rybchenko  * 2. Redistributions in binary form must reproduce the above copyright notice,
1463d588ffSAndrew Rybchenko  *    this list of conditions and the following disclaimer in the documentation
1563d588ffSAndrew Rybchenko  *    and/or other materials provided with the distribution.
1663d588ffSAndrew Rybchenko  *
1763d588ffSAndrew Rybchenko  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
1863d588ffSAndrew Rybchenko  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1963d588ffSAndrew Rybchenko  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
2063d588ffSAndrew Rybchenko  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
2163d588ffSAndrew Rybchenko  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
2263d588ffSAndrew Rybchenko  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
2363d588ffSAndrew Rybchenko  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
2463d588ffSAndrew Rybchenko  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
2563d588ffSAndrew Rybchenko  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
2663d588ffSAndrew Rybchenko  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
2763d588ffSAndrew Rybchenko  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2863d588ffSAndrew Rybchenko  */
2963d588ffSAndrew Rybchenko 
3063d588ffSAndrew Rybchenko #include <rte_dev.h>
3163d588ffSAndrew Rybchenko #include <rte_ethdev.h>
3263d588ffSAndrew Rybchenko #include <rte_pci.h>
3363d588ffSAndrew Rybchenko 
34ba641f20SAndrew Rybchenko #include "efx.h"
35ba641f20SAndrew Rybchenko 
3663d588ffSAndrew Rybchenko #include "sfc.h"
3763d588ffSAndrew Rybchenko #include "sfc_debug.h"
3863d588ffSAndrew Rybchenko #include "sfc_log.h"
3963d588ffSAndrew Rybchenko #include "sfc_kvargs.h"
40886f8d8aSArtem Andreev #include "sfc_ev.h"
41ce35b05cSAndrew Rybchenko #include "sfc_rx.h"
42b1b7ad93SIvan Malov #include "sfc_tx.h"
4363d588ffSAndrew Rybchenko 
4463d588ffSAndrew Rybchenko 
4563d588ffSAndrew Rybchenko static void
4663d588ffSAndrew Rybchenko sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
4763d588ffSAndrew Rybchenko {
4863d588ffSAndrew Rybchenko 	struct sfc_adapter *sa = dev->data->dev_private;
4963d588ffSAndrew Rybchenko 
5063d588ffSAndrew Rybchenko 	sfc_log_init(sa, "entry");
5163d588ffSAndrew Rybchenko 
5263d588ffSAndrew Rybchenko 	dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
5303ed2119SAndrew Rybchenko 	dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;
54a8e64c6bSAndrew Rybchenko 
55ce35b05cSAndrew Rybchenko 	dev_info->max_rx_queues = sa->rxq_max;
56a8ad8cf8SIvan Malov 	dev_info->max_tx_queues = sa->txq_max;
57ce35b05cSAndrew Rybchenko 
58a8e64c6bSAndrew Rybchenko 	/* By default packets are dropped if no descriptors are available */
59a8e64c6bSAndrew Rybchenko 	dev_info->default_rxconf.rx_drop_en = 1;
60a8e64c6bSAndrew Rybchenko 
61a8ad8cf8SIvan Malov 	dev_info->tx_offload_capa =
62a8ad8cf8SIvan Malov 		DEV_TX_OFFLOAD_IPV4_CKSUM |
63a8ad8cf8SIvan Malov 		DEV_TX_OFFLOAD_UDP_CKSUM |
64a8ad8cf8SIvan Malov 		DEV_TX_OFFLOAD_TCP_CKSUM;
65a8ad8cf8SIvan Malov 
66a8ad8cf8SIvan Malov 	dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOVLANOFFL |
67a8ad8cf8SIvan Malov 					     ETH_TXQ_FLAGS_NOXSUMSCTP;
68a8ad8cf8SIvan Malov 
69a8e64c6bSAndrew Rybchenko 	dev_info->rx_desc_lim.nb_max = EFX_RXQ_MAXNDESCS;
70a8e64c6bSAndrew Rybchenko 	dev_info->rx_desc_lim.nb_min = EFX_RXQ_MINNDESCS;
71a8e64c6bSAndrew Rybchenko 	/* The RXQ hardware requires that the descriptor count is a power
72a8e64c6bSAndrew Rybchenko 	 * of 2, but rx_desc_lim cannot properly describe that constraint.
73a8e64c6bSAndrew Rybchenko 	 */
74a8e64c6bSAndrew Rybchenko 	dev_info->rx_desc_lim.nb_align = EFX_RXQ_MINNDESCS;
75a8ad8cf8SIvan Malov 
76a8ad8cf8SIvan Malov 	dev_info->tx_desc_lim.nb_max = sa->txq_max_entries;
77a8ad8cf8SIvan Malov 	dev_info->tx_desc_lim.nb_min = EFX_TXQ_MINNDESCS;
78a8ad8cf8SIvan Malov 	/*
79a8ad8cf8SIvan Malov 	 * The TXQ hardware requires that the descriptor count is a power
80a8ad8cf8SIvan Malov 	 * of 2, but tx_desc_lim cannot properly describe that constraint
81a8ad8cf8SIvan Malov 	 */
82a8ad8cf8SIvan Malov 	dev_info->tx_desc_lim.nb_align = EFX_TXQ_MINNDESCS;
8363d588ffSAndrew Rybchenko }
8463d588ffSAndrew Rybchenko 
85aaa3f5f0SAndrew Rybchenko static int
86aaa3f5f0SAndrew Rybchenko sfc_dev_configure(struct rte_eth_dev *dev)
87aaa3f5f0SAndrew Rybchenko {
88aaa3f5f0SAndrew Rybchenko 	struct rte_eth_dev_data *dev_data = dev->data;
89aaa3f5f0SAndrew Rybchenko 	struct sfc_adapter *sa = dev_data->dev_private;
90aaa3f5f0SAndrew Rybchenko 	int rc;
91aaa3f5f0SAndrew Rybchenko 
92aaa3f5f0SAndrew Rybchenko 	sfc_log_init(sa, "entry n_rxq=%u n_txq=%u",
93aaa3f5f0SAndrew Rybchenko 		     dev_data->nb_rx_queues, dev_data->nb_tx_queues);
94aaa3f5f0SAndrew Rybchenko 
95aaa3f5f0SAndrew Rybchenko 	sfc_adapter_lock(sa);
96aaa3f5f0SAndrew Rybchenko 	switch (sa->state) {
97aaa3f5f0SAndrew Rybchenko 	case SFC_ADAPTER_CONFIGURED:
98aaa3f5f0SAndrew Rybchenko 		sfc_close(sa);
99aaa3f5f0SAndrew Rybchenko 		SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED);
100aaa3f5f0SAndrew Rybchenko 		/* FALLTHROUGH */
101aaa3f5f0SAndrew Rybchenko 	case SFC_ADAPTER_INITIALIZED:
102aaa3f5f0SAndrew Rybchenko 		rc = sfc_configure(sa);
103aaa3f5f0SAndrew Rybchenko 		break;
104aaa3f5f0SAndrew Rybchenko 	default:
105aaa3f5f0SAndrew Rybchenko 		sfc_err(sa, "unexpected adapter state %u to configure",
106aaa3f5f0SAndrew Rybchenko 			sa->state);
107aaa3f5f0SAndrew Rybchenko 		rc = EINVAL;
108aaa3f5f0SAndrew Rybchenko 		break;
109aaa3f5f0SAndrew Rybchenko 	}
110aaa3f5f0SAndrew Rybchenko 	sfc_adapter_unlock(sa);
111aaa3f5f0SAndrew Rybchenko 
112aaa3f5f0SAndrew Rybchenko 	sfc_log_init(sa, "done %d", rc);
113aaa3f5f0SAndrew Rybchenko 	SFC_ASSERT(rc >= 0);
114aaa3f5f0SAndrew Rybchenko 	return -rc;
115aaa3f5f0SAndrew Rybchenko }
116aaa3f5f0SAndrew Rybchenko 
11793fcf09bSAndrew Rybchenko static int
11893fcf09bSAndrew Rybchenko sfc_dev_start(struct rte_eth_dev *dev)
11993fcf09bSAndrew Rybchenko {
12093fcf09bSAndrew Rybchenko 	struct sfc_adapter *sa = dev->data->dev_private;
12193fcf09bSAndrew Rybchenko 	int rc;
12293fcf09bSAndrew Rybchenko 
12393fcf09bSAndrew Rybchenko 	sfc_log_init(sa, "entry");
12493fcf09bSAndrew Rybchenko 
12593fcf09bSAndrew Rybchenko 	sfc_adapter_lock(sa);
12693fcf09bSAndrew Rybchenko 	rc = sfc_start(sa);
12793fcf09bSAndrew Rybchenko 	sfc_adapter_unlock(sa);
12893fcf09bSAndrew Rybchenko 
12993fcf09bSAndrew Rybchenko 	sfc_log_init(sa, "done %d", rc);
13093fcf09bSAndrew Rybchenko 	SFC_ASSERT(rc >= 0);
13193fcf09bSAndrew Rybchenko 	return -rc;
13293fcf09bSAndrew Rybchenko }
13393fcf09bSAndrew Rybchenko 
/*
 * Refresh dev->data->dev_link and report whether the link status changed.
 * Returns 0 if the status is unchanged, -1 otherwise (ethdev convention).
 */
static int
sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_eth_link *dev_link = &dev->data->dev_link;
	struct rte_eth_link old_link;
	struct rte_eth_link current_link;

	sfc_log_init(sa, "entry");

	/* Link state is only maintained while the adapter is started */
	if (sa->state != SFC_ADAPTER_STARTED)
		return 0;

retry:
	/* dev_link is read/written atomically as a single 64-bit value */
	EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
	*(int64_t *)&old_link = rte_atomic64_read((rte_atomic64_t *)dev_link);

	if (wait_to_complete) {
		efx_link_mode_t link_mode;

		/* Poll the port directly for up-to-date link state */
		efx_port_poll(sa->nic, &link_mode);
		sfc_port_link_mode_to_info(link_mode, &current_link);

		/*
		 * Publish the polled state; retry the whole read-poll-publish
		 * sequence if dev_link changed concurrently since old_link
		 * was sampled.
		 */
		if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link,
					 *(uint64_t *)&old_link,
					 *(uint64_t *)&current_link))
			goto retry;
	} else {
		/* Let management event queue processing update dev_link */
		sfc_ev_mgmt_qpoll(sa);
		*(int64_t *)&current_link =
			rte_atomic64_read((rte_atomic64_t *)dev_link);
	}

	if (old_link.link_status != current_link.link_status)
		sfc_info(sa, "Link status is %s",
			 current_link.link_status ? "UP" : "DOWN");

	return old_link.link_status == current_link.link_status ? 0 : -1;
}
173886f8d8aSArtem Andreev 
/* Stop the adapter; serialized with other control-path operations. */
static void
sfc_dev_stop(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	sfc_stop(sa);
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
}
18793fcf09bSAndrew Rybchenko 
188aaa3f5f0SAndrew Rybchenko static void
189aaa3f5f0SAndrew Rybchenko sfc_dev_close(struct rte_eth_dev *dev)
190aaa3f5f0SAndrew Rybchenko {
191aaa3f5f0SAndrew Rybchenko 	struct sfc_adapter *sa = dev->data->dev_private;
192aaa3f5f0SAndrew Rybchenko 
193aaa3f5f0SAndrew Rybchenko 	sfc_log_init(sa, "entry");
194aaa3f5f0SAndrew Rybchenko 
195aaa3f5f0SAndrew Rybchenko 	sfc_adapter_lock(sa);
196aaa3f5f0SAndrew Rybchenko 	switch (sa->state) {
19793fcf09bSAndrew Rybchenko 	case SFC_ADAPTER_STARTED:
19893fcf09bSAndrew Rybchenko 		sfc_stop(sa);
19993fcf09bSAndrew Rybchenko 		SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
20093fcf09bSAndrew Rybchenko 		/* FALLTHROUGH */
201aaa3f5f0SAndrew Rybchenko 	case SFC_ADAPTER_CONFIGURED:
202aaa3f5f0SAndrew Rybchenko 		sfc_close(sa);
203aaa3f5f0SAndrew Rybchenko 		SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED);
204aaa3f5f0SAndrew Rybchenko 		/* FALLTHROUGH */
205aaa3f5f0SAndrew Rybchenko 	case SFC_ADAPTER_INITIALIZED:
206aaa3f5f0SAndrew Rybchenko 		break;
207aaa3f5f0SAndrew Rybchenko 	default:
208aaa3f5f0SAndrew Rybchenko 		sfc_err(sa, "unexpected adapter state %u on close", sa->state);
209aaa3f5f0SAndrew Rybchenko 		break;
210aaa3f5f0SAndrew Rybchenko 	}
211aaa3f5f0SAndrew Rybchenko 	sfc_adapter_unlock(sa);
212aaa3f5f0SAndrew Rybchenko 
213aaa3f5f0SAndrew Rybchenko 	sfc_log_init(sa, "done");
214aaa3f5f0SAndrew Rybchenko }
215aaa3f5f0SAndrew Rybchenko 
216ce35b05cSAndrew Rybchenko static int
217ce35b05cSAndrew Rybchenko sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
218ce35b05cSAndrew Rybchenko 		   uint16_t nb_rx_desc, unsigned int socket_id,
219ce35b05cSAndrew Rybchenko 		   const struct rte_eth_rxconf *rx_conf,
220ce35b05cSAndrew Rybchenko 		   struct rte_mempool *mb_pool)
221ce35b05cSAndrew Rybchenko {
222ce35b05cSAndrew Rybchenko 	struct sfc_adapter *sa = dev->data->dev_private;
223ce35b05cSAndrew Rybchenko 	int rc;
224ce35b05cSAndrew Rybchenko 
225ce35b05cSAndrew Rybchenko 	sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
226ce35b05cSAndrew Rybchenko 		     rx_queue_id, nb_rx_desc, socket_id);
227ce35b05cSAndrew Rybchenko 
228ce35b05cSAndrew Rybchenko 	sfc_adapter_lock(sa);
229ce35b05cSAndrew Rybchenko 
230ce35b05cSAndrew Rybchenko 	rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
231ce35b05cSAndrew Rybchenko 			  rx_conf, mb_pool);
232ce35b05cSAndrew Rybchenko 	if (rc != 0)
233ce35b05cSAndrew Rybchenko 		goto fail_rx_qinit;
234ce35b05cSAndrew Rybchenko 
235ce35b05cSAndrew Rybchenko 	dev->data->rx_queues[rx_queue_id] = sa->rxq_info[rx_queue_id].rxq;
236ce35b05cSAndrew Rybchenko 
237ce35b05cSAndrew Rybchenko 	sfc_adapter_unlock(sa);
238ce35b05cSAndrew Rybchenko 
239ce35b05cSAndrew Rybchenko 	return 0;
240ce35b05cSAndrew Rybchenko 
241ce35b05cSAndrew Rybchenko fail_rx_qinit:
242ce35b05cSAndrew Rybchenko 	sfc_adapter_unlock(sa);
243ce35b05cSAndrew Rybchenko 	SFC_ASSERT(rc > 0);
244ce35b05cSAndrew Rybchenko 	return -rc;
245ce35b05cSAndrew Rybchenko }
246ce35b05cSAndrew Rybchenko 
247ce35b05cSAndrew Rybchenko static void
248ce35b05cSAndrew Rybchenko sfc_rx_queue_release(void *queue)
249ce35b05cSAndrew Rybchenko {
250ce35b05cSAndrew Rybchenko 	struct sfc_rxq *rxq = queue;
251ce35b05cSAndrew Rybchenko 	struct sfc_adapter *sa;
252ce35b05cSAndrew Rybchenko 	unsigned int sw_index;
253ce35b05cSAndrew Rybchenko 
254ce35b05cSAndrew Rybchenko 	if (rxq == NULL)
255ce35b05cSAndrew Rybchenko 		return;
256ce35b05cSAndrew Rybchenko 
257ce35b05cSAndrew Rybchenko 	sa = rxq->evq->sa;
258ce35b05cSAndrew Rybchenko 	sfc_adapter_lock(sa);
259ce35b05cSAndrew Rybchenko 
260ce35b05cSAndrew Rybchenko 	sw_index = sfc_rxq_sw_index(rxq);
261ce35b05cSAndrew Rybchenko 
262ce35b05cSAndrew Rybchenko 	sfc_log_init(sa, "RxQ=%u", sw_index);
263ce35b05cSAndrew Rybchenko 
264ce35b05cSAndrew Rybchenko 	sa->eth_dev->data->rx_queues[sw_index] = NULL;
265ce35b05cSAndrew Rybchenko 
266ce35b05cSAndrew Rybchenko 	sfc_rx_qfini(sa, sw_index);
267ce35b05cSAndrew Rybchenko 
268ce35b05cSAndrew Rybchenko 	sfc_adapter_unlock(sa);
269ce35b05cSAndrew Rybchenko }
270ce35b05cSAndrew Rybchenko 
271b1b7ad93SIvan Malov static int
272b1b7ad93SIvan Malov sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
273b1b7ad93SIvan Malov 		   uint16_t nb_tx_desc, unsigned int socket_id,
274b1b7ad93SIvan Malov 		   const struct rte_eth_txconf *tx_conf)
275b1b7ad93SIvan Malov {
276b1b7ad93SIvan Malov 	struct sfc_adapter *sa = dev->data->dev_private;
277b1b7ad93SIvan Malov 	int rc;
278b1b7ad93SIvan Malov 
279b1b7ad93SIvan Malov 	sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u",
280b1b7ad93SIvan Malov 		     tx_queue_id, nb_tx_desc, socket_id);
281b1b7ad93SIvan Malov 
282b1b7ad93SIvan Malov 	sfc_adapter_lock(sa);
283b1b7ad93SIvan Malov 
284b1b7ad93SIvan Malov 	rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf);
285b1b7ad93SIvan Malov 	if (rc != 0)
286b1b7ad93SIvan Malov 		goto fail_tx_qinit;
287b1b7ad93SIvan Malov 
288b1b7ad93SIvan Malov 	dev->data->tx_queues[tx_queue_id] = sa->txq_info[tx_queue_id].txq;
289b1b7ad93SIvan Malov 
290b1b7ad93SIvan Malov 	sfc_adapter_unlock(sa);
291b1b7ad93SIvan Malov 	return 0;
292b1b7ad93SIvan Malov 
293b1b7ad93SIvan Malov fail_tx_qinit:
294b1b7ad93SIvan Malov 	sfc_adapter_unlock(sa);
295b1b7ad93SIvan Malov 	SFC_ASSERT(rc > 0);
296b1b7ad93SIvan Malov 	return -rc;
297b1b7ad93SIvan Malov }
298b1b7ad93SIvan Malov 
299b1b7ad93SIvan Malov static void
300b1b7ad93SIvan Malov sfc_tx_queue_release(void *queue)
301b1b7ad93SIvan Malov {
302b1b7ad93SIvan Malov 	struct sfc_txq *txq = queue;
303b1b7ad93SIvan Malov 	unsigned int sw_index;
304b1b7ad93SIvan Malov 	struct sfc_adapter *sa;
305b1b7ad93SIvan Malov 
306b1b7ad93SIvan Malov 	if (txq == NULL)
307b1b7ad93SIvan Malov 		return;
308b1b7ad93SIvan Malov 
309b1b7ad93SIvan Malov 	sw_index = sfc_txq_sw_index(txq);
310b1b7ad93SIvan Malov 
311b1b7ad93SIvan Malov 	SFC_ASSERT(txq->evq != NULL);
312b1b7ad93SIvan Malov 	sa = txq->evq->sa;
313b1b7ad93SIvan Malov 
314b1b7ad93SIvan Malov 	sfc_log_init(sa, "TxQ = %u", sw_index);
315b1b7ad93SIvan Malov 
316b1b7ad93SIvan Malov 	sfc_adapter_lock(sa);
317b1b7ad93SIvan Malov 
318b1b7ad93SIvan Malov 	SFC_ASSERT(sw_index < sa->eth_dev->data->nb_tx_queues);
319b1b7ad93SIvan Malov 	sa->eth_dev->data->tx_queues[sw_index] = NULL;
320b1b7ad93SIvan Malov 
321b1b7ad93SIvan Malov 	sfc_tx_qfini(sa, sw_index);
322b1b7ad93SIvan Malov 
323b1b7ad93SIvan Malov 	sfc_adapter_unlock(sa);
324b1b7ad93SIvan Malov }
325b1b7ad93SIvan Malov 
/*
 * Fill in basic ethdev statistics from the MAC statistics buffer.
 * NOTE: the callback returns void, so a failure to refresh MAC stats
 * cannot be reported to the caller; stats are left untouched then.
 */
static void
sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_port *port = &sa->port;
	uint64_t *mac_stats;

	rte_spinlock_lock(&port->mac_stats_lock);

	if (sfc_port_update_mac_stats(sa) != 0)
		goto unlock;

	mac_stats = port->mac_stats_buf;

	/*
	 * Prefer vadapter counters when supported (virtual function /
	 * EF10 vadapter datapath); otherwise fall back to MAC counters.
	 */
	if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask,
				   EFX_MAC_VADAPTER_RX_UNICAST_PACKETS)) {
		stats->ipackets =
			mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS];
		stats->opackets =
			mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS];
		stats->ibytes =
			mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES];
		stats->obytes =
			mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES];
		stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_OVERFLOW];
		stats->ierrors = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS];
		stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS];
	} else {
		stats->ipackets = mac_stats[EFX_MAC_RX_PKTS];
		stats->opackets = mac_stats[EFX_MAC_TX_PKTS];
		stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS];
		stats->obytes = mac_stats[EFX_MAC_TX_OCTETS];
		/*
		 * Take into account stats which are whenever supported
		 * on EF10. If some stat is not supported by current
		 * firmware variant or HW revision, it is guaranteed
		 * to be zero in mac_stats.
		 */
		stats->imissed =
			mac_stats[EFX_MAC_RX_NODESC_DROP_CNT] +
			mac_stats[EFX_MAC_PM_TRUNC_BB_OVERFLOW] +
			mac_stats[EFX_MAC_PM_DISCARD_BB_OVERFLOW] +
			mac_stats[EFX_MAC_PM_TRUNC_VFIFO_FULL] +
			mac_stats[EFX_MAC_PM_DISCARD_VFIFO_FULL] +
			mac_stats[EFX_MAC_PM_TRUNC_QBB] +
			mac_stats[EFX_MAC_PM_DISCARD_QBB] +
			mac_stats[EFX_MAC_PM_DISCARD_MAPPING] +
			mac_stats[EFX_MAC_RXDP_Q_DISABLED_PKTS] +
			mac_stats[EFX_MAC_RXDP_DI_DROPPED_PKTS];
		stats->ierrors =
			mac_stats[EFX_MAC_RX_FCS_ERRORS] +
			mac_stats[EFX_MAC_RX_ALIGN_ERRORS] +
			mac_stats[EFX_MAC_RX_JABBER_PKTS];
		/* no oerrors counters supported on EF10 */
	}

unlock:
	rte_spinlock_unlock(&port->mac_stats_lock);
}
3931caab2f1SAndrew Rybchenko 
3947b989176SAndrew Rybchenko static int
3957b989176SAndrew Rybchenko sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3967b989176SAndrew Rybchenko 	       unsigned int xstats_count)
3977b989176SAndrew Rybchenko {
3987b989176SAndrew Rybchenko 	struct sfc_adapter *sa = dev->data->dev_private;
3997b989176SAndrew Rybchenko 	struct sfc_port *port = &sa->port;
4007b989176SAndrew Rybchenko 	uint64_t *mac_stats;
4017b989176SAndrew Rybchenko 	int rc;
4027b989176SAndrew Rybchenko 	unsigned int i;
4037b989176SAndrew Rybchenko 	int nstats = 0;
4047b989176SAndrew Rybchenko 
4057b989176SAndrew Rybchenko 	rte_spinlock_lock(&port->mac_stats_lock);
4067b989176SAndrew Rybchenko 
4077b989176SAndrew Rybchenko 	rc = sfc_port_update_mac_stats(sa);
4087b989176SAndrew Rybchenko 	if (rc != 0) {
4097b989176SAndrew Rybchenko 		SFC_ASSERT(rc > 0);
4107b989176SAndrew Rybchenko 		nstats = -rc;
4117b989176SAndrew Rybchenko 		goto unlock;
4127b989176SAndrew Rybchenko 	}
4137b989176SAndrew Rybchenko 
4147b989176SAndrew Rybchenko 	mac_stats = port->mac_stats_buf;
4157b989176SAndrew Rybchenko 
4167b989176SAndrew Rybchenko 	for (i = 0; i < EFX_MAC_NSTATS; ++i) {
4177b989176SAndrew Rybchenko 		if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
4187b989176SAndrew Rybchenko 			if (xstats != NULL && nstats < (int)xstats_count) {
4197b989176SAndrew Rybchenko 				xstats[nstats].id = nstats;
4207b989176SAndrew Rybchenko 				xstats[nstats].value = mac_stats[i];
4217b989176SAndrew Rybchenko 			}
4227b989176SAndrew Rybchenko 			nstats++;
4237b989176SAndrew Rybchenko 		}
4247b989176SAndrew Rybchenko 	}
4257b989176SAndrew Rybchenko 
4267b989176SAndrew Rybchenko unlock:
4277b989176SAndrew Rybchenko 	rte_spinlock_unlock(&port->mac_stats_lock);
4287b989176SAndrew Rybchenko 
4297b989176SAndrew Rybchenko 	return nstats;
4307b989176SAndrew Rybchenko }
4317b989176SAndrew Rybchenko 
4327b989176SAndrew Rybchenko static int
4337b989176SAndrew Rybchenko sfc_xstats_get_names(struct rte_eth_dev *dev,
4347b989176SAndrew Rybchenko 		     struct rte_eth_xstat_name *xstats_names,
4357b989176SAndrew Rybchenko 		     unsigned int xstats_count)
4367b989176SAndrew Rybchenko {
4377b989176SAndrew Rybchenko 	struct sfc_adapter *sa = dev->data->dev_private;
4387b989176SAndrew Rybchenko 	struct sfc_port *port = &sa->port;
4397b989176SAndrew Rybchenko 	unsigned int i;
4407b989176SAndrew Rybchenko 	unsigned int nstats = 0;
4417b989176SAndrew Rybchenko 
4427b989176SAndrew Rybchenko 	for (i = 0; i < EFX_MAC_NSTATS; ++i) {
4437b989176SAndrew Rybchenko 		if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
4447b989176SAndrew Rybchenko 			if (xstats_names != NULL && nstats < xstats_count)
4457b989176SAndrew Rybchenko 				strncpy(xstats_names[nstats].name,
4467b989176SAndrew Rybchenko 					efx_mac_stat_name(sa->nic, i),
4477b989176SAndrew Rybchenko 					sizeof(xstats_names[0].name));
4487b989176SAndrew Rybchenko 			nstats++;
4497b989176SAndrew Rybchenko 		}
4507b989176SAndrew Rybchenko 	}
4517b989176SAndrew Rybchenko 
4527b989176SAndrew Rybchenko 	return nstats;
4537b989176SAndrew Rybchenko }
4547b989176SAndrew Rybchenko 
455*cdbb29cfSAndrew Rybchenko static int
456*cdbb29cfSAndrew Rybchenko sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
457*cdbb29cfSAndrew Rybchenko {
458*cdbb29cfSAndrew Rybchenko 	struct sfc_adapter *sa = dev->data->dev_private;
459*cdbb29cfSAndrew Rybchenko 	unsigned int wanted_fc, link_fc;
460*cdbb29cfSAndrew Rybchenko 
461*cdbb29cfSAndrew Rybchenko 	memset(fc_conf, 0, sizeof(*fc_conf));
462*cdbb29cfSAndrew Rybchenko 
463*cdbb29cfSAndrew Rybchenko 	sfc_adapter_lock(sa);
464*cdbb29cfSAndrew Rybchenko 
465*cdbb29cfSAndrew Rybchenko 	if (sa->state == SFC_ADAPTER_STARTED)
466*cdbb29cfSAndrew Rybchenko 		efx_mac_fcntl_get(sa->nic, &wanted_fc, &link_fc);
467*cdbb29cfSAndrew Rybchenko 	else
468*cdbb29cfSAndrew Rybchenko 		link_fc = sa->port.flow_ctrl;
469*cdbb29cfSAndrew Rybchenko 
470*cdbb29cfSAndrew Rybchenko 	switch (link_fc) {
471*cdbb29cfSAndrew Rybchenko 	case 0:
472*cdbb29cfSAndrew Rybchenko 		fc_conf->mode = RTE_FC_NONE;
473*cdbb29cfSAndrew Rybchenko 		break;
474*cdbb29cfSAndrew Rybchenko 	case EFX_FCNTL_RESPOND:
475*cdbb29cfSAndrew Rybchenko 		fc_conf->mode = RTE_FC_RX_PAUSE;
476*cdbb29cfSAndrew Rybchenko 		break;
477*cdbb29cfSAndrew Rybchenko 	case EFX_FCNTL_GENERATE:
478*cdbb29cfSAndrew Rybchenko 		fc_conf->mode = RTE_FC_TX_PAUSE;
479*cdbb29cfSAndrew Rybchenko 		break;
480*cdbb29cfSAndrew Rybchenko 	case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE):
481*cdbb29cfSAndrew Rybchenko 		fc_conf->mode = RTE_FC_FULL;
482*cdbb29cfSAndrew Rybchenko 		break;
483*cdbb29cfSAndrew Rybchenko 	default:
484*cdbb29cfSAndrew Rybchenko 		sfc_err(sa, "%s: unexpected flow control value %#x",
485*cdbb29cfSAndrew Rybchenko 			__func__, link_fc);
486*cdbb29cfSAndrew Rybchenko 	}
487*cdbb29cfSAndrew Rybchenko 
488*cdbb29cfSAndrew Rybchenko 	fc_conf->autoneg = sa->port.flow_ctrl_autoneg;
489*cdbb29cfSAndrew Rybchenko 
490*cdbb29cfSAndrew Rybchenko 	sfc_adapter_unlock(sa);
491*cdbb29cfSAndrew Rybchenko 
492*cdbb29cfSAndrew Rybchenko 	return 0;
493*cdbb29cfSAndrew Rybchenko }
494*cdbb29cfSAndrew Rybchenko 
495*cdbb29cfSAndrew Rybchenko static int
496*cdbb29cfSAndrew Rybchenko sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
497*cdbb29cfSAndrew Rybchenko {
498*cdbb29cfSAndrew Rybchenko 	struct sfc_adapter *sa = dev->data->dev_private;
499*cdbb29cfSAndrew Rybchenko 	struct sfc_port *port = &sa->port;
500*cdbb29cfSAndrew Rybchenko 	unsigned int fcntl;
501*cdbb29cfSAndrew Rybchenko 	int rc;
502*cdbb29cfSAndrew Rybchenko 
503*cdbb29cfSAndrew Rybchenko 	if (fc_conf->high_water != 0 || fc_conf->low_water != 0 ||
504*cdbb29cfSAndrew Rybchenko 	    fc_conf->pause_time != 0 || fc_conf->send_xon != 0 ||
505*cdbb29cfSAndrew Rybchenko 	    fc_conf->mac_ctrl_frame_fwd != 0) {
506*cdbb29cfSAndrew Rybchenko 		sfc_err(sa, "unsupported flow control settings specified");
507*cdbb29cfSAndrew Rybchenko 		rc = EINVAL;
508*cdbb29cfSAndrew Rybchenko 		goto fail_inval;
509*cdbb29cfSAndrew Rybchenko 	}
510*cdbb29cfSAndrew Rybchenko 
511*cdbb29cfSAndrew Rybchenko 	switch (fc_conf->mode) {
512*cdbb29cfSAndrew Rybchenko 	case RTE_FC_NONE:
513*cdbb29cfSAndrew Rybchenko 		fcntl = 0;
514*cdbb29cfSAndrew Rybchenko 		break;
515*cdbb29cfSAndrew Rybchenko 	case RTE_FC_RX_PAUSE:
516*cdbb29cfSAndrew Rybchenko 		fcntl = EFX_FCNTL_RESPOND;
517*cdbb29cfSAndrew Rybchenko 		break;
518*cdbb29cfSAndrew Rybchenko 	case RTE_FC_TX_PAUSE:
519*cdbb29cfSAndrew Rybchenko 		fcntl = EFX_FCNTL_GENERATE;
520*cdbb29cfSAndrew Rybchenko 		break;
521*cdbb29cfSAndrew Rybchenko 	case RTE_FC_FULL:
522*cdbb29cfSAndrew Rybchenko 		fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
523*cdbb29cfSAndrew Rybchenko 		break;
524*cdbb29cfSAndrew Rybchenko 	default:
525*cdbb29cfSAndrew Rybchenko 		rc = EINVAL;
526*cdbb29cfSAndrew Rybchenko 		goto fail_inval;
527*cdbb29cfSAndrew Rybchenko 	}
528*cdbb29cfSAndrew Rybchenko 
529*cdbb29cfSAndrew Rybchenko 	sfc_adapter_lock(sa);
530*cdbb29cfSAndrew Rybchenko 
531*cdbb29cfSAndrew Rybchenko 	if (sa->state == SFC_ADAPTER_STARTED) {
532*cdbb29cfSAndrew Rybchenko 		rc = efx_mac_fcntl_set(sa->nic, fcntl, fc_conf->autoneg);
533*cdbb29cfSAndrew Rybchenko 		if (rc != 0)
534*cdbb29cfSAndrew Rybchenko 			goto fail_mac_fcntl_set;
535*cdbb29cfSAndrew Rybchenko 	}
536*cdbb29cfSAndrew Rybchenko 
537*cdbb29cfSAndrew Rybchenko 	port->flow_ctrl = fcntl;
538*cdbb29cfSAndrew Rybchenko 	port->flow_ctrl_autoneg = fc_conf->autoneg;
539*cdbb29cfSAndrew Rybchenko 
540*cdbb29cfSAndrew Rybchenko 	sfc_adapter_unlock(sa);
541*cdbb29cfSAndrew Rybchenko 
542*cdbb29cfSAndrew Rybchenko 	return 0;
543*cdbb29cfSAndrew Rybchenko 
544*cdbb29cfSAndrew Rybchenko fail_mac_fcntl_set:
545*cdbb29cfSAndrew Rybchenko 	sfc_adapter_unlock(sa);
546*cdbb29cfSAndrew Rybchenko fail_inval:
547*cdbb29cfSAndrew Rybchenko 	SFC_ASSERT(rc > 0);
548*cdbb29cfSAndrew Rybchenko 	return -rc;
549*cdbb29cfSAndrew Rybchenko }
550*cdbb29cfSAndrew Rybchenko 
/* ethdev callback table; unset callbacks are unsupported by this driver */
static const struct eth_dev_ops sfc_eth_dev_ops = {
	.dev_configure			= sfc_dev_configure,
	.dev_start			= sfc_dev_start,
	.dev_stop			= sfc_dev_stop,
	.dev_close			= sfc_dev_close,
	.link_update			= sfc_dev_link_update,
	.stats_get			= sfc_stats_get,
	.xstats_get			= sfc_xstats_get,
	.xstats_get_names		= sfc_xstats_get_names,
	.dev_infos_get			= sfc_dev_infos_get,
	.rx_queue_setup			= sfc_rx_queue_setup,
	.rx_queue_release		= sfc_rx_queue_release,
	.tx_queue_setup			= sfc_tx_queue_setup,
	.tx_queue_release		= sfc_tx_queue_release,
	.flow_ctrl_get			= sfc_flow_ctrl_get,
	.flow_ctrl_set			= sfc_flow_ctrl_set,
};
56863d588ffSAndrew Rybchenko 
56963d588ffSAndrew Rybchenko static int
57063d588ffSAndrew Rybchenko sfc_eth_dev_init(struct rte_eth_dev *dev)
57163d588ffSAndrew Rybchenko {
57263d588ffSAndrew Rybchenko 	struct sfc_adapter *sa = dev->data->dev_private;
57363d588ffSAndrew Rybchenko 	struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(dev);
57463d588ffSAndrew Rybchenko 	int rc;
575ba641f20SAndrew Rybchenko 	const efx_nic_cfg_t *encp;
576ba641f20SAndrew Rybchenko 	const struct ether_addr *from;
57763d588ffSAndrew Rybchenko 
57863d588ffSAndrew Rybchenko 	/* Required for logging */
57963d588ffSAndrew Rybchenko 	sa->eth_dev = dev;
58063d588ffSAndrew Rybchenko 
58163d588ffSAndrew Rybchenko 	/* Copy PCI device info to the dev->data */
58263d588ffSAndrew Rybchenko 	rte_eth_copy_pci_info(dev, pci_dev);
58363d588ffSAndrew Rybchenko 
58463d588ffSAndrew Rybchenko 	rc = sfc_kvargs_parse(sa);
58563d588ffSAndrew Rybchenko 	if (rc != 0)
58663d588ffSAndrew Rybchenko 		goto fail_kvargs_parse;
58763d588ffSAndrew Rybchenko 
58863d588ffSAndrew Rybchenko 	rc = sfc_kvargs_process(sa, SFC_KVARG_DEBUG_INIT,
58963d588ffSAndrew Rybchenko 				sfc_kvarg_bool_handler, &sa->debug_init);
59063d588ffSAndrew Rybchenko 	if (rc != 0)
59163d588ffSAndrew Rybchenko 		goto fail_kvarg_debug_init;
59263d588ffSAndrew Rybchenko 
59363d588ffSAndrew Rybchenko 	sfc_log_init(sa, "entry");
59463d588ffSAndrew Rybchenko 
595ba641f20SAndrew Rybchenko 	dev->data->mac_addrs = rte_zmalloc("sfc", ETHER_ADDR_LEN, 0);
596ba641f20SAndrew Rybchenko 	if (dev->data->mac_addrs == NULL) {
597ba641f20SAndrew Rybchenko 		rc = ENOMEM;
598ba641f20SAndrew Rybchenko 		goto fail_mac_addrs;
599ba641f20SAndrew Rybchenko 	}
600ba641f20SAndrew Rybchenko 
601ba641f20SAndrew Rybchenko 	sfc_adapter_lock_init(sa);
602ba641f20SAndrew Rybchenko 	sfc_adapter_lock(sa);
603ba641f20SAndrew Rybchenko 
604ba641f20SAndrew Rybchenko 	sfc_log_init(sa, "attaching");
605ba641f20SAndrew Rybchenko 	rc = sfc_attach(sa);
606ba641f20SAndrew Rybchenko 	if (rc != 0)
607ba641f20SAndrew Rybchenko 		goto fail_attach;
608ba641f20SAndrew Rybchenko 
609ba641f20SAndrew Rybchenko 	encp = efx_nic_cfg_get(sa->nic);
610ba641f20SAndrew Rybchenko 
611ba641f20SAndrew Rybchenko 	/*
612ba641f20SAndrew Rybchenko 	 * The arguments are really reverse order in comparison to
613ba641f20SAndrew Rybchenko 	 * Linux kernel. Copy from NIC config to Ethernet device data.
614ba641f20SAndrew Rybchenko 	 */
615ba641f20SAndrew Rybchenko 	from = (const struct ether_addr *)(encp->enc_mac_addr);
616ba641f20SAndrew Rybchenko 	ether_addr_copy(from, &dev->data->mac_addrs[0]);
617ba641f20SAndrew Rybchenko 
61863d588ffSAndrew Rybchenko 	dev->dev_ops = &sfc_eth_dev_ops;
619921f6cf1SAndrew Rybchenko 	dev->rx_pkt_burst = &sfc_recv_pkts;
620428c7dddSIvan Malov 	dev->tx_pkt_burst = &sfc_xmit_pkts;
62163d588ffSAndrew Rybchenko 
622ba641f20SAndrew Rybchenko 	sfc_adapter_unlock(sa);
623ba641f20SAndrew Rybchenko 
62463d588ffSAndrew Rybchenko 	sfc_log_init(sa, "done");
62563d588ffSAndrew Rybchenko 	return 0;
62663d588ffSAndrew Rybchenko 
627ba641f20SAndrew Rybchenko fail_attach:
628ba641f20SAndrew Rybchenko 	sfc_adapter_unlock(sa);
629ba641f20SAndrew Rybchenko 	sfc_adapter_lock_fini(sa);
630ba641f20SAndrew Rybchenko 	rte_free(dev->data->mac_addrs);
631ba641f20SAndrew Rybchenko 	dev->data->mac_addrs = NULL;
632ba641f20SAndrew Rybchenko 
633ba641f20SAndrew Rybchenko fail_mac_addrs:
63463d588ffSAndrew Rybchenko fail_kvarg_debug_init:
63563d588ffSAndrew Rybchenko 	sfc_kvargs_cleanup(sa);
63663d588ffSAndrew Rybchenko 
63763d588ffSAndrew Rybchenko fail_kvargs_parse:
63863d588ffSAndrew Rybchenko 	sfc_log_init(sa, "failed %d", rc);
63963d588ffSAndrew Rybchenko 	SFC_ASSERT(rc > 0);
64063d588ffSAndrew Rybchenko 	return -rc;
64163d588ffSAndrew Rybchenko }
64263d588ffSAndrew Rybchenko 
64363d588ffSAndrew Rybchenko static int
64463d588ffSAndrew Rybchenko sfc_eth_dev_uninit(struct rte_eth_dev *dev)
64563d588ffSAndrew Rybchenko {
64663d588ffSAndrew Rybchenko 	struct sfc_adapter *sa = dev->data->dev_private;
64763d588ffSAndrew Rybchenko 
64863d588ffSAndrew Rybchenko 	sfc_log_init(sa, "entry");
64963d588ffSAndrew Rybchenko 
650ba641f20SAndrew Rybchenko 	sfc_adapter_lock(sa);
651ba641f20SAndrew Rybchenko 
652ba641f20SAndrew Rybchenko 	sfc_detach(sa);
653ba641f20SAndrew Rybchenko 
654ba641f20SAndrew Rybchenko 	rte_free(dev->data->mac_addrs);
655ba641f20SAndrew Rybchenko 	dev->data->mac_addrs = NULL;
656ba641f20SAndrew Rybchenko 
65763d588ffSAndrew Rybchenko 	dev->dev_ops = NULL;
658921f6cf1SAndrew Rybchenko 	dev->rx_pkt_burst = NULL;
659428c7dddSIvan Malov 	dev->tx_pkt_burst = NULL;
66063d588ffSAndrew Rybchenko 
66163d588ffSAndrew Rybchenko 	sfc_kvargs_cleanup(sa);
66263d588ffSAndrew Rybchenko 
663ba641f20SAndrew Rybchenko 	sfc_adapter_unlock(sa);
664ba641f20SAndrew Rybchenko 	sfc_adapter_lock_fini(sa);
665ba641f20SAndrew Rybchenko 
66663d588ffSAndrew Rybchenko 	sfc_log_init(sa, "done");
66763d588ffSAndrew Rybchenko 
66863d588ffSAndrew Rybchenko 	/* Required for logging, so cleanup last */
66963d588ffSAndrew Rybchenko 	sa->eth_dev = NULL;
67063d588ffSAndrew Rybchenko 	return 0;
67163d588ffSAndrew Rybchenko }
67263d588ffSAndrew Rybchenko 
67363d588ffSAndrew Rybchenko static const struct rte_pci_id pci_id_sfc_efx_map[] = {
674ba641f20SAndrew Rybchenko 	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE) },
675ba641f20SAndrew Rybchenko 	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT) },
676ba641f20SAndrew Rybchenko 	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) },
67763d588ffSAndrew Rybchenko 	{ .vendor_id = 0 /* sentinel */ }
67863d588ffSAndrew Rybchenko };
67963d588ffSAndrew Rybchenko 
68063d588ffSAndrew Rybchenko static struct eth_driver sfc_efx_pmd = {
68163d588ffSAndrew Rybchenko 	.pci_drv = {
68263d588ffSAndrew Rybchenko 		.id_table = pci_id_sfc_efx_map,
683ba641f20SAndrew Rybchenko 		.drv_flags =
684ba641f20SAndrew Rybchenko 			RTE_PCI_DRV_NEED_MAPPING,
68563d588ffSAndrew Rybchenko 		.probe = rte_eth_dev_pci_probe,
68663d588ffSAndrew Rybchenko 		.remove = rte_eth_dev_pci_remove,
68763d588ffSAndrew Rybchenko 	},
68863d588ffSAndrew Rybchenko 	.eth_dev_init = sfc_eth_dev_init,
68963d588ffSAndrew Rybchenko 	.eth_dev_uninit = sfc_eth_dev_uninit,
69063d588ffSAndrew Rybchenko 	.dev_private_size = sizeof(struct sfc_adapter),
69163d588ffSAndrew Rybchenko };
69263d588ffSAndrew Rybchenko 
69363d588ffSAndrew Rybchenko RTE_PMD_REGISTER_PCI(net_sfc_efx, sfc_efx_pmd.pci_drv);
69463d588ffSAndrew Rybchenko RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map);
69563d588ffSAndrew Rybchenko RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx,
696c22d3c50SAndrew Rybchenko 	SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " "
6973e3b2e4cSAndrew Rybchenko 	SFC_KVARG_MCDI_LOGGING "=" SFC_KVARG_VALUES_BOOL " "
69863d588ffSAndrew Rybchenko 	SFC_KVARG_DEBUG_INIT "=" SFC_KVARG_VALUES_BOOL);
699