xref: /dpdk/drivers/net/sfc/sfc_ethdev.c (revision b1b7ad933b39fdbd6d2aaa602c2bc8cd3678ec84)
/*-
 * Copyright (c) 2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_dev.h>
#include <rte_ethdev.h>
#include <rte_pci.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_kvargs.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tx.h"

static void
sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "entry");

	dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
	dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;

	dev_info->max_rx_queues = sa->rxq_max;
	dev_info->max_tx_queues = sa->txq_max;

	/* By default packets are dropped if no descriptors are available */
	dev_info->default_rxconf.rx_drop_en = 1;

	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM;

	dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOVLANOFFL |
					     ETH_TXQ_FLAGS_NOXSUMSCTP;

	dev_info->rx_desc_lim.nb_max = EFX_RXQ_MAXNDESCS;
	dev_info->rx_desc_lim.nb_min = EFX_RXQ_MINNDESCS;
	/*
	 * The RXQ hardware requires that the descriptor count is a power
	 * of 2, but rx_desc_lim cannot properly describe that constraint.
	 */
	dev_info->rx_desc_lim.nb_align = EFX_RXQ_MINNDESCS;

	dev_info->tx_desc_lim.nb_max = sa->txq_max_entries;
	dev_info->tx_desc_lim.nb_min = EFX_TXQ_MINNDESCS;
	/*
	 * The TXQ hardware requires that the descriptor count is a power
	 * of 2, but tx_desc_lim cannot properly describe that constraint.
	 */
	dev_info->tx_desc_lim.nb_align = EFX_TXQ_MINNDESCS;
}

static int
sfc_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *dev_data = dev->data;
	struct sfc_adapter *sa = dev_data->dev_private;
	int rc;

	sfc_log_init(sa, "entry n_rxq=%u n_txq=%u",
		     dev_data->nb_rx_queues, dev_data->nb_tx_queues);

	sfc_adapter_lock(sa);
	switch (sa->state) {
	case SFC_ADAPTER_CONFIGURED:
		sfc_close(sa);
		SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED);
		/* FALLTHROUGH */
	case SFC_ADAPTER_INITIALIZED:
		rc = sfc_configure(sa);
		break;
	default:
		sfc_err(sa, "unexpected adapter state %u to configure",
			sa->state);
		rc = EINVAL;
		break;
	}
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done %d", rc);
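	/*
	 * Internal sfc_* helpers report errors as positive errno values;
	 * ethdev callbacks must return negative errno, hence the negation.
	 */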
	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_start(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	rc = sfc_start(sa);
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done %d", rc);
	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_eth_link *dev_link = &dev->data->dev_link;
	struct rte_eth_link old_link;
	struct rte_eth_link current_link;

	sfc_log_init(sa, "entry");

	if (sa->state != SFC_ADAPTER_STARTED)
		return 0;

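	/*
	 * The link structure fits in 64 bits, so it is read and updated
	 * atomically; if a concurrent update wins the compare-and-set below,
	 * re-read the current value and try again.
	 */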
retry:
	EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
	*(int64_t *)&old_link = rte_atomic64_read((rte_atomic64_t *)dev_link);

	if (wait_to_complete) {
		efx_link_mode_t link_mode;

		efx_port_poll(sa->nic, &link_mode);
		sfc_port_link_mode_to_info(link_mode, &current_link);

		if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link,
					 *(uint64_t *)&old_link,
					 *(uint64_t *)&current_link))
			goto retry;
	} else {
		sfc_ev_mgmt_qpoll(sa);
		*(int64_t *)&current_link =
			rte_atomic64_read((rte_atomic64_t *)dev_link);
	}

	if (old_link.link_status != current_link.link_status)
		sfc_info(sa, "Link status is %s",
			 current_link.link_status ? "UP" : "DOWN");

	return old_link.link_status == current_link.link_status ? 0 : -1;
}

static void
sfc_dev_stop(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	sfc_stop(sa);
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
}

static void
sfc_dev_close(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
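	/*
	 * Tear the adapter down one state at a time:
	 * STARTED -> CONFIGURED -> INITIALIZED.
	 */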
	switch (sa->state) {
	case SFC_ADAPTER_STARTED:
		sfc_stop(sa);
		SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
		/* FALLTHROUGH */
	case SFC_ADAPTER_CONFIGURED:
		sfc_close(sa);
		SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED);
		/* FALLTHROUGH */
	case SFC_ADAPTER_INITIALIZED:
		break;
	default:
		sfc_err(sa, "unexpected adapter state %u on close", sa->state);
		break;
	}
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
}

static int
sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		   uint16_t nb_rx_desc, unsigned int socket_id,
		   const struct rte_eth_rxconf *rx_conf,
		   struct rte_mempool *mb_pool)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
		     rx_queue_id, nb_rx_desc, socket_id);

	sfc_adapter_lock(sa);

	rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
			  rx_conf, mb_pool);
	if (rc != 0)
		goto fail_rx_qinit;

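	/*
	 * Publish the queue pointer used by the Rx datapath:
	 * rte_eth_rx_burst() looks it up in dev->data->rx_queues[].
	 */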
	dev->data->rx_queues[rx_queue_id] = sa->rxq_info[rx_queue_id].rxq;

	sfc_adapter_unlock(sa);

	return 0;

fail_rx_qinit:
	sfc_adapter_unlock(sa);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static void
sfc_rx_queue_release(void *queue)
{
	struct sfc_rxq *rxq = queue;
	struct sfc_adapter *sa;
	unsigned int sw_index;

	if (rxq == NULL)
		return;

	sa = rxq->evq->sa;
	sfc_adapter_lock(sa);

	sw_index = sfc_rxq_sw_index(rxq);

	sfc_log_init(sa, "RxQ=%u", sw_index);

	sa->eth_dev->data->rx_queues[sw_index] = NULL;

	sfc_rx_qfini(sa, sw_index);

	sfc_adapter_unlock(sa);
}

static int
sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		   uint16_t nb_tx_desc, unsigned int socket_id,
		   const struct rte_eth_txconf *tx_conf)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u",
		     tx_queue_id, nb_tx_desc, socket_id);

	sfc_adapter_lock(sa);

	rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf);
	if (rc != 0)
		goto fail_tx_qinit;

	dev->data->tx_queues[tx_queue_id] = sa->txq_info[tx_queue_id].txq;

	sfc_adapter_unlock(sa);
	return 0;

fail_tx_qinit:
	sfc_adapter_unlock(sa);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static void
sfc_tx_queue_release(void *queue)
{
	struct sfc_txq *txq = queue;
	unsigned int sw_index;
	struct sfc_adapter *sa;

	if (txq == NULL)
		return;

	sw_index = sfc_txq_sw_index(txq);

	SFC_ASSERT(txq->evq != NULL);
	sa = txq->evq->sa;

	sfc_log_init(sa, "TxQ = %u", sw_index);

	sfc_adapter_lock(sa);

	SFC_ASSERT(sw_index < sa->eth_dev->data->nb_tx_queues);
	sa->eth_dev->data->tx_queues[sw_index] = NULL;

	sfc_tx_qfini(sa, sw_index);

	sfc_adapter_unlock(sa);
}

static const struct eth_dev_ops sfc_eth_dev_ops = {
	.dev_configure			= sfc_dev_configure,
	.dev_start			= sfc_dev_start,
	.dev_stop			= sfc_dev_stop,
	.dev_close			= sfc_dev_close,
	.link_update			= sfc_dev_link_update,
	.dev_infos_get			= sfc_dev_infos_get,
	.rx_queue_setup			= sfc_rx_queue_setup,
	.rx_queue_release		= sfc_rx_queue_release,
	.tx_queue_setup			= sfc_tx_queue_setup,
	.tx_queue_release		= sfc_tx_queue_release,
};

static int
sfc_eth_dev_init(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(dev);
	int rc;
	const efx_nic_cfg_t *encp;
	const struct ether_addr *from;

	/* Required for logging */
	sa->eth_dev = dev;

	/* Copy PCI device info to the dev->data */
	rte_eth_copy_pci_info(dev, pci_dev);

	rc = sfc_kvargs_parse(sa);
	if (rc != 0)
		goto fail_kvargs_parse;

	rc = sfc_kvargs_process(sa, SFC_KVARG_DEBUG_INIT,
				sfc_kvarg_bool_handler, &sa->debug_init);
	if (rc != 0)
		goto fail_kvarg_debug_init;

	sfc_log_init(sa, "entry");

	dev->data->mac_addrs = rte_zmalloc("sfc", ETHER_ADDR_LEN, 0);
	if (dev->data->mac_addrs == NULL) {
		rc = ENOMEM;
		goto fail_mac_addrs;
	}

	sfc_adapter_lock_init(sa);
	sfc_adapter_lock(sa);

	sfc_log_init(sa, "attaching");
	rc = sfc_attach(sa);
	if (rc != 0)
		goto fail_attach;

	encp = efx_nic_cfg_get(sa->nic);

	/*
	 * The argument order is the reverse of the Linux kernel's
	 * ether_addr_copy(). Copy from NIC config to Ethernet device data.
	 */
	from = (const struct ether_addr *)(encp->enc_mac_addr);
	ether_addr_copy(from, &dev->data->mac_addrs[0]);

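	/* Install control path operations and the Rx burst callback */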
	dev->dev_ops = &sfc_eth_dev_ops;
	dev->rx_pkt_burst = &sfc_recv_pkts;

	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
	return 0;

fail_attach:
	sfc_adapter_unlock(sa);
	sfc_adapter_lock_fini(sa);
	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

fail_mac_addrs:
fail_kvarg_debug_init:
	sfc_kvargs_cleanup(sa);

fail_kvargs_parse:
	sfc_log_init(sa, "failed %d", rc);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_eth_dev_uninit(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);

	sfc_detach(sa);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;

	sfc_kvargs_cleanup(sa);

	sfc_adapter_unlock(sa);
	sfc_adapter_lock_fini(sa);

	sfc_log_init(sa, "done");

	/* Required for logging, so cleanup last */
	sa->eth_dev = NULL;
	return 0;
}

static const struct rte_pci_id pci_id_sfc_efx_map[] = {
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) },
	{ .vendor_id = 0 /* sentinel */ }
};

static struct eth_driver sfc_efx_pmd = {
	.pci_drv = {
		.id_table = pci_id_sfc_efx_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = sfc_eth_dev_init,
	.eth_dev_uninit = sfc_eth_dev_uninit,
	.dev_private_size = sizeof(struct sfc_adapter),
};

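/*
 * Register the driver with the PCI bus and export its device ID table
 * and the supported device argument string.
 */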
RTE_PMD_REGISTER_PCI(net_sfc_efx, sfc_efx_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map);
RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx,
	SFC_KVARG_DEBUG_INIT "=" SFC_KVARG_VALUES_BOOL);