xref: /dpdk/drivers/net/sfc/sfc_ethdev.c (revision a8ad8cf83f01afce616ffcc7a19bc44bc8609b96)
1 /*-
2  * Copyright (c) 2016 Solarflare Communications Inc.
3  * All rights reserved.
4  *
5  * This software was jointly developed between OKTET Labs (under contract
6  * for Solarflare) and Solarflare Communications, Inc.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright notice,
14  *    this list of conditions and the following disclaimer in the documentation
15  *    and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
19  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
21  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
22  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
26  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
27  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 #include <rte_dev.h>
31 #include <rte_ethdev.h>
32 #include <rte_pci.h>
33 
34 #include "efx.h"
35 
36 #include "sfc.h"
37 #include "sfc_debug.h"
38 #include "sfc_log.h"
39 #include "sfc_kvargs.h"
40 #include "sfc_ev.h"
41 #include "sfc_rx.h"
42 
43 
44 static void
45 sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
46 {
47 	struct sfc_adapter *sa = dev->data->dev_private;
48 
49 	sfc_log_init(sa, "entry");
50 
51 	dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
52 	dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;
53 
54 	dev_info->max_rx_queues = sa->rxq_max;
55 	dev_info->max_tx_queues = sa->txq_max;
56 
57 	/* By default packets are dropped if no descriptors are available */
58 	dev_info->default_rxconf.rx_drop_en = 1;
59 
60 	dev_info->tx_offload_capa =
61 		DEV_TX_OFFLOAD_IPV4_CKSUM |
62 		DEV_TX_OFFLOAD_UDP_CKSUM |
63 		DEV_TX_OFFLOAD_TCP_CKSUM;
64 
65 	dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOVLANOFFL |
66 					     ETH_TXQ_FLAGS_NOXSUMSCTP;
67 
68 	dev_info->rx_desc_lim.nb_max = EFX_RXQ_MAXNDESCS;
69 	dev_info->rx_desc_lim.nb_min = EFX_RXQ_MINNDESCS;
70 	/* The RXQ hardware requires that the descriptor count is a power
71 	 * of 2, but rx_desc_lim cannot properly describe that constraint.
72 	 */
73 	dev_info->rx_desc_lim.nb_align = EFX_RXQ_MINNDESCS;
74 
75 	dev_info->tx_desc_lim.nb_max = sa->txq_max_entries;
76 	dev_info->tx_desc_lim.nb_min = EFX_TXQ_MINNDESCS;
77 	/*
78 	 * The TXQ hardware requires that the descriptor count is a power
79 	 * of 2, but tx_desc_lim cannot properly describe that constraint
80 	 */
81 	dev_info->tx_desc_lim.nb_align = EFX_TXQ_MINNDESCS;
82 }
83 
84 static int
85 sfc_dev_configure(struct rte_eth_dev *dev)
86 {
87 	struct rte_eth_dev_data *dev_data = dev->data;
88 	struct sfc_adapter *sa = dev_data->dev_private;
89 	int rc;
90 
91 	sfc_log_init(sa, "entry n_rxq=%u n_txq=%u",
92 		     dev_data->nb_rx_queues, dev_data->nb_tx_queues);
93 
94 	sfc_adapter_lock(sa);
95 	switch (sa->state) {
96 	case SFC_ADAPTER_CONFIGURED:
97 		sfc_close(sa);
98 		SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED);
99 		/* FALLTHROUGH */
100 	case SFC_ADAPTER_INITIALIZED:
101 		rc = sfc_configure(sa);
102 		break;
103 	default:
104 		sfc_err(sa, "unexpected adapter state %u to configure",
105 			sa->state);
106 		rc = EINVAL;
107 		break;
108 	}
109 	sfc_adapter_unlock(sa);
110 
111 	sfc_log_init(sa, "done %d", rc);
112 	SFC_ASSERT(rc >= 0);
113 	return -rc;
114 }
115 
116 static int
117 sfc_dev_start(struct rte_eth_dev *dev)
118 {
119 	struct sfc_adapter *sa = dev->data->dev_private;
120 	int rc;
121 
122 	sfc_log_init(sa, "entry");
123 
124 	sfc_adapter_lock(sa);
125 	rc = sfc_start(sa);
126 	sfc_adapter_unlock(sa);
127 
128 	sfc_log_init(sa, "done %d", rc);
129 	SFC_ASSERT(rc >= 0);
130 	return -rc;
131 }
132 
/*
 * link_update ethdev callback.
 *
 * Returns 0 when the recorded link status is unchanged and -1 when it
 * changed (the link_update return convention of this ethdev version).
 * Returns 0 immediately when the adapter has not been started, since
 * link state is meaningless then.
 */
static int
sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_eth_link *dev_link = &dev->data->dev_link;
	struct rte_eth_link old_link;
	struct rte_eth_link current_link;

	sfc_log_init(sa, "entry");

	if (sa->state != SFC_ADAPTER_STARTED)
		return 0;

retry:
	/*
	 * dev_link is read and published lock-free as a single 64-bit
	 * atomic value.  The static assertion guarantees the struct
	 * fits in 64 bits before the casts treat it as one.
	 */
	EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
	*(int64_t *)&old_link = rte_atomic64_read((rte_atomic64_t *)dev_link);

	if (wait_to_complete) {
		efx_link_mode_t link_mode;

		/*
		 * Query the port directly for the current link mode.
		 * NOTE(review): the efx_port_poll() result is ignored —
		 * confirm it cannot fail leaving link_mode unset.
		 */
		efx_port_poll(sa->nic, &link_mode);
		sfc_port_link_mode_to_info(link_mode, &current_link);

		/*
		 * Publish the polled state with compare-and-set; if
		 * dev_link changed underneath us (e.g. concurrent event
		 * processing), retry so the freshest state wins.
		 */
		if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link,
					 *(uint64_t *)&old_link,
					 *(uint64_t *)&current_link))
			goto retry;
	} else {
		/* Drain pending link events, then re-read stored state */
		sfc_ev_mgmt_qpoll(sa);
		*(int64_t *)&current_link =
			rte_atomic64_read((rte_atomic64_t *)dev_link);
	}

	if (old_link.link_status != current_link.link_status)
		sfc_info(sa, "Link status is %s",
			 current_link.link_status ? "UP" : "DOWN");

	return old_link.link_status == current_link.link_status ? 0 : -1;
}
172 
173 static void
174 sfc_dev_stop(struct rte_eth_dev *dev)
175 {
176 	struct sfc_adapter *sa = dev->data->dev_private;
177 
178 	sfc_log_init(sa, "entry");
179 
180 	sfc_adapter_lock(sa);
181 	sfc_stop(sa);
182 	sfc_adapter_unlock(sa);
183 
184 	sfc_log_init(sa, "done");
185 }
186 
/*
 * dev_close ethdev callback.
 *
 * Walks the adapter down from whatever state it is in: a started
 * adapter is stopped first, a configured one is then closed, ending
 * in the initialized state.  The switch cases deliberately fall
 * through so that each state passes through every teardown step
 * below it.
 */
static void
sfc_dev_close(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	switch (sa->state) {
	case SFC_ADAPTER_STARTED:
		sfc_stop(sa);
		SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
		/* FALLTHROUGH */
	case SFC_ADAPTER_CONFIGURED:
		sfc_close(sa);
		SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED);
		/* FALLTHROUGH */
	case SFC_ADAPTER_INITIALIZED:
		break;
	default:
		sfc_err(sa, "unexpected adapter state %u on close", sa->state);
		break;
	}
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
}
214 
215 static int
216 sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
217 		   uint16_t nb_rx_desc, unsigned int socket_id,
218 		   const struct rte_eth_rxconf *rx_conf,
219 		   struct rte_mempool *mb_pool)
220 {
221 	struct sfc_adapter *sa = dev->data->dev_private;
222 	int rc;
223 
224 	sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
225 		     rx_queue_id, nb_rx_desc, socket_id);
226 
227 	sfc_adapter_lock(sa);
228 
229 	rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
230 			  rx_conf, mb_pool);
231 	if (rc != 0)
232 		goto fail_rx_qinit;
233 
234 	dev->data->rx_queues[rx_queue_id] = sa->rxq_info[rx_queue_id].rxq;
235 
236 	sfc_adapter_unlock(sa);
237 
238 	return 0;
239 
240 fail_rx_qinit:
241 	sfc_adapter_unlock(sa);
242 	SFC_ASSERT(rc > 0);
243 	return -rc;
244 }
245 
246 static void
247 sfc_rx_queue_release(void *queue)
248 {
249 	struct sfc_rxq *rxq = queue;
250 	struct sfc_adapter *sa;
251 	unsigned int sw_index;
252 
253 	if (rxq == NULL)
254 		return;
255 
256 	sa = rxq->evq->sa;
257 	sfc_adapter_lock(sa);
258 
259 	sw_index = sfc_rxq_sw_index(rxq);
260 
261 	sfc_log_init(sa, "RxQ=%u", sw_index);
262 
263 	sa->eth_dev->data->rx_queues[sw_index] = NULL;
264 
265 	sfc_rx_qfini(sa, sw_index);
266 
267 	sfc_adapter_unlock(sa);
268 }
269 
/* Dispatch table of ethdev callbacks implemented by this PMD */
static const struct eth_dev_ops sfc_eth_dev_ops = {
	.dev_configure			= sfc_dev_configure,
	.dev_start			= sfc_dev_start,
	.dev_stop			= sfc_dev_stop,
	.dev_close			= sfc_dev_close,
	.link_update			= sfc_dev_link_update,
	.dev_infos_get			= sfc_dev_infos_get,
	.rx_queue_setup			= sfc_rx_queue_setup,
	.rx_queue_release		= sfc_rx_queue_release,
};
280 
/*
 * Per-device initialization (eth_dev_init driver hook).
 *
 * Parses devargs, allocates the MAC address array, attaches to the
 * NIC and installs the ethdev callbacks.  On failure the goto chain
 * unwinds in strict reverse order of acquisition: each label undoes
 * everything set up after the step it names.
 *
 * Returns 0 on success, negative errno on failure (internal positive
 * errno negated on return).
 */
static int
sfc_eth_dev_init(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(dev);
	int rc;
	const efx_nic_cfg_t *encp;
	const struct ether_addr *from;

	/* Required for logging */
	sa->eth_dev = dev;

	/* Copy PCI device info to the dev->data */
	rte_eth_copy_pci_info(dev, pci_dev);

	rc = sfc_kvargs_parse(sa);
	if (rc != 0)
		goto fail_kvargs_parse;

	/* Honour the debug_init devarg before the first sfc_log_init() */
	rc = sfc_kvargs_process(sa, SFC_KVARG_DEBUG_INIT,
				sfc_kvarg_bool_handler, &sa->debug_init);
	if (rc != 0)
		goto fail_kvarg_debug_init;

	sfc_log_init(sa, "entry");

	/* One entry: a single unicast MAC address is exposed */
	dev->data->mac_addrs = rte_zmalloc("sfc", ETHER_ADDR_LEN, 0);
	if (dev->data->mac_addrs == NULL) {
		rc = ENOMEM;
		goto fail_mac_addrs;
	}

	sfc_adapter_lock_init(sa);
	sfc_adapter_lock(sa);

	sfc_log_init(sa, "attaching");
	rc = sfc_attach(sa);
	if (rc != 0)
		goto fail_attach;

	encp = efx_nic_cfg_get(sa->nic);

	/*
	 * The arguments are really reverse order in comparison to
	 * Linux kernel. Copy from NIC config to Ethernet device data.
	 */
	from = (const struct ether_addr *)(encp->enc_mac_addr);
	ether_addr_copy(from, &dev->data->mac_addrs[0]);

	/* Installing the ops makes the device usable by applications */
	dev->dev_ops = &sfc_eth_dev_ops;
	dev->rx_pkt_burst = &sfc_recv_pkts;

	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
	return 0;

fail_attach:
	sfc_adapter_unlock(sa);
	sfc_adapter_lock_fini(sa);
	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

fail_mac_addrs:
fail_kvarg_debug_init:
	sfc_kvargs_cleanup(sa);

fail_kvargs_parse:
	sfc_log_init(sa, "failed %d", rc);
	SFC_ASSERT(rc > 0);
	return -rc;
}
353 
/*
 * Per-device teardown (eth_dev_uninit driver hook).
 *
 * Reverses sfc_eth_dev_init(): detaches from the NIC, frees the MAC
 * address array, clears the installed callbacks and finalizes the
 * adapter lock.  Always returns 0.
 */
static int
sfc_eth_dev_uninit(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);

	sfc_detach(sa);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	/* Prevent further callback invocations on this device */
	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;

	sfc_kvargs_cleanup(sa);

	sfc_adapter_unlock(sa);
	sfc_adapter_lock_fini(sa);

	sfc_log_init(sa, "done");

	/* Required for logging, so cleanup last */
	sa->eth_dev = NULL;
	return 0;
}
382 
/* PCI device IDs of Solarflare controllers handled by this PMD */
static const struct rte_pci_id pci_id_sfc_efx_map[] = {
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) },
	{ .vendor_id = 0 /* sentinel */ }
};
389 
/* Driver glue binding the PCI driver to the per-device init hooks */
static struct eth_driver sfc_efx_pmd = {
	.pci_drv = {
		.id_table = pci_id_sfc_efx_map,
		.drv_flags =
			RTE_PCI_DRV_NEED_MAPPING,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = sfc_eth_dev_init,
	.eth_dev_uninit = sfc_eth_dev_uninit,
	.dev_private_size = sizeof(struct sfc_adapter),
};
402 
/* Register the PMD with the EAL PCI driver list */
RTE_PMD_REGISTER_PCI(net_sfc_efx, sfc_efx_pmd.pci_drv);
/* Export the PCI ID table (consumed by tools such as dpdk-pmdinfo) */
RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map);
/* Document the devargs accepted by this PMD */
RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx,
	SFC_KVARG_DEBUG_INIT "=" SFC_KVARG_VALUES_BOOL);
407