xref: /dpdk/drivers/net/bnx2x/bnx2x_ethdev.c (revision ceb1ccd5d50c1a89ba8bdd97cc199e7f07422b98)
/*
 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
 *
 * Copyright (c) 2015 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.bnx2x_pmd for copyright and licensing details.
 */

#include "bnx2x.h"
#include "bnx2x_rxtx.h"

#include <rte_dev.h>

/*
 * The set of PCI devices this driver supports
 */
static struct rte_pci_id pci_id_bnx2x_map[] = {
#define RTE_PCI_DEV_ID_DECL_BNX2X(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
	{ .vendor_id = 0, }
};

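/*
 * The set of PCI virtual function (VF) devices this driver supports
 */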
static struct rte_pci_id pci_id_bnx2xvf_map[] = {
#define RTE_PCI_DEV_ID_DECL_BNX2XVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
	{ .vendor_id = 0, }
};

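/*
 * Refresh the cached link state: copy speed, duplex and up/down status
 * from the adapter's link variables into dev->data->dev_link.
 */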
static void
bnx2x_link_update(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	bnx2x_link_status_update(sc);
	mb();
	dev->data->dev_link.link_speed = sc->link_vars.line_speed;
	switch (sc->link_vars.duplex) {
	case DUPLEX_FULL:
		dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case DUPLEX_HALF:
		dev->data->dev_link.link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	default:
		dev->data->dev_link.link_duplex = ETH_LINK_AUTONEG_DUPLEX;
	}
	dev->data->dev_link.link_status = sc->link_vars.link_up;
}

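/*
 * Core interrupt work: service the legacy interrupt, run the periodic
 * callout when enabled, re-read the shared-memory link status and
 * refresh the cached link state if it changed.
 */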
static void
bnx2x_interrupt_action(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;
	uint32_t link_status;

	PMD_DEBUG_PERIODIC_LOG(INFO, "Interrupt handled");

	if (bnx2x_intr_legacy(sc, 0))
		DELAY_MS(250);
	if (sc->periodic_flags & PERIODIC_GO)
		bnx2x_periodic_callout(sc);
	link_status = REG_RD(sc, sc->link_params.shmem_base +
			offsetof(struct shmem_region,
				port_mb[sc->link_params.port].link_status));
	if ((link_status & LINK_STATUS_LINK_UP) != dev->data->dev_link.link_status)
		bnx2x_link_update(dev);
}

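/*
 * Interrupt callback registered with the EAL: run the interrupt work
 * and re-enable the PCI interrupt line.
 */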
static __rte_unused void
bnx2x_interrupt_handler(__rte_unused struct rte_intr_handle *handle, void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	bnx2x_interrupt_action(dev);
	rte_intr_enable(&(dev->pci_dev->intr_handle));
}

/*
 * Device operations (dev_ops) - helper functions that can be called
 * from the user application
 */

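/*
 * Validate the requested queue configuration and allocate the ILT and
 * host HSI memory needed before the device can be started.
 */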
static int
bnx2x_dev_configure(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;
	int mp_ncpus = sysconf(_SC_NPROCESSORS_CONF);

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.jumbo_frame)
		sc->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len;

	if (dev->data->nb_tx_queues > dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "The number of TX queues is greater than the number of RX queues");
		return -EINVAL;
	}

	sc->num_queues = MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
	if (sc->num_queues > mp_ncpus) {
		PMD_DRV_LOG(ERR, "The number of queues is greater than the number of CPUs");
		return -EINVAL;
	}

	PMD_DRV_LOG(DEBUG, "num_queues=%d, mtu=%d",
		       sc->num_queues, sc->mtu);

	/* allocate ilt */
	if (bnx2x_alloc_ilt_mem(sc) != 0) {
		PMD_DRV_LOG(ERR, "bnx2x_alloc_ilt_mem failed");
		return -ENXIO;
	}

	/* allocate the host hardware/software hsi structures */
	if (bnx2x_alloc_hsi_mem(sc) != 0) {
		PMD_DRV_LOG(ERR, "bnx2x_alloc_hsi_mem failed");
		bnx2x_free_ilt_mem(sc);
		return -ENXIO;
	}

	return 0;
}

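/*
 * Bring the adapter up: initialize the hardware, hook up the interrupt
 * handler (PF only) and set up the RX path.
 */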
static int
bnx2x_dev_start(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();

	ret = bnx2x_init(sc);
	if (ret) {
		PMD_DRV_LOG(DEBUG, "bnx2x_init failed (%d)", ret);
		return -1;
	}

	if (IS_PF(sc)) {
		rte_intr_callback_register(&(dev->pci_dev->intr_handle),
				bnx2x_interrupt_handler, (void *)dev);

		if (rte_intr_enable(&(dev->pci_dev->intr_handle)))
			PMD_DRV_LOG(ERR, "rte_intr_enable failed");
	}

	ret = bnx2x_dev_rx_init(dev);
	if (ret != 0) {
		PMD_DRV_LOG(DEBUG, "bnx2x_dev_rx_init returned error code");
		return -3;
	}

	/* Print important adapter info for the user. */
	bnx2x_print_adapter_info(sc);

	DELAY_MS(2500);

	return ret;
}

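/*
 * Stop the adapter: tear down the interrupt handler (PF only) and
 * unload the NIC.
 */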
static void
bnx2x_dev_stop(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();

	if (IS_PF(sc)) {
		rte_intr_disable(&(dev->pci_dev->intr_handle));
		rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
				bnx2x_interrupt_handler, (void *)dev);
	}

	ret = bnx2x_nic_unload(sc, UNLOAD_NORMAL, FALSE);
	if (ret) {
		PMD_DRV_LOG(DEBUG, "bnx2x_nic_unload failed (%d)", ret);
		return;
	}
}

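/*
 * Release the device: close the VF channel if needed, clear the queues
 * and free the HSI and ILT memory allocated at configure time.
 */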
static void
bnx2x_dev_close(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (IS_VF(sc))
		bnx2x_vf_close(sc);

	bnx2x_dev_clear_queues(dev);
	memset(&(dev->data->dev_link), 0, sizeof(struct rte_eth_link));

	/* free the host hardware/software hsi structures */
	bnx2x_free_hsi_mem(sc);

	/* free ilt */
	bnx2x_free_ilt_mem(sc);
}

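/*
 * RX filtering helpers: switch the adapter between normal, promiscuous
 * and all-multicast receive modes.
 */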
static void
bnx2x_promisc_enable(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	sc->rx_mode = BNX2X_RX_MODE_PROMISC;
	bnx2x_set_rx_mode(sc);
}

static void
bnx2x_promisc_disable(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	sc->rx_mode = BNX2X_RX_MODE_NORMAL;
	bnx2x_set_rx_mode(sc);
}

static void
bnx2x_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	sc->rx_mode = BNX2X_RX_MODE_ALLMULTI;
	bnx2x_set_rx_mode(sc);
}

static void
bnx2x_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	sc->rx_mode = BNX2X_RX_MODE_NORMAL;
	bnx2x_set_rx_mode(sc);
}

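/*
 * link_update callback for the PF: refresh the link state and report
 * whether it changed (0 when changed, -1 otherwise).
 */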
static int
bnx2x_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
	int old_link_status = dev->data->dev_link.link_status;

	PMD_INIT_FUNC_TRACE();

	bnx2x_link_update(dev);

	return old_link_status == dev->data->dev_link.link_status ? -1 : 0;
}

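/*
 * link_update callback for the VF: refresh the link state and, if the
 * PF bulletin reports the channel as down, force the link down.
 */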
static int
bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
	int old_link_status = dev->data->dev_link.link_status;
	struct bnx2x_softc *sc = dev->data->dev_private;

	bnx2x_link_update(dev);

	bnx2x_check_bull(sc);
	if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
		PMD_DRV_LOG(ERR, "PF indicated channel is down. "
				"VF device is no longer operational");
		dev->data->dev_link.link_status = 0;
	}

	return old_link_status == dev->data->dev_link.link_status ? -1 : 0;
}

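/*
 * Gather the unicast/multicast/broadcast packet and byte counters from
 * the adapter statistics into the rte_eth_stats structure.
 */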
static void
bnx2x_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	bnx2x_stats_handle(sc, STATS_EVENT_UPDATE);

	memset(stats, 0, sizeof(struct rte_eth_stats));

	stats->ipackets =
		HILO_U64(sc->eth_stats.total_unicast_packets_received_hi,
				sc->eth_stats.total_unicast_packets_received_lo) +
		HILO_U64(sc->eth_stats.total_multicast_packets_received_hi,
				sc->eth_stats.total_multicast_packets_received_lo) +
		HILO_U64(sc->eth_stats.total_broadcast_packets_received_hi,
				sc->eth_stats.total_broadcast_packets_received_lo);

	stats->opackets =
		HILO_U64(sc->eth_stats.total_unicast_packets_transmitted_hi,
				sc->eth_stats.total_unicast_packets_transmitted_lo) +
		HILO_U64(sc->eth_stats.total_multicast_packets_transmitted_hi,
				sc->eth_stats.total_multicast_packets_transmitted_lo) +
		HILO_U64(sc->eth_stats.total_broadcast_packets_transmitted_hi,
				sc->eth_stats.total_broadcast_packets_transmitted_lo);

	stats->ibytes =
		HILO_U64(sc->eth_stats.total_bytes_received_hi,
				sc->eth_stats.total_bytes_received_lo);

	stats->obytes =
		HILO_U64(sc->eth_stats.total_bytes_transmitted_hi,
				sc->eth_stats.total_bytes_transmitted_lo);

	stats->ierrors =
		HILO_U64(sc->eth_stats.error_bytes_received_hi,
				sc->eth_stats.error_bytes_received_lo);

	stats->oerrors = 0;

	stats->rx_nombuf =
		HILO_U64(sc->eth_stats.no_buff_discard_hi,
				sc->eth_stats.no_buff_discard_lo);
}

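/* Report the device limits (queues, buffer sizes, MAC addresses). */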
static void
bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	dev_info->max_rx_queues  = sc->max_rx_queues;
	dev_info->max_tx_queues  = sc->max_tx_queues;
	dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE;
	dev_info->max_rx_pktlen  = BNX2X_MAX_RX_PKT_LEN;
	dev_info->max_mac_addrs  = BNX2X_MAX_MAC_ADDRS;
}

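/*
 * MAC address add/remove wrappers: forward to the handlers installed
 * in sc->mac_ops, when present.
 */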
static void
bnx2x_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
		uint32_t index, uint32_t pool)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	if (sc->mac_ops.mac_addr_add)
		sc->mac_ops.mac_addr_add(dev, mac_addr, index, pool);
}

static void
bnx2x_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	if (sc->mac_ops.mac_addr_remove)
		sc->mac_ops.mac_addr_remove(dev, index);
}

static const struct eth_dev_ops bnx2x_eth_dev_ops = {
	.dev_configure                = bnx2x_dev_configure,
	.dev_start                    = bnx2x_dev_start,
	.dev_stop                     = bnx2x_dev_stop,
	.dev_close                    = bnx2x_dev_close,
	.promiscuous_enable           = bnx2x_promisc_enable,
	.promiscuous_disable          = bnx2x_promisc_disable,
	.allmulticast_enable          = bnx2x_dev_allmulticast_enable,
	.allmulticast_disable         = bnx2x_dev_allmulticast_disable,
	.link_update                  = bnx2x_dev_link_update,
	.stats_get                    = bnx2x_dev_stats_get,
	.dev_infos_get                = bnx2x_dev_infos_get,
	.rx_queue_setup               = bnx2x_dev_rx_queue_setup,
	.rx_queue_release             = bnx2x_dev_rx_queue_release,
	.tx_queue_setup               = bnx2x_dev_tx_queue_setup,
	.tx_queue_release             = bnx2x_dev_tx_queue_release,
	.mac_addr_add                 = bnx2x_mac_addr_add,
	.mac_addr_remove              = bnx2x_mac_addr_remove,
};

/*
 * dev_ops for virtual function
 */
static const struct eth_dev_ops bnx2xvf_eth_dev_ops = {
	.dev_configure                = bnx2x_dev_configure,
	.dev_start                    = bnx2x_dev_start,
	.dev_stop                     = bnx2x_dev_stop,
	.dev_close                    = bnx2x_dev_close,
	.promiscuous_enable           = bnx2x_promisc_enable,
	.promiscuous_disable          = bnx2x_promisc_disable,
	.allmulticast_enable          = bnx2x_dev_allmulticast_enable,
	.allmulticast_disable         = bnx2x_dev_allmulticast_disable,
	.link_update                  = bnx2xvf_dev_link_update,
	.stats_get                    = bnx2x_dev_stats_get,
	.dev_infos_get                = bnx2x_dev_infos_get,
	.rx_queue_setup               = bnx2x_dev_rx_queue_setup,
	.rx_queue_release             = bnx2x_dev_rx_queue_release,
	.tx_queue_setup               = bnx2x_dev_tx_queue_setup,
	.tx_queue_release             = bnx2x_dev_tx_queue_release,
	.mac_addr_add                 = bnx2x_mac_addr_add,
	.mac_addr_remove              = bnx2x_mac_addr_remove,
};

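/*
 * Common PF/VF device initialization: wire up the dev_ops, capture the
 * PCI identity and BAR mappings, load the firmware, attach the adapter
 * and, for a VF, set up the PF mailbox and bulletin board.
 */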
static int
bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
{
	int ret = 0;
	struct rte_pci_device *pci_dev;
	struct bnx2x_softc *sc;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = is_vf ? &bnx2xvf_eth_dev_ops : &bnx2x_eth_dev_ops;
	pci_dev = eth_dev->pci_dev;

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	sc = eth_dev->data->dev_private;
	sc->pcie_bus    = pci_dev->addr.bus;
	sc->pcie_device = pci_dev->addr.devid;

	if (is_vf)
		sc->flags = BNX2X_IS_VF_FLAG;

	sc->devinfo.vendor_id    = pci_dev->id.vendor_id;
	sc->devinfo.device_id    = pci_dev->id.device_id;
	sc->devinfo.subvendor_id = pci_dev->id.subsystem_vendor_id;
	sc->devinfo.subdevice_id = pci_dev->id.subsystem_device_id;

	sc->pcie_func = pci_dev->addr.function;
	sc->bar[BAR0].base_addr = (void *)pci_dev->mem_resource[0].addr;
	if (is_vf)
		sc->bar[BAR1].base_addr = (void *)
			((uintptr_t)pci_dev->mem_resource[0].addr + PXP_VF_ADDR_DB_START);
	else
		sc->bar[BAR1].base_addr = pci_dev->mem_resource[2].addr;

	assert(sc->bar[BAR0].base_addr);
	assert(sc->bar[BAR1].base_addr);

	bnx2x_load_firmware(sc);
	assert(sc->firmware);

	if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
		sc->udp_rss = 1;

	sc->rx_budget = BNX2X_RX_BUDGET;
	sc->hc_rx_ticks = BNX2X_RX_TICKS;
	sc->hc_tx_ticks = BNX2X_TX_TICKS;

	sc->interrupt_mode = INTR_MODE_SINGLE_MSIX;
	sc->rx_mode = BNX2X_RX_MODE_NORMAL;

	sc->pci_dev = pci_dev;
	ret = bnx2x_attach(sc);
	if (ret) {
		PMD_DRV_LOG(ERR, "bnx2x_attach failed (%d)", ret);
		return ret;
	}

	eth_dev->data->mac_addrs = (struct ether_addr *)sc->link_params.mac_addr;

	PMD_DRV_LOG(INFO, "pcie_bus=%d, pcie_device=%d",
			sc->pcie_bus, sc->pcie_device);
	PMD_DRV_LOG(INFO, "bar0.addr=%p, bar1.addr=%p",
			sc->bar[BAR0].base_addr, sc->bar[BAR1].base_addr);
	PMD_DRV_LOG(INFO, "port=%d, path=%d, vnic=%d, func=%d",
			PORT_ID(sc), PATH_ID(sc), VNIC_ID(sc), FUNC_ID(sc));
	PMD_DRV_LOG(INFO, "portID=%d vendorID=0x%x deviceID=0x%x",
			eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id);

	if (IS_VF(sc)) {
		if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_mbx_msg),
				    &sc->vf2pf_mbox_mapping, "vf2pf_mbox",
				    RTE_CACHE_LINE_SIZE) != 0)
			return -ENOMEM;

		sc->vf2pf_mbox = (struct bnx2x_vf_mbx_msg *)
					 sc->vf2pf_mbox_mapping.vaddr;

		if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_bulletin),
				    &sc->pf2vf_bulletin_mapping, "vf2pf_bull",
				    RTE_CACHE_LINE_SIZE) != 0)
			return -ENOMEM;

		sc->pf2vf_bulletin = (struct bnx2x_vf_bulletin *)
					     sc->pf2vf_bulletin_mapping.vaddr;

		ret = bnx2x_vf_get_resources(sc, sc->max_tx_queues,
					     sc->max_rx_queues);
		if (ret)
			return ret;
	}

	return 0;
}

static int
eth_bnx2x_dev_init(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();
	return bnx2x_common_dev_init(eth_dev, 0);
}

static int
eth_bnx2xvf_dev_init(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();
	return bnx2x_common_dev_init(eth_dev, 1);
}

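/*
 * physical function driver struct
 */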
static struct eth_driver rte_bnx2x_pmd = {
	.pci_drv = {
		.name = "rte_bnx2x_pmd",
		.id_table = pci_id_bnx2x_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	},
	.eth_dev_init = eth_bnx2x_dev_init,
	.dev_private_size = sizeof(struct bnx2x_softc),
};

/*
 * virtual function driver struct
 */
static struct eth_driver rte_bnx2xvf_pmd = {
	.pci_drv = {
		.name = "rte_bnx2xvf_pmd",
		.id_table = pci_id_bnx2xvf_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	},
	.eth_dev_init = eth_bnx2xvf_dev_init,
	.dev_private_size = sizeof(struct bnx2x_softc),
};

static int rte_bnx2x_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	rte_eth_driver_register(&rte_bnx2x_pmd);

	return 0;
}

static int rte_bnx2xvf_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	rte_eth_driver_register(&rte_bnx2xvf_pmd);

	return 0;
}

static struct rte_driver rte_bnx2x_driver = {
	.type = PMD_PDEV,
	.init = rte_bnx2x_pmd_init,
};

static struct rte_driver rte_bnx2xvf_driver = {
	.type = PMD_PDEV,
	.init = rte_bnx2xvf_pmd_init,
};

PMD_REGISTER_DRIVER(rte_bnx2x_driver);
PMD_REGISTER_DRIVER(rte_bnx2xvf_driver);