xref: /dpdk/drivers/net/bnx2x/bnx2x_ethdev.c (revision d80e42cce4c7017ed8c99dabb8ae444a492acc1c)
1 /*
2  * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
3  *
4  * Copyright (c) 2015-2018 Cavium Inc.
5  * All rights reserved.
6  * www.cavium.com
7  *
8  * See LICENSE.bnx2x_pmd for copyright and licensing details.
9  */
10 
11 #include "bnx2x.h"
12 #include "bnx2x_rxtx.h"
13 
14 #include <rte_dev.h>
15 #include <rte_ethdev_pci.h>
16 
17 int bnx2x_logtype_init;
18 int bnx2x_logtype_driver;
19 
20 /*
21  * The set of PCI devices this driver supports
22  */
23 #define BROADCOM_PCI_VENDOR_ID 0x14E4
24 static const struct rte_pci_id pci_id_bnx2x_map[] = {
25 	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57800) },
26 	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57711) },
27 	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810) },
28 	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811) },
29 	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_OBS) },
30 	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_4_10) },
31 	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_2_20) },
32 #ifdef RTE_LIBRTE_BNX2X_MF_SUPPORT
33 	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810_MF) },
34 	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811_MF) },
35 	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_MF) },
36 #endif
37 	{ .vendor_id = 0, }
38 };
39 
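/* The set of PCI virtual-function (SR-IOV VF) devices this driver supports */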
40 static const struct rte_pci_id pci_id_bnx2xvf_map[] = {
41 	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57800_VF) },
42 	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810_VF) },
43 	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811_VF) },
44 	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_VF) },
45 	{ .vendor_id = 0, }
46 };
47 
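/*
 * Extended-statistics descriptor: maps an xstat name to the offsets of the
 * high and low 32-bit halves of the counter inside struct bnx2x_eth_stats.
 * When offset_hi equals offset_lo the counter is read as a single field
 * instead of being recombined (see bnx2x_dev_xstats_get()).
 */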
48 struct rte_bnx2x_xstats_name_off {
49 	char name[RTE_ETH_XSTATS_NAME_SIZE];
50 	uint32_t offset_hi;
51 	uint32_t offset_lo;
52 };
53 
54 static const struct rte_bnx2x_xstats_name_off bnx2x_xstats_strings[] = {
55 	{"rx_buffer_drops",
56 		offsetof(struct bnx2x_eth_stats, brb_drop_hi),
57 		offsetof(struct bnx2x_eth_stats, brb_drop_lo)},
58 	{"rx_buffer_truncates",
59 		offsetof(struct bnx2x_eth_stats, brb_truncate_hi),
60 		offsetof(struct bnx2x_eth_stats, brb_truncate_lo)},
61 	{"rx_buffer_truncate_discard",
62 		offsetof(struct bnx2x_eth_stats, brb_truncate_discard),
63 		offsetof(struct bnx2x_eth_stats, brb_truncate_discard)},
64 	{"mac_filter_discard",
65 		offsetof(struct bnx2x_eth_stats, mac_filter_discard),
66 		offsetof(struct bnx2x_eth_stats, mac_filter_discard)},
67 	{"no_match_vlan_tag_discard",
68 		offsetof(struct bnx2x_eth_stats, mf_tag_discard),
69 		offsetof(struct bnx2x_eth_stats, mf_tag_discard)},
70 	{"tx_pause",
71 		offsetof(struct bnx2x_eth_stats, pause_frames_sent_hi),
72 		offsetof(struct bnx2x_eth_stats, pause_frames_sent_lo)},
73 	{"rx_pause",
74 		offsetof(struct bnx2x_eth_stats, pause_frames_received_hi),
75 		offsetof(struct bnx2x_eth_stats, pause_frames_received_lo)},
76 	{"tx_priority_flow_control",
77 		offsetof(struct bnx2x_eth_stats, pfc_frames_sent_hi),
78 		offsetof(struct bnx2x_eth_stats, pfc_frames_sent_lo)},
79 	{"rx_priority_flow_control",
80 		offsetof(struct bnx2x_eth_stats, pfc_frames_received_hi),
81 		offsetof(struct bnx2x_eth_stats, pfc_frames_received_lo)}
82 };
83 
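/*
 * Refresh the link state from the adapter and mirror speed, duplex,
 * autonegotiation and up/down status into dev->data->dev_link.
 */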
84 static void
85 bnx2x_link_update(struct rte_eth_dev *dev)
86 {
87 	struct bnx2x_softc *sc = dev->data->dev_private;
88 
89 	PMD_INIT_FUNC_TRACE();
90 	bnx2x_link_status_update(sc);
91 	mb();
92 	dev->data->dev_link.link_speed = sc->link_vars.line_speed;
93 	switch (sc->link_vars.duplex) {
94 		case DUPLEX_FULL:
95 			dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
96 			break;
97 		case DUPLEX_HALF:
98 			dev->data->dev_link.link_duplex = ETH_LINK_HALF_DUPLEX;
99 			break;
100 	}
101 	dev->data->dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
102 			ETH_LINK_SPEED_FIXED);
103 	dev->data->dev_link.link_status = sc->link_vars.link_up;
104 }
105 
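/*
 * Handle a device interrupt: run the legacy interrupt path, service the
 * periodic callout if it is armed, then re-read the link status from the
 * shared memory region to detect link changes.
 */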
106 static void
107 bnx2x_interrupt_action(struct rte_eth_dev *dev)
108 {
109 	struct bnx2x_softc *sc = dev->data->dev_private;
110 	uint32_t link_status;
111 
112 	PMD_DEBUG_PERIODIC_LOG(INFO, "Interrupt handled");
113 
114 	bnx2x_intr_legacy(sc, 0);
115 
116 	if (sc->periodic_flags & PERIODIC_GO)
117 		bnx2x_periodic_callout(sc);
118 	link_status = REG_RD(sc, sc->link_params.shmem_base +
119 			offsetof(struct shmem_region,
120 				port_mb[sc->link_params.port].link_status));
121 	if ((link_status & LINK_STATUS_LINK_UP) != dev->data->dev_link.link_status)
122 		bnx2x_link_update(dev);
123 }
124 
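/* Top-level interrupt callback registered on the PCI interrupt handle (PF only) */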
125 static void
126 bnx2x_interrupt_handler(void *param)
127 {
128 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
129 	struct bnx2x_softc *sc = dev->data->dev_private;
130 
131 	bnx2x_interrupt_action(dev);
132 	rte_intr_enable(&sc->pci_dev->intr_handle);
133 }
134 
135 /*
136  * Device operations (dev_ops) - callbacks invoked by the application through the ethdev API
137  */
138 
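/*
 * Validate the requested queue and MTU configuration and allocate the ILT
 * and HSI memory required before the device can be started.
 */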
139 static int
140 bnx2x_dev_configure(struct rte_eth_dev *dev)
141 {
142 	struct bnx2x_softc *sc = dev->data->dev_private;
143 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
144 
145 	int mp_ncpus = sysconf(_SC_NPROCESSORS_CONF);
146 
147 	PMD_INIT_FUNC_TRACE();
148 
149 	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
150 		sc->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len;
151 
152 	if (dev->data->nb_tx_queues > dev->data->nb_rx_queues) {
153 		PMD_DRV_LOG(ERR, "The number of TX queues is greater than the number of RX queues");
154 		return -EINVAL;
155 	}
156 
157 	sc->num_queues = MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
158 	if (sc->num_queues > mp_ncpus) {
159 		PMD_DRV_LOG(ERR, "The number of queues exceeds the number of CPUs");
160 		return -EINVAL;
161 	}
162 
163 	PMD_DRV_LOG(DEBUG, "num_queues=%d, mtu=%d",
164 		       sc->num_queues, sc->mtu);
165 
166 	/* allocate ilt */
167 	if (bnx2x_alloc_ilt_mem(sc) != 0) {
168 		PMD_DRV_LOG(ERR, "bnx2x_alloc_ilt_mem failed");
169 		return -ENXIO;
170 	}
171 
172 	/* allocate the host hardware/software hsi structures */
173 	if (bnx2x_alloc_hsi_mem(sc) != 0) {
174 		PMD_DRV_LOG(ERR, "bnx2x_alloc_hsi_mem failed");
175 		bnx2x_free_ilt_mem(sc);
176 		return -ENXIO;
177 	}
178 
179 	return 0;
180 }
181 
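/*
 * Bring the adapter up: initialize the hardware, hook up the interrupt
 * handler (PF only) and set up the Rx path.
 */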
182 static int
183 bnx2x_dev_start(struct rte_eth_dev *dev)
184 {
185 	struct bnx2x_softc *sc = dev->data->dev_private;
186 	int ret = 0;
187 
188 	PMD_INIT_FUNC_TRACE();
189 
190 	ret = bnx2x_init(sc);
191 	if (ret) {
192 		PMD_DRV_LOG(DEBUG, "bnx2x_init failed (%d)", ret);
193 		return -1;
194 	}
195 
196 	if (IS_PF(sc)) {
197 		rte_intr_callback_register(&sc->pci_dev->intr_handle,
198 				bnx2x_interrupt_handler, (void *)dev);
199 
200 		if (rte_intr_enable(&sc->pci_dev->intr_handle))
201 			PMD_DRV_LOG(ERR, "rte_intr_enable failed");
202 	}
203 
204 	ret = bnx2x_dev_rx_init(dev);
205 	if (ret != 0) {
206 		PMD_DRV_LOG(DEBUG, "bnx2x_dev_rx_init failed (%d)", ret);
207 		return -3;
208 	}
209 
210 	/* Print important adapter info for the user. */
211 	bnx2x_print_adapter_info(sc);
212 
213 	return ret;
214 }
215 
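/* Tear down the interrupt handler (PF only) and unload the NIC */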
216 static void
217 bnx2x_dev_stop(struct rte_eth_dev *dev)
218 {
219 	struct bnx2x_softc *sc = dev->data->dev_private;
220 	int ret = 0;
221 
222 	PMD_INIT_FUNC_TRACE();
223 
224 	if (IS_PF(sc)) {
225 		rte_intr_disable(&sc->pci_dev->intr_handle);
226 		rte_intr_callback_unregister(&sc->pci_dev->intr_handle,
227 				bnx2x_interrupt_handler, (void *)dev);
228 	}
229 
230 	ret = bnx2x_nic_unload(sc, UNLOAD_NORMAL, FALSE);
231 	if (ret) {
232 		PMD_DRV_LOG(DEBUG, "bnx2x_nic_unload failed (%d)", ret);
233 		return;
234 	}
235 
236 	return;
237 }
238 
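/*
 * Close the device: shut down the VF channel if applicable, release the
 * queues and link state, and free the HSI/ILT memory allocated at
 * configure time.
 */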
239 static void
240 bnx2x_dev_close(struct rte_eth_dev *dev)
241 {
242 	struct bnx2x_softc *sc = dev->data->dev_private;
243 
244 	PMD_INIT_FUNC_TRACE();
245 
246 	if (IS_VF(sc))
247 		bnx2x_vf_close(sc);
248 
249 	bnx2x_dev_clear_queues(dev);
250 	memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
251 
252 	/* free the host hardware/software hsi structures */
253 	bnx2x_free_hsi_mem(sc);
254 
255 	/* free ilt */
256 	bnx2x_free_ilt_mem(sc);
257 }
258 
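/*
 * Rx mode helpers: the promiscuous and all-multicast enable/disable
 * callbacks combine the current promiscuous/allmulti state into a single
 * BNX2X_RX_MODE_* value before programming it via bnx2x_set_rx_mode().
 */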
259 static void
260 bnx2x_promisc_enable(struct rte_eth_dev *dev)
261 {
262 	struct bnx2x_softc *sc = dev->data->dev_private;
263 
264 	PMD_INIT_FUNC_TRACE();
265 	sc->rx_mode = BNX2X_RX_MODE_PROMISC;
266 	if (rte_eth_allmulticast_get(dev->data->port_id) == 1)
267 		sc->rx_mode = BNX2X_RX_MODE_ALLMULTI_PROMISC;
268 	bnx2x_set_rx_mode(sc);
269 }
270 
271 static void
272 bnx2x_promisc_disable(struct rte_eth_dev *dev)
273 {
274 	struct bnx2x_softc *sc = dev->data->dev_private;
275 
276 	PMD_INIT_FUNC_TRACE();
277 	sc->rx_mode = BNX2X_RX_MODE_NORMAL;
278 	if (rte_eth_allmulticast_get(dev->data->port_id) == 1)
279 		sc->rx_mode = BNX2X_RX_MODE_ALLMULTI;
280 	bnx2x_set_rx_mode(sc);
281 }
282 
283 static void
284 bnx2x_dev_allmulticast_enable(struct rte_eth_dev *dev)
285 {
286 	struct bnx2x_softc *sc = dev->data->dev_private;
287 
288 	PMD_INIT_FUNC_TRACE();
289 	sc->rx_mode = BNX2X_RX_MODE_ALLMULTI;
290 	if (rte_eth_promiscuous_get(dev->data->port_id) == 1)
291 		sc->rx_mode = BNX2X_RX_MODE_ALLMULTI_PROMISC;
292 	bnx2x_set_rx_mode(sc);
293 }
294 
295 static void
296 bnx2x_dev_allmulticast_disable(struct rte_eth_dev *dev)
297 {
298 	struct bnx2x_softc *sc = dev->data->dev_private;
299 
300 	PMD_INIT_FUNC_TRACE();
301 	sc->rx_mode = BNX2X_RX_MODE_NORMAL;
302 	if (rte_eth_promiscuous_get(dev->data->port_id) == 1)
303 		sc->rx_mode = BNX2X_RX_MODE_PROMISC;
304 	bnx2x_set_rx_mode(sc);
305 }
306 
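/*
 * link_update callbacks: return 0 when the link status changed, -1 when it
 * is unchanged. The VF variant additionally checks the PF-to-VF bulletin
 * for a channel-down notification.
 */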
307 static int
308 bnx2x_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
309 {
310 	PMD_INIT_FUNC_TRACE();
311 
312 	int old_link_status = dev->data->dev_link.link_status;
313 
314 	bnx2x_link_update(dev);
315 
316 	return old_link_status == dev->data->dev_link.link_status ? -1 : 0;
317 }
318 
319 static int
320 bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
321 {
322 	int old_link_status = dev->data->dev_link.link_status;
323 	struct bnx2x_softc *sc = dev->data->dev_private;
324 
325 	bnx2x_link_update(dev);
326 
327 	bnx2x_check_bull(sc);
328 	if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
329 		PMD_DRV_LOG(ERR, "PF indicated channel is down. "
330 				"VF device is no longer operational");
331 		dev->data->dev_link.link_status = ETH_LINK_DOWN;
332 	}
333 
334 	return old_link_status == dev->data->dev_link.link_status ? -1 : 0;
335 }
336 
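/* Aggregate the firmware-maintained counters into struct rte_eth_stats */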
337 static int
338 bnx2x_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
339 {
340 	struct bnx2x_softc *sc = dev->data->dev_private;
341 	uint32_t brb_truncate_discard;
342 	uint64_t brb_drops;
343 	uint64_t brb_truncates;
344 
345 	PMD_INIT_FUNC_TRACE();
346 
347 	bnx2x_stats_handle(sc, STATS_EVENT_UPDATE);
348 
349 	memset(stats, 0, sizeof(struct rte_eth_stats));
350 
351 	stats->ipackets =
352 		HILO_U64(sc->eth_stats.total_unicast_packets_received_hi,
353 				sc->eth_stats.total_unicast_packets_received_lo) +
354 		HILO_U64(sc->eth_stats.total_multicast_packets_received_hi,
355 				sc->eth_stats.total_multicast_packets_received_lo) +
356 		HILO_U64(sc->eth_stats.total_broadcast_packets_received_hi,
357 				sc->eth_stats.total_broadcast_packets_received_lo);
358 
359 	stats->opackets =
360 		HILO_U64(sc->eth_stats.total_unicast_packets_transmitted_hi,
361 				sc->eth_stats.total_unicast_packets_transmitted_lo) +
362 		HILO_U64(sc->eth_stats.total_multicast_packets_transmitted_hi,
363 				sc->eth_stats.total_multicast_packets_transmitted_lo) +
364 		HILO_U64(sc->eth_stats.total_broadcast_packets_transmitted_hi,
365 				sc->eth_stats.total_broadcast_packets_transmitted_lo);
366 
367 	stats->ibytes =
368 		HILO_U64(sc->eth_stats.total_bytes_received_hi,
369 				sc->eth_stats.total_bytes_received_lo);
370 
371 	stats->obytes =
372 		HILO_U64(sc->eth_stats.total_bytes_transmitted_hi,
373 				sc->eth_stats.total_bytes_transmitted_lo);
374 
375 	stats->ierrors =
376 		HILO_U64(sc->eth_stats.error_bytes_received_hi,
377 				sc->eth_stats.error_bytes_received_lo);
378 
379 	stats->oerrors = 0;
380 
381 	stats->rx_nombuf =
382 		HILO_U64(sc->eth_stats.no_buff_discard_hi,
383 				sc->eth_stats.no_buff_discard_lo);
384 
385 	brb_drops =
386 		HILO_U64(sc->eth_stats.brb_drop_hi,
387 			 sc->eth_stats.brb_drop_lo);
388 
389 	brb_truncates =
390 		HILO_U64(sc->eth_stats.brb_truncate_hi,
391 			 sc->eth_stats.brb_truncate_lo);
392 
393 	brb_truncate_discard = sc->eth_stats.brb_truncate_discard;
394 
395 	stats->imissed = brb_drops + brb_truncates +
396 			 brb_truncate_discard + stats->rx_nombuf;
397 
398 	return 0;
399 }
400 
401 static int
402 bnx2x_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
403 		       struct rte_eth_xstat_name *xstats_names,
404 		       __rte_unused unsigned limit)
405 {
406 	unsigned int i, stat_cnt = RTE_DIM(bnx2x_xstats_strings);
407 
408 	if (xstats_names != NULL)
409 		for (i = 0; i < stat_cnt; i++)
410 			snprintf(xstats_names[i].name,
411 				sizeof(xstats_names[i].name),
412 				"%s",
413 				bnx2x_xstats_strings[i].name);
414 
415 	return stat_cnt;
416 }
417 
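/*
 * Fill the xstats array using the offsets from bnx2x_xstats_strings;
 * counters kept as split hi/lo words are recombined into 64-bit values.
 */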
418 static int
419 bnx2x_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
420 		     unsigned int n)
421 {
422 	struct bnx2x_softc *sc = dev->data->dev_private;
423 	unsigned int num = RTE_DIM(bnx2x_xstats_strings);
424 
425 	if (n < num)
426 		return num;
427 
428 	bnx2x_stats_handle(sc, STATS_EVENT_UPDATE);
429 
430 	for (num = 0; num < RTE_DIM(bnx2x_xstats_strings); num++) {
431 		if (bnx2x_xstats_strings[num].offset_hi !=
432 		    bnx2x_xstats_strings[num].offset_lo)
433 			xstats[num].value = HILO_U64(
434 					  *(uint32_t *)((char *)&sc->eth_stats +
435 					  bnx2x_xstats_strings[num].offset_hi),
436 					  *(uint32_t *)((char *)&sc->eth_stats +
437 					  bnx2x_xstats_strings[num].offset_lo));
438 		else
439 			xstats[num].value =
440 					  *(uint64_t *)((char *)&sc->eth_stats +
441 					  bnx2x_xstats_strings[num].offset_lo);
442 		xstats[num].id = num;
443 	}
444 
445 	return num;
446 }
447 
448 static void
449 bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
450 {
451 	struct bnx2x_softc *sc = dev->data->dev_private;
452 	dev_info->max_rx_queues  = sc->max_rx_queues;
453 	dev_info->max_tx_queues  = sc->max_tx_queues;
454 	dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE;
455 	dev_info->max_rx_pktlen  = BNX2X_MAX_RX_PKT_LEN;
456 	dev_info->max_mac_addrs  = BNX2X_MAX_MAC_ADDRS;
457 	dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G;
458 	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
459 }
460 
461 static int
462 bnx2x_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
463 		uint32_t index, uint32_t pool)
464 {
465 	struct bnx2x_softc *sc = dev->data->dev_private;
466 
467 	if (sc->mac_ops.mac_addr_add) {
468 		sc->mac_ops.mac_addr_add(dev, mac_addr, index, pool);
469 		return 0;
470 	}
471 	return -ENOTSUP;
472 }
473 
474 static void
475 bnx2x_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
476 {
477 	struct bnx2x_softc *sc = dev->data->dev_private;
478 
479 	if (sc->mac_ops.mac_addr_remove)
480 		sc->mac_ops.mac_addr_remove(dev, index);
481 }
482 
483 static const struct eth_dev_ops bnx2x_eth_dev_ops = {
484 	.dev_configure                = bnx2x_dev_configure,
485 	.dev_start                    = bnx2x_dev_start,
486 	.dev_stop                     = bnx2x_dev_stop,
487 	.dev_close                    = bnx2x_dev_close,
488 	.promiscuous_enable           = bnx2x_promisc_enable,
489 	.promiscuous_disable          = bnx2x_promisc_disable,
490 	.allmulticast_enable          = bnx2x_dev_allmulticast_enable,
491 	.allmulticast_disable         = bnx2x_dev_allmulticast_disable,
492 	.link_update                  = bnx2x_dev_link_update,
493 	.stats_get                    = bnx2x_dev_stats_get,
494 	.xstats_get                   = bnx2x_dev_xstats_get,
495 	.xstats_get_names             = bnx2x_get_xstats_names,
496 	.dev_infos_get                = bnx2x_dev_infos_get,
497 	.rx_queue_setup               = bnx2x_dev_rx_queue_setup,
498 	.rx_queue_release             = bnx2x_dev_rx_queue_release,
499 	.tx_queue_setup               = bnx2x_dev_tx_queue_setup,
500 	.tx_queue_release             = bnx2x_dev_tx_queue_release,
501 	.mac_addr_add                 = bnx2x_mac_addr_add,
502 	.mac_addr_remove              = bnx2x_mac_addr_remove,
503 };
504 
505 /*
506  * dev_ops for virtual function
507  */
508 static const struct eth_dev_ops bnx2xvf_eth_dev_ops = {
509 	.dev_configure                = bnx2x_dev_configure,
510 	.dev_start                    = bnx2x_dev_start,
511 	.dev_stop                     = bnx2x_dev_stop,
512 	.dev_close                    = bnx2x_dev_close,
513 	.promiscuous_enable           = bnx2x_promisc_enable,
514 	.promiscuous_disable          = bnx2x_promisc_disable,
515 	.allmulticast_enable          = bnx2x_dev_allmulticast_enable,
516 	.allmulticast_disable         = bnx2x_dev_allmulticast_disable,
517 	.link_update                  = bnx2xvf_dev_link_update,
518 	.stats_get                    = bnx2x_dev_stats_get,
519 	.xstats_get                   = bnx2x_dev_xstats_get,
520 	.xstats_get_names             = bnx2x_get_xstats_names,
521 	.dev_infos_get                = bnx2x_dev_infos_get,
522 	.rx_queue_setup               = bnx2x_dev_rx_queue_setup,
523 	.rx_queue_release             = bnx2x_dev_rx_queue_release,
524 	.tx_queue_setup               = bnx2x_dev_tx_queue_setup,
525 	.tx_queue_release             = bnx2x_dev_tx_queue_release,
526 	.mac_addr_add                 = bnx2x_mac_addr_add,
527 	.mac_addr_remove              = bnx2x_mac_addr_remove,
528 };
529 
530 
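/*
 * Common PF/VF initialization: map the BARs, load the firmware, attach the
 * adapter and, for a VF, set up the VF-to-PF mailbox and bulletin board.
 */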
531 static int
532 bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
533 {
534 	int ret = 0;
535 	struct rte_pci_device *pci_dev;
536 	struct bnx2x_softc *sc;
537 
538 	PMD_INIT_FUNC_TRACE();
539 
540 	eth_dev->dev_ops = is_vf ? &bnx2xvf_eth_dev_ops : &bnx2x_eth_dev_ops;
541 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
542 
543 	rte_eth_copy_pci_info(eth_dev, pci_dev);
544 
545 	sc = eth_dev->data->dev_private;
546 	sc->pcie_bus    = pci_dev->addr.bus;
547 	sc->pcie_device = pci_dev->addr.devid;
548 
549 	if (is_vf)
550 		sc->flags = BNX2X_IS_VF_FLAG;
551 
552 	sc->devinfo.vendor_id    = pci_dev->id.vendor_id;
553 	sc->devinfo.device_id    = pci_dev->id.device_id;
554 	sc->devinfo.subvendor_id = pci_dev->id.subsystem_vendor_id;
555 	sc->devinfo.subdevice_id = pci_dev->id.subsystem_device_id;
556 
557 	sc->pcie_func = pci_dev->addr.function;
558 	sc->bar[BAR0].base_addr = (void *)pci_dev->mem_resource[0].addr;
559 	if (is_vf)
560 		sc->bar[BAR1].base_addr = (void *)
561 			((uintptr_t)pci_dev->mem_resource[0].addr + PXP_VF_ADDR_DB_START);
562 	else
563 		sc->bar[BAR1].base_addr = pci_dev->mem_resource[2].addr;
564 
565 	assert(sc->bar[BAR0].base_addr);
566 	assert(sc->bar[BAR1].base_addr);
567 
568 	bnx2x_load_firmware(sc);
569 	assert(sc->firmware);
570 
571 	if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
572 		sc->udp_rss = 1;
573 
574 	sc->rx_budget = BNX2X_RX_BUDGET;
575 	sc->hc_rx_ticks = BNX2X_RX_TICKS;
576 	sc->hc_tx_ticks = BNX2X_TX_TICKS;
577 
578 	sc->interrupt_mode = INTR_MODE_SINGLE_MSIX;
579 	sc->rx_mode = BNX2X_RX_MODE_NORMAL;
580 
581 	sc->pci_dev = pci_dev;
582 	ret = bnx2x_attach(sc);
583 	if (ret) {
584 		PMD_DRV_LOG(ERR, "bnx2x_attach failed (%d)", ret);
585 		return ret;
586 	}
587 
588 	eth_dev->data->mac_addrs = (struct ether_addr *)sc->link_params.mac_addr;
589 
590 	PMD_DRV_LOG(INFO, "pcie_bus=%d, pcie_device=%d",
591 			sc->pcie_bus, sc->pcie_device);
592 	PMD_DRV_LOG(INFO, "bar0.addr=%p, bar1.addr=%p",
593 			sc->bar[BAR0].base_addr, sc->bar[BAR1].base_addr);
594 	PMD_DRV_LOG(INFO, "port=%d, path=%d, vnic=%d, func=%d",
595 			PORT_ID(sc), PATH_ID(sc), VNIC_ID(sc), FUNC_ID(sc));
596 	PMD_DRV_LOG(INFO, "portID=%d vendorID=0x%x deviceID=0x%x",
597 			eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id);
598 
599 	if (IS_VF(sc)) {
600 		rte_spinlock_init(&sc->vf2pf_lock);
601 
602 		if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_mbx_msg),
603 				    &sc->vf2pf_mbox_mapping, "vf2pf_mbox",
604 				    RTE_CACHE_LINE_SIZE) != 0)
605 			return -ENOMEM;
606 
607 		sc->vf2pf_mbox = (struct bnx2x_vf_mbx_msg *)
608 					 sc->vf2pf_mbox_mapping.vaddr;
609 
610 		if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_bulletin),
611 				    &sc->pf2vf_bulletin_mapping, "vf2pf_bull",
612 				    RTE_CACHE_LINE_SIZE) != 0)
613 			return -ENOMEM;
614 
615 		sc->pf2vf_bulletin = (struct bnx2x_vf_bulletin *)
616 					     sc->pf2vf_bulletin_mapping.vaddr;
617 
618 		ret = bnx2x_vf_get_resources(sc, sc->max_tx_queues,
619 					     sc->max_rx_queues);
620 		if (ret)
621 			return ret;
622 	}
623 
624 	return 0;
625 }
626 
627 static int
628 eth_bnx2x_dev_init(struct rte_eth_dev *eth_dev)
629 {
630 	PMD_INIT_FUNC_TRACE();
631 	return bnx2x_common_dev_init(eth_dev, 0);
632 }
633 
634 static int
635 eth_bnx2xvf_dev_init(struct rte_eth_dev *eth_dev)
636 {
637 	PMD_INIT_FUNC_TRACE();
638 	return bnx2x_common_dev_init(eth_dev, 1);
639 }
640 
641 static struct rte_pci_driver rte_bnx2x_pmd;
642 static struct rte_pci_driver rte_bnx2xvf_pmd;
643 
644 static int eth_bnx2x_pci_probe(struct rte_pci_driver *pci_drv,
645 	struct rte_pci_device *pci_dev)
646 {
647 	if (pci_drv == &rte_bnx2x_pmd)
648 		return rte_eth_dev_pci_generic_probe(pci_dev,
649 				sizeof(struct bnx2x_softc), eth_bnx2x_dev_init);
650 	else if (pci_drv == &rte_bnx2xvf_pmd)
651 		return rte_eth_dev_pci_generic_probe(pci_dev,
652 				sizeof(struct bnx2x_softc), eth_bnx2xvf_dev_init);
653 	else
654 		return -EINVAL;
655 }
656 
657 static int eth_bnx2x_pci_remove(struct rte_pci_device *pci_dev)
658 {
659 	return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
660 }
661 
662 static struct rte_pci_driver rte_bnx2x_pmd = {
663 	.id_table = pci_id_bnx2x_map,
664 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
665 	.probe = eth_bnx2x_pci_probe,
666 	.remove = eth_bnx2x_pci_remove,
667 };
668 
669 /*
670  * virtual function driver struct
671  */
672 static struct rte_pci_driver rte_bnx2xvf_pmd = {
673 	.id_table = pci_id_bnx2xvf_map,
674 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
675 	.probe = eth_bnx2x_pci_probe,
676 	.remove = eth_bnx2x_pci_remove,
677 };
678 
679 RTE_PMD_REGISTER_PCI(net_bnx2x, rte_bnx2x_pmd);
680 RTE_PMD_REGISTER_PCI_TABLE(net_bnx2x, pci_id_bnx2x_map);
681 RTE_PMD_REGISTER_KMOD_DEP(net_bnx2x, "* igb_uio | uio_pci_generic | vfio-pci");
682 RTE_PMD_REGISTER_PCI(net_bnx2xvf, rte_bnx2xvf_pmd);
683 RTE_PMD_REGISTER_PCI_TABLE(net_bnx2xvf, pci_id_bnx2xvf_map);
684 RTE_PMD_REGISTER_KMOD_DEP(net_bnx2xvf, "* igb_uio | vfio-pci");
685 
686 RTE_INIT(bnx2x_init_log);
687 static void
688 bnx2x_init_log(void)
689 {
690 	bnx2x_logtype_init = rte_log_register("pmd.net.bnx2x.init");
691 	if (bnx2x_logtype_init >= 0)
692 		rte_log_set_level(bnx2x_logtype_init, RTE_LOG_NOTICE);
693 	bnx2x_logtype_driver = rte_log_register("pmd.net.bnx2x.driver");
694 	if (bnx2x_logtype_driver >= 0)
695 		rte_log_set_level(bnx2x_logtype_driver, RTE_LOG_NOTICE);
696 }
697