xref: /dpdk/drivers/net/bnx2x/bnx2x_ethdev.c (revision 1cde1b9a9b4dbf31cb5e5ccdfc5da3cb079f43a2)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
3  * Copyright (c) 2015-2018 Cavium Inc.
4  * All rights reserved.
5  * www.cavium.com
6  */
7 
8 #include "bnx2x.h"
9 #include "bnx2x_rxtx.h"
10 
11 #include <rte_string_fns.h>
12 #include <rte_dev.h>
13 #include <rte_ethdev_pci.h>
14 #include <rte_alarm.h>
15 
/* Log type IDs registered in bnx2x_init_log(); NOTICE level by default. */
int bnx2x_logtype_init;
int bnx2x_logtype_driver;
18 
19 /*
20  * The set of PCI devices this driver supports
21  */
#define BROADCOM_PCI_VENDOR_ID 0x14E4
/* PF device IDs (57711/578xx families) matched by the PF driver. */
static const struct rte_pci_id pci_id_bnx2x_map[] = {
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57800) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57711) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_OBS) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_4_10) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_2_20) },
/* Multi-function variants are an opt-in build-time feature. */
#ifdef RTE_LIBRTE_BNX2X_MF_SUPPORT
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810_MF) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811_MF) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_MF) },
#endif
	{ .vendor_id = 0, }	/* sentinel */
};
38 
/* VF device IDs matched by the bnx2xvf driver. */
static const struct rte_pci_id pci_id_bnx2xvf_map[] = {
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57800_VF) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810_VF) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811_VF) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_VF) },
	{ .vendor_id = 0, }	/* sentinel */
};
46 
/*
 * Maps an xstat name to byte offsets inside struct bnx2x_eth_stats.
 * For 64-bit counters split into two 32-bit words, offset_hi/offset_lo
 * address each half; when offset_hi == offset_lo the entry is a single
 * counter read whole (see bnx2x_dev_xstats_get).
 */
struct rte_bnx2x_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint32_t offset_hi;
	uint32_t offset_lo;
};
52 
/* Extended statistics exposed through the ethdev xstats API. */
static const struct rte_bnx2x_xstats_name_off bnx2x_xstats_strings[] = {
	{"rx_buffer_drops",
		offsetof(struct bnx2x_eth_stats, brb_drop_hi),
		offsetof(struct bnx2x_eth_stats, brb_drop_lo)},
	{"rx_buffer_truncates",
		offsetof(struct bnx2x_eth_stats, brb_truncate_hi),
		offsetof(struct bnx2x_eth_stats, brb_truncate_lo)},
	/* Entries with identical hi/lo offsets are single counters. */
	{"rx_buffer_truncate_discard",
		offsetof(struct bnx2x_eth_stats, brb_truncate_discard),
		offsetof(struct bnx2x_eth_stats, brb_truncate_discard)},
	{"mac_filter_discard",
		offsetof(struct bnx2x_eth_stats, mac_filter_discard),
		offsetof(struct bnx2x_eth_stats, mac_filter_discard)},
	{"no_match_vlan_tag_discard",
		offsetof(struct bnx2x_eth_stats, mf_tag_discard),
		offsetof(struct bnx2x_eth_stats, mf_tag_discard)},
	{"tx_pause",
		offsetof(struct bnx2x_eth_stats, pause_frames_sent_hi),
		offsetof(struct bnx2x_eth_stats, pause_frames_sent_lo)},
	{"rx_pause",
		offsetof(struct bnx2x_eth_stats, pause_frames_received_hi),
		offsetof(struct bnx2x_eth_stats, pause_frames_received_lo)},
	{"tx_priority_flow_control",
		offsetof(struct bnx2x_eth_stats, pfc_frames_sent_hi),
		offsetof(struct bnx2x_eth_stats, pfc_frames_sent_lo)},
	{"rx_priority_flow_control",
		offsetof(struct bnx2x_eth_stats, pfc_frames_received_hi),
		offsetof(struct bnx2x_eth_stats, pfc_frames_received_lo)}
};
82 
83 static int
84 bnx2x_link_update(struct rte_eth_dev *dev)
85 {
86 	struct bnx2x_softc *sc = dev->data->dev_private;
87 	struct rte_eth_link link;
88 
89 	PMD_INIT_FUNC_TRACE(sc);
90 
91 	memset(&link, 0, sizeof(link));
92 	mb();
93 	link.link_speed = sc->link_vars.line_speed;
94 	switch (sc->link_vars.duplex) {
95 		case DUPLEX_FULL:
96 			link.link_duplex = ETH_LINK_FULL_DUPLEX;
97 			break;
98 		case DUPLEX_HALF:
99 			link.link_duplex = ETH_LINK_HALF_DUPLEX;
100 			break;
101 	}
102 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
103 			ETH_LINK_SPEED_FIXED);
104 	link.link_status = sc->link_vars.link_up;
105 
106 	return rte_eth_linkstatus_set(dev, &link);
107 }
108 
/*
 * Common interrupt/poll work: run the legacy interrupt service routine,
 * run the periodic slowpath callout only when called from the alarm path
 * (intr_cxt == 0) and the poll is enabled, then refresh the link state
 * if the shmem-reported status differs from the cached one.
 */
static void
bnx2x_interrupt_action(struct rte_eth_dev *dev, int intr_cxt)
{
	struct bnx2x_softc *sc = dev->data->dev_private;
	uint32_t link_status;

	bnx2x_intr_legacy(sc);

	if ((atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO) &&
	    !intr_cxt)
		bnx2x_periodic_callout(sc);
	/* Read this port's link status word from the shared memory region. */
	link_status = REG_RD(sc, sc->link_params.shmem_base +
			offsetof(struct shmem_region,
				port_mb[sc->link_params.port].link_status));
	/* NOTE(review): comparison assumes LINK_STATUS_LINK_UP is bit 0 so
	 * the masked value lines up with the 0/1 cached status — confirm.
	 */
	if ((link_status & LINK_STATUS_LINK_UP) != dev->data->dev_link.link_status)
		bnx2x_link_update(dev);
}
126 
127 static void
128 bnx2x_interrupt_handler(void *param)
129 {
130 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
131 	struct bnx2x_softc *sc = dev->data->dev_private;
132 
133 	PMD_DEBUG_PERIODIC_LOG(INFO, sc, "Interrupt handled");
134 
135 	bnx2x_interrupt_action(dev, 1);
136 	rte_intr_ack(&sc->pci_dev->intr_handle);
137 }
138 
139 static void bnx2x_periodic_start(void *param)
140 {
141 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
142 	struct bnx2x_softc *sc = dev->data->dev_private;
143 	int ret = 0;
144 
145 	atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
146 	bnx2x_interrupt_action(dev, 0);
147 	if (IS_PF(sc)) {
148 		ret = rte_eal_alarm_set(BNX2X_SP_TIMER_PERIOD,
149 					bnx2x_periodic_start, (void *)dev);
150 		if (ret) {
151 			PMD_DRV_LOG(ERR, sc, "Unable to start periodic"
152 					     " timer rc %d", ret);
153 		}
154 	}
155 }
156 
157 void bnx2x_periodic_stop(void *param)
158 {
159 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
160 	struct bnx2x_softc *sc = dev->data->dev_private;
161 
162 	atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
163 
164 	rte_eal_alarm_cancel(bnx2x_periodic_start, (void *)dev);
165 
166 	PMD_DRV_LOG(DEBUG, sc, "Periodic poll stopped");
167 }
168 
169 /*
170  * Devops - helper functions can be called from user application
171  */
172 
173 static int
174 bnx2x_dev_configure(struct rte_eth_dev *dev)
175 {
176 	struct bnx2x_softc *sc = dev->data->dev_private;
177 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
178 
179 	int mp_ncpus = sysconf(_SC_NPROCESSORS_CONF);
180 
181 	PMD_INIT_FUNC_TRACE(sc);
182 
183 	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
184 		sc->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len;
185 		dev->data->mtu = sc->mtu;
186 	}
187 
188 	if (dev->data->nb_tx_queues > dev->data->nb_rx_queues) {
189 		PMD_DRV_LOG(ERR, sc, "The number of TX queues is greater than number of RX queues");
190 		return -EINVAL;
191 	}
192 
193 	sc->num_queues = MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
194 	if (sc->num_queues > mp_ncpus) {
195 		PMD_DRV_LOG(ERR, sc, "The number of queues is more than number of CPUs");
196 		return -EINVAL;
197 	}
198 
199 	PMD_DRV_LOG(DEBUG, sc, "num_queues=%d, mtu=%d",
200 		       sc->num_queues, sc->mtu);
201 
202 	/* allocate ilt */
203 	if (bnx2x_alloc_ilt_mem(sc) != 0) {
204 		PMD_DRV_LOG(ERR, sc, "bnx2x_alloc_ilt_mem was failed");
205 		return -ENXIO;
206 	}
207 
208 	bnx2x_dev_rxtx_init_dummy(dev);
209 	return 0;
210 }
211 
212 static int
213 bnx2x_dev_start(struct rte_eth_dev *dev)
214 {
215 	struct bnx2x_softc *sc = dev->data->dev_private;
216 	int ret = 0;
217 
218 	PMD_INIT_FUNC_TRACE(sc);
219 
220 	/* start the periodic callout */
221 	if (IS_PF(sc)) {
222 		if (atomic_load_acq_long(&sc->periodic_flags) ==
223 		    PERIODIC_STOP) {
224 			bnx2x_periodic_start(dev);
225 			PMD_DRV_LOG(DEBUG, sc, "Periodic poll re-started");
226 		}
227 	}
228 
229 	ret = bnx2x_init(sc);
230 	if (ret) {
231 		PMD_DRV_LOG(DEBUG, sc, "bnx2x_init failed (%d)", ret);
232 		return -1;
233 	}
234 
235 	if (IS_PF(sc)) {
236 		rte_intr_callback_register(&sc->pci_dev->intr_handle,
237 				bnx2x_interrupt_handler, (void *)dev);
238 
239 		if (rte_intr_enable(&sc->pci_dev->intr_handle))
240 			PMD_DRV_LOG(ERR, sc, "rte_intr_enable failed");
241 	}
242 
243 	bnx2x_dev_rxtx_init(dev);
244 
245 	bnx2x_print_device_info(sc);
246 
247 	return ret;
248 }
249 
250 static void
251 bnx2x_dev_stop(struct rte_eth_dev *dev)
252 {
253 	struct bnx2x_softc *sc = dev->data->dev_private;
254 	int ret = 0;
255 
256 	PMD_INIT_FUNC_TRACE(sc);
257 
258 	bnx2x_dev_rxtx_init_dummy(dev);
259 
260 	if (IS_PF(sc)) {
261 		rte_intr_disable(&sc->pci_dev->intr_handle);
262 		rte_intr_callback_unregister(&sc->pci_dev->intr_handle,
263 				bnx2x_interrupt_handler, (void *)dev);
264 
265 		/* stop the periodic callout */
266 		bnx2x_periodic_stop(dev);
267 	}
268 
269 	ret = bnx2x_nic_unload(sc, UNLOAD_NORMAL, FALSE);
270 	if (ret) {
271 		PMD_DRV_LOG(DEBUG, sc, "bnx2x_nic_unload failed (%d)", ret);
272 		return;
273 	}
274 
275 	return;
276 }
277 
278 static void
279 bnx2x_dev_close(struct rte_eth_dev *dev)
280 {
281 	struct bnx2x_softc *sc = dev->data->dev_private;
282 
283 	PMD_INIT_FUNC_TRACE(sc);
284 
285 	if (IS_VF(sc))
286 		bnx2x_vf_close(sc);
287 
288 	bnx2x_dev_clear_queues(dev);
289 	memset(&(dev->data->dev_link), 0 , sizeof(struct rte_eth_link));
290 
291 	/* free ilt */
292 	bnx2x_free_ilt_mem(sc);
293 }
294 
295 static int
296 bnx2x_promisc_enable(struct rte_eth_dev *dev)
297 {
298 	struct bnx2x_softc *sc = dev->data->dev_private;
299 
300 	PMD_INIT_FUNC_TRACE(sc);
301 	sc->rx_mode = BNX2X_RX_MODE_PROMISC;
302 	if (rte_eth_allmulticast_get(dev->data->port_id) == 1)
303 		sc->rx_mode = BNX2X_RX_MODE_ALLMULTI_PROMISC;
304 	bnx2x_set_rx_mode(sc);
305 
306 	return 0;
307 }
308 
309 static int
310 bnx2x_promisc_disable(struct rte_eth_dev *dev)
311 {
312 	struct bnx2x_softc *sc = dev->data->dev_private;
313 
314 	PMD_INIT_FUNC_TRACE(sc);
315 	sc->rx_mode = BNX2X_RX_MODE_NORMAL;
316 	if (rte_eth_allmulticast_get(dev->data->port_id) == 1)
317 		sc->rx_mode = BNX2X_RX_MODE_ALLMULTI;
318 	bnx2x_set_rx_mode(sc);
319 
320 	return 0;
321 }
322 
323 static int
324 bnx2x_dev_allmulticast_enable(struct rte_eth_dev *dev)
325 {
326 	struct bnx2x_softc *sc = dev->data->dev_private;
327 
328 	PMD_INIT_FUNC_TRACE(sc);
329 	sc->rx_mode = BNX2X_RX_MODE_ALLMULTI;
330 	if (rte_eth_promiscuous_get(dev->data->port_id) == 1)
331 		sc->rx_mode = BNX2X_RX_MODE_ALLMULTI_PROMISC;
332 	bnx2x_set_rx_mode(sc);
333 
334 	return 0;
335 }
336 
337 static int
338 bnx2x_dev_allmulticast_disable(struct rte_eth_dev *dev)
339 {
340 	struct bnx2x_softc *sc = dev->data->dev_private;
341 
342 	PMD_INIT_FUNC_TRACE(sc);
343 	sc->rx_mode = BNX2X_RX_MODE_NORMAL;
344 	if (rte_eth_promiscuous_get(dev->data->port_id) == 1)
345 		sc->rx_mode = BNX2X_RX_MODE_PROMISC;
346 	bnx2x_set_rx_mode(sc);
347 
348 	return 0;
349 }
350 
/* link_update devop (PF): delegate to the common link refresh helper. */
static int
bnx2x_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE(sc);

	return bnx2x_link_update(dev);
}
360 
361 static int
362 bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
363 {
364 	struct bnx2x_softc *sc = dev->data->dev_private;
365 	int ret = 0;
366 
367 	ret = bnx2x_link_update(dev);
368 
369 	bnx2x_check_bull(sc);
370 	if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
371 		PMD_DRV_LOG(ERR, sc, "PF indicated channel is down."
372 				"VF device is no longer operational");
373 		dev->data->dev_link.link_status = ETH_LINK_DOWN;
374 	}
375 
376 	return ret;
377 }
378 
379 static int
380 bnx2x_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
381 {
382 	struct bnx2x_softc *sc = dev->data->dev_private;
383 	uint32_t brb_truncate_discard;
384 	uint64_t brb_drops;
385 	uint64_t brb_truncates;
386 
387 	PMD_INIT_FUNC_TRACE(sc);
388 
389 	bnx2x_stats_handle(sc, STATS_EVENT_UPDATE);
390 
391 	memset(stats, 0, sizeof (struct rte_eth_stats));
392 
393 	stats->ipackets =
394 		HILO_U64(sc->eth_stats.total_unicast_packets_received_hi,
395 				sc->eth_stats.total_unicast_packets_received_lo) +
396 		HILO_U64(sc->eth_stats.total_multicast_packets_received_hi,
397 				sc->eth_stats.total_multicast_packets_received_lo) +
398 		HILO_U64(sc->eth_stats.total_broadcast_packets_received_hi,
399 				sc->eth_stats.total_broadcast_packets_received_lo);
400 
401 	stats->opackets =
402 		HILO_U64(sc->eth_stats.total_unicast_packets_transmitted_hi,
403 				sc->eth_stats.total_unicast_packets_transmitted_lo) +
404 		HILO_U64(sc->eth_stats.total_multicast_packets_transmitted_hi,
405 				sc->eth_stats.total_multicast_packets_transmitted_lo) +
406 		HILO_U64(sc->eth_stats.total_broadcast_packets_transmitted_hi,
407 				sc->eth_stats.total_broadcast_packets_transmitted_lo);
408 
409 	stats->ibytes =
410 		HILO_U64(sc->eth_stats.total_bytes_received_hi,
411 				sc->eth_stats.total_bytes_received_lo);
412 
413 	stats->obytes =
414 		HILO_U64(sc->eth_stats.total_bytes_transmitted_hi,
415 				sc->eth_stats.total_bytes_transmitted_lo);
416 
417 	stats->ierrors =
418 		HILO_U64(sc->eth_stats.error_bytes_received_hi,
419 				sc->eth_stats.error_bytes_received_lo);
420 
421 	stats->oerrors = 0;
422 
423 	stats->rx_nombuf =
424 		HILO_U64(sc->eth_stats.no_buff_discard_hi,
425 				sc->eth_stats.no_buff_discard_lo);
426 
427 	brb_drops =
428 		HILO_U64(sc->eth_stats.brb_drop_hi,
429 			 sc->eth_stats.brb_drop_lo);
430 
431 	brb_truncates =
432 		HILO_U64(sc->eth_stats.brb_truncate_hi,
433 			 sc->eth_stats.brb_truncate_lo);
434 
435 	brb_truncate_discard = sc->eth_stats.brb_truncate_discard;
436 
437 	stats->imissed = brb_drops + brb_truncates +
438 			 brb_truncate_discard + stats->rx_nombuf;
439 
440 	return 0;
441 }
442 
443 static int
444 bnx2x_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
445 		       struct rte_eth_xstat_name *xstats_names,
446 		       __rte_unused unsigned limit)
447 {
448 	unsigned int i, stat_cnt = RTE_DIM(bnx2x_xstats_strings);
449 
450 	if (xstats_names != NULL)
451 		for (i = 0; i < stat_cnt; i++)
452 			strlcpy(xstats_names[i].name,
453 				bnx2x_xstats_strings[i].name,
454 				sizeof(xstats_names[i].name));
455 
456 	return stat_cnt;
457 }
458 
459 static int
460 bnx2x_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
461 		     unsigned int n)
462 {
463 	struct bnx2x_softc *sc = dev->data->dev_private;
464 	unsigned int num = RTE_DIM(bnx2x_xstats_strings);
465 
466 	if (n < num)
467 		return num;
468 
469 	bnx2x_stats_handle(sc, STATS_EVENT_UPDATE);
470 
471 	for (num = 0; num < n; num++) {
472 		if (bnx2x_xstats_strings[num].offset_hi !=
473 		    bnx2x_xstats_strings[num].offset_lo)
474 			xstats[num].value = HILO_U64(
475 					  *(uint32_t *)((char *)&sc->eth_stats +
476 					  bnx2x_xstats_strings[num].offset_hi),
477 					  *(uint32_t *)((char *)&sc->eth_stats +
478 					  bnx2x_xstats_strings[num].offset_lo));
479 		else
480 			xstats[num].value =
481 					  *(uint64_t *)((char *)&sc->eth_stats +
482 					  bnx2x_xstats_strings[num].offset_lo);
483 		xstats[num].id = num;
484 	}
485 
486 	return num;
487 }
488 
/* dev_infos_get devop: report static device capabilities and limits. */
static int
bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	/* Queue limits come from the probed adapter. */
	dev_info->max_rx_queues  = sc->max_rx_queues;
	dev_info->max_tx_queues  = sc->max_tx_queues;
	dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE;
	dev_info->max_rx_pktlen  = BNX2X_MAX_RX_PKT_LEN;
	dev_info->max_mac_addrs  = BNX2X_MAX_MAC_ADDRS;
	/* Jumbo frame is the only RX offload this PMD advertises. */
	dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G;
	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;

	/* Descriptor ring size limits. */
	dev_info->rx_desc_lim.nb_max = MAX_RX_AVAIL;
	dev_info->rx_desc_lim.nb_min = MIN_RX_SIZE_NONTPA;
	dev_info->tx_desc_lim.nb_max = MAX_TX_AVAIL;

	return 0;
}
508 
509 static int
510 bnx2x_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
511 		uint32_t index, uint32_t pool)
512 {
513 	struct bnx2x_softc *sc = dev->data->dev_private;
514 
515 	if (sc->mac_ops.mac_addr_add) {
516 		sc->mac_ops.mac_addr_add(dev, mac_addr, index, pool);
517 		return 0;
518 	}
519 	return -ENOTSUP;
520 }
521 
522 static void
523 bnx2x_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
524 {
525 	struct bnx2x_softc *sc = dev->data->dev_private;
526 
527 	if (sc->mac_ops.mac_addr_remove)
528 		sc->mac_ops.mac_addr_remove(dev, index);
529 }
530 
/* ethdev callback table for the physical function. */
static const struct eth_dev_ops bnx2x_eth_dev_ops = {
	.dev_configure                = bnx2x_dev_configure,
	.dev_start                    = bnx2x_dev_start,
	.dev_stop                     = bnx2x_dev_stop,
	.dev_close                    = bnx2x_dev_close,
	.promiscuous_enable           = bnx2x_promisc_enable,
	.promiscuous_disable          = bnx2x_promisc_disable,
	.allmulticast_enable          = bnx2x_dev_allmulticast_enable,
	.allmulticast_disable         = bnx2x_dev_allmulticast_disable,
	.link_update                  = bnx2x_dev_link_update,
	.stats_get                    = bnx2x_dev_stats_get,
	.xstats_get                   = bnx2x_dev_xstats_get,
	.xstats_get_names             = bnx2x_get_xstats_names,
	.dev_infos_get                = bnx2x_dev_infos_get,
	.rx_queue_setup               = bnx2x_dev_rx_queue_setup,
	.rx_queue_release             = bnx2x_dev_rx_queue_release,
	.tx_queue_setup               = bnx2x_dev_tx_queue_setup,
	.tx_queue_release             = bnx2x_dev_tx_queue_release,
	.mac_addr_add                 = bnx2x_mac_addr_add,
	.mac_addr_remove              = bnx2x_mac_addr_remove,
};
552 
553 /*
554  * dev_ops for virtual function
555  */
static const struct eth_dev_ops bnx2xvf_eth_dev_ops = {
	.dev_configure                = bnx2x_dev_configure,
	.dev_start                    = bnx2x_dev_start,
	.dev_stop                     = bnx2x_dev_stop,
	.dev_close                    = bnx2x_dev_close,
	.promiscuous_enable           = bnx2x_promisc_enable,
	.promiscuous_disable          = bnx2x_promisc_disable,
	.allmulticast_enable          = bnx2x_dev_allmulticast_enable,
	.allmulticast_disable         = bnx2x_dev_allmulticast_disable,
	/* VF variant also polls the PF bulletin board for channel-down. */
	.link_update                  = bnx2xvf_dev_link_update,
	.stats_get                    = bnx2x_dev_stats_get,
	.xstats_get                   = bnx2x_dev_xstats_get,
	.xstats_get_names             = bnx2x_get_xstats_names,
	.dev_infos_get                = bnx2x_dev_infos_get,
	.rx_queue_setup               = bnx2x_dev_rx_queue_setup,
	.rx_queue_release             = bnx2x_dev_rx_queue_release,
	.tx_queue_setup               = bnx2x_dev_tx_queue_setup,
	.tx_queue_release             = bnx2x_dev_tx_queue_release,
	.mac_addr_add                 = bnx2x_mac_addr_add,
	.mac_addr_remove              = bnx2x_mac_addr_remove,
};
577 
578 
/*
 * Shared PF/VF init: record PCI identity, map BARs, load firmware,
 * attach to the adapter, arm the periodic slowpath poll (PF) or set up
 * the VF<->PF mailbox and bulletin board (VF).
 *
 * Returns 0 on success, a negative errno / driver error on failure.
 */
static int
bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
{
	int ret = 0;
	struct rte_pci_device *pci_dev;
	struct rte_pci_addr pci_addr;
	struct bnx2x_softc *sc;
	/* Print the adapter banner only once per process. */
	static bool adapter_info = true;

	/* Extract key data structures */
	sc = eth_dev->data->dev_private;
	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	pci_addr = pci_dev->addr;

	snprintf(sc->devinfo.name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
		 pci_addr.bus, pci_addr.devid, pci_addr.function,
		 eth_dev->data->port_id);

	PMD_INIT_FUNC_TRACE(sc);

	eth_dev->dev_ops = is_vf ? &bnx2xvf_eth_dev_ops : &bnx2x_eth_dev_ops;

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	sc->pcie_bus    = pci_dev->addr.bus;
	sc->pcie_device = pci_dev->addr.devid;

	sc->devinfo.vendor_id    = pci_dev->id.vendor_id;
	sc->devinfo.device_id    = pci_dev->id.device_id;
	sc->devinfo.subvendor_id = pci_dev->id.subsystem_vendor_id;
	sc->devinfo.subdevice_id = pci_dev->id.subsystem_device_id;

	if (is_vf)
		sc->flags = BNX2X_IS_VF_FLAG;

	sc->pcie_func = pci_dev->addr.function;
	sc->bar[BAR0].base_addr = (void *)pci_dev->mem_resource[0].addr;
	/* On a VF the doorbell area lives inside BAR0 at a fixed offset. */
	if (is_vf)
		sc->bar[BAR1].base_addr = (void *)
			((uintptr_t)pci_dev->mem_resource[0].addr + PXP_VF_ADDR_DB_START);
	else
		sc->bar[BAR1].base_addr = pci_dev->mem_resource[2].addr;

	assert(sc->bar[BAR0].base_addr);
	assert(sc->bar[BAR1].base_addr);

	/* NOTE(review): firmware-load failure is only caught by assert(),
	 * which is compiled out with NDEBUG — confirm error handling.
	 */
	bnx2x_load_firmware(sc);
	assert(sc->firmware);

	if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
		sc->udp_rss = 1;

	/* Default polling budget and host-coalescing tick values. */
	sc->rx_budget = BNX2X_RX_BUDGET;
	sc->hc_rx_ticks = BNX2X_RX_TICKS;
	sc->hc_tx_ticks = BNX2X_TX_TICKS;

	sc->interrupt_mode = INTR_MODE_SINGLE_MSIX;
	sc->rx_mode = BNX2X_RX_MODE_NORMAL;

	sc->pci_dev = pci_dev;
	ret = bnx2x_attach(sc);
	if (ret) {
		PMD_DRV_LOG(ERR, sc, "bnx2x_attach failed (%d)", ret);
		return ret;
	}

	/* Print important adapter info for the user. */
	if (adapter_info) {
		bnx2x_print_adapter_info(sc);
		adapter_info = false;
	}

	/* schedule periodic poll for slowpath link events */
	if (IS_PF(sc)) {
		PMD_DRV_LOG(DEBUG, sc, "Scheduling periodic poll for slowpath link events");
		ret = rte_eal_alarm_set(BNX2X_SP_TIMER_PERIOD,
					bnx2x_periodic_start, (void *)eth_dev);
		if (ret) {
			PMD_DRV_LOG(ERR, sc, "Unable to start periodic"
					     " timer rc %d", ret);
			return -EINVAL;
		}
	}

	/* MAC storage is embedded in dev_private (see eth_bnx2x_dev_uninit). */
	eth_dev->data->mac_addrs =
		(struct rte_ether_addr *)sc->link_params.mac_addr;

	if (IS_VF(sc)) {
		rte_spinlock_init(&sc->vf2pf_lock);

		/* DMA region for the VF->PF request/response mailbox. */
		ret = bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_mbx_msg),
				      &sc->vf2pf_mbox_mapping, "vf2pf_mbox",
				      RTE_CACHE_LINE_SIZE);
		if (ret)
			goto out;

		sc->vf2pf_mbox = (struct bnx2x_vf_mbx_msg *)
					 sc->vf2pf_mbox_mapping.vaddr;

		/* DMA region the PF uses to post bulletin updates. */
		ret = bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_bulletin),
				      &sc->pf2vf_bulletin_mapping, "vf2pf_bull",
				      RTE_CACHE_LINE_SIZE);
		if (ret)
			goto out;

		sc->pf2vf_bulletin = (struct bnx2x_vf_bulletin *)
					     sc->pf2vf_bulletin_mapping.vaddr;

		ret = bnx2x_vf_get_resources(sc, sc->max_tx_queues,
					     sc->max_rx_queues);
		if (ret)
			goto out;
	}

	return 0;

out:
	/* Error unwind: ensure the periodic alarm is not left armed. */
	if (IS_PF(sc))
		bnx2x_periodic_stop(eth_dev);

	return ret;
}
701 
/* PF ethdev init entry point: run the shared init path as a PF. */
static int
eth_bnx2x_dev_init(struct rte_eth_dev *eth_dev)
{
	struct bnx2x_softc *sc = eth_dev->data->dev_private;
	PMD_INIT_FUNC_TRACE(sc);
	return bnx2x_common_dev_init(eth_dev, 0);
}
709 
/* VF ethdev init entry point: run the shared init path as a VF. */
static int
eth_bnx2xvf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct bnx2x_softc *sc = eth_dev->data->dev_private;
	PMD_INIT_FUNC_TRACE(sc);
	return bnx2x_common_dev_init(eth_dev, 1);
}
717 
/* ethdev uninit: detach the MAC-address pointer so the generic teardown
 * does not try to free memory that lives inside dev_private.
 */
static int eth_bnx2x_dev_uninit(struct rte_eth_dev *eth_dev)
{
	/* mac_addrs must not be freed alone because part of dev_private */
	eth_dev->data->mac_addrs = NULL;
	return 0;
}
724 
725 static struct rte_pci_driver rte_bnx2x_pmd;
726 static struct rte_pci_driver rte_bnx2xvf_pmd;
727 
728 static int eth_bnx2x_pci_probe(struct rte_pci_driver *pci_drv,
729 	struct rte_pci_device *pci_dev)
730 {
731 	if (pci_drv == &rte_bnx2x_pmd)
732 		return rte_eth_dev_pci_generic_probe(pci_dev,
733 				sizeof(struct bnx2x_softc), eth_bnx2x_dev_init);
734 	else if (pci_drv == &rte_bnx2xvf_pmd)
735 		return rte_eth_dev_pci_generic_probe(pci_dev,
736 				sizeof(struct bnx2x_softc), eth_bnx2xvf_dev_init);
737 	else
738 		return -EINVAL;
739 }
740 
/* Shared PCI remove: tear the ethdev down via the generic helper. */
static int eth_bnx2x_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_bnx2x_dev_uninit);
}
745 
/* PF PCI driver: needs BAR mapping and supports link-state interrupts. */
static struct rte_pci_driver rte_bnx2x_pmd = {
	.id_table = pci_id_bnx2x_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_bnx2x_pci_probe,
	.remove = eth_bnx2x_pci_remove,
};
752 
753 /*
754  * virtual function driver struct
755  */
static struct rte_pci_driver rte_bnx2xvf_pmd = {
	.id_table = pci_id_bnx2xvf_map,
	/* No RTE_PCI_DRV_INTR_LSC: the VF learns of link/channel changes
	 * via the PF bulletin board (see bnx2xvf_dev_link_update).
	 */
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_bnx2x_pci_probe,
	.remove = eth_bnx2x_pci_remove,
};
762 
/* Register the PF and VF drivers, their PCI ID tables and kmod deps. */
RTE_PMD_REGISTER_PCI(net_bnx2x, rte_bnx2x_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnx2x, pci_id_bnx2x_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnx2x, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_bnx2xvf, rte_bnx2xvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnx2xvf, pci_id_bnx2xvf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnx2xvf, "* igb_uio | vfio-pci");
769 
/* Constructor: register the driver's log types, defaulting to NOTICE. */
RTE_INIT(bnx2x_init_log)
{
	bnx2x_logtype_init = rte_log_register("pmd.net.bnx2x.init");
	if (bnx2x_logtype_init >= 0)
		rte_log_set_level(bnx2x_logtype_init, RTE_LOG_NOTICE);
	bnx2x_logtype_driver = rte_log_register("pmd.net.bnx2x.driver");
	if (bnx2x_logtype_driver >= 0)
		rte_log_set_level(bnx2x_logtype_driver, RTE_LOG_NOTICE);
}
779