/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
 * Copyright (c) 2015-2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include "bnx2x.h"
#include "bnx2x_rxtx.h"

#include <rte_string_fns.h>
#include <rte_dev.h>
#include <rte_ethdev_pci.h>
#include <rte_alarm.h>

int bnx2x_logtype_init;
int bnx2x_logtype_driver;

/*
 * The set of PCI devices this driver supports
 */
#define BROADCOM_PCI_VENDOR_ID 0x14E4
static const struct rte_pci_id pci_id_bnx2x_map[] = {
        { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57800) },
        { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57711) },
        { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810) },
        { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811) },
        { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_OBS) },
        { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_4_10) },
        { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_2_20) },
#ifdef RTE_LIBRTE_BNX2X_MF_SUPPORT
        { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810_MF) },
        { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811_MF) },
        { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_MF) },
#endif
        { .vendor_id = 0, }
};

static const struct rte_pci_id pci_id_bnx2xvf_map[] = {
        { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57800_VF) },
        { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810_VF) },
        { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811_VF) },
        { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_VF) },
        { .vendor_id = 0, }
};

/*
 * Most adapter counters are kept as split 32-bit hi/lo words that are
 * combined with HILO_U64() at read time; an entry whose hi and lo offsets
 * are identical refers to a single counter that is read directly.
 */
struct rte_bnx2x_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        uint32_t offset_hi;
        uint32_t offset_lo;
};

static const struct rte_bnx2x_xstats_name_off bnx2x_xstats_strings[] = {
        {"rx_buffer_drops",
                offsetof(struct bnx2x_eth_stats, brb_drop_hi),
                offsetof(struct bnx2x_eth_stats, brb_drop_lo)},
        {"rx_buffer_truncates",
                offsetof(struct bnx2x_eth_stats, brb_truncate_hi),
                offsetof(struct bnx2x_eth_stats, brb_truncate_lo)},
        {"rx_buffer_truncate_discard",
                offsetof(struct bnx2x_eth_stats, brb_truncate_discard),
                offsetof(struct bnx2x_eth_stats, brb_truncate_discard)},
        {"mac_filter_discard",
                offsetof(struct bnx2x_eth_stats, mac_filter_discard),
                offsetof(struct bnx2x_eth_stats, mac_filter_discard)},
        {"no_match_vlan_tag_discard",
                offsetof(struct bnx2x_eth_stats, mf_tag_discard),
                offsetof(struct bnx2x_eth_stats, mf_tag_discard)},
        {"tx_pause",
                offsetof(struct bnx2x_eth_stats, pause_frames_sent_hi),
                offsetof(struct bnx2x_eth_stats, pause_frames_sent_lo)},
        {"rx_pause",
                offsetof(struct bnx2x_eth_stats, pause_frames_received_hi),
                offsetof(struct bnx2x_eth_stats, pause_frames_received_lo)},
        {"tx_priority_flow_control",
                offsetof(struct bnx2x_eth_stats, pfc_frames_sent_hi),
                offsetof(struct bnx2x_eth_stats, pfc_frames_sent_lo)},
        {"rx_priority_flow_control",
                offsetof(struct bnx2x_eth_stats, pfc_frames_received_hi),
                offsetof(struct bnx2x_eth_stats, pfc_frames_received_lo)}
};
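/*
 * Re-sample the PHY state from link_vars and publish it through
 * rte_eth_linkstatus_set(). A memory barrier is issued before sampling so
 * that the values most recently written by the link/slowpath code are read.
 */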
static int
bnx2x_link_update(struct rte_eth_dev *dev)
{
        struct bnx2x_softc *sc = dev->data->dev_private;
        struct rte_eth_link link;

        PMD_INIT_FUNC_TRACE(sc);

        bnx2x_link_status_update(sc);
        memset(&link, 0, sizeof(link));
        mb();
        link.link_speed = sc->link_vars.line_speed;
        switch (sc->link_vars.duplex) {
        case DUPLEX_FULL:
                link.link_duplex = ETH_LINK_FULL_DUPLEX;
                break;
        case DUPLEX_HALF:
                link.link_duplex = ETH_LINK_HALF_DUPLEX;
                break;
        }
        link.link_autoneg = !(dev->data->dev_conf.link_speeds &
                        ETH_LINK_SPEED_FIXED);
        link.link_status = sc->link_vars.link_up;

        return rte_eth_linkstatus_set(dev, &link);
}

static void
bnx2x_interrupt_action(struct rte_eth_dev *dev, int intr_cxt)
{
        struct bnx2x_softc *sc = dev->data->dev_private;
        uint32_t link_status;

        bnx2x_intr_legacy(sc);

        if ((atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO) &&
            !intr_cxt)
                bnx2x_periodic_callout(sc);
        link_status = REG_RD(sc, sc->link_params.shmem_base +
                        offsetof(struct shmem_region,
                                port_mb[sc->link_params.port].link_status));
        if ((link_status & LINK_STATUS_LINK_UP) != dev->data->dev_link.link_status)
                bnx2x_link_update(dev);
}

static void
bnx2x_interrupt_handler(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct bnx2x_softc *sc = dev->data->dev_private;

        PMD_DEBUG_PERIODIC_LOG(INFO, sc, "Interrupt handled");

        bnx2x_interrupt_action(dev, 1);
        rte_intr_enable(&sc->pci_dev->intr_handle);
}

static void bnx2x_periodic_start(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct bnx2x_softc *sc = dev->data->dev_private;
        int ret = 0;

        atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
        bnx2x_interrupt_action(dev, 0);
        if (IS_PF(sc)) {
                /* On the PF the poll is driven by a self-rearming EAL alarm */
                ret = rte_eal_alarm_set(BNX2X_SP_TIMER_PERIOD,
                                        bnx2x_periodic_start, (void *)dev);
                if (ret) {
                        PMD_DRV_LOG(ERR, sc, "Unable to start periodic"
                                             " timer rc %d", ret);
                        assert(false && "Unable to start periodic timer");
                }
        }
}

void bnx2x_periodic_stop(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct bnx2x_softc *sc = dev->data->dev_private;

        atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);

        rte_eal_alarm_cancel(bnx2x_periodic_start, (void *)dev);

        PMD_DRV_LOG(DEBUG, sc, "Periodic poll stopped");
}

/*
 * Devops - driver callbacks invoked by the application through the
 * rte_ethdev API
 */

static int
bnx2x_dev_configure(struct rte_eth_dev *dev)
{
        struct bnx2x_softc *sc = dev->data->dev_private;
        struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;

        int mp_ncpus = sysconf(_SC_NPROCESSORS_CONF);

        PMD_INIT_FUNC_TRACE(sc);

        if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                sc->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len;
                dev->data->mtu = sc->mtu;
        }

        if (dev->data->nb_tx_queues > dev->data->nb_rx_queues) {
                PMD_DRV_LOG(ERR, sc, "The number of TX queues is greater than number of RX queues");
                return -EINVAL;
        }

        sc->num_queues = MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
        if (sc->num_queues > mp_ncpus) {
                PMD_DRV_LOG(ERR, sc, "The number of queues is more than number of CPUs");
                return -EINVAL;
        }

        PMD_DRV_LOG(DEBUG, sc, "num_queues=%d, mtu=%d",
                    sc->num_queues, sc->mtu);

        /* allocate ilt */
        if (bnx2x_alloc_ilt_mem(sc) != 0) {
                PMD_DRV_LOG(ERR, sc, "bnx2x_alloc_ilt_mem failed");
                return -ENXIO;
        }

        /* allocate the host hardware/software hsi structures */
        if (bnx2x_alloc_hsi_mem(sc) != 0) {
                PMD_DRV_LOG(ERR, sc, "bnx2x_alloc_hsi_mem failed");
                bnx2x_free_ilt_mem(sc);
                return -ENXIO;
        }

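        /*
         * Install the dummy rx/tx burst handlers for now; the real ones
         * are installed in dev_start once the queues are fully set up.
         */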
        bnx2x_dev_rxtx_init_dummy(dev);
        return 0;
}

static int
bnx2x_dev_start(struct rte_eth_dev *dev)
{
        struct bnx2x_softc *sc = dev->data->dev_private;
        int ret = 0;

        PMD_INIT_FUNC_TRACE(sc);

        /* start the periodic callout */
        if (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP) {
                bnx2x_periodic_start(dev);
                PMD_DRV_LOG(DEBUG, sc, "Periodic poll re-started");
        }

        ret = bnx2x_init(sc);
        if (ret) {
                PMD_DRV_LOG(DEBUG, sc, "bnx2x_init failed (%d)", ret);
                return -1;
        }

        if (IS_PF(sc)) {
                rte_intr_callback_register(&sc->pci_dev->intr_handle,
                                bnx2x_interrupt_handler, (void *)dev);

                if (rte_intr_enable(&sc->pci_dev->intr_handle))
                        PMD_DRV_LOG(ERR, sc, "rte_intr_enable failed");
        }

        /* switch from the dummy to the real rx/tx burst handlers */
        bnx2x_dev_rxtx_init(dev);

        bnx2x_print_device_info(sc);

        return ret;
}

static void
bnx2x_dev_stop(struct rte_eth_dev *dev)
{
        struct bnx2x_softc *sc = dev->data->dev_private;
        int ret = 0;

        PMD_INIT_FUNC_TRACE(sc);

        bnx2x_dev_rxtx_init_dummy(dev);

        if (IS_PF(sc)) {
                rte_intr_disable(&sc->pci_dev->intr_handle);
                rte_intr_callback_unregister(&sc->pci_dev->intr_handle,
                                bnx2x_interrupt_handler, (void *)dev);
        }

        /* stop the periodic callout */
        bnx2x_periodic_stop(dev);

        ret = bnx2x_nic_unload(sc, UNLOAD_NORMAL, FALSE);
        if (ret)
                PMD_DRV_LOG(DEBUG, sc, "bnx2x_nic_unload failed (%d)", ret);
}

static void
bnx2x_dev_close(struct rte_eth_dev *dev)
{
        struct bnx2x_softc *sc = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE(sc);

        if (IS_VF(sc))
                bnx2x_vf_close(sc);

        bnx2x_dev_clear_queues(dev);
        memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));

        /* free the host hardware/software hsi structures */
        bnx2x_free_hsi_mem(sc);

        /* free ilt */
        bnx2x_free_ilt_mem(sc);
}

/*
 * Promiscuous and all-multicast state are tracked together so that
 * enabling or disabling one mode does not silently clear the other.
 */
static void
bnx2x_promisc_enable(struct rte_eth_dev *dev)
{
        struct bnx2x_softc *sc = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE(sc);
        sc->rx_mode = BNX2X_RX_MODE_PROMISC;
        if (rte_eth_allmulticast_get(dev->data->port_id) == 1)
                sc->rx_mode = BNX2X_RX_MODE_ALLMULTI_PROMISC;
        bnx2x_set_rx_mode(sc);
}

static void
bnx2x_promisc_disable(struct rte_eth_dev *dev)
{
        struct bnx2x_softc *sc = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE(sc);
        sc->rx_mode = BNX2X_RX_MODE_NORMAL;
        if (rte_eth_allmulticast_get(dev->data->port_id) == 1)
                sc->rx_mode = BNX2X_RX_MODE_ALLMULTI;
        bnx2x_set_rx_mode(sc);
}

static void
bnx2x_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct bnx2x_softc *sc = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE(sc);
        sc->rx_mode = BNX2X_RX_MODE_ALLMULTI;
        if (rte_eth_promiscuous_get(dev->data->port_id) == 1)
                sc->rx_mode = BNX2X_RX_MODE_ALLMULTI_PROMISC;
        bnx2x_set_rx_mode(sc);
}

static void
bnx2x_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct bnx2x_softc *sc = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE(sc);
        sc->rx_mode = BNX2X_RX_MODE_NORMAL;
        if (rte_eth_promiscuous_get(dev->data->port_id) == 1)
                sc->rx_mode = BNX2X_RX_MODE_PROMISC;
        bnx2x_set_rx_mode(sc);
}
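/*
 * link_update devop for the PF. The VF variant below additionally checks
 * the PF-to-VF bulletin board and forces the link down when the PF
 * reports the channel as dead.
 */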
static int
bnx2x_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
        struct bnx2x_softc *sc = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE(sc);

        return bnx2x_link_update(dev);
}

static int
bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
        struct bnx2x_softc *sc = dev->data->dev_private;
        int ret = 0;

        ret = bnx2x_link_update(dev);

        bnx2x_check_bull(sc);
        if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
                PMD_DRV_LOG(ERR, sc, "PF indicated channel is down. "
                                "VF device is no longer operational");
                dev->data->dev_link.link_status = ETH_LINK_DOWN;
        }

        return ret;
}

static int
bnx2x_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        struct bnx2x_softc *sc = dev->data->dev_private;
        uint32_t brb_truncate_discard;
        uint64_t brb_drops;
        uint64_t brb_truncates;

        PMD_INIT_FUNC_TRACE(sc);

        bnx2x_stats_handle(sc, STATS_EVENT_UPDATE);

        memset(stats, 0, sizeof(struct rte_eth_stats));

        stats->ipackets =
                HILO_U64(sc->eth_stats.total_unicast_packets_received_hi,
                         sc->eth_stats.total_unicast_packets_received_lo) +
                HILO_U64(sc->eth_stats.total_multicast_packets_received_hi,
                         sc->eth_stats.total_multicast_packets_received_lo) +
                HILO_U64(sc->eth_stats.total_broadcast_packets_received_hi,
                         sc->eth_stats.total_broadcast_packets_received_lo);

        stats->opackets =
                HILO_U64(sc->eth_stats.total_unicast_packets_transmitted_hi,
                         sc->eth_stats.total_unicast_packets_transmitted_lo) +
                HILO_U64(sc->eth_stats.total_multicast_packets_transmitted_hi,
                         sc->eth_stats.total_multicast_packets_transmitted_lo) +
                HILO_U64(sc->eth_stats.total_broadcast_packets_transmitted_hi,
                         sc->eth_stats.total_broadcast_packets_transmitted_lo);

        stats->ibytes =
                HILO_U64(sc->eth_stats.total_bytes_received_hi,
                         sc->eth_stats.total_bytes_received_lo);

        stats->obytes =
                HILO_U64(sc->eth_stats.total_bytes_transmitted_hi,
                         sc->eth_stats.total_bytes_transmitted_lo);

        stats->ierrors =
                HILO_U64(sc->eth_stats.error_bytes_received_hi,
                         sc->eth_stats.error_bytes_received_lo);

        stats->oerrors = 0;

        stats->rx_nombuf =
                HILO_U64(sc->eth_stats.no_buff_discard_hi,
                         sc->eth_stats.no_buff_discard_lo);

        brb_drops =
                HILO_U64(sc->eth_stats.brb_drop_hi,
                         sc->eth_stats.brb_drop_lo);

        brb_truncates =
                HILO_U64(sc->eth_stats.brb_truncate_hi,
                         sc->eth_stats.brb_truncate_lo);

        brb_truncate_discard = sc->eth_stats.brb_truncate_discard;

        /* imissed aggregates every device-side drop reason */
        stats->imissed = brb_drops + brb_truncates +
                         brb_truncate_discard + stats->rx_nombuf;

        return 0;
}

static int
bnx2x_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
                       struct rte_eth_xstat_name *xstats_names,
                       __rte_unused unsigned limit)
{
        unsigned int i, stat_cnt = RTE_DIM(bnx2x_xstats_strings);

        if (xstats_names != NULL)
                for (i = 0; i < stat_cnt; i++)
                        strlcpy(xstats_names[i].name,
                                bnx2x_xstats_strings[i].name,
                                sizeof(xstats_names[i].name));

        return stat_cnt;
}

static int
bnx2x_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                     unsigned int n)
{
        struct bnx2x_softc *sc = dev->data->dev_private;
        unsigned int num = RTE_DIM(bnx2x_xstats_strings);

        if (n < num)
                return num;

        bnx2x_stats_handle(sc, STATS_EVENT_UPDATE);

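        /*
         * Walk the xstats table: entries with distinct hi/lo offsets are
         * split 32-bit counters combined via HILO_U64(); entries whose
         * offsets match are read as a single value.
         */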
        /* bound the loop by the table size, not the caller's n, to avoid
         * reading past the end of bnx2x_xstats_strings */
        for (num = 0; num < RTE_DIM(bnx2x_xstats_strings); num++) {
                if (bnx2x_xstats_strings[num].offset_hi !=
                    bnx2x_xstats_strings[num].offset_lo)
                        xstats[num].value = HILO_U64(
                                        *(uint32_t *)((char *)&sc->eth_stats +
                                        bnx2x_xstats_strings[num].offset_hi),
                                        *(uint32_t *)((char *)&sc->eth_stats +
                                        bnx2x_xstats_strings[num].offset_lo));
                else
                        xstats[num].value =
                                        *(uint64_t *)((char *)&sc->eth_stats +
                                        bnx2x_xstats_strings[num].offset_lo);
                xstats[num].id = num;
        }

        return num;
}

static void
bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
        struct bnx2x_softc *sc = dev->data->dev_private;

        dev_info->max_rx_queues  = sc->max_rx_queues;
        dev_info->max_tx_queues  = sc->max_tx_queues;
        dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE;
        dev_info->max_rx_pktlen  = BNX2X_MAX_RX_PKT_LEN;
        dev_info->max_mac_addrs  = BNX2X_MAX_MAC_ADDRS;
        dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G;
        dev_info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
}

static int
bnx2x_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
                uint32_t index, uint32_t pool)
{
        struct bnx2x_softc *sc = dev->data->dev_private;

        if (sc->mac_ops.mac_addr_add) {
                sc->mac_ops.mac_addr_add(dev, mac_addr, index, pool);
                return 0;
        }
        return -ENOTSUP;
}

static void
bnx2x_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
        struct bnx2x_softc *sc = dev->data->dev_private;

        if (sc->mac_ops.mac_addr_remove)
                sc->mac_ops.mac_addr_remove(dev, index);
}

static const struct eth_dev_ops bnx2x_eth_dev_ops = {
        .dev_configure                = bnx2x_dev_configure,
        .dev_start                    = bnx2x_dev_start,
        .dev_stop                     = bnx2x_dev_stop,
        .dev_close                    = bnx2x_dev_close,
        .promiscuous_enable           = bnx2x_promisc_enable,
        .promiscuous_disable          = bnx2x_promisc_disable,
        .allmulticast_enable          = bnx2x_dev_allmulticast_enable,
        .allmulticast_disable         = bnx2x_dev_allmulticast_disable,
        .link_update                  = bnx2x_dev_link_update,
        .stats_get                    = bnx2x_dev_stats_get,
        .xstats_get                   = bnx2x_dev_xstats_get,
        .xstats_get_names             = bnx2x_get_xstats_names,
        .dev_infos_get                = bnx2x_dev_infos_get,
        .rx_queue_setup               = bnx2x_dev_rx_queue_setup,
        .rx_queue_release             = bnx2x_dev_rx_queue_release,
        .tx_queue_setup               = bnx2x_dev_tx_queue_setup,
        .tx_queue_release             = bnx2x_dev_tx_queue_release,
        .mac_addr_add                 = bnx2x_mac_addr_add,
        .mac_addr_remove              = bnx2x_mac_addr_remove,
};

/*
 * dev_ops for virtual function
 */
static const struct eth_dev_ops bnx2xvf_eth_dev_ops = {
        .dev_configure                = bnx2x_dev_configure,
        .dev_start                    = bnx2x_dev_start,
        .dev_stop                     = bnx2x_dev_stop,
        .dev_close                    = bnx2x_dev_close,
        .promiscuous_enable           = bnx2x_promisc_enable,
        .promiscuous_disable          = bnx2x_promisc_disable,
        .allmulticast_enable          = bnx2x_dev_allmulticast_enable,
        .allmulticast_disable         = bnx2x_dev_allmulticast_disable,
        .link_update                  = bnx2xvf_dev_link_update,
        .stats_get                    = bnx2x_dev_stats_get,
        .xstats_get                   = bnx2x_dev_xstats_get,
        .xstats_get_names             = bnx2x_get_xstats_names,
        .dev_infos_get                = bnx2x_dev_infos_get,
        .rx_queue_setup               = bnx2x_dev_rx_queue_setup,
        .rx_queue_release             = bnx2x_dev_rx_queue_release,
        .tx_queue_setup               = bnx2x_dev_tx_queue_setup,
        .tx_queue_release             = bnx2x_dev_tx_queue_release,
        .mac_addr_add                 = bnx2x_mac_addr_add,
        .mac_addr_remove              = bnx2x_mac_addr_remove,
};

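/*
 * Common probe-time initialization shared by the PF and VF paths: map the
 * BARs, load the firmware, attach the adapter and, for a VF, set up the
 * VF-to-PF mailbox and the PF-to-VF bulletin board.
 */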
static int
bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
{
        int ret = 0;
        struct rte_pci_device *pci_dev;
        struct rte_pci_addr pci_addr;
        struct bnx2x_softc *sc;
        static bool adapter_info = true;

        /* Extract key data structures */
        sc = eth_dev->data->dev_private;
        pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
        pci_addr = pci_dev->addr;

        snprintf(sc->devinfo.name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
                 pci_addr.bus, pci_addr.devid, pci_addr.function,
                 eth_dev->data->port_id);

        PMD_INIT_FUNC_TRACE(sc);

        eth_dev->dev_ops = is_vf ? &bnx2xvf_eth_dev_ops : &bnx2x_eth_dev_ops;

        rte_eth_copy_pci_info(eth_dev, pci_dev);

        sc->pcie_bus    = pci_dev->addr.bus;
        sc->pcie_device = pci_dev->addr.devid;

        sc->devinfo.vendor_id    = pci_dev->id.vendor_id;
        sc->devinfo.device_id    = pci_dev->id.device_id;
        sc->devinfo.subvendor_id = pci_dev->id.subsystem_vendor_id;
        sc->devinfo.subdevice_id = pci_dev->id.subsystem_device_id;

        if (is_vf)
                sc->flags = BNX2X_IS_VF_FLAG;

        sc->pcie_func = pci_dev->addr.function;
        /* BAR0 is the register window; BAR1 holds the doorbells. On a VF
         * the doorbells live inside BAR0 at offset PXP_VF_ADDR_DB_START. */
        sc->bar[BAR0].base_addr = (void *)pci_dev->mem_resource[0].addr;
        if (is_vf)
                sc->bar[BAR1].base_addr = (void *)
                        ((uintptr_t)pci_dev->mem_resource[0].addr +
                         PXP_VF_ADDR_DB_START);
        else
                sc->bar[BAR1].base_addr = pci_dev->mem_resource[2].addr;

        assert(sc->bar[BAR0].base_addr);
        assert(sc->bar[BAR1].base_addr);

        bnx2x_load_firmware(sc);
        assert(sc->firmware);

        if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
                sc->udp_rss = 1;

        sc->rx_budget = BNX2X_RX_BUDGET;
        sc->hc_rx_ticks = BNX2X_RX_TICKS;
        sc->hc_tx_ticks = BNX2X_TX_TICKS;

        sc->interrupt_mode = INTR_MODE_SINGLE_MSIX;
        sc->rx_mode = BNX2X_RX_MODE_NORMAL;

        sc->pci_dev = pci_dev;
        ret = bnx2x_attach(sc);
        if (ret) {
                PMD_DRV_LOG(ERR, sc, "bnx2x_attach failed (%d)", ret);
                return ret;
        }

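        /*
         * 'adapter_info' is a function-local static, so the banner below
         * is printed only once even when multiple ports are probed.
         */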
        /* Print important adapter info for the user. */
        if (adapter_info) {
                bnx2x_print_adapter_info(sc);
                adapter_info = false;
        }

        /* schedule periodic poll for slowpath link events */
        if (IS_PF(sc)) {
                PMD_DRV_LOG(DEBUG, sc, "Scheduling periodic poll for slowpath link events");
                ret = rte_eal_alarm_set(BNX2X_SP_TIMER_PERIOD,
                                        bnx2x_periodic_start, (void *)eth_dev);
                if (ret) {
                        PMD_DRV_LOG(ERR, sc, "Unable to start periodic"
                                             " timer rc %d", ret);
                        return -EINVAL;
                }
        }

        /* reuse the MAC address already stored in link_params; no separate
         * allocation is made for mac_addrs */
        eth_dev->data->mac_addrs =
                (struct rte_ether_addr *)sc->link_params.mac_addr;

        if (IS_VF(sc)) {
                rte_spinlock_init(&sc->vf2pf_lock);

                ret = bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_mbx_msg),
                                      &sc->vf2pf_mbox_mapping, "vf2pf_mbox",
                                      RTE_CACHE_LINE_SIZE);
                if (ret)
                        goto out;

                sc->vf2pf_mbox = (struct bnx2x_vf_mbx_msg *)
                                 sc->vf2pf_mbox_mapping.vaddr;

                ret = bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_bulletin),
                                      &sc->pf2vf_bulletin_mapping, "vf2pf_bull",
                                      RTE_CACHE_LINE_SIZE);
                if (ret)
                        goto out;

                sc->pf2vf_bulletin = (struct bnx2x_vf_bulletin *)
                                     sc->pf2vf_bulletin_mapping.vaddr;

                ret = bnx2x_vf_get_resources(sc, sc->max_tx_queues,
                                             sc->max_rx_queues);
                if (ret)
                        goto out;
        }

        return 0;

out:
        bnx2x_periodic_stop(eth_dev);
        return ret;
}

static int
eth_bnx2x_dev_init(struct rte_eth_dev *eth_dev)
{
        struct bnx2x_softc *sc = eth_dev->data->dev_private;

        PMD_INIT_FUNC_TRACE(sc);
        return bnx2x_common_dev_init(eth_dev, 0);
}

static int
eth_bnx2xvf_dev_init(struct rte_eth_dev *eth_dev)
{
        struct bnx2x_softc *sc = eth_dev->data->dev_private;

        PMD_INIT_FUNC_TRACE(sc);
        return bnx2x_common_dev_init(eth_dev, 1);
}

static struct rte_pci_driver rte_bnx2x_pmd;
static struct rte_pci_driver rte_bnx2xvf_pmd;

/* The PF and VF drivers share one probe routine; dispatch on whichever
 * driver matched the device. */
static int eth_bnx2x_pci_probe(struct rte_pci_driver *pci_drv,
        struct rte_pci_device *pci_dev)
{
        if (pci_drv == &rte_bnx2x_pmd)
                return rte_eth_dev_pci_generic_probe(pci_dev,
                                sizeof(struct bnx2x_softc), eth_bnx2x_dev_init);
        else if (pci_drv == &rte_bnx2xvf_pmd)
                return rte_eth_dev_pci_generic_probe(pci_dev,
                                sizeof(struct bnx2x_softc), eth_bnx2xvf_dev_init);
        else
                return -EINVAL;
}

static int eth_bnx2x_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
}

static struct rte_pci_driver rte_bnx2x_pmd = {
        .id_table = pci_id_bnx2x_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        .probe = eth_bnx2x_pci_probe,
        .remove = eth_bnx2x_pci_remove,
};

/*
 * virtual function driver struct
 */
static struct rte_pci_driver rte_bnx2xvf_pmd = {
        .id_table = pci_id_bnx2xvf_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        .probe = eth_bnx2x_pci_probe,
        .remove = eth_bnx2x_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_bnx2x, rte_bnx2x_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnx2x, pci_id_bnx2x_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnx2x, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_bnx2xvf, rte_bnx2xvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnx2xvf, pci_id_bnx2xvf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnx2xvf, "* igb_uio | vfio-pci");

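/*
 * Register the driver's two dynamic log types at constructor time; both
 * default to NOTICE and can be raised at runtime through the EAL
 * --log-level option.
 */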
RTE_INIT(bnx2x_init_log)
{
        bnx2x_logtype_init = rte_log_register("pmd.net.bnx2x.init");
        if (bnx2x_logtype_init >= 0)
                rte_log_set_level(bnx2x_logtype_init, RTE_LOG_NOTICE);
        bnx2x_logtype_driver = rte_log_register("pmd.net.bnx2x.driver");
        if (bnx2x_logtype_driver >= 0)
                rte_log_set_level(bnx2x_logtype_driver, RTE_LOG_NOTICE);
}