1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2014-2021 Broadcom 3 * All rights reserved. 4 */ 5 6 #include <inttypes.h> 7 #include <stdbool.h> 8 9 #include <rte_dev.h> 10 #include <ethdev_driver.h> 11 #include <ethdev_pci.h> 12 #include <rte_malloc.h> 13 #include <rte_cycles.h> 14 #include <rte_alarm.h> 15 #include <rte_kvargs.h> 16 #include <rte_vect.h> 17 18 #include "bnxt.h" 19 #include "bnxt_filter.h" 20 #include "bnxt_hwrm.h" 21 #include "bnxt_irq.h" 22 #include "bnxt_reps.h" 23 #include "bnxt_ring.h" 24 #include "bnxt_rxq.h" 25 #include "bnxt_rxr.h" 26 #include "bnxt_stats.h" 27 #include "bnxt_txq.h" 28 #include "bnxt_txr.h" 29 #include "bnxt_vnic.h" 30 #include "hsi_struct_def_dpdk.h" 31 #include "bnxt_nvm_defs.h" 32 #include "bnxt_tf_common.h" 33 #include "ulp_flow_db.h" 34 #include "rte_pmd_bnxt.h" 35 36 #define DRV_MODULE_NAME "bnxt" 37 static const char bnxt_version[] = 38 "Broadcom NetXtreme driver " DRV_MODULE_NAME; 39 40 /* 41 * The set of PCI devices this driver supports 42 */ 43 static const struct rte_pci_id bnxt_pci_id_map[] = { 44 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 45 BROADCOM_DEV_ID_STRATUS_NIC_VF1) }, 46 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 47 BROADCOM_DEV_ID_STRATUS_NIC_VF2) }, 48 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) }, 49 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) }, 50 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) }, 51 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) }, 52 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) }, 53 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) }, 54 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) }, 55 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) }, 56 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) }, 57 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) }, 58 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) }, 59 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) }, 60 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) }, 61 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) }, 62 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) }, 63 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) }, 64 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) }, 65 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) }, 66 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) }, 67 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) }, 68 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) }, 69 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) }, 70 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) }, 71 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) }, 72 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508) }, 73 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504) }, 74 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502) }, 75 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF1) }, 76 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF2) }, 77 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF1) }, 78 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF1) }, 79 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF1) }, 80 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF2) }, 81 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF2) }, 82 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF2) }, 83 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58812) }, 84 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58814) }, 85 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58818) }, 86 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58818_VF) }, 87 { .vendor_id = 0, /* sentinel */ }, 88 }; 89 90 #define BNXT_DEVARG_FLOW_XSTAT "flow-xstat" 91 #define BNXT_DEVARG_MAX_NUM_KFLOWS "max-num-kflows" 92 #define BNXT_DEVARG_REPRESENTOR "representor" 93 #define BNXT_DEVARG_REP_BASED_PF "rep-based-pf" 94 #define BNXT_DEVARG_REP_IS_PF "rep-is-pf" 95 #define BNXT_DEVARG_REP_Q_R2F "rep-q-r2f" 96 #define BNXT_DEVARG_REP_Q_F2R "rep-q-f2r" 97 #define BNXT_DEVARG_REP_FC_R2F "rep-fc-r2f" 98 #define BNXT_DEVARG_REP_FC_F2R "rep-fc-f2r" 99 #define BNXT_DEVARG_APP_ID "app-id" 100 101 static const char *const bnxt_dev_args[] = { 102 BNXT_DEVARG_REPRESENTOR, 103 BNXT_DEVARG_FLOW_XSTAT, 104 BNXT_DEVARG_MAX_NUM_KFLOWS, 105 BNXT_DEVARG_REP_BASED_PF, 106 BNXT_DEVARG_REP_IS_PF, 107 BNXT_DEVARG_REP_Q_R2F, 108 BNXT_DEVARG_REP_Q_F2R, 109 BNXT_DEVARG_REP_FC_R2F, 110 BNXT_DEVARG_REP_FC_F2R, 111 BNXT_DEVARG_APP_ID, 112 NULL 113 }; 114 115 /* 116 * app-id = an non-negative 8-bit number 117 */ 118 #define BNXT_DEVARG_APP_ID_INVALID(val) ((val) > 255) 119 120 /* 121 * flow_xstat == false to disable the feature 122 * flow_xstat == true to enable the feature 123 */ 124 #define BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat) ((flow_xstat) > 1) 125 126 /* 127 * rep_is_pf == false to indicate VF representor 128 * rep_is_pf == true to indicate PF representor 129 */ 130 #define BNXT_DEVARG_REP_IS_PF_INVALID(rep_is_pf) ((rep_is_pf) > 1) 131 132 /* 133 * rep_based_pf == Physical index of the PF 134 */ 135 #define BNXT_DEVARG_REP_BASED_PF_INVALID(rep_based_pf) ((rep_based_pf) > 15) 136 /* 137 * rep_q_r2f == Logical COS Queue index for the rep to endpoint direction 138 */ 139 #define BNXT_DEVARG_REP_Q_R2F_INVALID(rep_q_r2f) ((rep_q_r2f) > 3) 140 141 /* 142 * rep_q_f2r == Logical COS Queue index for the endpoint to rep direction 143 */ 144 #define BNXT_DEVARG_REP_Q_F2R_INVALID(rep_q_f2r) ((rep_q_f2r) > 3) 145 146 /* 147 * rep_fc_r2f == Flow control for the representor to endpoint direction 148 */ 149 #define BNXT_DEVARG_REP_FC_R2F_INVALID(rep_fc_r2f) ((rep_fc_r2f) > 1) 150 151 /* 152 * rep_fc_f2r == Flow control for the endpoint to representor direction 153 */ 154 #define BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r) ((rep_fc_f2r) > 1) 155 156 int bnxt_cfa_code_dynfield_offset = -1; 157 158 /* 159 * max_num_kflows must be >= 32 160 * and must be a power-of-2 supported value 161 * return: 1 -> invalid 162 * 0 -> valid 163 */ 164 static int bnxt_devarg_max_num_kflow_invalid(uint16_t max_num_kflows) 165 { 166 if (max_num_kflows < 32 || !rte_is_power_of_2(max_num_kflows)) 167 return 1; 168 return 0; 169 } 170 171 static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask); 172 static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev); 173 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev); 174 static int bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev); 175 static void bnxt_cancel_fw_health_check(struct bnxt *bp); 176 static int bnxt_restore_vlan_filters(struct bnxt *bp); 177 static void bnxt_dev_recover(void *arg); 178 static void bnxt_free_error_recovery_info(struct bnxt *bp); 179 static void bnxt_free_rep_info(struct bnxt *bp); 180 static int bnxt_check_fw_ready(struct bnxt *bp); 181 182 int is_bnxt_in_error(struct bnxt *bp) 183 { 184 if (bp->flags & BNXT_FLAG_FATAL_ERROR) 185 return -EIO; 186 if (bp->flags & BNXT_FLAG_FW_RESET) 187 return -EBUSY; 188 189 return 0; 190 } 191 192 /***********************/ 193 194 /* 195 * High level utility functions 196 */ 197 198 static uint16_t bnxt_rss_ctxts(const struct bnxt *bp) 199 { 200 unsigned int num_rss_rings = RTE_MIN(bp->rx_nr_rings, 201 BNXT_RSS_TBL_SIZE_P5); 202 203 if (!BNXT_CHIP_P5(bp)) 204 return 1; 205 206 return RTE_ALIGN_MUL_CEIL(num_rss_rings, 207 BNXT_RSS_ENTRIES_PER_CTX_P5) / 208 BNXT_RSS_ENTRIES_PER_CTX_P5; 209 } 210 211 uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp) 212 { 213 if (!BNXT_CHIP_P5(bp)) 214 return HW_HASH_INDEX_SIZE; 215 216 return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_P5; 217 } 218 219 static void bnxt_free_parent_info(struct bnxt *bp) 220 { 221 rte_free(bp->parent); 222 bp->parent = NULL; 223 } 224 225 static void bnxt_free_pf_info(struct bnxt *bp) 226 { 227 rte_free(bp->pf); 228 bp->pf = NULL; 229 } 230 231 static void bnxt_free_link_info(struct bnxt *bp) 232 { 233 rte_free(bp->link_info); 234 bp->link_info = NULL; 235 } 236 237 static void bnxt_free_leds_info(struct bnxt *bp) 238 { 239 if (BNXT_VF(bp)) 240 return; 241 242 rte_free(bp->leds); 243 bp->leds = NULL; 244 } 245 246 static void bnxt_free_flow_stats_info(struct bnxt *bp) 247 { 248 rte_free(bp->flow_stat); 249 bp->flow_stat = NULL; 250 } 251 252 static void bnxt_free_cos_queues(struct bnxt *bp) 253 { 254 rte_free(bp->rx_cos_queue); 255 bp->rx_cos_queue = NULL; 256 rte_free(bp->tx_cos_queue); 257 bp->tx_cos_queue = NULL; 258 } 259 260 static void bnxt_free_mem(struct bnxt *bp, bool reconfig) 261 { 262 bnxt_free_filter_mem(bp); 263 bnxt_free_vnic_attributes(bp); 264 bnxt_free_vnic_mem(bp); 265 266 /* tx/rx rings are configured as part of *_queue_setup callbacks. 267 * If the number of rings change across fw update, 268 * we don't have much choice except to warn the user. 269 */ 270 if (!reconfig) { 271 bnxt_free_stats(bp); 272 bnxt_free_tx_rings(bp); 273 bnxt_free_rx_rings(bp); 274 } 275 bnxt_free_async_cp_ring(bp); 276 bnxt_free_rxtx_nq_ring(bp); 277 278 rte_free(bp->grp_info); 279 bp->grp_info = NULL; 280 } 281 282 static int bnxt_alloc_parent_info(struct bnxt *bp) 283 { 284 bp->parent = rte_zmalloc("bnxt_parent_info", 285 sizeof(struct bnxt_parent_info), 0); 286 if (bp->parent == NULL) 287 return -ENOMEM; 288 289 return 0; 290 } 291 292 static int bnxt_alloc_pf_info(struct bnxt *bp) 293 { 294 bp->pf = rte_zmalloc("bnxt_pf_info", sizeof(struct bnxt_pf_info), 0); 295 if (bp->pf == NULL) 296 return -ENOMEM; 297 298 return 0; 299 } 300 301 static int bnxt_alloc_link_info(struct bnxt *bp) 302 { 303 bp->link_info = 304 rte_zmalloc("bnxt_link_info", sizeof(struct bnxt_link_info), 0); 305 if (bp->link_info == NULL) 306 return -ENOMEM; 307 308 return 0; 309 } 310 311 static int bnxt_alloc_leds_info(struct bnxt *bp) 312 { 313 if (BNXT_VF(bp)) 314 return 0; 315 316 bp->leds = rte_zmalloc("bnxt_leds", 317 BNXT_MAX_LED * sizeof(struct bnxt_led_info), 318 0); 319 if (bp->leds == NULL) 320 return -ENOMEM; 321 322 return 0; 323 } 324 325 static int bnxt_alloc_cos_queues(struct bnxt *bp) 326 { 327 bp->rx_cos_queue = 328 rte_zmalloc("bnxt_rx_cosq", 329 BNXT_COS_QUEUE_COUNT * 330 sizeof(struct bnxt_cos_queue_info), 331 0); 332 if (bp->rx_cos_queue == NULL) 333 return -ENOMEM; 334 335 bp->tx_cos_queue = 336 rte_zmalloc("bnxt_tx_cosq", 337 BNXT_COS_QUEUE_COUNT * 338 sizeof(struct bnxt_cos_queue_info), 339 0); 340 if (bp->tx_cos_queue == NULL) 341 return -ENOMEM; 342 343 return 0; 344 } 345 346 static int bnxt_alloc_flow_stats_info(struct bnxt *bp) 347 { 348 bp->flow_stat = rte_zmalloc("bnxt_flow_xstat", 349 sizeof(struct bnxt_flow_stat_info), 0); 350 if (bp->flow_stat == NULL) 351 return -ENOMEM; 352 353 return 0; 354 } 355 356 static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig) 357 { 358 int rc; 359 360 rc = bnxt_alloc_ring_grps(bp); 361 if (rc) 362 goto alloc_mem_err; 363 364 rc = bnxt_alloc_async_ring_struct(bp); 365 if (rc) 366 goto alloc_mem_err; 367 368 rc = bnxt_alloc_vnic_mem(bp); 369 if (rc) 370 goto alloc_mem_err; 371 372 rc = bnxt_alloc_vnic_attributes(bp, reconfig); 373 if (rc) 374 goto alloc_mem_err; 375 376 rc = bnxt_alloc_filter_mem(bp); 377 if (rc) 378 goto alloc_mem_err; 379 380 rc = bnxt_alloc_async_cp_ring(bp); 381 if (rc) 382 goto alloc_mem_err; 383 384 rc = bnxt_alloc_rxtx_nq_ring(bp); 385 if (rc) 386 goto alloc_mem_err; 387 388 if (BNXT_FLOW_XSTATS_EN(bp)) { 389 rc = bnxt_alloc_flow_stats_info(bp); 390 if (rc) 391 goto alloc_mem_err; 392 } 393 394 return 0; 395 396 alloc_mem_err: 397 bnxt_free_mem(bp, reconfig); 398 return rc; 399 } 400 401 static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id) 402 { 403 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 404 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 405 uint64_t rx_offloads = dev_conf->rxmode.offloads; 406 struct bnxt_rx_queue *rxq; 407 unsigned int j; 408 int rc; 409 410 rc = bnxt_vnic_grp_alloc(bp, vnic); 411 if (rc) 412 goto err_out; 413 414 PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n", 415 vnic_id, vnic, vnic->fw_grp_ids); 416 417 rc = bnxt_hwrm_vnic_alloc(bp, vnic); 418 if (rc) 419 goto err_out; 420 421 /* Alloc RSS context only if RSS mode is enabled */ 422 if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) { 423 int j, nr_ctxs = bnxt_rss_ctxts(bp); 424 425 /* RSS table size in Thor is 512. 426 * Cap max Rx rings to same value 427 */ 428 if (bp->rx_nr_rings > BNXT_RSS_TBL_SIZE_P5) { 429 PMD_DRV_LOG(ERR, "RxQ cnt %d > reta_size %d\n", 430 bp->rx_nr_rings, BNXT_RSS_TBL_SIZE_P5); 431 goto err_out; 432 } 433 434 rc = 0; 435 for (j = 0; j < nr_ctxs; j++) { 436 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j); 437 if (rc) 438 break; 439 } 440 if (rc) { 441 PMD_DRV_LOG(ERR, 442 "HWRM vnic %d ctx %d alloc failure rc: %x\n", 443 vnic_id, j, rc); 444 goto err_out; 445 } 446 vnic->num_lb_ctxts = nr_ctxs; 447 } 448 449 /* 450 * Firmware sets pf pair in default vnic cfg. If the VLAN strip 451 * setting is not available at this time, it will not be 452 * configured correctly in the CFA. 453 */ 454 if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 455 vnic->vlan_strip = true; 456 else 457 vnic->vlan_strip = false; 458 459 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 460 if (rc) 461 goto err_out; 462 463 rc = bnxt_set_hwrm_vnic_filters(bp, vnic); 464 if (rc) 465 goto err_out; 466 467 for (j = 0; j < bp->rx_num_qs_per_vnic; j++) { 468 rxq = bp->eth_dev->data->rx_queues[j]; 469 470 PMD_DRV_LOG(DEBUG, 471 "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n", 472 j, rxq->vnic, rxq->vnic->fw_grp_ids); 473 474 if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start) 475 rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID; 476 else 477 vnic->rx_queue_cnt++; 478 } 479 480 PMD_DRV_LOG(DEBUG, "vnic->rx_queue_cnt = %d\n", vnic->rx_queue_cnt); 481 482 rc = bnxt_vnic_rss_configure(bp, vnic); 483 if (rc) 484 goto err_out; 485 486 bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); 487 488 rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 489 (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) ? 490 true : false); 491 if (rc) 492 goto err_out; 493 494 return 0; 495 err_out: 496 PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n", 497 vnic_id, rc); 498 return rc; 499 } 500 501 static int bnxt_register_fc_ctx_mem(struct bnxt *bp) 502 { 503 int rc = 0; 504 505 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_in_tbl.dma, 506 &bp->flow_stat->rx_fc_in_tbl.ctx_id); 507 if (rc) 508 return rc; 509 510 PMD_DRV_LOG(DEBUG, 511 "rx_fc_in_tbl.va = %p rx_fc_in_tbl.dma = %p" 512 " rx_fc_in_tbl.ctx_id = %d\n", 513 bp->flow_stat->rx_fc_in_tbl.va, 514 (void *)((uintptr_t)bp->flow_stat->rx_fc_in_tbl.dma), 515 bp->flow_stat->rx_fc_in_tbl.ctx_id); 516 517 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_out_tbl.dma, 518 &bp->flow_stat->rx_fc_out_tbl.ctx_id); 519 if (rc) 520 return rc; 521 522 PMD_DRV_LOG(DEBUG, 523 "rx_fc_out_tbl.va = %p rx_fc_out_tbl.dma = %p" 524 " rx_fc_out_tbl.ctx_id = %d\n", 525 bp->flow_stat->rx_fc_out_tbl.va, 526 (void *)((uintptr_t)bp->flow_stat->rx_fc_out_tbl.dma), 527 bp->flow_stat->rx_fc_out_tbl.ctx_id); 528 529 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_in_tbl.dma, 530 &bp->flow_stat->tx_fc_in_tbl.ctx_id); 531 if (rc) 532 return rc; 533 534 PMD_DRV_LOG(DEBUG, 535 "tx_fc_in_tbl.va = %p tx_fc_in_tbl.dma = %p" 536 " tx_fc_in_tbl.ctx_id = %d\n", 537 bp->flow_stat->tx_fc_in_tbl.va, 538 (void *)((uintptr_t)bp->flow_stat->tx_fc_in_tbl.dma), 539 bp->flow_stat->tx_fc_in_tbl.ctx_id); 540 541 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_out_tbl.dma, 542 &bp->flow_stat->tx_fc_out_tbl.ctx_id); 543 if (rc) 544 return rc; 545 546 PMD_DRV_LOG(DEBUG, 547 "tx_fc_out_tbl.va = %p tx_fc_out_tbl.dma = %p" 548 " tx_fc_out_tbl.ctx_id = %d\n", 549 bp->flow_stat->tx_fc_out_tbl.va, 550 (void *)((uintptr_t)bp->flow_stat->tx_fc_out_tbl.dma), 551 bp->flow_stat->tx_fc_out_tbl.ctx_id); 552 553 memset(bp->flow_stat->rx_fc_out_tbl.va, 554 0, 555 bp->flow_stat->rx_fc_out_tbl.size); 556 rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX, 557 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 558 bp->flow_stat->rx_fc_out_tbl.ctx_id, 559 bp->flow_stat->max_fc, 560 true); 561 if (rc) 562 return rc; 563 564 memset(bp->flow_stat->tx_fc_out_tbl.va, 565 0, 566 bp->flow_stat->tx_fc_out_tbl.size); 567 rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX, 568 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 569 bp->flow_stat->tx_fc_out_tbl.ctx_id, 570 bp->flow_stat->max_fc, 571 true); 572 573 return rc; 574 } 575 576 static int bnxt_alloc_ctx_mem_buf(struct bnxt *bp, char *type, size_t size, 577 struct bnxt_ctx_mem_buf_info *ctx) 578 { 579 if (!ctx) 580 return -EINVAL; 581 582 ctx->va = rte_zmalloc_socket(type, size, 0, 583 bp->eth_dev->device->numa_node); 584 if (ctx->va == NULL) 585 return -ENOMEM; 586 rte_mem_lock_page(ctx->va); 587 ctx->size = size; 588 ctx->dma = rte_mem_virt2iova(ctx->va); 589 if (ctx->dma == RTE_BAD_IOVA) 590 return -ENOMEM; 591 592 return 0; 593 } 594 595 static int bnxt_init_fc_ctx_mem(struct bnxt *bp) 596 { 597 struct rte_pci_device *pdev = bp->pdev; 598 char type[RTE_MEMZONE_NAMESIZE]; 599 uint16_t max_fc; 600 int rc = 0; 601 602 max_fc = bp->flow_stat->max_fc; 603 604 sprintf(type, "bnxt_rx_fc_in_" PCI_PRI_FMT, pdev->addr.domain, 605 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 606 /* 4 bytes for each counter-id */ 607 rc = bnxt_alloc_ctx_mem_buf(bp, type, 608 max_fc * 4, 609 &bp->flow_stat->rx_fc_in_tbl); 610 if (rc) 611 return rc; 612 613 sprintf(type, "bnxt_rx_fc_out_" PCI_PRI_FMT, pdev->addr.domain, 614 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 615 /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */ 616 rc = bnxt_alloc_ctx_mem_buf(bp, type, 617 max_fc * 16, 618 &bp->flow_stat->rx_fc_out_tbl); 619 if (rc) 620 return rc; 621 622 sprintf(type, "bnxt_tx_fc_in_" PCI_PRI_FMT, pdev->addr.domain, 623 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 624 /* 4 bytes for each counter-id */ 625 rc = bnxt_alloc_ctx_mem_buf(bp, type, 626 max_fc * 4, 627 &bp->flow_stat->tx_fc_in_tbl); 628 if (rc) 629 return rc; 630 631 sprintf(type, "bnxt_tx_fc_out_" PCI_PRI_FMT, pdev->addr.domain, 632 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 633 /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */ 634 rc = bnxt_alloc_ctx_mem_buf(bp, type, 635 max_fc * 16, 636 &bp->flow_stat->tx_fc_out_tbl); 637 if (rc) 638 return rc; 639 640 rc = bnxt_register_fc_ctx_mem(bp); 641 642 return rc; 643 } 644 645 static int bnxt_init_ctx_mem(struct bnxt *bp) 646 { 647 int rc = 0; 648 649 if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS) || 650 !(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) || 651 !BNXT_FLOW_XSTATS_EN(bp)) 652 return 0; 653 654 rc = bnxt_hwrm_cfa_counter_qcaps(bp, &bp->flow_stat->max_fc); 655 if (rc) 656 return rc; 657 658 rc = bnxt_init_fc_ctx_mem(bp); 659 660 return rc; 661 } 662 663 static inline bool bnxt_force_link_config(struct bnxt *bp) 664 { 665 uint16_t subsystem_device_id = bp->pdev->id.subsystem_device_id; 666 667 switch (subsystem_device_id) { 668 case BROADCOM_DEV_957508_N2100: 669 case BROADCOM_DEV_957414_N225: 670 return true; 671 default: 672 return false; 673 } 674 } 675 676 static int bnxt_update_phy_setting(struct bnxt *bp) 677 { 678 struct rte_eth_link new; 679 int rc; 680 681 rc = bnxt_get_hwrm_link_config(bp, &new); 682 if (rc) { 683 PMD_DRV_LOG(ERR, "Failed to get link settings\n"); 684 return rc; 685 } 686 687 /* 688 * Device is not obliged link down in certain scenarios, even 689 * when forced. When FW does not allow any user other than BMC 690 * to shutdown the port, bnxt_get_hwrm_link_config() call always 691 * returns link up. Force phy update always in that case. 692 */ 693 if (!new.link_status || bnxt_force_link_config(bp)) { 694 rc = bnxt_set_hwrm_link_config(bp, true); 695 if (rc) { 696 PMD_DRV_LOG(ERR, "Failed to update PHY settings\n"); 697 return rc; 698 } 699 } 700 701 return rc; 702 } 703 704 static void bnxt_free_prev_ring_stats(struct bnxt *bp) 705 { 706 rte_free(bp->prev_rx_ring_stats); 707 rte_free(bp->prev_tx_ring_stats); 708 709 bp->prev_rx_ring_stats = NULL; 710 bp->prev_tx_ring_stats = NULL; 711 } 712 713 static int bnxt_alloc_prev_ring_stats(struct bnxt *bp) 714 { 715 bp->prev_rx_ring_stats = rte_zmalloc("bnxt_prev_rx_ring_stats", 716 sizeof(struct bnxt_ring_stats) * 717 bp->rx_cp_nr_rings, 718 0); 719 if (bp->prev_rx_ring_stats == NULL) 720 return -ENOMEM; 721 722 bp->prev_tx_ring_stats = rte_zmalloc("bnxt_prev_tx_ring_stats", 723 sizeof(struct bnxt_ring_stats) * 724 bp->tx_cp_nr_rings, 725 0); 726 if (bp->prev_tx_ring_stats == NULL) 727 goto error; 728 729 return 0; 730 731 error: 732 bnxt_free_prev_ring_stats(bp); 733 return -ENOMEM; 734 } 735 736 static int bnxt_start_nic(struct bnxt *bp) 737 { 738 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev); 739 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 740 uint32_t intr_vector = 0; 741 uint32_t queue_id, base = BNXT_MISC_VEC_ID; 742 uint32_t vec = BNXT_MISC_VEC_ID; 743 unsigned int i, j; 744 int rc; 745 746 if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) 747 bp->flags |= BNXT_FLAG_JUMBO; 748 else 749 bp->flags &= ~BNXT_FLAG_JUMBO; 750 751 /* THOR does not support ring groups. 752 * But we will use the array to save RSS context IDs. 753 */ 754 if (BNXT_CHIP_P5(bp)) 755 bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_P5; 756 757 rc = bnxt_alloc_hwrm_rings(bp); 758 if (rc) { 759 PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc); 760 goto err_out; 761 } 762 763 rc = bnxt_alloc_all_hwrm_ring_grps(bp); 764 if (rc) { 765 PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc); 766 goto err_out; 767 } 768 769 if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY)) 770 goto skip_cosq_cfg; 771 772 for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) { 773 if (bp->rx_cos_queue[i].id != 0xff) { 774 struct bnxt_vnic_info *vnic = &bp->vnic_info[j++]; 775 776 if (!vnic) { 777 PMD_DRV_LOG(ERR, 778 "Num pools more than FW profile\n"); 779 rc = -EINVAL; 780 goto err_out; 781 } 782 vnic->cos_queue_id = bp->rx_cos_queue[i].id; 783 bp->rx_cosq_cnt++; 784 } 785 } 786 787 skip_cosq_cfg: 788 rc = bnxt_mq_rx_configure(bp); 789 if (rc) { 790 PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc); 791 goto err_out; 792 } 793 794 for (j = 0; j < bp->rx_nr_rings; j++) { 795 struct bnxt_rx_queue *rxq = bp->rx_queues[j]; 796 797 if (!rxq->rx_deferred_start) { 798 bp->eth_dev->data->rx_queue_state[j] = 799 RTE_ETH_QUEUE_STATE_STARTED; 800 rxq->rx_started = true; 801 } 802 } 803 804 /* VNIC configuration */ 805 for (i = 0; i < bp->nr_vnics; i++) { 806 rc = bnxt_setup_one_vnic(bp, i); 807 if (rc) 808 goto err_out; 809 } 810 811 for (j = 0; j < bp->tx_nr_rings; j++) { 812 struct bnxt_tx_queue *txq = bp->tx_queues[j]; 813 814 if (!txq->tx_deferred_start) { 815 bp->eth_dev->data->tx_queue_state[j] = 816 RTE_ETH_QUEUE_STATE_STARTED; 817 txq->tx_started = true; 818 } 819 } 820 821 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL); 822 if (rc) { 823 PMD_DRV_LOG(ERR, 824 "HWRM cfa l2 rx mask failure rc: %x\n", rc); 825 goto err_out; 826 } 827 828 /* check and configure queue intr-vector mapping */ 829 if ((rte_intr_cap_multiple(intr_handle) || 830 !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) && 831 bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) { 832 intr_vector = bp->eth_dev->data->nb_rx_queues; 833 PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector); 834 if (intr_vector > bp->rx_cp_nr_rings) { 835 PMD_DRV_LOG(ERR, "At most %d intr queues supported", 836 bp->rx_cp_nr_rings); 837 return -ENOTSUP; 838 } 839 rc = rte_intr_efd_enable(intr_handle, intr_vector); 840 if (rc) 841 return rc; 842 } 843 844 if (rte_intr_dp_is_en(intr_handle)) { 845 if (rte_intr_vec_list_alloc(intr_handle, "intr_vec", 846 bp->eth_dev->data->nb_rx_queues)) { 847 PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues" 848 " intr_vec", bp->eth_dev->data->nb_rx_queues); 849 rc = -ENOMEM; 850 goto err_out; 851 } 852 PMD_DRV_LOG(DEBUG, "intr_handle->nb_efd = %d " 853 "intr_handle->max_intr = %d\n", 854 rte_intr_nb_efd_get(intr_handle), 855 rte_intr_max_intr_get(intr_handle)); 856 for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues; 857 queue_id++) { 858 rte_intr_vec_list_index_set(intr_handle, 859 queue_id, vec + BNXT_RX_VEC_START); 860 if (vec < base + rte_intr_nb_efd_get(intr_handle) 861 - 1) 862 vec++; 863 } 864 } 865 866 /* enable uio/vfio intr/eventfd mapping */ 867 rc = rte_intr_enable(intr_handle); 868 #ifndef RTE_EXEC_ENV_FREEBSD 869 /* In FreeBSD OS, nic_uio driver does not support interrupts */ 870 if (rc) 871 goto err_out; 872 #endif 873 874 rc = bnxt_update_phy_setting(bp); 875 if (rc) 876 goto err_out; 877 878 bp->mark_table = rte_zmalloc("bnxt_mark_table", BNXT_MARK_TABLE_SZ, 0); 879 if (!bp->mark_table) 880 PMD_DRV_LOG(ERR, "Allocation of mark table failed\n"); 881 882 return 0; 883 884 err_out: 885 /* Some of the error status returned by FW may not be from errno.h */ 886 if (rc > 0) 887 rc = -EIO; 888 889 return rc; 890 } 891 892 static int bnxt_shutdown_nic(struct bnxt *bp) 893 { 894 bnxt_free_all_hwrm_resources(bp); 895 bnxt_free_all_filters(bp); 896 bnxt_free_all_vnics(bp); 897 return 0; 898 } 899 900 /* 901 * Device configuration and status function 902 */ 903 904 uint32_t bnxt_get_speed_capabilities(struct bnxt *bp) 905 { 906 uint32_t link_speed = 0; 907 uint32_t speed_capa = 0; 908 909 if (bp->link_info == NULL) 910 return 0; 911 912 link_speed = bp->link_info->support_speeds; 913 914 /* If PAM4 is configured, use PAM4 supported speed */ 915 if (link_speed == 0 && bp->link_info->support_pam4_speeds > 0) 916 link_speed = bp->link_info->support_pam4_speeds; 917 918 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB) 919 speed_capa |= RTE_ETH_LINK_SPEED_100M; 920 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD) 921 speed_capa |= RTE_ETH_LINK_SPEED_100M_HD; 922 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB) 923 speed_capa |= RTE_ETH_LINK_SPEED_1G; 924 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB) 925 speed_capa |= RTE_ETH_LINK_SPEED_2_5G; 926 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB) 927 speed_capa |= RTE_ETH_LINK_SPEED_10G; 928 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB) 929 speed_capa |= RTE_ETH_LINK_SPEED_20G; 930 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB) 931 speed_capa |= RTE_ETH_LINK_SPEED_25G; 932 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB) 933 speed_capa |= RTE_ETH_LINK_SPEED_40G; 934 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB) 935 speed_capa |= RTE_ETH_LINK_SPEED_50G; 936 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB) 937 speed_capa |= RTE_ETH_LINK_SPEED_100G; 938 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G) 939 speed_capa |= RTE_ETH_LINK_SPEED_50G; 940 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G) 941 speed_capa |= RTE_ETH_LINK_SPEED_100G; 942 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G) 943 speed_capa |= RTE_ETH_LINK_SPEED_200G; 944 945 if (bp->link_info->auto_mode == 946 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE) 947 speed_capa |= RTE_ETH_LINK_SPEED_FIXED; 948 949 return speed_capa; 950 } 951 952 static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, 953 struct rte_eth_dev_info *dev_info) 954 { 955 struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device); 956 struct bnxt *bp = eth_dev->data->dev_private; 957 uint16_t max_vnics, i, j, vpool, vrxq; 958 unsigned int max_rx_rings; 959 int rc; 960 961 rc = is_bnxt_in_error(bp); 962 if (rc) 963 return rc; 964 965 /* MAC Specifics */ 966 dev_info->max_mac_addrs = RTE_MIN(bp->max_l2_ctx, RTE_ETH_NUM_RECEIVE_MAC_ADDR); 967 dev_info->max_hash_mac_addrs = 0; 968 969 /* PF/VF specifics */ 970 if (BNXT_PF(bp)) 971 dev_info->max_vfs = pdev->max_vfs; 972 973 max_rx_rings = bnxt_max_rings(bp); 974 /* For the sake of symmetry, max_rx_queues = max_tx_queues */ 975 dev_info->max_rx_queues = max_rx_rings; 976 dev_info->max_tx_queues = max_rx_rings; 977 dev_info->reta_size = bnxt_rss_hash_tbl_size(bp); 978 dev_info->hash_key_size = HW_HASH_KEY_SIZE; 979 max_vnics = bp->max_vnics; 980 981 /* MTU specifics */ 982 dev_info->min_mtu = RTE_ETHER_MIN_MTU; 983 dev_info->max_mtu = BNXT_MAX_MTU; 984 985 /* Fast path specifics */ 986 dev_info->min_rx_bufsize = 1; 987 dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN; 988 989 dev_info->rx_offload_capa = bnxt_get_rx_port_offloads(bp); 990 dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; 991 dev_info->tx_offload_capa = bnxt_get_tx_port_offloads(bp) | 992 dev_info->tx_queue_offload_capa; 993 dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT; 994 995 dev_info->speed_capa = bnxt_get_speed_capabilities(bp); 996 dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | 997 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; 998 dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP; 999 1000 dev_info->default_rxconf = (struct rte_eth_rxconf) { 1001 .rx_thresh = { 1002 .pthresh = 8, 1003 .hthresh = 8, 1004 .wthresh = 0, 1005 }, 1006 .rx_free_thresh = 32, 1007 .rx_drop_en = BNXT_DEFAULT_RX_DROP_EN, 1008 }; 1009 1010 dev_info->default_txconf = (struct rte_eth_txconf) { 1011 .tx_thresh = { 1012 .pthresh = 32, 1013 .hthresh = 0, 1014 .wthresh = 0, 1015 }, 1016 .tx_free_thresh = 32, 1017 .tx_rs_thresh = 32, 1018 }; 1019 eth_dev->data->dev_conf.intr_conf.lsc = 1; 1020 1021 dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC; 1022 dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC; 1023 dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC; 1024 dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC; 1025 1026 if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) { 1027 dev_info->switch_info.name = eth_dev->device->name; 1028 dev_info->switch_info.domain_id = bp->switch_domain_id; 1029 dev_info->switch_info.port_id = 1030 BNXT_PF(bp) ? BNXT_SWITCH_PORT_ID_PF : 1031 BNXT_SWITCH_PORT_ID_TRUSTED_VF; 1032 } 1033 1034 /* 1035 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim 1036 * need further investigation. 1037 */ 1038 1039 /* VMDq resources */ 1040 vpool = 64; /* RTE_ETH_64_POOLS */ 1041 vrxq = 128; /* RTE_ETH_VMDQ_DCB_NUM_QUEUES */ 1042 for (i = 0; i < 4; vpool >>= 1, i++) { 1043 if (max_vnics > vpool) { 1044 for (j = 0; j < 5; vrxq >>= 1, j++) { 1045 if (dev_info->max_rx_queues > vrxq) { 1046 if (vpool > vrxq) 1047 vpool = vrxq; 1048 goto found; 1049 } 1050 } 1051 /* Not enough resources to support VMDq */ 1052 break; 1053 } 1054 } 1055 /* Not enough resources to support VMDq */ 1056 vpool = 0; 1057 vrxq = 0; 1058 found: 1059 dev_info->max_vmdq_pools = vpool; 1060 dev_info->vmdq_queue_num = vrxq; 1061 1062 dev_info->vmdq_pool_base = 0; 1063 dev_info->vmdq_queue_base = 0; 1064 1065 return 0; 1066 } 1067 1068 /* Configure the device based on the configuration provided */ 1069 static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev) 1070 { 1071 struct bnxt *bp = eth_dev->data->dev_private; 1072 uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; 1073 struct rte_eth_rss_conf *rss_conf = ð_dev->data->dev_conf.rx_adv_conf.rss_conf; 1074 int rc; 1075 1076 bp->rx_queues = (void *)eth_dev->data->rx_queues; 1077 bp->tx_queues = (void *)eth_dev->data->tx_queues; 1078 bp->tx_nr_rings = eth_dev->data->nb_tx_queues; 1079 bp->rx_nr_rings = eth_dev->data->nb_rx_queues; 1080 1081 rc = is_bnxt_in_error(bp); 1082 if (rc) 1083 return rc; 1084 1085 if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) { 1086 rc = bnxt_hwrm_check_vf_rings(bp); 1087 if (rc) { 1088 PMD_DRV_LOG(ERR, "HWRM insufficient resources\n"); 1089 return -ENOSPC; 1090 } 1091 1092 /* If a resource has already been allocated - in this case 1093 * it is the async completion ring, free it. Reallocate it after 1094 * resource reservation. This will ensure the resource counts 1095 * are calculated correctly. 1096 */ 1097 1098 pthread_mutex_lock(&bp->def_cp_lock); 1099 1100 if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) { 1101 bnxt_disable_int(bp); 1102 bnxt_free_cp_ring(bp, bp->async_cp_ring); 1103 } 1104 1105 rc = bnxt_hwrm_func_reserve_vf_resc(bp, false); 1106 if (rc) { 1107 PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc); 1108 pthread_mutex_unlock(&bp->def_cp_lock); 1109 return -ENOSPC; 1110 } 1111 1112 if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) { 1113 rc = bnxt_alloc_async_cp_ring(bp); 1114 if (rc) { 1115 pthread_mutex_unlock(&bp->def_cp_lock); 1116 return rc; 1117 } 1118 bnxt_enable_int(bp); 1119 } 1120 1121 pthread_mutex_unlock(&bp->def_cp_lock); 1122 } 1123 1124 /* Inherit new configurations */ 1125 if (eth_dev->data->nb_rx_queues > bp->max_rx_rings || 1126 eth_dev->data->nb_tx_queues > bp->max_tx_rings || 1127 eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues 1128 + BNXT_NUM_ASYNC_CPR(bp) > bp->max_cp_rings || 1129 eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues > 1130 bp->max_stat_ctx) 1131 goto resource_error; 1132 1133 if (BNXT_HAS_RING_GRPS(bp) && 1134 (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps) 1135 goto resource_error; 1136 1137 if (!(eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) && 1138 bp->max_vnics < eth_dev->data->nb_rx_queues) 1139 goto resource_error; 1140 1141 bp->rx_cp_nr_rings = bp->rx_nr_rings; 1142 bp->tx_cp_nr_rings = bp->tx_nr_rings; 1143 1144 if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) 1145 rx_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; 1146 eth_dev->data->dev_conf.rxmode.offloads = rx_offloads; 1147 1148 /* application provides the hash key to program */ 1149 if (rss_conf->rss_key != NULL) { 1150 if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE) 1151 PMD_DRV_LOG(WARNING, "port %u RSS key len must be %d bytes long", 1152 eth_dev->data->port_id, HW_HASH_KEY_SIZE); 1153 else 1154 memcpy(bp->rss_conf.rss_key, rss_conf->rss_key, HW_HASH_KEY_SIZE); 1155 } 1156 bp->rss_conf.rss_key_len = HW_HASH_KEY_SIZE; 1157 bp->rss_conf.rss_hf = rss_conf->rss_hf; 1158 1159 bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu); 1160 1161 return 0; 1162 1163 resource_error: 1164 PMD_DRV_LOG(ERR, 1165 "Insufficient resources to support requested config\n"); 1166 PMD_DRV_LOG(ERR, 1167 "Num Queues Requested: Tx %d, Rx %d\n", 1168 eth_dev->data->nb_tx_queues, 1169 eth_dev->data->nb_rx_queues); 1170 PMD_DRV_LOG(ERR, 1171 "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n", 1172 bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings, 1173 bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics); 1174 return -ENOSPC; 1175 } 1176 1177 void bnxt_print_link_info(struct rte_eth_dev *eth_dev) 1178 { 1179 struct rte_eth_link *link = ð_dev->data->dev_link; 1180 1181 if (link->link_status) 1182 PMD_DRV_LOG(DEBUG, "Port %d Link Up - speed %u Mbps - %s\n", 1183 eth_dev->data->port_id, 1184 (uint32_t)link->link_speed, 1185 (link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 1186 ("full-duplex") : ("half-duplex\n")); 1187 else 1188 PMD_DRV_LOG(INFO, "Port %d Link Down\n", 1189 eth_dev->data->port_id); 1190 } 1191 1192 /* 1193 * Determine whether the current configuration requires support for scattered 1194 * receive; return 1 if scattered receive is required and 0 if not. 1195 */ 1196 static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev) 1197 { 1198 uint32_t overhead = BNXT_MAX_PKT_LEN - BNXT_MAX_MTU; 1199 uint16_t buf_size; 1200 int i; 1201 1202 if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) 1203 return 1; 1204 1205 if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) 1206 return 1; 1207 1208 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { 1209 struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i]; 1210 1211 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) - 1212 RTE_PKTMBUF_HEADROOM); 1213 if (eth_dev->data->mtu + overhead > buf_size) 1214 return 1; 1215 } 1216 return 0; 1217 } 1218 1219 static eth_rx_burst_t 1220 bnxt_receive_function(struct rte_eth_dev *eth_dev) 1221 { 1222 struct bnxt *bp = eth_dev->data->dev_private; 1223 1224 /* Disable vector mode RX for Stingray2 for now */ 1225 if (BNXT_CHIP_SR2(bp)) { 1226 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 1227 return bnxt_recv_pkts; 1228 } 1229 1230 #if (defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)) && \ 1231 !defined(RTE_LIBRTE_IEEE1588) 1232 1233 /* Vector mode receive cannot be enabled if scattered rx is in use. */ 1234 if (eth_dev->data->scattered_rx) 1235 goto use_scalar_rx; 1236 1237 /* 1238 * Vector mode receive cannot be enabled if Truflow is enabled or if 1239 * asynchronous completions and receive completions can be placed in 1240 * the same completion ring. 1241 */ 1242 if (BNXT_TRUFLOW_EN(bp) || !BNXT_NUM_ASYNC_CPR(bp)) 1243 goto use_scalar_rx; 1244 1245 /* 1246 * Vector mode receive cannot be enabled if any receive offloads outside 1247 * a limited subset have been enabled. 1248 */ 1249 if (eth_dev->data->dev_conf.rxmode.offloads & 1250 ~(RTE_ETH_RX_OFFLOAD_VLAN_STRIP | 1251 RTE_ETH_RX_OFFLOAD_KEEP_CRC | 1252 RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | 1253 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | 1254 RTE_ETH_RX_OFFLOAD_TCP_CKSUM | 1255 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | 1256 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | 1257 RTE_ETH_RX_OFFLOAD_RSS_HASH | 1258 RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) 1259 goto use_scalar_rx; 1260 1261 #if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT) 1262 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 && 1263 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1) { 1264 PMD_DRV_LOG(INFO, 1265 "Using AVX2 vector mode receive for port %d\n", 1266 eth_dev->data->port_id); 1267 bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE; 1268 return bnxt_recv_pkts_vec_avx2; 1269 } 1270 #endif 1271 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) { 1272 PMD_DRV_LOG(INFO, 1273 "Using SSE vector mode receive for port %d\n", 1274 eth_dev->data->port_id); 1275 bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE; 1276 return bnxt_recv_pkts_vec; 1277 } 1278 1279 use_scalar_rx: 1280 PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n", 1281 eth_dev->data->port_id); 1282 PMD_DRV_LOG(INFO, 1283 "Port %d scatter: %d rx offload: %" PRIX64 "\n", 1284 eth_dev->data->port_id, 1285 eth_dev->data->scattered_rx, 1286 eth_dev->data->dev_conf.rxmode.offloads); 1287 #endif 1288 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 1289 return bnxt_recv_pkts; 1290 } 1291 1292 static eth_tx_burst_t 1293 bnxt_transmit_function(struct rte_eth_dev *eth_dev) 1294 { 1295 struct bnxt *bp = eth_dev->data->dev_private; 1296 1297 /* Disable vector mode TX for Stingray2 for now */ 1298 if (BNXT_CHIP_SR2(bp)) 1299 return bnxt_xmit_pkts; 1300 1301 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) && \ 1302 !defined(RTE_LIBRTE_IEEE1588) 1303 uint64_t offloads = eth_dev->data->dev_conf.txmode.offloads; 1304 1305 /* 1306 * Vector mode transmit can be enabled only if not using scatter rx 1307 * or tx offloads. 1308 */ 1309 if (eth_dev->data->scattered_rx || 1310 (offloads & ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) || 1311 BNXT_TRUFLOW_EN(bp)) 1312 goto use_scalar_tx; 1313 1314 #if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT) 1315 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 && 1316 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1) { 1317 PMD_DRV_LOG(INFO, 1318 "Using AVX2 vector mode transmit for port %d\n", 1319 eth_dev->data->port_id); 1320 return bnxt_xmit_pkts_vec_avx2; 1321 } 1322 #endif 1323 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) { 1324 PMD_DRV_LOG(INFO, 1325 "Using SSE vector mode transmit for port %d\n", 1326 eth_dev->data->port_id); 1327 return bnxt_xmit_pkts_vec; 1328 } 1329 1330 use_scalar_tx: 1331 PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n", 1332 eth_dev->data->port_id); 1333 PMD_DRV_LOG(INFO, 1334 "Port %d scatter: %d tx offload: %" PRIX64 "\n", 1335 eth_dev->data->port_id, 1336 eth_dev->data->scattered_rx, 1337 offloads); 1338 #endif 1339 return bnxt_xmit_pkts; 1340 } 1341 1342 static int bnxt_handle_if_change_status(struct bnxt *bp) 1343 { 1344 int rc; 1345 1346 /* Since fw has undergone a reset and lost all contexts, 1347 * set fatal flag to not issue hwrm during cleanup 1348 */ 1349 bp->flags |= BNXT_FLAG_FATAL_ERROR; 1350 bnxt_uninit_resources(bp, true); 1351 1352 /* clear fatal flag so that re-init happens */ 1353 bp->flags &= ~BNXT_FLAG_FATAL_ERROR; 1354 1355 rc = bnxt_check_fw_ready(bp); 1356 if (rc) 1357 return rc; 1358 1359 rc = bnxt_init_resources(bp, true); 1360 1361 bp->flags &= ~BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE; 1362 1363 return rc; 1364 } 1365 1366 static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev) 1367 { 1368 struct bnxt *bp = eth_dev->data->dev_private; 1369 int rc = 0; 1370 1371 if (!BNXT_SINGLE_PF(bp)) 1372 return -ENOTSUP; 1373 1374 if (!bp->link_info->link_up) 1375 rc = bnxt_set_hwrm_link_config(bp, true); 1376 if (!rc) 1377 eth_dev->data->dev_link.link_status = 1; 1378 1379 bnxt_print_link_info(eth_dev); 1380 return rc; 1381 } 1382 1383 static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev) 1384 { 1385 struct bnxt *bp = eth_dev->data->dev_private; 1386 1387 if (!BNXT_SINGLE_PF(bp)) 1388 return -ENOTSUP; 1389 1390 eth_dev->data->dev_link.link_status = 0; 1391 bnxt_set_hwrm_link_config(bp, false); 1392 bp->link_info->link_up = 0; 1393 1394 return 0; 1395 } 1396 1397 static void bnxt_free_switch_domain(struct bnxt *bp) 1398 { 1399 int rc = 0; 1400 1401 if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) 1402 return; 1403 1404 rc = rte_eth_switch_domain_free(bp->switch_domain_id); 1405 if (rc) 1406 PMD_DRV_LOG(ERR, "free switch domain:%d fail: %d\n", 1407 bp->switch_domain_id, rc); 1408 } 1409 1410 static void bnxt_ptp_get_current_time(void *arg) 1411 { 1412 struct bnxt *bp = arg; 1413 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 1414 int rc; 1415 1416 rc = is_bnxt_in_error(bp); 1417 if (rc) 1418 return; 1419 1420 if (!ptp) 1421 return; 1422 1423 bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, 1424 &ptp->current_time); 1425 1426 rc = rte_eal_alarm_set(US_PER_S, bnxt_ptp_get_current_time, (void *)bp); 1427 if (rc != 0) { 1428 PMD_DRV_LOG(ERR, "Failed to re-schedule PTP alarm\n"); 1429 bp->flags2 &= ~BNXT_FLAGS2_PTP_ALARM_SCHEDULED; 1430 } 1431 } 1432 1433 static int bnxt_schedule_ptp_alarm(struct bnxt *bp) 1434 { 1435 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 1436 int rc; 1437 1438 if (bp->flags2 & BNXT_FLAGS2_PTP_ALARM_SCHEDULED) 1439 return 0; 1440 1441 bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, 1442 &ptp->current_time); 1443 1444 rc = rte_eal_alarm_set(US_PER_S, bnxt_ptp_get_current_time, (void *)bp); 1445 return rc; 1446 } 1447 1448 static void bnxt_cancel_ptp_alarm(struct bnxt *bp) 1449 { 1450 if (bp->flags2 & BNXT_FLAGS2_PTP_ALARM_SCHEDULED) { 1451 rte_eal_alarm_cancel(bnxt_ptp_get_current_time, (void *)bp); 1452 bp->flags2 &= ~BNXT_FLAGS2_PTP_ALARM_SCHEDULED; 1453 } 1454 } 1455 1456 static void bnxt_ptp_stop(struct bnxt *bp) 1457 { 1458 bnxt_cancel_ptp_alarm(bp); 1459 bp->flags2 &= ~BNXT_FLAGS2_PTP_TIMESYNC_ENABLED; 1460 } 1461 1462 static int bnxt_ptp_start(struct bnxt *bp) 1463 { 1464 int rc; 1465 1466 rc = bnxt_schedule_ptp_alarm(bp); 1467 if (rc != 0) { 1468 PMD_DRV_LOG(ERR, "Failed to schedule PTP alarm\n"); 1469 } else { 1470 bp->flags2 |= BNXT_FLAGS2_PTP_TIMESYNC_ENABLED; 1471 bp->flags2 |= BNXT_FLAGS2_PTP_ALARM_SCHEDULED; 1472 } 1473 1474 return rc; 1475 } 1476 1477 static int bnxt_dev_stop(struct rte_eth_dev *eth_dev) 1478 { 1479 struct bnxt *bp = eth_dev->data->dev_private; 1480 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1481 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 1482 struct rte_eth_link link; 1483 int ret; 1484 1485 eth_dev->data->dev_started = 0; 1486 1487 /* Prevent crashes when queues are still in use */ 1488 bnxt_stop_rxtx(eth_dev); 1489 1490 bnxt_disable_int(bp); 1491 1492 /* disable uio/vfio intr/eventfd mapping */ 1493 rte_intr_disable(intr_handle); 1494 1495 /* Stop the child representors for this device */ 1496 ret = bnxt_rep_stop_all(bp); 1497 if (ret != 0) 1498 return ret; 1499 1500 /* delete the bnxt ULP port details */ 1501 bnxt_ulp_port_deinit(bp); 1502 1503 bnxt_cancel_fw_health_check(bp); 1504 1505 if (BNXT_P5_PTP_TIMESYNC_ENABLED(bp)) 1506 bnxt_cancel_ptp_alarm(bp); 1507 1508 /* Do not bring link down during reset recovery */ 1509 if (!is_bnxt_in_error(bp)) { 1510 bnxt_dev_set_link_down_op(eth_dev); 1511 /* Wait for link to be reset */ 1512 if (BNXT_SINGLE_PF(bp)) 1513 rte_delay_ms(500); 1514 /* clear the recorded link status */ 1515 memset(&link, 0, sizeof(link)); 1516 rte_eth_linkstatus_set(eth_dev, &link); 1517 } 1518 1519 /* Clean queue intr-vector mapping */ 1520 rte_intr_efd_disable(intr_handle); 1521 rte_intr_vec_list_free(intr_handle); 1522 1523 bnxt_hwrm_port_clr_stats(bp); 1524 bnxt_free_tx_mbufs(bp); 1525 bnxt_free_rx_mbufs(bp); 1526 /* Process any remaining notifications in default completion queue */ 1527 bnxt_int_handler(eth_dev); 1528 bnxt_shutdown_nic(bp); 1529 bnxt_hwrm_if_change(bp, false); 1530 1531 bnxt_free_prev_ring_stats(bp); 1532 rte_free(bp->mark_table); 1533 bp->mark_table = NULL; 1534 1535 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 1536 bp->rx_cosq_cnt = 0; 1537 /* All filters are deleted on a port stop. */ 1538 if (BNXT_FLOW_XSTATS_EN(bp)) 1539 bp->flow_stat->flow_count = 0; 1540 1541 eth_dev->data->scattered_rx = 0; 1542 1543 return 0; 1544 } 1545 1546 /* Unload the driver, release resources */ 1547 int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev) 1548 { 1549 struct bnxt *bp = eth_dev->data->dev_private; 1550 1551 pthread_mutex_lock(&bp->err_recovery_lock); 1552 if (bp->flags & BNXT_FLAG_FW_RESET) { 1553 PMD_DRV_LOG(ERR, 1554 "Adapter recovering from error..Please retry\n"); 1555 pthread_mutex_unlock(&bp->err_recovery_lock); 1556 return -EAGAIN; 1557 } 1558 pthread_mutex_unlock(&bp->err_recovery_lock); 1559 1560 return bnxt_dev_stop(eth_dev); 1561 } 1562 1563 int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) 1564 { 1565 struct bnxt *bp = eth_dev->data->dev_private; 1566 uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; 1567 int vlan_mask = 0; 1568 int rc, retry_cnt = BNXT_IF_CHANGE_RETRY_COUNT; 1569 1570 if (!eth_dev->data->nb_tx_queues || !eth_dev->data->nb_rx_queues) { 1571 PMD_DRV_LOG(ERR, "Queues are not configured yet!\n"); 1572 return -EINVAL; 1573 } 1574 1575 if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) 1576 PMD_DRV_LOG(ERR, 1577 "RxQ cnt %d > RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n", 1578 bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS); 1579 1580 do { 1581 rc = bnxt_hwrm_if_change(bp, true); 1582 if (rc == 0 || rc != -EAGAIN) 1583 break; 1584 1585 rte_delay_ms(BNXT_IF_CHANGE_RETRY_INTERVAL); 1586 } while (retry_cnt--); 1587 1588 if (rc) 1589 return rc; 1590 1591 if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) { 1592 rc = bnxt_handle_if_change_status(bp); 1593 if (rc) 1594 return rc; 1595 } 1596 1597 bnxt_enable_int(bp); 1598 1599 eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev); 1600 1601 rc = bnxt_start_nic(bp); 1602 if (rc) 1603 goto error; 1604 1605 rc = bnxt_alloc_prev_ring_stats(bp); 1606 if (rc) 1607 goto error; 1608 1609 eth_dev->data->dev_started = 1; 1610 1611 bnxt_link_update_op(eth_dev, 0); 1612 1613 if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) 1614 vlan_mask |= RTE_ETH_VLAN_FILTER_MASK; 1615 if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 1616 vlan_mask |= RTE_ETH_VLAN_STRIP_MASK; 1617 rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask); 1618 if (rc) 1619 goto error; 1620 1621 /* Initialize bnxt ULP port details */ 1622 rc = bnxt_ulp_port_init(bp); 1623 if (rc) 1624 goto error; 1625 1626 eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev); 1627 eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev); 1628 1629 bnxt_schedule_fw_health_check(bp); 1630 1631 if (BNXT_P5_PTP_TIMESYNC_ENABLED(bp)) 1632 bnxt_schedule_ptp_alarm(bp); 1633 1634 return 0; 1635 1636 error: 1637 bnxt_dev_stop(eth_dev); 1638 return rc; 1639 } 1640 1641 static void 1642 bnxt_uninit_locks(struct bnxt *bp) 1643 { 1644 pthread_mutex_destroy(&bp->flow_lock); 1645 pthread_mutex_destroy(&bp->def_cp_lock); 1646 pthread_mutex_destroy(&bp->health_check_lock); 1647 pthread_mutex_destroy(&bp->err_recovery_lock); 1648 if (bp->rep_info) { 1649 pthread_mutex_destroy(&bp->rep_info->vfr_lock); 1650 pthread_mutex_destroy(&bp->rep_info->vfr_start_lock); 1651 } 1652 } 1653 1654 static void bnxt_drv_uninit(struct bnxt *bp) 1655 { 1656 bnxt_free_leds_info(bp); 1657 bnxt_free_cos_queues(bp); 1658 bnxt_free_link_info(bp); 1659 bnxt_free_parent_info(bp); 1660 bnxt_uninit_locks(bp); 1661 1662 rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone); 1663 bp->tx_mem_zone = NULL; 1664 rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone); 1665 bp->rx_mem_zone = NULL; 1666 1667 bnxt_free_vf_info(bp); 1668 bnxt_free_pf_info(bp); 1669 1670 rte_free(bp->grp_info); 1671 bp->grp_info = NULL; 1672 } 1673 1674 static int bnxt_dev_close_op(struct rte_eth_dev *eth_dev) 1675 { 1676 struct bnxt *bp = eth_dev->data->dev_private; 1677 int ret = 0; 1678 1679 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1680 return 0; 1681 1682 pthread_mutex_lock(&bp->err_recovery_lock); 1683 if (bp->flags & BNXT_FLAG_FW_RESET) { 1684 PMD_DRV_LOG(ERR, 1685 "Adapter recovering from error...Please retry\n"); 1686 pthread_mutex_unlock(&bp->err_recovery_lock); 1687 return -EAGAIN; 1688 } 1689 pthread_mutex_unlock(&bp->err_recovery_lock); 1690 1691 /* cancel the recovery handler before remove dev */ 1692 rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp); 1693 rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp); 1694 bnxt_cancel_fc_thread(bp); 1695 rte_eal_alarm_cancel(bnxt_handle_vf_cfg_change, (void *)bp); 1696 1697 if (eth_dev->data->dev_started) 1698 ret = bnxt_dev_stop(eth_dev); 1699 1700 bnxt_uninit_resources(bp, false); 1701 1702 bnxt_drv_uninit(bp); 1703 1704 return ret; 1705 } 1706 1707 static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev, 1708 uint32_t index) 1709 { 1710 struct bnxt *bp = eth_dev->data->dev_private; 1711 uint64_t pool_mask = eth_dev->data->mac_pool_sel[index]; 1712 struct bnxt_vnic_info *vnic; 1713 struct bnxt_filter_info *filter, *temp_filter; 1714 uint32_t i; 1715 1716 if (is_bnxt_in_error(bp)) 1717 return; 1718 1719 /* 1720 * Loop through all VNICs from the specified filter flow pools to 1721 * remove the corresponding MAC addr filter 1722 */ 1723 for (i = 0; i < bp->nr_vnics; i++) { 1724 if (!(pool_mask & (1ULL << i))) 1725 continue; 1726 1727 vnic = &bp->vnic_info[i]; 1728 filter = STAILQ_FIRST(&vnic->filter); 1729 while (filter) { 1730 temp_filter = STAILQ_NEXT(filter, next); 1731 if (filter->mac_index == index) { 1732 STAILQ_REMOVE(&vnic->filter, filter, 1733 bnxt_filter_info, next); 1734 bnxt_hwrm_clear_l2_filter(bp, filter); 1735 bnxt_free_filter(bp, filter); 1736 } 1737 filter = temp_filter; 1738 } 1739 } 1740 } 1741 1742 static int bnxt_add_mac_filter(struct bnxt *bp, struct bnxt_vnic_info *vnic, 1743 struct rte_ether_addr *mac_addr, uint32_t index, 1744 uint32_t pool) 1745 { 1746 struct bnxt_filter_info *filter; 1747 int rc = 0; 1748 1749 /* Attach requested MAC address to the new l2_filter */ 1750 STAILQ_FOREACH(filter, &vnic->filter, next) { 1751 if (filter->mac_index == index) { 1752 PMD_DRV_LOG(DEBUG, 1753 "MAC addr already existed for pool %d\n", 1754 pool); 1755 return 0; 1756 } 1757 } 1758 1759 filter = bnxt_alloc_filter(bp); 1760 if (!filter) { 1761 PMD_DRV_LOG(ERR, "L2 filter alloc failed\n"); 1762 return -ENODEV; 1763 } 1764 1765 /* bnxt_alloc_filter copies default MAC to filter->l2_addr. So, 1766 * if the MAC that's been programmed now is a different one, then, 1767 * copy that addr to filter->l2_addr 1768 */ 1769 if (mac_addr) 1770 memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN); 1771 filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; 1772 1773 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); 1774 if (!rc) { 1775 filter->mac_index = index; 1776 if (filter->mac_index == 0) 1777 STAILQ_INSERT_HEAD(&vnic->filter, filter, next); 1778 else 1779 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 1780 } else { 1781 bnxt_free_filter(bp, filter); 1782 } 1783 1784 return rc; 1785 } 1786 1787 static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev, 1788 struct rte_ether_addr *mac_addr, 1789 uint32_t index, uint32_t pool) 1790 { 1791 struct bnxt *bp = eth_dev->data->dev_private; 1792 struct bnxt_vnic_info *vnic = &bp->vnic_info[pool]; 1793 int rc = 0; 1794 1795 rc = is_bnxt_in_error(bp); 1796 if (rc) 1797 return rc; 1798 1799 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) { 1800 PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n"); 1801 return -ENOTSUP; 1802 } 1803 1804 if (!vnic) { 1805 PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool); 1806 return -EINVAL; 1807 } 1808 1809 /* Filter settings will get applied when port is started */ 1810 if (!eth_dev->data->dev_started) 1811 return 0; 1812 1813 rc = bnxt_add_mac_filter(bp, vnic, mac_addr, index, pool); 1814 1815 return rc; 1816 } 1817 1818 int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete) 1819 { 1820 int rc = 0; 1821 struct bnxt *bp = eth_dev->data->dev_private; 1822 struct rte_eth_link new; 1823 int cnt = wait_to_complete ? BNXT_MAX_LINK_WAIT_CNT : 1824 BNXT_MIN_LINK_WAIT_CNT; 1825 1826 rc = is_bnxt_in_error(bp); 1827 if (rc) 1828 return rc; 1829 1830 memset(&new, 0, sizeof(new)); 1831 1832 if (bp->link_info == NULL) 1833 goto out; 1834 1835 /* Only single function PF can bring the phy down. 1836 * In certain scenarios, device is not obliged link down even when forced. 1837 * When port is stopped, report link down in those cases. 1838 */ 1839 if (!eth_dev->data->dev_started && 1840 (!BNXT_SINGLE_PF(bp) || bnxt_force_link_config(bp))) 1841 goto out; 1842 1843 do { 1844 /* Retrieve link info from hardware */ 1845 rc = bnxt_get_hwrm_link_config(bp, &new); 1846 if (rc) { 1847 new.link_speed = RTE_ETH_LINK_SPEED_100M; 1848 new.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 1849 PMD_DRV_LOG(ERR, 1850 "Failed to retrieve link rc = 0x%x!\n", rc); 1851 goto out; 1852 } 1853 1854 if (!wait_to_complete || new.link_status) 1855 break; 1856 1857 rte_delay_ms(BNXT_LINK_WAIT_INTERVAL); 1858 } while (cnt--); 1859 1860 out: 1861 /* Timed out or success */ 1862 if (new.link_status != eth_dev->data->dev_link.link_status || 1863 new.link_speed != eth_dev->data->dev_link.link_speed) { 1864 rte_eth_linkstatus_set(eth_dev, &new); 1865 bnxt_print_link_info(eth_dev); 1866 } 1867 1868 return rc; 1869 } 1870 1871 static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev) 1872 { 1873 struct bnxt *bp = eth_dev->data->dev_private; 1874 struct bnxt_vnic_info *vnic; 1875 uint32_t old_flags; 1876 int rc; 1877 1878 rc = is_bnxt_in_error(bp); 1879 if (rc) 1880 return rc; 1881 1882 /* Filter settings will get applied when port is started */ 1883 if (!eth_dev->data->dev_started) 1884 return 0; 1885 1886 if (bp->vnic_info == NULL) 1887 return 0; 1888 1889 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1890 1891 old_flags = vnic->flags; 1892 vnic->flags |= BNXT_VNIC_INFO_PROMISC; 1893 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1894 if (rc != 0) 1895 vnic->flags = old_flags; 1896 1897 return rc; 1898 } 1899 1900 static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev) 1901 { 1902 struct bnxt *bp = eth_dev->data->dev_private; 1903 struct bnxt_vnic_info *vnic; 1904 uint32_t old_flags; 1905 int rc; 1906 1907 rc = is_bnxt_in_error(bp); 1908 if (rc) 1909 return rc; 1910 1911 /* Filter settings will get applied when port is started */ 1912 if (!eth_dev->data->dev_started) 1913 return 0; 1914 1915 if (bp->vnic_info == NULL) 1916 return 0; 1917 1918 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1919 1920 old_flags = vnic->flags; 1921 vnic->flags &= ~BNXT_VNIC_INFO_PROMISC; 1922 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1923 if (rc != 0) 1924 vnic->flags = old_flags; 1925 1926 return rc; 1927 } 1928 1929 static int bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev) 1930 { 1931 struct bnxt *bp = eth_dev->data->dev_private; 1932 struct bnxt_vnic_info *vnic; 1933 uint32_t old_flags; 1934 int rc; 1935 1936 rc = is_bnxt_in_error(bp); 1937 if (rc) 1938 return rc; 1939 1940 /* Filter settings will get applied when port is started */ 1941 if (!eth_dev->data->dev_started) 1942 return 0; 1943 1944 if (bp->vnic_info == NULL) 1945 return 0; 1946 1947 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1948 1949 old_flags = vnic->flags; 1950 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; 1951 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1952 if (rc != 0) 1953 vnic->flags = old_flags; 1954 1955 return rc; 1956 } 1957 1958 static int bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev) 1959 { 1960 struct bnxt *bp = eth_dev->data->dev_private; 1961 struct bnxt_vnic_info *vnic; 1962 uint32_t old_flags; 1963 int rc; 1964 1965 rc = is_bnxt_in_error(bp); 1966 if (rc) 1967 return rc; 1968 1969 /* Filter settings will get applied when port is started */ 1970 if (!eth_dev->data->dev_started) 1971 return 0; 1972 1973 if (bp->vnic_info == NULL) 1974 return 0; 1975 1976 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1977 1978 old_flags = vnic->flags; 1979 vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; 1980 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1981 if (rc != 0) 1982 vnic->flags = old_flags; 1983 1984 return rc; 1985 } 1986 1987 /* Return bnxt_rx_queue pointer corresponding to a given rxq. */ 1988 static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid) 1989 { 1990 if (qid >= bp->rx_nr_rings) 1991 return NULL; 1992 1993 return bp->eth_dev->data->rx_queues[qid]; 1994 } 1995 1996 /* Return rxq corresponding to a given rss table ring/group ID. */ 1997 static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr) 1998 { 1999 struct bnxt_rx_queue *rxq; 2000 unsigned int i; 2001 2002 if (!BNXT_HAS_RING_GRPS(bp)) { 2003 for (i = 0; i < bp->rx_nr_rings; i++) { 2004 rxq = bp->eth_dev->data->rx_queues[i]; 2005 if (rxq->rx_ring->rx_ring_struct->fw_ring_id == fwr) 2006 return rxq->index; 2007 } 2008 } else { 2009 for (i = 0; i < bp->rx_nr_rings; i++) { 2010 if (bp->grp_info[i].fw_grp_id == fwr) 2011 return i; 2012 } 2013 } 2014 2015 return INVALID_HW_RING_ID; 2016 } 2017 2018 static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev, 2019 struct rte_eth_rss_reta_entry64 *reta_conf, 2020 uint16_t reta_size) 2021 { 2022 struct bnxt *bp = eth_dev->data->dev_private; 2023 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 2024 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2025 uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp); 2026 uint16_t idx, sft; 2027 int i, rc; 2028 2029 rc = is_bnxt_in_error(bp); 2030 if (rc) 2031 return rc; 2032 2033 if (!vnic->rss_table) 2034 return -EINVAL; 2035 2036 if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) 2037 return -EINVAL; 2038 2039 if (reta_size != tbl_size) { 2040 PMD_DRV_LOG(ERR, "The configured hash table lookup size " 2041 "(%d) must equal the size supported by the hardware " 2042 "(%d)\n", reta_size, tbl_size); 2043 return -EINVAL; 2044 } 2045 2046 for (i = 0; i < reta_size; i++) { 2047 struct bnxt_rx_queue *rxq; 2048 2049 idx = i / RTE_ETH_RETA_GROUP_SIZE; 2050 sft = i % RTE_ETH_RETA_GROUP_SIZE; 2051 2052 if (!(reta_conf[idx].mask & (1ULL << sft))) 2053 continue; 2054 2055 rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]); 2056 if (!rxq) { 2057 PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n"); 2058 return -EINVAL; 2059 } 2060 2061 if (BNXT_CHIP_P5(bp)) { 2062 vnic->rss_table[i * 2] = 2063 rxq->rx_ring->rx_ring_struct->fw_ring_id; 2064 vnic->rss_table[i * 2 + 1] = 2065 rxq->cp_ring->cp_ring_struct->fw_ring_id; 2066 } else { 2067 vnic->rss_table[i] = 2068 vnic->fw_grp_ids[reta_conf[idx].reta[sft]]; 2069 } 2070 } 2071 2072 rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic); 2073 return rc; 2074 } 2075 2076 static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev, 2077 struct rte_eth_rss_reta_entry64 *reta_conf, 2078 uint16_t reta_size) 2079 { 2080 struct bnxt *bp = eth_dev->data->dev_private; 2081 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2082 uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp); 2083 uint16_t idx, sft, i; 2084 int rc; 2085 2086 rc = is_bnxt_in_error(bp); 2087 if (rc) 2088 return rc; 2089 2090 if (!vnic) 2091 return -EINVAL; 2092 if (!vnic->rss_table) 2093 return -EINVAL; 2094 2095 if (reta_size != tbl_size) { 2096 PMD_DRV_LOG(ERR, "The configured hash table lookup size " 2097 "(%d) must equal the size supported by the hardware " 2098 "(%d)\n", reta_size, tbl_size); 2099 return -EINVAL; 2100 } 2101 2102 for (idx = 0, i = 0; i < reta_size; i++) { 2103 idx = i / RTE_ETH_RETA_GROUP_SIZE; 2104 sft = i % RTE_ETH_RETA_GROUP_SIZE; 2105 2106 if (reta_conf[idx].mask & (1ULL << sft)) { 2107 uint16_t qid; 2108 2109 if (BNXT_CHIP_P5(bp)) 2110 qid = bnxt_rss_to_qid(bp, 2111 vnic->rss_table[i * 2]); 2112 else 2113 qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]); 2114 2115 if (qid == INVALID_HW_RING_ID) { 2116 PMD_DRV_LOG(ERR, "Inv. entry in rss table.\n"); 2117 return -EINVAL; 2118 } 2119 reta_conf[idx].reta[sft] = qid; 2120 } 2121 } 2122 2123 return 0; 2124 } 2125 2126 static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, 2127 struct rte_eth_rss_conf *rss_conf) 2128 { 2129 struct bnxt *bp = eth_dev->data->dev_private; 2130 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 2131 struct bnxt_vnic_info *vnic; 2132 int rc; 2133 2134 rc = is_bnxt_in_error(bp); 2135 if (rc) 2136 return rc; 2137 2138 /* 2139 * If RSS enablement were different than dev_configure, 2140 * then return -EINVAL 2141 */ 2142 if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) { 2143 if (!rss_conf->rss_hf) 2144 PMD_DRV_LOG(ERR, "Hash type NONE\n"); 2145 } else { 2146 if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT) 2147 return -EINVAL; 2148 } 2149 2150 /* Update the default RSS VNIC(s) */ 2151 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2152 vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf); 2153 vnic->hash_mode = 2154 bnxt_rte_to_hwrm_hash_level(bp, rss_conf->rss_hf, 2155 RTE_ETH_RSS_LEVEL(rss_conf->rss_hf)); 2156 2157 /* Cache the hash function */ 2158 bp->rss_conf.rss_hf = rss_conf->rss_hf; 2159 2160 /* 2161 * If hashkey is not specified, use the previously configured 2162 * hashkey 2163 */ 2164 if (!rss_conf->rss_key) 2165 goto rss_config; 2166 2167 if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE) { 2168 PMD_DRV_LOG(ERR, 2169 "Invalid hashkey length, should be %d bytes\n", 2170 HW_HASH_KEY_SIZE); 2171 return -EINVAL; 2172 } 2173 memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len); 2174 2175 /* Cache the hash key */ 2176 memcpy(bp->rss_conf.rss_key, rss_conf->rss_key, HW_HASH_KEY_SIZE); 2177 2178 rss_config: 2179 rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic); 2180 return rc; 2181 } 2182 2183 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev, 2184 struct rte_eth_rss_conf *rss_conf) 2185 { 2186 struct bnxt *bp = eth_dev->data->dev_private; 2187 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2188 int len, rc; 2189 uint32_t hash_types; 2190 2191 rc = is_bnxt_in_error(bp); 2192 if (rc) 2193 return rc; 2194 2195 /* RSS configuration is the same for all VNICs */ 2196 if (vnic && vnic->rss_hash_key) { 2197 if (rss_conf->rss_key) { 2198 len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ? 2199 rss_conf->rss_key_len : HW_HASH_KEY_SIZE; 2200 memcpy(rss_conf->rss_key, vnic->rss_hash_key, len); 2201 } 2202 2203 hash_types = vnic->hash_type; 2204 rss_conf->rss_hf = 0; 2205 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) { 2206 rss_conf->rss_hf |= RTE_ETH_RSS_IPV4; 2207 hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4; 2208 } 2209 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) { 2210 rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP; 2211 hash_types &= 2212 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4; 2213 } 2214 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) { 2215 rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP; 2216 hash_types &= 2217 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4; 2218 } 2219 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) { 2220 rss_conf->rss_hf |= RTE_ETH_RSS_IPV6; 2221 hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6; 2222 } 2223 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) { 2224 rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP; 2225 hash_types &= 2226 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6; 2227 } 2228 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) { 2229 rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP; 2230 hash_types &= 2231 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6; 2232 } 2233 2234 rss_conf->rss_hf |= 2235 bnxt_hwrm_to_rte_rss_level(bp, vnic->hash_mode); 2236 2237 if (hash_types) { 2238 PMD_DRV_LOG(ERR, 2239 "Unknown RSS config from firmware (%08x), RSS disabled", 2240 vnic->hash_type); 2241 return -ENOTSUP; 2242 } 2243 } else { 2244 rss_conf->rss_hf = 0; 2245 } 2246 return 0; 2247 } 2248 2249 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev, 2250 struct rte_eth_fc_conf *fc_conf) 2251 { 2252 struct bnxt *bp = dev->data->dev_private; 2253 struct rte_eth_link link_info; 2254 int rc; 2255 2256 rc = is_bnxt_in_error(bp); 2257 if (rc) 2258 return rc; 2259 2260 rc = bnxt_get_hwrm_link_config(bp, &link_info); 2261 if (rc) 2262 return rc; 2263 2264 memset(fc_conf, 0, sizeof(*fc_conf)); 2265 if (bp->link_info->auto_pause) 2266 fc_conf->autoneg = 1; 2267 switch (bp->link_info->pause) { 2268 case 0: 2269 fc_conf->mode = RTE_ETH_FC_NONE; 2270 break; 2271 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX: 2272 fc_conf->mode = RTE_ETH_FC_TX_PAUSE; 2273 break; 2274 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX: 2275 fc_conf->mode = RTE_ETH_FC_RX_PAUSE; 2276 break; 2277 case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX | 2278 HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX): 2279 fc_conf->mode = RTE_ETH_FC_FULL; 2280 break; 2281 } 2282 return 0; 2283 } 2284 2285 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev, 2286 struct rte_eth_fc_conf *fc_conf) 2287 { 2288 struct bnxt *bp = dev->data->dev_private; 2289 int rc; 2290 2291 rc = is_bnxt_in_error(bp); 2292 if (rc) 2293 return rc; 2294 2295 if (!BNXT_SINGLE_PF(bp)) { 2296 PMD_DRV_LOG(ERR, 2297 "Flow Control Settings cannot be modified on VF or on shared PF\n"); 2298 return -ENOTSUP; 2299 } 2300 2301 switch (fc_conf->mode) { 2302 case RTE_ETH_FC_NONE: 2303 bp->link_info->auto_pause = 0; 2304 bp->link_info->force_pause = 0; 2305 break; 2306 case RTE_ETH_FC_RX_PAUSE: 2307 if (fc_conf->autoneg) { 2308 bp->link_info->auto_pause = 2309 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 2310 bp->link_info->force_pause = 0; 2311 } else { 2312 bp->link_info->auto_pause = 0; 2313 bp->link_info->force_pause = 2314 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 2315 } 2316 break; 2317 case RTE_ETH_FC_TX_PAUSE: 2318 if (fc_conf->autoneg) { 2319 bp->link_info->auto_pause = 2320 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX; 2321 bp->link_info->force_pause = 0; 2322 } else { 2323 bp->link_info->auto_pause = 0; 2324 bp->link_info->force_pause = 2325 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX; 2326 } 2327 break; 2328 case RTE_ETH_FC_FULL: 2329 if (fc_conf->autoneg) { 2330 bp->link_info->auto_pause = 2331 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX | 2332 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 2333 bp->link_info->force_pause = 0; 2334 } else { 2335 bp->link_info->auto_pause = 0; 2336 bp->link_info->force_pause = 2337 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX | 2338 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 2339 } 2340 break; 2341 } 2342 return bnxt_set_hwrm_link_config(bp, true); 2343 } 2344 2345 /* Add UDP tunneling port */ 2346 static int 2347 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev, 2348 struct rte_eth_udp_tunnel *udp_tunnel) 2349 { 2350 struct bnxt *bp = eth_dev->data->dev_private; 2351 uint16_t tunnel_type = 0; 2352 int rc = 0; 2353 2354 rc = is_bnxt_in_error(bp); 2355 if (rc) 2356 return rc; 2357 2358 switch (udp_tunnel->prot_type) { 2359 case RTE_ETH_TUNNEL_TYPE_VXLAN: 2360 if (bp->vxlan_port_cnt) { 2361 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 2362 udp_tunnel->udp_port); 2363 if (bp->vxlan_port != udp_tunnel->udp_port) { 2364 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 2365 return -ENOSPC; 2366 } 2367 bp->vxlan_port_cnt++; 2368 return 0; 2369 } 2370 tunnel_type = 2371 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN; 2372 break; 2373 case RTE_ETH_TUNNEL_TYPE_GENEVE: 2374 if (bp->geneve_port_cnt) { 2375 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 2376 udp_tunnel->udp_port); 2377 if (bp->geneve_port != udp_tunnel->udp_port) { 2378 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 2379 return -ENOSPC; 2380 } 2381 bp->geneve_port_cnt++; 2382 return 0; 2383 } 2384 tunnel_type = 2385 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE; 2386 break; 2387 default: 2388 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); 2389 return -ENOTSUP; 2390 } 2391 rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port, 2392 tunnel_type); 2393 2394 if (rc != 0) 2395 return rc; 2396 2397 if (tunnel_type == 2398 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN) 2399 bp->vxlan_port_cnt++; 2400 2401 if (tunnel_type == 2402 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE) 2403 bp->geneve_port_cnt++; 2404 2405 return rc; 2406 } 2407 2408 static int 2409 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev, 2410 struct rte_eth_udp_tunnel *udp_tunnel) 2411 { 2412 struct bnxt *bp = eth_dev->data->dev_private; 2413 uint16_t tunnel_type = 0; 2414 uint16_t port = 0; 2415 int rc = 0; 2416 2417 rc = is_bnxt_in_error(bp); 2418 if (rc) 2419 return rc; 2420 2421 switch (udp_tunnel->prot_type) { 2422 case RTE_ETH_TUNNEL_TYPE_VXLAN: 2423 if (!bp->vxlan_port_cnt) { 2424 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 2425 return -EINVAL; 2426 } 2427 if (bp->vxlan_port != udp_tunnel->udp_port) { 2428 PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n", 2429 udp_tunnel->udp_port, bp->vxlan_port); 2430 return -EINVAL; 2431 } 2432 if (--bp->vxlan_port_cnt) 2433 return 0; 2434 2435 tunnel_type = 2436 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN; 2437 port = bp->vxlan_fw_dst_port_id; 2438 break; 2439 case RTE_ETH_TUNNEL_TYPE_GENEVE: 2440 if (!bp->geneve_port_cnt) { 2441 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 2442 return -EINVAL; 2443 } 2444 if (bp->geneve_port != udp_tunnel->udp_port) { 2445 PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n", 2446 udp_tunnel->udp_port, bp->geneve_port); 2447 return -EINVAL; 2448 } 2449 if (--bp->geneve_port_cnt) 2450 return 0; 2451 2452 tunnel_type = 2453 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE; 2454 port = bp->geneve_fw_dst_port_id; 2455 break; 2456 default: 2457 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); 2458 return -ENOTSUP; 2459 } 2460 2461 rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type); 2462 return rc; 2463 } 2464 2465 static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 2466 { 2467 struct bnxt_filter_info *filter; 2468 struct bnxt_vnic_info *vnic; 2469 int rc = 0; 2470 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; 2471 2472 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2473 filter = STAILQ_FIRST(&vnic->filter); 2474 while (filter) { 2475 /* Search for this matching MAC+VLAN filter */ 2476 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) { 2477 /* Delete the filter */ 2478 rc = bnxt_hwrm_clear_l2_filter(bp, filter); 2479 if (rc) 2480 return rc; 2481 STAILQ_REMOVE(&vnic->filter, filter, 2482 bnxt_filter_info, next); 2483 bnxt_free_filter(bp, filter); 2484 PMD_DRV_LOG(INFO, 2485 "Deleted vlan filter for %d\n", 2486 vlan_id); 2487 return 0; 2488 } 2489 filter = STAILQ_NEXT(filter, next); 2490 } 2491 return -ENOENT; 2492 } 2493 2494 static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 2495 { 2496 struct bnxt_filter_info *filter; 2497 struct bnxt_vnic_info *vnic; 2498 int rc = 0; 2499 uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN | 2500 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK; 2501 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; 2502 2503 /* Implementation notes on the use of VNIC in this command: 2504 * 2505 * By default, these filters belong to default vnic for the function. 2506 * Once these filters are set up, only destination VNIC can be modified. 2507 * If the destination VNIC is not specified in this command, 2508 * then the HWRM shall only create an l2 context id. 2509 */ 2510 2511 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2512 filter = STAILQ_FIRST(&vnic->filter); 2513 /* Check if the VLAN has already been added */ 2514 while (filter) { 2515 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) 2516 return -EEXIST; 2517 2518 filter = STAILQ_NEXT(filter, next); 2519 } 2520 2521 /* No match found. Alloc a fresh filter and issue the L2_FILTER_ALLOC 2522 * command to create MAC+VLAN filter with the right flags, enables set. 2523 */ 2524 filter = bnxt_alloc_filter(bp); 2525 if (!filter) { 2526 PMD_DRV_LOG(ERR, 2527 "MAC/VLAN filter alloc failed\n"); 2528 return -ENOMEM; 2529 } 2530 /* MAC + VLAN ID filter */ 2531 /* If l2_ivlan == 0 and l2_ivlan_mask != 0, only 2532 * untagged packets are received 2533 * 2534 * If l2_ivlan != 0 and l2_ivlan_mask != 0, untagged 2535 * packets and only the programmed vlan's packets are received 2536 */ 2537 filter->l2_ivlan = vlan_id; 2538 filter->l2_ivlan_mask = 0x0FFF; 2539 filter->enables |= en; 2540 filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; 2541 2542 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); 2543 if (rc) { 2544 /* Free the newly allocated filter as we were 2545 * not able to create the filter in hardware. 2546 */ 2547 bnxt_free_filter(bp, filter); 2548 return rc; 2549 } 2550 2551 filter->mac_index = 0; 2552 /* Add this new filter to the list */ 2553 if (vlan_id == 0) 2554 STAILQ_INSERT_HEAD(&vnic->filter, filter, next); 2555 else 2556 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 2557 2558 PMD_DRV_LOG(INFO, 2559 "Added Vlan filter for %d\n", vlan_id); 2560 return rc; 2561 } 2562 2563 static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev, 2564 uint16_t vlan_id, int on) 2565 { 2566 struct bnxt *bp = eth_dev->data->dev_private; 2567 int rc; 2568 2569 rc = is_bnxt_in_error(bp); 2570 if (rc) 2571 return rc; 2572 2573 if (!eth_dev->data->dev_started) { 2574 PMD_DRV_LOG(ERR, "port must be started before setting vlan\n"); 2575 return -EINVAL; 2576 } 2577 2578 /* These operations apply to ALL existing MAC/VLAN filters */ 2579 if (on) 2580 return bnxt_add_vlan_filter(bp, vlan_id); 2581 else 2582 return bnxt_del_vlan_filter(bp, vlan_id); 2583 } 2584 2585 static int bnxt_del_dflt_mac_filter(struct bnxt *bp, 2586 struct bnxt_vnic_info *vnic) 2587 { 2588 struct bnxt_filter_info *filter; 2589 int rc; 2590 2591 filter = STAILQ_FIRST(&vnic->filter); 2592 while (filter) { 2593 if (filter->mac_index == 0 && 2594 !memcmp(filter->l2_addr, bp->mac_addr, 2595 RTE_ETHER_ADDR_LEN)) { 2596 rc = bnxt_hwrm_clear_l2_filter(bp, filter); 2597 if (!rc) { 2598 STAILQ_REMOVE(&vnic->filter, filter, 2599 bnxt_filter_info, next); 2600 bnxt_free_filter(bp, filter); 2601 } 2602 return rc; 2603 } 2604 filter = STAILQ_NEXT(filter, next); 2605 } 2606 return 0; 2607 } 2608 2609 static int 2610 bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads) 2611 { 2612 struct bnxt_vnic_info *vnic; 2613 unsigned int i; 2614 int rc; 2615 2616 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2617 if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) { 2618 /* Remove any VLAN filters programmed */ 2619 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++) 2620 bnxt_del_vlan_filter(bp, i); 2621 2622 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0); 2623 if (rc) 2624 return rc; 2625 } else { 2626 /* Default filter will allow packets that match the 2627 * dest mac. So, it has to be deleted, otherwise, we 2628 * will endup receiving vlan packets for which the 2629 * filter is not programmed, when hw-vlan-filter 2630 * configuration is ON 2631 */ 2632 bnxt_del_dflt_mac_filter(bp, vnic); 2633 /* This filter will allow only untagged packets */ 2634 bnxt_add_vlan_filter(bp, 0); 2635 } 2636 PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n", 2637 !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)); 2638 2639 return 0; 2640 } 2641 2642 static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id) 2643 { 2644 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 2645 unsigned int i; 2646 int rc; 2647 2648 /* Destroy vnic filters and vnic */ 2649 if (bp->eth_dev->data->dev_conf.rxmode.offloads & 2650 RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { 2651 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++) 2652 bnxt_del_vlan_filter(bp, i); 2653 } 2654 bnxt_del_dflt_mac_filter(bp, vnic); 2655 2656 rc = bnxt_hwrm_vnic_ctx_free(bp, vnic); 2657 if (rc) 2658 return rc; 2659 2660 rc = bnxt_hwrm_vnic_free(bp, vnic); 2661 if (rc) 2662 return rc; 2663 2664 rte_free(vnic->fw_grp_ids); 2665 vnic->fw_grp_ids = NULL; 2666 2667 vnic->rx_queue_cnt = 0; 2668 2669 return 0; 2670 } 2671 2672 static int 2673 bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads) 2674 { 2675 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2676 int rc; 2677 2678 /* Destroy, recreate and reconfigure the default vnic */ 2679 rc = bnxt_free_one_vnic(bp, 0); 2680 if (rc) 2681 return rc; 2682 2683 /* default vnic 0 */ 2684 rc = bnxt_setup_one_vnic(bp, 0); 2685 if (rc) 2686 return rc; 2687 2688 if (bp->eth_dev->data->dev_conf.rxmode.offloads & 2689 RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { 2690 rc = bnxt_add_vlan_filter(bp, 0); 2691 if (rc) 2692 return rc; 2693 rc = bnxt_restore_vlan_filters(bp); 2694 if (rc) 2695 return rc; 2696 } else { 2697 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0); 2698 if (rc) 2699 return rc; 2700 } 2701 2702 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 2703 if (rc) 2704 return rc; 2705 2706 PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n", 2707 !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)); 2708 2709 return rc; 2710 } 2711 2712 static int 2713 bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask) 2714 { 2715 uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; 2716 struct bnxt *bp = dev->data->dev_private; 2717 int rc; 2718 2719 rc = is_bnxt_in_error(bp); 2720 if (rc) 2721 return rc; 2722 2723 /* Filter settings will get applied when port is started */ 2724 if (!dev->data->dev_started) 2725 return 0; 2726 2727 if (mask & RTE_ETH_VLAN_FILTER_MASK) { 2728 /* Enable or disable VLAN filtering */ 2729 rc = bnxt_config_vlan_hw_filter(bp, rx_offloads); 2730 if (rc) 2731 return rc; 2732 } 2733 2734 if (mask & RTE_ETH_VLAN_STRIP_MASK) { 2735 /* Enable or disable VLAN stripping */ 2736 rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads); 2737 if (rc) 2738 return rc; 2739 } 2740 2741 if (mask & RTE_ETH_VLAN_EXTEND_MASK) { 2742 if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) 2743 PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n"); 2744 else 2745 PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n"); 2746 } 2747 2748 return 0; 2749 } 2750 2751 static int 2752 bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type, 2753 uint16_t tpid) 2754 { 2755 struct bnxt *bp = dev->data->dev_private; 2756 int qinq = dev->data->dev_conf.rxmode.offloads & 2757 RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 2758 2759 if (vlan_type != RTE_ETH_VLAN_TYPE_INNER && 2760 vlan_type != RTE_ETH_VLAN_TYPE_OUTER) { 2761 PMD_DRV_LOG(ERR, 2762 "Unsupported vlan type."); 2763 return -EINVAL; 2764 } 2765 if (!qinq) { 2766 PMD_DRV_LOG(ERR, 2767 "QinQ not enabled. Needs to be ON as we can " 2768 "accelerate only outer vlan\n"); 2769 return -EINVAL; 2770 } 2771 2772 if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) { 2773 switch (tpid) { 2774 case RTE_ETHER_TYPE_QINQ: 2775 bp->outer_tpid_bd = 2776 TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8; 2777 break; 2778 case RTE_ETHER_TYPE_VLAN: 2779 bp->outer_tpid_bd = 2780 TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100; 2781 break; 2782 case RTE_ETHER_TYPE_QINQ1: 2783 bp->outer_tpid_bd = 2784 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100; 2785 break; 2786 case RTE_ETHER_TYPE_QINQ2: 2787 bp->outer_tpid_bd = 2788 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200; 2789 break; 2790 case RTE_ETHER_TYPE_QINQ3: 2791 bp->outer_tpid_bd = 2792 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300; 2793 break; 2794 default: 2795 PMD_DRV_LOG(ERR, "Invalid TPID: %x\n", tpid); 2796 return -EINVAL; 2797 } 2798 bp->outer_tpid_bd |= tpid; 2799 PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd); 2800 } else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) { 2801 PMD_DRV_LOG(ERR, 2802 "Can accelerate only outer vlan in QinQ\n"); 2803 return -EINVAL; 2804 } 2805 2806 return 0; 2807 } 2808 2809 static int 2810 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, 2811 struct rte_ether_addr *addr) 2812 { 2813 struct bnxt *bp = dev->data->dev_private; 2814 /* Default Filter is tied to VNIC 0 */ 2815 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2816 int rc; 2817 2818 rc = is_bnxt_in_error(bp); 2819 if (rc) 2820 return rc; 2821 2822 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 2823 return -EPERM; 2824 2825 if (rte_is_zero_ether_addr(addr)) 2826 return -EINVAL; 2827 2828 /* Filter settings will get applied when port is started */ 2829 if (!dev->data->dev_started) 2830 return 0; 2831 2832 /* Check if the requested MAC is already added */ 2833 if (memcmp(addr, bp->mac_addr, RTE_ETHER_ADDR_LEN) == 0) 2834 return 0; 2835 2836 /* Destroy filter and re-create it */ 2837 bnxt_del_dflt_mac_filter(bp, vnic); 2838 2839 memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN); 2840 if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { 2841 /* This filter will allow only untagged packets */ 2842 rc = bnxt_add_vlan_filter(bp, 0); 2843 } else { 2844 rc = bnxt_add_mac_filter(bp, vnic, addr, 0, 0); 2845 } 2846 2847 PMD_DRV_LOG(DEBUG, "Set MAC addr\n"); 2848 return rc; 2849 } 2850 2851 static int 2852 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, 2853 struct rte_ether_addr *mc_addr_set, 2854 uint32_t nb_mc_addr) 2855 { 2856 struct bnxt *bp = eth_dev->data->dev_private; 2857 struct bnxt_vnic_info *vnic; 2858 uint32_t i = 0; 2859 int rc; 2860 2861 rc = is_bnxt_in_error(bp); 2862 if (rc) 2863 return rc; 2864 2865 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2866 2867 bp->nb_mc_addr = nb_mc_addr; 2868 2869 if (nb_mc_addr > BNXT_MAX_MC_ADDRS) { 2870 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; 2871 goto allmulti; 2872 } 2873 2874 /* TODO Check for Duplicate mcast addresses */ 2875 vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; 2876 for (i = 0; i < nb_mc_addr; i++) 2877 rte_ether_addr_copy(&mc_addr_set[i], &bp->mcast_addr_list[i]); 2878 2879 if (bp->nb_mc_addr) 2880 vnic->flags |= BNXT_VNIC_INFO_MCAST; 2881 else 2882 vnic->flags &= ~BNXT_VNIC_INFO_MCAST; 2883 2884 allmulti: 2885 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 2886 } 2887 2888 static int 2889 bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) 2890 { 2891 struct bnxt *bp = dev->data->dev_private; 2892 uint8_t fw_major = (bp->fw_ver >> 24) & 0xff; 2893 uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff; 2894 uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff; 2895 uint8_t fw_rsvd = bp->fw_ver & 0xff; 2896 int ret; 2897 2898 ret = snprintf(fw_version, fw_size, "%d.%d.%d.%d", 2899 fw_major, fw_minor, fw_updt, fw_rsvd); 2900 if (ret < 0) 2901 return -EINVAL; 2902 2903 ret += 1; /* add the size of '\0' */ 2904 if (fw_size < (size_t)ret) 2905 return ret; 2906 else 2907 return 0; 2908 } 2909 2910 static void 2911 bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 2912 struct rte_eth_rxq_info *qinfo) 2913 { 2914 struct bnxt *bp = dev->data->dev_private; 2915 struct bnxt_rx_queue *rxq; 2916 2917 if (is_bnxt_in_error(bp)) 2918 return; 2919 2920 rxq = dev->data->rx_queues[queue_id]; 2921 2922 qinfo->mp = rxq->mb_pool; 2923 qinfo->scattered_rx = dev->data->scattered_rx; 2924 qinfo->nb_desc = rxq->nb_rx_desc; 2925 2926 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; 2927 qinfo->conf.rx_drop_en = rxq->drop_en; 2928 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start; 2929 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads; 2930 } 2931 2932 static void 2933 bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 2934 struct rte_eth_txq_info *qinfo) 2935 { 2936 struct bnxt *bp = dev->data->dev_private; 2937 struct bnxt_tx_queue *txq; 2938 2939 if (is_bnxt_in_error(bp)) 2940 return; 2941 2942 txq = dev->data->tx_queues[queue_id]; 2943 2944 qinfo->nb_desc = txq->nb_tx_desc; 2945 2946 qinfo->conf.tx_thresh.pthresh = txq->pthresh; 2947 qinfo->conf.tx_thresh.hthresh = txq->hthresh; 2948 qinfo->conf.tx_thresh.wthresh = txq->wthresh; 2949 2950 qinfo->conf.tx_free_thresh = txq->tx_free_thresh; 2951 qinfo->conf.tx_rs_thresh = 0; 2952 qinfo->conf.tx_deferred_start = txq->tx_deferred_start; 2953 qinfo->conf.offloads = txq->offloads; 2954 } 2955 2956 static const struct { 2957 eth_rx_burst_t pkt_burst; 2958 const char *info; 2959 } bnxt_rx_burst_info[] = { 2960 {bnxt_recv_pkts, "Scalar"}, 2961 #if defined(RTE_ARCH_X86) 2962 {bnxt_recv_pkts_vec, "Vector SSE"}, 2963 #endif 2964 #if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT) 2965 {bnxt_recv_pkts_vec_avx2, "Vector AVX2"}, 2966 #endif 2967 #if defined(RTE_ARCH_ARM64) 2968 {bnxt_recv_pkts_vec, "Vector Neon"}, 2969 #endif 2970 }; 2971 2972 static int 2973 bnxt_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 2974 struct rte_eth_burst_mode *mode) 2975 { 2976 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; 2977 size_t i; 2978 2979 for (i = 0; i < RTE_DIM(bnxt_rx_burst_info); i++) { 2980 if (pkt_burst == bnxt_rx_burst_info[i].pkt_burst) { 2981 snprintf(mode->info, sizeof(mode->info), "%s", 2982 bnxt_rx_burst_info[i].info); 2983 return 0; 2984 } 2985 } 2986 2987 return -EINVAL; 2988 } 2989 2990 static const struct { 2991 eth_tx_burst_t pkt_burst; 2992 const char *info; 2993 } bnxt_tx_burst_info[] = { 2994 {bnxt_xmit_pkts, "Scalar"}, 2995 #if defined(RTE_ARCH_X86) 2996 {bnxt_xmit_pkts_vec, "Vector SSE"}, 2997 #endif 2998 #if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT) 2999 {bnxt_xmit_pkts_vec_avx2, "Vector AVX2"}, 3000 #endif 3001 #if defined(RTE_ARCH_ARM64) 3002 {bnxt_xmit_pkts_vec, "Vector Neon"}, 3003 #endif 3004 }; 3005 3006 static int 3007 bnxt_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 3008 struct rte_eth_burst_mode *mode) 3009 { 3010 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst; 3011 size_t i; 3012 3013 for (i = 0; i < RTE_DIM(bnxt_tx_burst_info); i++) { 3014 if (pkt_burst == bnxt_tx_burst_info[i].pkt_burst) { 3015 snprintf(mode->info, sizeof(mode->info), "%s", 3016 bnxt_tx_burst_info[i].info); 3017 return 0; 3018 } 3019 } 3020 3021 return -EINVAL; 3022 } 3023 3024 int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) 3025 { 3026 uint32_t overhead = BNXT_MAX_PKT_LEN - BNXT_MAX_MTU; 3027 struct bnxt *bp = eth_dev->data->dev_private; 3028 uint32_t new_pkt_size; 3029 uint32_t rc; 3030 uint32_t i; 3031 3032 rc = is_bnxt_in_error(bp); 3033 if (rc) 3034 return rc; 3035 3036 /* Exit if receive queues are not configured yet */ 3037 if (!eth_dev->data->nb_rx_queues) 3038 return rc; 3039 3040 new_pkt_size = new_mtu + overhead; 3041 3042 /* 3043 * Disallow any MTU change that would require scattered receive support 3044 * if it is not already enabled. 3045 */ 3046 if (eth_dev->data->dev_started && 3047 !eth_dev->data->scattered_rx && 3048 (new_pkt_size > 3049 eth_dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { 3050 PMD_DRV_LOG(ERR, 3051 "MTU change would require scattered rx support. "); 3052 PMD_DRV_LOG(ERR, "Stop port before changing MTU.\n"); 3053 return -EINVAL; 3054 } 3055 3056 if (new_mtu > RTE_ETHER_MTU) 3057 bp->flags |= BNXT_FLAG_JUMBO; 3058 else 3059 bp->flags &= ~BNXT_FLAG_JUMBO; 3060 3061 /* Is there a change in mtu setting? */ 3062 if (eth_dev->data->mtu == new_mtu) 3063 return rc; 3064 3065 for (i = 0; i < bp->nr_vnics; i++) { 3066 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 3067 uint16_t size = 0; 3068 3069 vnic->mru = BNXT_VNIC_MRU(new_mtu); 3070 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 3071 if (rc) 3072 break; 3073 3074 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool); 3075 size -= RTE_PKTMBUF_HEADROOM; 3076 3077 if (size < new_mtu) { 3078 rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); 3079 if (rc) 3080 return rc; 3081 } 3082 } 3083 3084 if (bnxt_hwrm_config_host_mtu(bp)) 3085 PMD_DRV_LOG(WARNING, "Failed to configure host MTU\n"); 3086 3087 PMD_DRV_LOG(INFO, "New MTU is %d\n", new_mtu); 3088 3089 return rc; 3090 } 3091 3092 static int 3093 bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on) 3094 { 3095 struct bnxt *bp = dev->data->dev_private; 3096 uint16_t vlan = bp->vlan; 3097 int rc; 3098 3099 rc = is_bnxt_in_error(bp); 3100 if (rc) 3101 return rc; 3102 3103 if (!BNXT_SINGLE_PF(bp)) { 3104 PMD_DRV_LOG(ERR, "PVID cannot be modified on VF or on shared PF\n"); 3105 return -ENOTSUP; 3106 } 3107 bp->vlan = on ? pvid : 0; 3108 3109 rc = bnxt_hwrm_set_default_vlan(bp, 0, 0); 3110 if (rc) 3111 bp->vlan = vlan; 3112 return rc; 3113 } 3114 3115 static int 3116 bnxt_dev_led_on_op(struct rte_eth_dev *dev) 3117 { 3118 struct bnxt *bp = dev->data->dev_private; 3119 int rc; 3120 3121 rc = is_bnxt_in_error(bp); 3122 if (rc) 3123 return rc; 3124 3125 return bnxt_hwrm_port_led_cfg(bp, true); 3126 } 3127 3128 static int 3129 bnxt_dev_led_off_op(struct rte_eth_dev *dev) 3130 { 3131 struct bnxt *bp = dev->data->dev_private; 3132 int rc; 3133 3134 rc = is_bnxt_in_error(bp); 3135 if (rc) 3136 return rc; 3137 3138 return bnxt_hwrm_port_led_cfg(bp, false); 3139 } 3140 3141 static uint32_t 3142 bnxt_rx_queue_count_op(void *rx_queue) 3143 { 3144 struct bnxt *bp; 3145 struct bnxt_cp_ring_info *cpr; 3146 uint32_t desc = 0, raw_cons, cp_ring_size; 3147 struct bnxt_rx_queue *rxq; 3148 struct rx_pkt_cmpl *rxcmp; 3149 int rc; 3150 3151 rxq = rx_queue; 3152 bp = rxq->bp; 3153 3154 rc = is_bnxt_in_error(bp); 3155 if (rc) 3156 return rc; 3157 3158 cpr = rxq->cp_ring; 3159 raw_cons = cpr->cp_raw_cons; 3160 cp_ring_size = cpr->cp_ring_struct->ring_size; 3161 3162 while (1) { 3163 uint32_t agg_cnt, cons, cmpl_type; 3164 3165 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 3166 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 3167 3168 if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size)) 3169 break; 3170 3171 cmpl_type = CMP_TYPE(rxcmp); 3172 3173 switch (cmpl_type) { 3174 case CMPL_BASE_TYPE_RX_L2: 3175 case CMPL_BASE_TYPE_RX_L2_V2: 3176 agg_cnt = BNXT_RX_L2_AGG_BUFS(rxcmp); 3177 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3178 desc++; 3179 break; 3180 3181 case CMPL_BASE_TYPE_RX_TPA_END: 3182 if (BNXT_CHIP_P5(rxq->bp)) { 3183 struct rx_tpa_v2_end_cmpl_hi *p5_tpa_end; 3184 3185 p5_tpa_end = (void *)rxcmp; 3186 agg_cnt = BNXT_TPA_END_AGG_BUFS_TH(p5_tpa_end); 3187 } else { 3188 struct rx_tpa_end_cmpl *tpa_end; 3189 3190 tpa_end = (void *)rxcmp; 3191 agg_cnt = BNXT_TPA_END_AGG_BUFS(tpa_end); 3192 } 3193 3194 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3195 desc++; 3196 break; 3197 3198 default: 3199 raw_cons += CMP_LEN(cmpl_type); 3200 } 3201 } 3202 3203 return desc; 3204 } 3205 3206 static int 3207 bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset) 3208 { 3209 struct bnxt_rx_queue *rxq = rx_queue; 3210 struct bnxt_cp_ring_info *cpr; 3211 struct bnxt_rx_ring_info *rxr; 3212 uint32_t desc, raw_cons, cp_ring_size; 3213 struct bnxt *bp = rxq->bp; 3214 struct rx_pkt_cmpl *rxcmp; 3215 int rc; 3216 3217 rc = is_bnxt_in_error(bp); 3218 if (rc) 3219 return rc; 3220 3221 if (offset >= rxq->nb_rx_desc) 3222 return -EINVAL; 3223 3224 rxr = rxq->rx_ring; 3225 cpr = rxq->cp_ring; 3226 cp_ring_size = cpr->cp_ring_struct->ring_size; 3227 3228 /* 3229 * For the vector receive case, the completion at the requested 3230 * offset can be indexed directly. 3231 */ 3232 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) 3233 if (bp->flags & BNXT_FLAG_RX_VECTOR_PKT_MODE) { 3234 struct rx_pkt_cmpl *rxcmp; 3235 uint32_t cons; 3236 3237 /* Check status of completion descriptor. */ 3238 raw_cons = cpr->cp_raw_cons + 3239 offset * CMP_LEN(CMPL_BASE_TYPE_RX_L2); 3240 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 3241 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 3242 3243 if (bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size)) 3244 return RTE_ETH_RX_DESC_DONE; 3245 3246 /* Check whether rx desc has an mbuf attached. */ 3247 cons = RING_CMP(rxr->rx_ring_struct, raw_cons / 2); 3248 if (cons >= rxq->rxrearm_start && 3249 cons < rxq->rxrearm_start + rxq->rxrearm_nb) { 3250 return RTE_ETH_RX_DESC_UNAVAIL; 3251 } 3252 3253 return RTE_ETH_RX_DESC_AVAIL; 3254 } 3255 #endif 3256 3257 /* 3258 * For the non-vector receive case, scan the completion ring to 3259 * locate the completion descriptor for the requested offset. 3260 */ 3261 raw_cons = cpr->cp_raw_cons; 3262 desc = 0; 3263 while (1) { 3264 uint32_t agg_cnt, cons, cmpl_type; 3265 3266 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 3267 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 3268 3269 if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size)) 3270 break; 3271 3272 cmpl_type = CMP_TYPE(rxcmp); 3273 3274 switch (cmpl_type) { 3275 case CMPL_BASE_TYPE_RX_L2: 3276 case CMPL_BASE_TYPE_RX_L2_V2: 3277 if (desc == offset) { 3278 cons = rxcmp->opaque; 3279 if (rxr->rx_buf_ring[cons]) 3280 return RTE_ETH_RX_DESC_DONE; 3281 else 3282 return RTE_ETH_RX_DESC_UNAVAIL; 3283 } 3284 agg_cnt = BNXT_RX_L2_AGG_BUFS(rxcmp); 3285 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3286 desc++; 3287 break; 3288 3289 case CMPL_BASE_TYPE_RX_TPA_END: 3290 if (desc == offset) 3291 return RTE_ETH_RX_DESC_DONE; 3292 3293 if (BNXT_CHIP_P5(rxq->bp)) { 3294 struct rx_tpa_v2_end_cmpl_hi *p5_tpa_end; 3295 3296 p5_tpa_end = (void *)rxcmp; 3297 agg_cnt = BNXT_TPA_END_AGG_BUFS_TH(p5_tpa_end); 3298 } else { 3299 struct rx_tpa_end_cmpl *tpa_end; 3300 3301 tpa_end = (void *)rxcmp; 3302 agg_cnt = BNXT_TPA_END_AGG_BUFS(tpa_end); 3303 } 3304 3305 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3306 desc++; 3307 break; 3308 3309 default: 3310 raw_cons += CMP_LEN(cmpl_type); 3311 } 3312 } 3313 3314 return RTE_ETH_RX_DESC_AVAIL; 3315 } 3316 3317 static int 3318 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset) 3319 { 3320 struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue; 3321 struct bnxt_cp_ring_info *cpr = txq->cp_ring; 3322 uint32_t ring_mask, raw_cons, nb_tx_pkts = 0; 3323 struct cmpl_base *cp_desc_ring; 3324 int rc; 3325 3326 rc = is_bnxt_in_error(txq->bp); 3327 if (rc) 3328 return rc; 3329 3330 if (offset >= txq->nb_tx_desc) 3331 return -EINVAL; 3332 3333 /* Return "desc done" if descriptor is available for use. */ 3334 if (bnxt_tx_bds_in_hw(txq) <= offset) 3335 return RTE_ETH_TX_DESC_DONE; 3336 3337 raw_cons = cpr->cp_raw_cons; 3338 cp_desc_ring = cpr->cp_desc_ring; 3339 ring_mask = cpr->cp_ring_struct->ring_mask; 3340 3341 /* Check to see if hw has posted a completion for the descriptor. */ 3342 while (1) { 3343 struct tx_cmpl *txcmp; 3344 uint32_t cons; 3345 3346 cons = RING_CMPL(ring_mask, raw_cons); 3347 txcmp = (struct tx_cmpl *)&cp_desc_ring[cons]; 3348 3349 if (!bnxt_cpr_cmp_valid(txcmp, raw_cons, ring_mask + 1)) 3350 break; 3351 3352 if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2) 3353 nb_tx_pkts += rte_le_to_cpu_32(txcmp->opaque); 3354 3355 if (nb_tx_pkts > offset) 3356 return RTE_ETH_TX_DESC_DONE; 3357 3358 raw_cons = NEXT_RAW_CMP(raw_cons); 3359 } 3360 3361 /* Descriptor is pending transmit, not yet completed by hardware. */ 3362 return RTE_ETH_TX_DESC_FULL; 3363 } 3364 3365 int 3366 bnxt_flow_ops_get_op(struct rte_eth_dev *dev, 3367 const struct rte_flow_ops **ops) 3368 { 3369 struct bnxt *bp = dev->data->dev_private; 3370 int ret = 0; 3371 3372 if (!bp) 3373 return -EIO; 3374 3375 if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) { 3376 struct bnxt_representor *vfr = dev->data->dev_private; 3377 bp = vfr->parent_dev->data->dev_private; 3378 /* parent is deleted while children are still valid */ 3379 if (!bp) { 3380 PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR Error\n", 3381 dev->data->port_id); 3382 return -EIO; 3383 } 3384 } 3385 3386 ret = is_bnxt_in_error(bp); 3387 if (ret) 3388 return ret; 3389 3390 /* PMD supports thread-safe flow operations. rte_flow API 3391 * functions can avoid mutex for multi-thread safety. 3392 */ 3393 dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE; 3394 3395 if (BNXT_TRUFLOW_EN(bp)) 3396 *ops = &bnxt_ulp_rte_flow_ops; 3397 else 3398 *ops = &bnxt_flow_ops; 3399 3400 return ret; 3401 } 3402 3403 static const uint32_t * 3404 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev) 3405 { 3406 static const uint32_t ptypes[] = { 3407 RTE_PTYPE_L2_ETHER_VLAN, 3408 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 3409 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 3410 RTE_PTYPE_L4_ICMP, 3411 RTE_PTYPE_L4_TCP, 3412 RTE_PTYPE_L4_UDP, 3413 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, 3414 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, 3415 RTE_PTYPE_INNER_L4_ICMP, 3416 RTE_PTYPE_INNER_L4_TCP, 3417 RTE_PTYPE_INNER_L4_UDP, 3418 RTE_PTYPE_UNKNOWN 3419 }; 3420 3421 if (!dev->rx_pkt_burst) 3422 return NULL; 3423 3424 return ptypes; 3425 } 3426 3427 static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count, 3428 int reg_win) 3429 { 3430 uint32_t reg_base = *reg_arr & 0xfffff000; 3431 uint32_t win_off; 3432 int i; 3433 3434 for (i = 0; i < count; i++) { 3435 if ((reg_arr[i] & 0xfffff000) != reg_base) 3436 return -ERANGE; 3437 } 3438 win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4; 3439 rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off); 3440 return 0; 3441 } 3442 3443 static int bnxt_map_ptp_regs(struct bnxt *bp) 3444 { 3445 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3446 uint32_t *reg_arr; 3447 int rc, i; 3448 3449 reg_arr = ptp->rx_regs; 3450 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5); 3451 if (rc) 3452 return rc; 3453 3454 reg_arr = ptp->tx_regs; 3455 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6); 3456 if (rc) 3457 return rc; 3458 3459 for (i = 0; i < BNXT_PTP_RX_REGS; i++) 3460 ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff); 3461 3462 for (i = 0; i < BNXT_PTP_TX_REGS; i++) 3463 ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff); 3464 3465 return 0; 3466 } 3467 3468 static void bnxt_unmap_ptp_regs(struct bnxt *bp) 3469 { 3470 rte_write32(0, (uint8_t *)bp->bar0 + 3471 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16); 3472 rte_write32(0, (uint8_t *)bp->bar0 + 3473 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20); 3474 } 3475 3476 static uint64_t bnxt_cc_read(struct bnxt *bp) 3477 { 3478 uint64_t ns; 3479 3480 ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3481 BNXT_GRCPF_REG_SYNC_TIME)); 3482 ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3483 BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32; 3484 return ns; 3485 } 3486 3487 static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts) 3488 { 3489 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3490 uint32_t fifo; 3491 3492 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3493 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 3494 if (fifo & BNXT_PTP_TX_FIFO_EMPTY) 3495 return -EAGAIN; 3496 3497 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3498 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 3499 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3500 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L])); 3501 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3502 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32; 3503 rte_read32((uint8_t *)bp->bar0 + ptp->tx_mapped_regs[BNXT_PTP_TX_SEQ]); 3504 3505 return 0; 3506 } 3507 3508 static int bnxt_clr_rx_ts(struct bnxt *bp, uint64_t *last_ts) 3509 { 3510 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3511 struct bnxt_pf_info *pf = bp->pf; 3512 uint16_t port_id; 3513 int i = 0; 3514 uint32_t fifo; 3515 3516 if (!ptp || (bp->flags & BNXT_FLAG_CHIP_P5)) 3517 return -EINVAL; 3518 3519 port_id = pf->port_id; 3520 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3521 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3522 while ((fifo & BNXT_PTP_RX_FIFO_PENDING) && (i < BNXT_PTP_RX_PND_CNT)) { 3523 rte_write32(1 << port_id, (uint8_t *)bp->bar0 + 3524 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]); 3525 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3526 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3527 *last_ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3528 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L])); 3529 *last_ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3530 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32; 3531 i++; 3532 } 3533 3534 if (i >= BNXT_PTP_RX_PND_CNT) 3535 return -EBUSY; 3536 3537 return 0; 3538 } 3539 3540 static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts) 3541 { 3542 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3543 struct bnxt_pf_info *pf = bp->pf; 3544 uint16_t port_id; 3545 uint32_t fifo; 3546 3547 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3548 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3549 if (!(fifo & BNXT_PTP_RX_FIFO_PENDING)) 3550 return -EAGAIN; 3551 3552 port_id = pf->port_id; 3553 rte_write32(1 << port_id, (uint8_t *)bp->bar0 + 3554 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]); 3555 3556 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3557 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3558 if (fifo & BNXT_PTP_RX_FIFO_PENDING) 3559 return bnxt_clr_rx_ts(bp, ts); 3560 3561 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3562 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L])); 3563 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3564 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32; 3565 3566 return 0; 3567 } 3568 3569 static int 3570 bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 3571 { 3572 uint64_t ns; 3573 struct bnxt *bp = dev->data->dev_private; 3574 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3575 3576 if (!ptp) 3577 return -ENOTSUP; 3578 3579 ns = rte_timespec_to_ns(ts); 3580 /* Set the timecounters to a new value. */ 3581 ptp->tc.nsec = ns; 3582 ptp->tx_tstamp_tc.nsec = ns; 3583 ptp->rx_tstamp_tc.nsec = ns; 3584 3585 return 0; 3586 } 3587 3588 static int 3589 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 3590 { 3591 struct bnxt *bp = dev->data->dev_private; 3592 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3593 uint64_t ns, systime_cycles = 0; 3594 int rc = 0; 3595 3596 if (!ptp) 3597 return -ENOTSUP; 3598 3599 if (BNXT_CHIP_P5(bp)) 3600 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, 3601 &systime_cycles); 3602 else 3603 systime_cycles = bnxt_cc_read(bp); 3604 3605 ns = rte_timecounter_update(&ptp->tc, systime_cycles); 3606 *ts = rte_ns_to_timespec(ns); 3607 3608 return rc; 3609 } 3610 static int 3611 bnxt_timesync_enable(struct rte_eth_dev *dev) 3612 { 3613 struct bnxt *bp = dev->data->dev_private; 3614 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3615 uint32_t shift = 0; 3616 int rc; 3617 3618 if (!ptp) 3619 return -ENOTSUP; 3620 3621 ptp->rx_filter = 1; 3622 ptp->tx_tstamp_en = 1; 3623 ptp->rxctl = BNXT_PTP_MSG_EVENTS; 3624 3625 rc = bnxt_hwrm_ptp_cfg(bp); 3626 if (rc) 3627 return rc; 3628 3629 memset(&ptp->tc, 0, sizeof(struct rte_timecounter)); 3630 memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 3631 memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 3632 3633 ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3634 ptp->tc.cc_shift = shift; 3635 ptp->tc.nsec_mask = (1ULL << shift) - 1; 3636 3637 ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3638 ptp->rx_tstamp_tc.cc_shift = shift; 3639 ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 3640 3641 ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3642 ptp->tx_tstamp_tc.cc_shift = shift; 3643 ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 3644 3645 if (!BNXT_CHIP_P5(bp)) 3646 bnxt_map_ptp_regs(bp); 3647 else 3648 rc = bnxt_ptp_start(bp); 3649 3650 return rc; 3651 } 3652 3653 static int 3654 bnxt_timesync_disable(struct rte_eth_dev *dev) 3655 { 3656 struct bnxt *bp = dev->data->dev_private; 3657 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3658 3659 if (!ptp) 3660 return -ENOTSUP; 3661 3662 ptp->rx_filter = 0; 3663 ptp->tx_tstamp_en = 0; 3664 ptp->rxctl = 0; 3665 3666 bnxt_hwrm_ptp_cfg(bp); 3667 3668 if (!BNXT_CHIP_P5(bp)) 3669 bnxt_unmap_ptp_regs(bp); 3670 else 3671 bnxt_ptp_stop(bp); 3672 3673 return 0; 3674 } 3675 3676 static int 3677 bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 3678 struct timespec *timestamp, 3679 uint32_t flags __rte_unused) 3680 { 3681 struct bnxt *bp = dev->data->dev_private; 3682 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3683 uint64_t rx_tstamp_cycles = 0; 3684 uint64_t ns; 3685 3686 if (!ptp) 3687 return -ENOTSUP; 3688 3689 if (BNXT_CHIP_P5(bp)) 3690 rx_tstamp_cycles = ptp->rx_timestamp; 3691 else 3692 bnxt_get_rx_ts(bp, &rx_tstamp_cycles); 3693 3694 ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles); 3695 *timestamp = rte_ns_to_timespec(ns); 3696 return 0; 3697 } 3698 3699 static int 3700 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 3701 struct timespec *timestamp) 3702 { 3703 struct bnxt *bp = dev->data->dev_private; 3704 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3705 uint64_t tx_tstamp_cycles = 0; 3706 uint64_t ns; 3707 int rc = 0; 3708 3709 if (!ptp) 3710 return -ENOTSUP; 3711 3712 if (BNXT_CHIP_P5(bp)) 3713 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_PATH_TX, 3714 &tx_tstamp_cycles); 3715 else 3716 rc = bnxt_get_tx_ts(bp, &tx_tstamp_cycles); 3717 3718 ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles); 3719 *timestamp = rte_ns_to_timespec(ns); 3720 3721 return rc; 3722 } 3723 3724 static int 3725 bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 3726 { 3727 struct bnxt *bp = dev->data->dev_private; 3728 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3729 3730 if (!ptp) 3731 return -ENOTSUP; 3732 3733 ptp->tc.nsec += delta; 3734 ptp->tx_tstamp_tc.nsec += delta; 3735 ptp->rx_tstamp_tc.nsec += delta; 3736 3737 return 0; 3738 } 3739 3740 static int 3741 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev) 3742 { 3743 struct bnxt *bp = dev->data->dev_private; 3744 int rc; 3745 uint32_t dir_entries; 3746 uint32_t entry_length; 3747 3748 rc = is_bnxt_in_error(bp); 3749 if (rc) 3750 return rc; 3751 3752 PMD_DRV_LOG(INFO, PCI_PRI_FMT "\n", 3753 bp->pdev->addr.domain, bp->pdev->addr.bus, 3754 bp->pdev->addr.devid, bp->pdev->addr.function); 3755 3756 rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length); 3757 if (rc != 0) 3758 return rc; 3759 3760 return dir_entries * entry_length; 3761 } 3762 3763 static int 3764 bnxt_get_eeprom_op(struct rte_eth_dev *dev, 3765 struct rte_dev_eeprom_info *in_eeprom) 3766 { 3767 struct bnxt *bp = dev->data->dev_private; 3768 uint32_t index; 3769 uint32_t offset; 3770 int rc; 3771 3772 rc = is_bnxt_in_error(bp); 3773 if (rc) 3774 return rc; 3775 3776 PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n", 3777 bp->pdev->addr.domain, bp->pdev->addr.bus, 3778 bp->pdev->addr.devid, bp->pdev->addr.function, 3779 in_eeprom->offset, in_eeprom->length); 3780 3781 if (in_eeprom->offset == 0) /* special offset value to get directory */ 3782 return bnxt_get_nvram_directory(bp, in_eeprom->length, 3783 in_eeprom->data); 3784 3785 index = in_eeprom->offset >> 24; 3786 offset = in_eeprom->offset & 0xffffff; 3787 3788 if (index != 0) 3789 return bnxt_hwrm_get_nvram_item(bp, index - 1, offset, 3790 in_eeprom->length, in_eeprom->data); 3791 3792 return 0; 3793 } 3794 3795 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type) 3796 { 3797 switch (dir_type) { 3798 case BNX_DIR_TYPE_CHIMP_PATCH: 3799 case BNX_DIR_TYPE_BOOTCODE: 3800 case BNX_DIR_TYPE_BOOTCODE_2: 3801 case BNX_DIR_TYPE_APE_FW: 3802 case BNX_DIR_TYPE_APE_PATCH: 3803 case BNX_DIR_TYPE_KONG_FW: 3804 case BNX_DIR_TYPE_KONG_PATCH: 3805 case BNX_DIR_TYPE_BONO_FW: 3806 case BNX_DIR_TYPE_BONO_PATCH: 3807 /* FALLTHROUGH */ 3808 return true; 3809 } 3810 3811 return false; 3812 } 3813 3814 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type) 3815 { 3816 switch (dir_type) { 3817 case BNX_DIR_TYPE_AVS: 3818 case BNX_DIR_TYPE_EXP_ROM_MBA: 3819 case BNX_DIR_TYPE_PCIE: 3820 case BNX_DIR_TYPE_TSCF_UCODE: 3821 case BNX_DIR_TYPE_EXT_PHY: 3822 case BNX_DIR_TYPE_CCM: 3823 case BNX_DIR_TYPE_ISCSI_BOOT: 3824 case BNX_DIR_TYPE_ISCSI_BOOT_IPV6: 3825 case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6: 3826 /* FALLTHROUGH */ 3827 return true; 3828 } 3829 3830 return false; 3831 } 3832 3833 static bool bnxt_dir_type_is_executable(uint16_t dir_type) 3834 { 3835 return bnxt_dir_type_is_ape_bin_format(dir_type) || 3836 bnxt_dir_type_is_other_exec_format(dir_type); 3837 } 3838 3839 static int 3840 bnxt_set_eeprom_op(struct rte_eth_dev *dev, 3841 struct rte_dev_eeprom_info *in_eeprom) 3842 { 3843 struct bnxt *bp = dev->data->dev_private; 3844 uint8_t index, dir_op; 3845 uint16_t type, ext, ordinal, attr; 3846 int rc; 3847 3848 rc = is_bnxt_in_error(bp); 3849 if (rc) 3850 return rc; 3851 3852 PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n", 3853 bp->pdev->addr.domain, bp->pdev->addr.bus, 3854 bp->pdev->addr.devid, bp->pdev->addr.function, 3855 in_eeprom->offset, in_eeprom->length); 3856 3857 if (!BNXT_PF(bp)) { 3858 PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n"); 3859 return -EINVAL; 3860 } 3861 3862 type = in_eeprom->magic >> 16; 3863 3864 if (type == 0xffff) { /* special value for directory operations */ 3865 index = in_eeprom->magic & 0xff; 3866 dir_op = in_eeprom->magic >> 8; 3867 if (index == 0) 3868 return -EINVAL; 3869 switch (dir_op) { 3870 case 0x0e: /* erase */ 3871 if (in_eeprom->offset != ~in_eeprom->magic) 3872 return -EINVAL; 3873 return bnxt_hwrm_erase_nvram_directory(bp, index - 1); 3874 default: 3875 return -EINVAL; 3876 } 3877 } 3878 3879 /* Create or re-write an NVM item: */ 3880 if (bnxt_dir_type_is_executable(type) == true) 3881 return -EOPNOTSUPP; 3882 ext = in_eeprom->magic & 0xffff; 3883 ordinal = in_eeprom->offset >> 16; 3884 attr = in_eeprom->offset & 0xffff; 3885 3886 return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr, 3887 in_eeprom->data, in_eeprom->length); 3888 } 3889 3890 static int bnxt_get_module_info(struct rte_eth_dev *dev, 3891 struct rte_eth_dev_module_info *modinfo) 3892 { 3893 uint8_t module_info[SFF_DIAG_SUPPORT_OFFSET + 1]; 3894 struct bnxt *bp = dev->data->dev_private; 3895 int rc; 3896 3897 /* No point in going further if phy status indicates 3898 * module is not inserted or if it is powered down or 3899 * if it is of type 10GBase-T 3900 */ 3901 if (bp->link_info->module_status > 3902 HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG) { 3903 PMD_DRV_LOG(NOTICE, "Port %u : Module is not inserted or is powered down\n", 3904 dev->data->port_id); 3905 return -ENOTSUP; 3906 } 3907 3908 /* This feature is not supported in older firmware versions */ 3909 if (bp->hwrm_spec_code < 0x10202) { 3910 PMD_DRV_LOG(NOTICE, "Port %u : Feature is not supported in older firmware\n", 3911 dev->data->port_id); 3912 return -ENOTSUP; 3913 } 3914 3915 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 3916 SFF_DIAG_SUPPORT_OFFSET + 1, 3917 module_info); 3918 3919 if (rc) 3920 return rc; 3921 3922 switch (module_info[0]) { 3923 case SFF_MODULE_ID_SFP: 3924 modinfo->type = RTE_ETH_MODULE_SFF_8472; 3925 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; 3926 if (module_info[SFF_DIAG_SUPPORT_OFFSET] == 0) 3927 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_LEN; 3928 break; 3929 case SFF_MODULE_ID_QSFP: 3930 case SFF_MODULE_ID_QSFP_PLUS: 3931 modinfo->type = RTE_ETH_MODULE_SFF_8436; 3932 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_LEN; 3933 break; 3934 case SFF_MODULE_ID_QSFP28: 3935 modinfo->type = RTE_ETH_MODULE_SFF_8636; 3936 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; 3937 if (module_info[SFF8636_FLATMEM_OFFSET] & SFF8636_FLATMEM_MASK) 3938 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_LEN; 3939 break; 3940 default: 3941 PMD_DRV_LOG(NOTICE, "Port %u : Unsupported module\n", dev->data->port_id); 3942 return -ENOTSUP; 3943 } 3944 3945 PMD_DRV_LOG(INFO, "Port %u : modinfo->type = %d modinfo->eeprom_len = %d\n", 3946 dev->data->port_id, modinfo->type, modinfo->eeprom_len); 3947 3948 return 0; 3949 } 3950 3951 static int bnxt_get_module_eeprom(struct rte_eth_dev *dev, 3952 struct rte_dev_eeprom_info *info) 3953 { 3954 uint8_t pg_addr[5] = { I2C_DEV_ADDR_A0, I2C_DEV_ADDR_A0 }; 3955 uint32_t offset = info->offset, length = info->length; 3956 uint8_t module_info[SFF_DIAG_SUPPORT_OFFSET + 1]; 3957 struct bnxt *bp = dev->data->dev_private; 3958 uint8_t *data = info->data; 3959 uint8_t page = offset >> 7; 3960 uint8_t max_pages = 2; 3961 uint8_t opt_pages; 3962 int rc; 3963 3964 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 3965 SFF_DIAG_SUPPORT_OFFSET + 1, 3966 module_info); 3967 if (rc) 3968 return rc; 3969 3970 switch (module_info[0]) { 3971 case SFF_MODULE_ID_SFP: 3972 module_info[SFF_DIAG_SUPPORT_OFFSET] = 0; 3973 if (module_info[SFF_DIAG_SUPPORT_OFFSET]) { 3974 pg_addr[2] = I2C_DEV_ADDR_A2; 3975 pg_addr[3] = I2C_DEV_ADDR_A2; 3976 max_pages = 4; 3977 } 3978 break; 3979 case SFF_MODULE_ID_QSFP28: 3980 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 3981 SFF8636_OPT_PAGES_OFFSET, 3982 1, &opt_pages); 3983 if (rc) 3984 return rc; 3985 3986 if (opt_pages & SFF8636_PAGE1_MASK) { 3987 pg_addr[2] = I2C_DEV_ADDR_A0; 3988 max_pages = 3; 3989 } 3990 if (opt_pages & SFF8636_PAGE2_MASK) { 3991 pg_addr[3] = I2C_DEV_ADDR_A0; 3992 max_pages = 4; 3993 } 3994 if (~module_info[SFF8636_FLATMEM_OFFSET] & SFF8636_FLATMEM_MASK) { 3995 pg_addr[4] = I2C_DEV_ADDR_A0; 3996 max_pages = 5; 3997 } 3998 break; 3999 default: 4000 break; 4001 } 4002 4003 memset(data, 0, length); 4004 4005 offset &= 0xff; 4006 while (length && page < max_pages) { 4007 uint8_t raw_page = page ? page - 1 : 0; 4008 uint16_t chunk; 4009 4010 if (pg_addr[page] == I2C_DEV_ADDR_A2) 4011 raw_page = 0; 4012 else if (page) 4013 offset |= 0x80; 4014 chunk = RTE_MIN(length, 256 - offset); 4015 4016 if (pg_addr[page]) { 4017 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, pg_addr[page], 4018 raw_page, offset, 4019 chunk, data); 4020 if (rc) 4021 return rc; 4022 } 4023 4024 data += chunk; 4025 length -= chunk; 4026 offset = 0; 4027 page += 1 + (chunk > 128); 4028 } 4029 4030 return length ? -EINVAL : 0; 4031 } 4032 4033 /* 4034 * Initialization 4035 */ 4036 4037 static const struct eth_dev_ops bnxt_dev_ops = { 4038 .dev_infos_get = bnxt_dev_info_get_op, 4039 .dev_close = bnxt_dev_close_op, 4040 .dev_configure = bnxt_dev_configure_op, 4041 .dev_start = bnxt_dev_start_op, 4042 .dev_stop = bnxt_dev_stop_op, 4043 .dev_set_link_up = bnxt_dev_set_link_up_op, 4044 .dev_set_link_down = bnxt_dev_set_link_down_op, 4045 .stats_get = bnxt_stats_get_op, 4046 .stats_reset = bnxt_stats_reset_op, 4047 .rx_queue_setup = bnxt_rx_queue_setup_op, 4048 .rx_queue_release = bnxt_rx_queue_release_op, 4049 .tx_queue_setup = bnxt_tx_queue_setup_op, 4050 .tx_queue_release = bnxt_tx_queue_release_op, 4051 .rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op, 4052 .rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op, 4053 .reta_update = bnxt_reta_update_op, 4054 .reta_query = bnxt_reta_query_op, 4055 .rss_hash_update = bnxt_rss_hash_update_op, 4056 .rss_hash_conf_get = bnxt_rss_hash_conf_get_op, 4057 .link_update = bnxt_link_update_op, 4058 .promiscuous_enable = bnxt_promiscuous_enable_op, 4059 .promiscuous_disable = bnxt_promiscuous_disable_op, 4060 .allmulticast_enable = bnxt_allmulticast_enable_op, 4061 .allmulticast_disable = bnxt_allmulticast_disable_op, 4062 .mac_addr_add = bnxt_mac_addr_add_op, 4063 .mac_addr_remove = bnxt_mac_addr_remove_op, 4064 .flow_ctrl_get = bnxt_flow_ctrl_get_op, 4065 .flow_ctrl_set = bnxt_flow_ctrl_set_op, 4066 .udp_tunnel_port_add = bnxt_udp_tunnel_port_add_op, 4067 .udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op, 4068 .vlan_filter_set = bnxt_vlan_filter_set_op, 4069 .vlan_offload_set = bnxt_vlan_offload_set_op, 4070 .vlan_tpid_set = bnxt_vlan_tpid_set_op, 4071 .vlan_pvid_set = bnxt_vlan_pvid_set_op, 4072 .mtu_set = bnxt_mtu_set_op, 4073 .mac_addr_set = bnxt_set_default_mac_addr_op, 4074 .xstats_get = bnxt_dev_xstats_get_op, 4075 .xstats_get_names = bnxt_dev_xstats_get_names_op, 4076 .xstats_reset = bnxt_dev_xstats_reset_op, 4077 .fw_version_get = bnxt_fw_version_get, 4078 .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op, 4079 .rxq_info_get = bnxt_rxq_info_get_op, 4080 .txq_info_get = bnxt_txq_info_get_op, 4081 .rx_burst_mode_get = bnxt_rx_burst_mode_get, 4082 .tx_burst_mode_get = bnxt_tx_burst_mode_get, 4083 .dev_led_on = bnxt_dev_led_on_op, 4084 .dev_led_off = bnxt_dev_led_off_op, 4085 .rx_queue_start = bnxt_rx_queue_start, 4086 .rx_queue_stop = bnxt_rx_queue_stop, 4087 .tx_queue_start = bnxt_tx_queue_start, 4088 .tx_queue_stop = bnxt_tx_queue_stop, 4089 .flow_ops_get = bnxt_flow_ops_get_op, 4090 .dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op, 4091 .get_eeprom_length = bnxt_get_eeprom_length_op, 4092 .get_eeprom = bnxt_get_eeprom_op, 4093 .set_eeprom = bnxt_set_eeprom_op, 4094 .get_module_info = bnxt_get_module_info, 4095 .get_module_eeprom = bnxt_get_module_eeprom, 4096 .timesync_enable = bnxt_timesync_enable, 4097 .timesync_disable = bnxt_timesync_disable, 4098 .timesync_read_time = bnxt_timesync_read_time, 4099 .timesync_write_time = bnxt_timesync_write_time, 4100 .timesync_adjust_time = bnxt_timesync_adjust_time, 4101 .timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp, 4102 .timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp, 4103 }; 4104 4105 static uint32_t bnxt_map_reset_regs(struct bnxt *bp, uint32_t reg) 4106 { 4107 uint32_t offset; 4108 4109 /* Only pre-map the reset GRC registers using window 3 */ 4110 rte_write32(reg & 0xfffff000, (uint8_t *)bp->bar0 + 4111 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 8); 4112 4113 offset = BNXT_GRCP_WINDOW_3_BASE + (reg & 0xffc); 4114 4115 return offset; 4116 } 4117 4118 int bnxt_map_fw_health_status_regs(struct bnxt *bp) 4119 { 4120 struct bnxt_error_recovery_info *info = bp->recovery_info; 4121 uint32_t reg_base = 0xffffffff; 4122 int i; 4123 4124 /* Only pre-map the monitoring GRC registers using window 2 */ 4125 for (i = 0; i < BNXT_FW_STATUS_REG_CNT; i++) { 4126 uint32_t reg = info->status_regs[i]; 4127 4128 if (BNXT_FW_STATUS_REG_TYPE(reg) != BNXT_FW_STATUS_REG_TYPE_GRC) 4129 continue; 4130 4131 if (reg_base == 0xffffffff) 4132 reg_base = reg & 0xfffff000; 4133 if ((reg & 0xfffff000) != reg_base) 4134 return -ERANGE; 4135 4136 /* Use mask 0xffc as the Lower 2 bits indicates 4137 * address space location 4138 */ 4139 info->mapped_status_regs[i] = BNXT_GRCP_WINDOW_2_BASE + 4140 (reg & 0xffc); 4141 } 4142 4143 if (reg_base == 0xffffffff) 4144 return 0; 4145 4146 rte_write32(reg_base, (uint8_t *)bp->bar0 + 4147 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 4148 4149 return 0; 4150 } 4151 4152 static void bnxt_write_fw_reset_reg(struct bnxt *bp, uint32_t index) 4153 { 4154 struct bnxt_error_recovery_info *info = bp->recovery_info; 4155 uint32_t delay = info->delay_after_reset[index]; 4156 uint32_t val = info->reset_reg_val[index]; 4157 uint32_t reg = info->reset_reg[index]; 4158 uint32_t type, offset; 4159 int ret; 4160 4161 type = BNXT_FW_STATUS_REG_TYPE(reg); 4162 offset = BNXT_FW_STATUS_REG_OFF(reg); 4163 4164 switch (type) { 4165 case BNXT_FW_STATUS_REG_TYPE_CFG: 4166 ret = rte_pci_write_config(bp->pdev, &val, sizeof(val), offset); 4167 if (ret < 0) { 4168 PMD_DRV_LOG(ERR, "Failed to write %#x at PCI offset %#x", 4169 val, offset); 4170 return; 4171 } 4172 break; 4173 case BNXT_FW_STATUS_REG_TYPE_GRC: 4174 offset = bnxt_map_reset_regs(bp, offset); 4175 rte_write32(val, (uint8_t *)bp->bar0 + offset); 4176 break; 4177 case BNXT_FW_STATUS_REG_TYPE_BAR0: 4178 rte_write32(val, (uint8_t *)bp->bar0 + offset); 4179 break; 4180 } 4181 /* wait on a specific interval of time until core reset is complete */ 4182 if (delay) 4183 rte_delay_ms(delay); 4184 } 4185 4186 static void bnxt_dev_cleanup(struct bnxt *bp) 4187 { 4188 bp->eth_dev->data->dev_link.link_status = 0; 4189 bp->link_info->link_up = 0; 4190 if (bp->eth_dev->data->dev_started) 4191 bnxt_dev_stop(bp->eth_dev); 4192 4193 bnxt_uninit_resources(bp, true); 4194 } 4195 4196 static int 4197 bnxt_check_fw_reset_done(struct bnxt *bp) 4198 { 4199 int timeout = bp->fw_reset_max_msecs; 4200 uint16_t val = 0; 4201 int rc; 4202 4203 do { 4204 rc = rte_pci_read_config(bp->pdev, &val, sizeof(val), PCI_SUBSYSTEM_ID_OFFSET); 4205 if (rc < 0) { 4206 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", PCI_SUBSYSTEM_ID_OFFSET); 4207 return rc; 4208 } 4209 if (val != 0xffff) 4210 break; 4211 rte_delay_ms(1); 4212 } while (timeout--); 4213 4214 if (val == 0xffff) { 4215 PMD_DRV_LOG(ERR, "Firmware reset aborted, PCI config space invalid\n"); 4216 return -1; 4217 } 4218 4219 return 0; 4220 } 4221 4222 static int bnxt_restore_vlan_filters(struct bnxt *bp) 4223 { 4224 struct rte_eth_dev *dev = bp->eth_dev; 4225 struct rte_vlan_filter_conf *vfc; 4226 int vidx, vbit, rc; 4227 uint16_t vlan_id; 4228 4229 for (vlan_id = 1; vlan_id <= RTE_ETHER_MAX_VLAN_ID; vlan_id++) { 4230 vfc = &dev->data->vlan_filter_conf; 4231 vidx = vlan_id / 64; 4232 vbit = vlan_id % 64; 4233 4234 /* Each bit corresponds to a VLAN id */ 4235 if (vfc->ids[vidx] & (UINT64_C(1) << vbit)) { 4236 rc = bnxt_add_vlan_filter(bp, vlan_id); 4237 if (rc) 4238 return rc; 4239 } 4240 } 4241 4242 return 0; 4243 } 4244 4245 static int bnxt_restore_mac_filters(struct bnxt *bp) 4246 { 4247 struct rte_eth_dev *dev = bp->eth_dev; 4248 struct rte_eth_dev_info dev_info; 4249 struct rte_ether_addr *addr; 4250 uint64_t pool_mask; 4251 uint32_t pool = 0; 4252 uint32_t i; 4253 int rc; 4254 4255 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 4256 return 0; 4257 4258 rc = bnxt_dev_info_get_op(dev, &dev_info); 4259 if (rc) 4260 return rc; 4261 4262 /* replay MAC address configuration */ 4263 for (i = 1; i < dev_info.max_mac_addrs; i++) { 4264 addr = &dev->data->mac_addrs[i]; 4265 4266 /* skip zero address */ 4267 if (rte_is_zero_ether_addr(addr)) 4268 continue; 4269 4270 pool = 0; 4271 pool_mask = dev->data->mac_pool_sel[i]; 4272 4273 do { 4274 if (pool_mask & 1ULL) { 4275 rc = bnxt_mac_addr_add_op(dev, addr, i, pool); 4276 if (rc) 4277 return rc; 4278 } 4279 pool_mask >>= 1; 4280 pool++; 4281 } while (pool_mask); 4282 } 4283 4284 return 0; 4285 } 4286 4287 static int bnxt_restore_mcast_mac_filters(struct bnxt *bp) 4288 { 4289 int ret = 0; 4290 4291 ret = bnxt_dev_set_mc_addr_list_op(bp->eth_dev, bp->mcast_addr_list, 4292 bp->nb_mc_addr); 4293 if (ret) 4294 PMD_DRV_LOG(ERR, "Failed to restore multicast MAC addreeses\n"); 4295 4296 return ret; 4297 } 4298 4299 static int bnxt_restore_filters(struct bnxt *bp) 4300 { 4301 struct rte_eth_dev *dev = bp->eth_dev; 4302 int ret = 0; 4303 4304 if (dev->data->all_multicast) { 4305 ret = bnxt_allmulticast_enable_op(dev); 4306 if (ret) 4307 return ret; 4308 } 4309 if (dev->data->promiscuous) { 4310 ret = bnxt_promiscuous_enable_op(dev); 4311 if (ret) 4312 return ret; 4313 } 4314 4315 ret = bnxt_restore_mac_filters(bp); 4316 if (ret) 4317 return ret; 4318 4319 /* if vlans are already programmed, this can fail with -EEXIST */ 4320 ret = bnxt_restore_vlan_filters(bp); 4321 if (ret && ret != -EEXIST) 4322 return ret; 4323 4324 ret = bnxt_restore_mcast_mac_filters(bp); 4325 if (ret) 4326 return ret; 4327 4328 return ret; 4329 } 4330 4331 static int bnxt_check_fw_ready(struct bnxt *bp) 4332 { 4333 int timeout = bp->fw_reset_max_msecs ? : BNXT_MAX_FW_RESET_TIMEOUT; 4334 int rc = 0; 4335 4336 do { 4337 rc = bnxt_hwrm_poll_ver_get(bp); 4338 if (rc == 0) 4339 break; 4340 rte_delay_ms(BNXT_FW_READY_WAIT_INTERVAL); 4341 timeout -= BNXT_FW_READY_WAIT_INTERVAL; 4342 } while (rc && timeout > 0); 4343 4344 if (rc) 4345 PMD_DRV_LOG(ERR, "FW is not Ready after reset\n"); 4346 4347 return rc; 4348 } 4349 4350 static void bnxt_dev_recover(void *arg) 4351 { 4352 struct bnxt *bp = arg; 4353 int rc = 0; 4354 4355 pthread_mutex_lock(&bp->err_recovery_lock); 4356 4357 if (!bp->fw_reset_min_msecs) { 4358 rc = bnxt_check_fw_reset_done(bp); 4359 if (rc) 4360 goto err; 4361 } 4362 4363 /* Clear Error flag so that device re-init should happen */ 4364 bp->flags &= ~BNXT_FLAG_FATAL_ERROR; 4365 PMD_DRV_LOG(INFO, "Port: %u Starting recovery...\n", 4366 bp->eth_dev->data->port_id); 4367 4368 rc = bnxt_check_fw_ready(bp); 4369 if (rc) 4370 goto err; 4371 4372 rc = bnxt_init_resources(bp, true); 4373 if (rc) { 4374 PMD_DRV_LOG(ERR, 4375 "Failed to initialize resources after reset\n"); 4376 goto err; 4377 } 4378 /* clear reset flag as the device is initialized now */ 4379 bp->flags &= ~BNXT_FLAG_FW_RESET; 4380 4381 rc = bnxt_dev_start_op(bp->eth_dev); 4382 if (rc) { 4383 PMD_DRV_LOG(ERR, "Failed to start port after reset\n"); 4384 goto err_start; 4385 } 4386 4387 rc = bnxt_restore_filters(bp); 4388 if (rc) 4389 goto err_start; 4390 4391 rte_eth_fp_ops[bp->eth_dev->data->port_id].rx_pkt_burst = 4392 bp->eth_dev->rx_pkt_burst; 4393 rte_eth_fp_ops[bp->eth_dev->data->port_id].tx_pkt_burst = 4394 bp->eth_dev->tx_pkt_burst; 4395 rte_mb(); 4396 4397 PMD_DRV_LOG(INFO, "Port: %u Recovered from FW reset\n", 4398 bp->eth_dev->data->port_id); 4399 pthread_mutex_unlock(&bp->err_recovery_lock); 4400 4401 return; 4402 err_start: 4403 bnxt_dev_stop(bp->eth_dev); 4404 err: 4405 bp->flags |= BNXT_FLAG_FATAL_ERROR; 4406 bnxt_uninit_resources(bp, false); 4407 if (bp->eth_dev->data->dev_conf.intr_conf.rmv) 4408 rte_eth_dev_callback_process(bp->eth_dev, 4409 RTE_ETH_EVENT_INTR_RMV, 4410 NULL); 4411 pthread_mutex_unlock(&bp->err_recovery_lock); 4412 PMD_DRV_LOG(ERR, "Port %u: Failed to recover from FW reset\n", 4413 bp->eth_dev->data->port_id); 4414 } 4415 4416 void bnxt_dev_reset_and_resume(void *arg) 4417 { 4418 struct bnxt *bp = arg; 4419 uint32_t us = US_PER_MS * bp->fw_reset_min_msecs; 4420 uint16_t val = 0; 4421 int rc; 4422 4423 bnxt_dev_cleanup(bp); 4424 PMD_DRV_LOG(INFO, "Port: %u Finished bnxt_dev_cleanup\n", 4425 bp->eth_dev->data->port_id); 4426 4427 bnxt_wait_for_device_shutdown(bp); 4428 4429 /* During some fatal firmware error conditions, the PCI config space 4430 * register 0x2e which normally contains the subsystem ID will become 4431 * 0xffff. This register will revert back to the normal value after 4432 * the chip has completed core reset. If we detect this condition, 4433 * we can poll this config register immediately for the value to revert. 4434 */ 4435 if (bp->flags & BNXT_FLAG_FATAL_ERROR) { 4436 rc = rte_pci_read_config(bp->pdev, &val, sizeof(val), PCI_SUBSYSTEM_ID_OFFSET); 4437 if (rc < 0) { 4438 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", PCI_SUBSYSTEM_ID_OFFSET); 4439 return; 4440 } 4441 if (val == 0xffff) { 4442 bp->fw_reset_min_msecs = 0; 4443 us = 1; 4444 } 4445 } 4446 4447 rc = rte_eal_alarm_set(us, bnxt_dev_recover, (void *)bp); 4448 if (rc) 4449 PMD_DRV_LOG(ERR, "Port %u: Error setting recovery alarm", 4450 bp->eth_dev->data->port_id); 4451 } 4452 4453 uint32_t bnxt_read_fw_status_reg(struct bnxt *bp, uint32_t index) 4454 { 4455 struct bnxt_error_recovery_info *info = bp->recovery_info; 4456 uint32_t reg = info->status_regs[index]; 4457 uint32_t type, offset, val = 0; 4458 int ret = 0; 4459 4460 type = BNXT_FW_STATUS_REG_TYPE(reg); 4461 offset = BNXT_FW_STATUS_REG_OFF(reg); 4462 4463 switch (type) { 4464 case BNXT_FW_STATUS_REG_TYPE_CFG: 4465 ret = rte_pci_read_config(bp->pdev, &val, sizeof(val), offset); 4466 if (ret < 0) 4467 PMD_DRV_LOG(ERR, "Failed to read PCI offset %#x", 4468 offset); 4469 break; 4470 case BNXT_FW_STATUS_REG_TYPE_GRC: 4471 offset = info->mapped_status_regs[index]; 4472 /* FALLTHROUGH */ 4473 case BNXT_FW_STATUS_REG_TYPE_BAR0: 4474 val = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 4475 offset)); 4476 break; 4477 } 4478 4479 return val; 4480 } 4481 4482 static int bnxt_fw_reset_all(struct bnxt *bp) 4483 { 4484 struct bnxt_error_recovery_info *info = bp->recovery_info; 4485 uint32_t i; 4486 int rc = 0; 4487 4488 if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) { 4489 /* Reset through primary function driver */ 4490 for (i = 0; i < info->reg_array_cnt; i++) 4491 bnxt_write_fw_reset_reg(bp, i); 4492 /* Wait for time specified by FW after triggering reset */ 4493 rte_delay_ms(info->primary_func_wait_period_after_reset); 4494 } else if (info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) { 4495 /* Reset with the help of Kong processor */ 4496 rc = bnxt_hwrm_fw_reset(bp); 4497 if (rc) 4498 PMD_DRV_LOG(ERR, "Failed to reset FW\n"); 4499 } 4500 4501 return rc; 4502 } 4503 4504 static void bnxt_fw_reset_cb(void *arg) 4505 { 4506 struct bnxt *bp = arg; 4507 struct bnxt_error_recovery_info *info = bp->recovery_info; 4508 int rc = 0; 4509 4510 /* Only Primary function can do FW reset */ 4511 if (bnxt_is_primary_func(bp) && 4512 bnxt_is_recovery_enabled(bp)) { 4513 rc = bnxt_fw_reset_all(bp); 4514 if (rc) { 4515 PMD_DRV_LOG(ERR, "Adapter recovery failed\n"); 4516 return; 4517 } 4518 } 4519 4520 /* if recovery method is ERROR_RECOVERY_CO_CPU, KONG will send 4521 * EXCEPTION_FATAL_ASYNC event to all the functions 4522 * (including MASTER FUNC). After receiving this Async, all the active 4523 * drivers should treat this case as FW initiated recovery 4524 */ 4525 if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) { 4526 bp->fw_reset_min_msecs = BNXT_MIN_FW_READY_TIMEOUT; 4527 bp->fw_reset_max_msecs = BNXT_MAX_FW_RESET_TIMEOUT; 4528 4529 /* To recover from error */ 4530 rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume, 4531 (void *)bp); 4532 } 4533 } 4534 4535 /* Driver should poll FW heartbeat, reset_counter with the frequency 4536 * advertised by FW in HWRM_ERROR_RECOVERY_QCFG. 4537 * When the driver detects heartbeat stop or change in reset_counter, 4538 * it has to trigger a reset to recover from the error condition. 4539 * A “primary function” is the function who will have the privilege to 4540 * initiate the chimp reset. The primary function will be elected by the 4541 * firmware and will be notified through async message. 4542 */ 4543 static void bnxt_check_fw_health(void *arg) 4544 { 4545 struct bnxt *bp = arg; 4546 struct bnxt_error_recovery_info *info = bp->recovery_info; 4547 uint32_t val = 0, wait_msec; 4548 4549 if (!info || !bnxt_is_recovery_enabled(bp) || 4550 is_bnxt_in_error(bp)) 4551 return; 4552 4553 val = bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG); 4554 if (val == info->last_heart_beat) 4555 goto reset; 4556 4557 info->last_heart_beat = val; 4558 4559 val = bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG); 4560 if (val != info->last_reset_counter) 4561 goto reset; 4562 4563 info->last_reset_counter = val; 4564 4565 rte_eal_alarm_set(US_PER_MS * info->driver_polling_freq, 4566 bnxt_check_fw_health, (void *)bp); 4567 4568 return; 4569 reset: 4570 /* Stop DMA to/from device */ 4571 bp->flags |= BNXT_FLAG_FATAL_ERROR; 4572 bp->flags |= BNXT_FLAG_FW_RESET; 4573 4574 bnxt_stop_rxtx(bp->eth_dev); 4575 4576 PMD_DRV_LOG(ERR, "Detected FW dead condition\n"); 4577 4578 if (bnxt_is_primary_func(bp)) 4579 wait_msec = info->primary_func_wait_period; 4580 else 4581 wait_msec = info->normal_func_wait_period; 4582 4583 rte_eal_alarm_set(US_PER_MS * wait_msec, 4584 bnxt_fw_reset_cb, (void *)bp); 4585 } 4586 4587 void bnxt_schedule_fw_health_check(struct bnxt *bp) 4588 { 4589 uint32_t polling_freq; 4590 4591 pthread_mutex_lock(&bp->health_check_lock); 4592 4593 if (!bnxt_is_recovery_enabled(bp)) 4594 goto done; 4595 4596 if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED) 4597 goto done; 4598 4599 polling_freq = bp->recovery_info->driver_polling_freq; 4600 4601 rte_eal_alarm_set(US_PER_MS * polling_freq, 4602 bnxt_check_fw_health, (void *)bp); 4603 bp->flags |= BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED; 4604 4605 done: 4606 pthread_mutex_unlock(&bp->health_check_lock); 4607 } 4608 4609 static void bnxt_cancel_fw_health_check(struct bnxt *bp) 4610 { 4611 rte_eal_alarm_cancel(bnxt_check_fw_health, (void *)bp); 4612 bp->flags &= ~BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED; 4613 } 4614 4615 static bool bnxt_vf_pciid(uint16_t device_id) 4616 { 4617 switch (device_id) { 4618 case BROADCOM_DEV_ID_57304_VF: 4619 case BROADCOM_DEV_ID_57406_VF: 4620 case BROADCOM_DEV_ID_5731X_VF: 4621 case BROADCOM_DEV_ID_5741X_VF: 4622 case BROADCOM_DEV_ID_57414_VF: 4623 case BROADCOM_DEV_ID_STRATUS_NIC_VF1: 4624 case BROADCOM_DEV_ID_STRATUS_NIC_VF2: 4625 case BROADCOM_DEV_ID_58802_VF: 4626 case BROADCOM_DEV_ID_57500_VF1: 4627 case BROADCOM_DEV_ID_57500_VF2: 4628 case BROADCOM_DEV_ID_58818_VF: 4629 /* FALLTHROUGH */ 4630 return true; 4631 default: 4632 return false; 4633 } 4634 } 4635 4636 /* Phase 5 device */ 4637 static bool bnxt_p5_device(uint16_t device_id) 4638 { 4639 switch (device_id) { 4640 case BROADCOM_DEV_ID_57508: 4641 case BROADCOM_DEV_ID_57504: 4642 case BROADCOM_DEV_ID_57502: 4643 case BROADCOM_DEV_ID_57508_MF1: 4644 case BROADCOM_DEV_ID_57504_MF1: 4645 case BROADCOM_DEV_ID_57502_MF1: 4646 case BROADCOM_DEV_ID_57508_MF2: 4647 case BROADCOM_DEV_ID_57504_MF2: 4648 case BROADCOM_DEV_ID_57502_MF2: 4649 case BROADCOM_DEV_ID_57500_VF1: 4650 case BROADCOM_DEV_ID_57500_VF2: 4651 case BROADCOM_DEV_ID_58812: 4652 case BROADCOM_DEV_ID_58814: 4653 case BROADCOM_DEV_ID_58818: 4654 case BROADCOM_DEV_ID_58818_VF: 4655 /* FALLTHROUGH */ 4656 return true; 4657 default: 4658 return false; 4659 } 4660 } 4661 4662 bool bnxt_stratus_device(struct bnxt *bp) 4663 { 4664 uint16_t device_id = bp->pdev->id.device_id; 4665 4666 switch (device_id) { 4667 case BROADCOM_DEV_ID_STRATUS_NIC: 4668 case BROADCOM_DEV_ID_STRATUS_NIC_VF1: 4669 case BROADCOM_DEV_ID_STRATUS_NIC_VF2: 4670 /* FALLTHROUGH */ 4671 return true; 4672 default: 4673 return false; 4674 } 4675 } 4676 4677 static int bnxt_map_pci_bars(struct rte_eth_dev *eth_dev) 4678 { 4679 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 4680 struct bnxt *bp = eth_dev->data->dev_private; 4681 4682 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 4683 bp->bar0 = (void *)pci_dev->mem_resource[0].addr; 4684 bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr; 4685 if (!bp->bar0 || !bp->doorbell_base) { 4686 PMD_DRV_LOG(ERR, "Unable to access Hardware\n"); 4687 return -ENODEV; 4688 } 4689 4690 bp->eth_dev = eth_dev; 4691 bp->pdev = pci_dev; 4692 4693 return 0; 4694 } 4695 4696 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, 4697 struct bnxt_ctx_pg_info *ctx_pg, 4698 uint32_t mem_size, 4699 const char *suffix, 4700 uint16_t idx) 4701 { 4702 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 4703 const struct rte_memzone *mz = NULL; 4704 char mz_name[RTE_MEMZONE_NAMESIZE]; 4705 rte_iova_t mz_phys_addr; 4706 uint64_t valid_bits = 0; 4707 uint32_t sz; 4708 int i; 4709 4710 if (!mem_size) 4711 return 0; 4712 4713 rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) / 4714 BNXT_PAGE_SIZE; 4715 rmem->page_size = BNXT_PAGE_SIZE; 4716 rmem->pg_arr = ctx_pg->ctx_pg_arr; 4717 rmem->dma_arr = ctx_pg->ctx_dma_arr; 4718 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; 4719 4720 valid_bits = PTU_PTE_VALID; 4721 4722 if (rmem->nr_pages > 1) { 4723 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 4724 "bnxt_ctx_pg_tbl%s_%x_%d", 4725 suffix, idx, bp->eth_dev->data->port_id); 4726 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4727 mz = rte_memzone_lookup(mz_name); 4728 if (!mz) { 4729 mz = rte_memzone_reserve_aligned(mz_name, 4730 rmem->nr_pages * 8, 4731 bp->eth_dev->device->numa_node, 4732 RTE_MEMZONE_2MB | 4733 RTE_MEMZONE_SIZE_HINT_ONLY | 4734 RTE_MEMZONE_IOVA_CONTIG, 4735 BNXT_PAGE_SIZE); 4736 if (mz == NULL) 4737 return -ENOMEM; 4738 } 4739 4740 memset(mz->addr, 0, mz->len); 4741 mz_phys_addr = mz->iova; 4742 4743 rmem->pg_tbl = mz->addr; 4744 rmem->pg_tbl_map = mz_phys_addr; 4745 rmem->pg_tbl_mz = mz; 4746 } 4747 4748 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d", 4749 suffix, idx, bp->eth_dev->data->port_id); 4750 mz = rte_memzone_lookup(mz_name); 4751 if (!mz) { 4752 mz = rte_memzone_reserve_aligned(mz_name, 4753 mem_size, 4754 bp->eth_dev->device->numa_node, 4755 RTE_MEMZONE_1GB | 4756 RTE_MEMZONE_SIZE_HINT_ONLY | 4757 RTE_MEMZONE_IOVA_CONTIG, 4758 BNXT_PAGE_SIZE); 4759 if (mz == NULL) 4760 return -ENOMEM; 4761 } 4762 4763 memset(mz->addr, 0, mz->len); 4764 mz_phys_addr = mz->iova; 4765 4766 for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) { 4767 rmem->pg_arr[i] = ((char *)mz->addr) + sz; 4768 rmem->dma_arr[i] = mz_phys_addr + sz; 4769 4770 if (rmem->nr_pages > 1) { 4771 if (i == rmem->nr_pages - 2 && 4772 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 4773 valid_bits |= PTU_PTE_NEXT_TO_LAST; 4774 else if (i == rmem->nr_pages - 1 && 4775 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 4776 valid_bits |= PTU_PTE_LAST; 4777 4778 rmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] | 4779 valid_bits); 4780 } 4781 } 4782 4783 rmem->mz = mz; 4784 if (rmem->vmem_size) 4785 rmem->vmem = (void **)mz->addr; 4786 rmem->dma_arr[0] = mz_phys_addr; 4787 return 0; 4788 } 4789 4790 static void bnxt_free_ctx_mem(struct bnxt *bp) 4791 { 4792 int i; 4793 4794 if (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED)) 4795 return; 4796 4797 bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED; 4798 rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz); 4799 rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz); 4800 rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz); 4801 rte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz); 4802 rte_memzone_free(bp->ctx->stat_mem.ring_mem.mz); 4803 rte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz); 4804 rte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz); 4805 rte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz); 4806 rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz); 4807 rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz); 4808 4809 for (i = 0; i < bp->ctx->tqm_fp_rings_count + 1; i++) { 4810 if (bp->ctx->tqm_mem[i]) 4811 rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz); 4812 } 4813 4814 rte_free(bp->ctx); 4815 bp->ctx = NULL; 4816 } 4817 4818 #define bnxt_roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) 4819 4820 #define min_t(type, x, y) ({ \ 4821 type __min1 = (x); \ 4822 type __min2 = (y); \ 4823 __min1 < __min2 ? __min1 : __min2; }) 4824 4825 #define max_t(type, x, y) ({ \ 4826 type __max1 = (x); \ 4827 type __max2 = (y); \ 4828 __max1 > __max2 ? __max1 : __max2; }) 4829 4830 #define clamp_t(type, _x, min, max) min_t(type, max_t(type, _x, min), max) 4831 4832 int bnxt_alloc_ctx_mem(struct bnxt *bp) 4833 { 4834 struct bnxt_ctx_pg_info *ctx_pg; 4835 struct bnxt_ctx_mem_info *ctx; 4836 uint32_t mem_size, ena, entries; 4837 uint32_t entries_sp, min; 4838 int i, rc; 4839 4840 rc = bnxt_hwrm_func_backing_store_qcaps(bp); 4841 if (rc) { 4842 PMD_DRV_LOG(ERR, "Query context mem capability failed\n"); 4843 return rc; 4844 } 4845 ctx = bp->ctx; 4846 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) 4847 return 0; 4848 4849 ctx_pg = &ctx->qp_mem; 4850 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries; 4851 if (ctx->qp_entry_size) { 4852 mem_size = ctx->qp_entry_size * ctx_pg->entries; 4853 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0); 4854 if (rc) 4855 return rc; 4856 } 4857 4858 ctx_pg = &ctx->srq_mem; 4859 ctx_pg->entries = ctx->srq_max_l2_entries; 4860 if (ctx->srq_entry_size) { 4861 mem_size = ctx->srq_entry_size * ctx_pg->entries; 4862 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0); 4863 if (rc) 4864 return rc; 4865 } 4866 4867 ctx_pg = &ctx->cq_mem; 4868 ctx_pg->entries = ctx->cq_max_l2_entries; 4869 if (ctx->cq_entry_size) { 4870 mem_size = ctx->cq_entry_size * ctx_pg->entries; 4871 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0); 4872 if (rc) 4873 return rc; 4874 } 4875 4876 ctx_pg = &ctx->vnic_mem; 4877 ctx_pg->entries = ctx->vnic_max_vnic_entries + 4878 ctx->vnic_max_ring_table_entries; 4879 if (ctx->vnic_entry_size) { 4880 mem_size = ctx->vnic_entry_size * ctx_pg->entries; 4881 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0); 4882 if (rc) 4883 return rc; 4884 } 4885 4886 ctx_pg = &ctx->stat_mem; 4887 ctx_pg->entries = ctx->stat_max_entries; 4888 if (ctx->stat_entry_size) { 4889 mem_size = ctx->stat_entry_size * ctx_pg->entries; 4890 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0); 4891 if (rc) 4892 return rc; 4893 } 4894 4895 min = ctx->tqm_min_entries_per_ring; 4896 4897 entries_sp = ctx->qp_max_l2_entries + 4898 ctx->vnic_max_vnic_entries + 4899 2 * ctx->qp_min_qp1_entries + min; 4900 entries_sp = bnxt_roundup(entries_sp, ctx->tqm_entries_multiple); 4901 4902 entries = ctx->qp_max_l2_entries + ctx->qp_min_qp1_entries; 4903 entries = bnxt_roundup(entries, ctx->tqm_entries_multiple); 4904 entries = clamp_t(uint32_t, entries, min, 4905 ctx->tqm_max_entries_per_ring); 4906 for (i = 0, ena = 0; i < ctx->tqm_fp_rings_count + 1; i++) { 4907 /* i=0 is for TQM_SP. i=1 to i=8 applies to RING0 to RING7. 4908 * i > 8 is other ext rings. 4909 */ 4910 ctx_pg = ctx->tqm_mem[i]; 4911 ctx_pg->entries = i ? entries : entries_sp; 4912 if (ctx->tqm_entry_size) { 4913 mem_size = ctx->tqm_entry_size * ctx_pg->entries; 4914 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, 4915 "tqm_mem", i); 4916 if (rc) 4917 return rc; 4918 } 4919 if (i < BNXT_MAX_TQM_LEGACY_RINGS) 4920 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i; 4921 else 4922 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8; 4923 } 4924 4925 ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES; 4926 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); 4927 if (rc) 4928 PMD_DRV_LOG(ERR, 4929 "Failed to configure context mem: rc = %d\n", rc); 4930 else 4931 ctx->flags |= BNXT_CTX_FLAG_INITED; 4932 4933 return rc; 4934 } 4935 4936 static int bnxt_alloc_stats_mem(struct bnxt *bp) 4937 { 4938 struct rte_pci_device *pci_dev = bp->pdev; 4939 char mz_name[RTE_MEMZONE_NAMESIZE]; 4940 const struct rte_memzone *mz = NULL; 4941 uint32_t total_alloc_len; 4942 rte_iova_t mz_phys_addr; 4943 4944 if (pci_dev->id.device_id == BROADCOM_DEV_ID_NS2) 4945 return 0; 4946 4947 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 4948 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 4949 pci_dev->addr.bus, pci_dev->addr.devid, 4950 pci_dev->addr.function, "rx_port_stats"); 4951 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4952 mz = rte_memzone_lookup(mz_name); 4953 total_alloc_len = 4954 RTE_CACHE_LINE_ROUNDUP(sizeof(struct rx_port_stats) + 4955 sizeof(struct rx_port_stats_ext) + 512); 4956 if (!mz) { 4957 mz = rte_memzone_reserve(mz_name, total_alloc_len, 4958 SOCKET_ID_ANY, 4959 RTE_MEMZONE_2MB | 4960 RTE_MEMZONE_SIZE_HINT_ONLY | 4961 RTE_MEMZONE_IOVA_CONTIG); 4962 if (mz == NULL) 4963 return -ENOMEM; 4964 } 4965 memset(mz->addr, 0, mz->len); 4966 mz_phys_addr = mz->iova; 4967 4968 bp->rx_mem_zone = (const void *)mz; 4969 bp->hw_rx_port_stats = mz->addr; 4970 bp->hw_rx_port_stats_map = mz_phys_addr; 4971 4972 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 4973 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 4974 pci_dev->addr.bus, pci_dev->addr.devid, 4975 pci_dev->addr.function, "tx_port_stats"); 4976 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4977 mz = rte_memzone_lookup(mz_name); 4978 total_alloc_len = 4979 RTE_CACHE_LINE_ROUNDUP(sizeof(struct tx_port_stats) + 4980 sizeof(struct tx_port_stats_ext) + 512); 4981 if (!mz) { 4982 mz = rte_memzone_reserve(mz_name, 4983 total_alloc_len, 4984 SOCKET_ID_ANY, 4985 RTE_MEMZONE_2MB | 4986 RTE_MEMZONE_SIZE_HINT_ONLY | 4987 RTE_MEMZONE_IOVA_CONTIG); 4988 if (mz == NULL) 4989 return -ENOMEM; 4990 } 4991 memset(mz->addr, 0, mz->len); 4992 mz_phys_addr = mz->iova; 4993 4994 bp->tx_mem_zone = (const void *)mz; 4995 bp->hw_tx_port_stats = mz->addr; 4996 bp->hw_tx_port_stats_map = mz_phys_addr; 4997 bp->flags |= BNXT_FLAG_PORT_STATS; 4998 4999 /* Display extended statistics if FW supports it */ 5000 if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 || 5001 bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0 || 5002 !(bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED)) 5003 return 0; 5004 5005 bp->hw_rx_port_stats_ext = (void *) 5006 ((uint8_t *)bp->hw_rx_port_stats + 5007 sizeof(struct rx_port_stats)); 5008 bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map + 5009 sizeof(struct rx_port_stats); 5010 bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS; 5011 5012 if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2 || 5013 bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED) { 5014 bp->hw_tx_port_stats_ext = (void *) 5015 ((uint8_t *)bp->hw_tx_port_stats + 5016 sizeof(struct tx_port_stats)); 5017 bp->hw_tx_port_stats_ext_map = 5018 bp->hw_tx_port_stats_map + 5019 sizeof(struct tx_port_stats); 5020 bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS; 5021 } 5022 5023 return 0; 5024 } 5025 5026 static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev) 5027 { 5028 struct bnxt *bp = eth_dev->data->dev_private; 5029 size_t max_mac_addr = RTE_MIN(bp->max_l2_ctx, RTE_ETH_NUM_RECEIVE_MAC_ADDR); 5030 int rc = 0; 5031 5032 if (bp->max_l2_ctx > RTE_ETH_NUM_RECEIVE_MAC_ADDR) 5033 PMD_DRV_LOG(INFO, "Max number of MAC addrs supported is %d, but will be limited to %d\n", 5034 bp->max_l2_ctx, RTE_ETH_NUM_RECEIVE_MAC_ADDR); 5035 5036 eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl", 5037 RTE_ETHER_ADDR_LEN * max_mac_addr, 5038 0); 5039 if (eth_dev->data->mac_addrs == NULL) { 5040 PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n"); 5041 return -ENOMEM; 5042 } 5043 5044 if (!BNXT_HAS_DFLT_MAC_SET(bp)) { 5045 if (BNXT_PF(bp)) 5046 return -EINVAL; 5047 5048 /* Generate a random MAC address, if none was assigned by PF */ 5049 PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n"); 5050 bnxt_eth_hw_addr_random(bp->mac_addr); 5051 PMD_DRV_LOG(INFO, 5052 "Assign random MAC:" RTE_ETHER_ADDR_PRT_FMT "\n", 5053 bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2], 5054 bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]); 5055 5056 rc = bnxt_hwrm_set_mac(bp); 5057 if (rc) 5058 return rc; 5059 } 5060 5061 /* Copy the permanent MAC from the FUNC_QCAPS response */ 5062 memcpy(ð_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN); 5063 5064 /* 5065 * Allocate memory to hold multicast mac addresses added. 5066 * Used to restore them during reset recovery 5067 */ 5068 bp->mcast_addr_list = rte_zmalloc("bnxt_mcast_addr_tbl", 5069 sizeof(struct rte_ether_addr) * 5070 BNXT_MAX_MC_ADDRS, 0); 5071 if (bp->mcast_addr_list == NULL) { 5072 PMD_DRV_LOG(ERR, "Failed to allocate multicast addr table\n"); 5073 return -ENOMEM; 5074 } 5075 bp->mc_list_dma_addr = rte_malloc_virt2iova(bp->mcast_addr_list); 5076 if (bp->mc_list_dma_addr == RTE_BAD_IOVA) { 5077 PMD_DRV_LOG(ERR, "Fail to map mcast_addr_list to physical memory\n"); 5078 return -ENOMEM; 5079 } 5080 5081 return rc; 5082 } 5083 5084 static int bnxt_restore_dflt_mac(struct bnxt *bp) 5085 { 5086 int rc = 0; 5087 5088 /* MAC is already configured in FW */ 5089 if (BNXT_HAS_DFLT_MAC_SET(bp)) 5090 return 0; 5091 5092 /* Restore the old MAC configured */ 5093 rc = bnxt_hwrm_set_mac(bp); 5094 if (rc) 5095 PMD_DRV_LOG(ERR, "Failed to restore MAC address\n"); 5096 5097 return rc; 5098 } 5099 5100 static void bnxt_config_vf_req_fwd(struct bnxt *bp) 5101 { 5102 if (!BNXT_PF(bp)) 5103 return; 5104 5105 memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd)); 5106 5107 if (!(bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN)) 5108 BNXT_HWRM_CMD_TO_FORWARD(HWRM_PORT_PHY_QCFG); 5109 BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_CFG); 5110 BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_VF_CFG); 5111 BNXT_HWRM_CMD_TO_FORWARD(HWRM_CFA_L2_FILTER_ALLOC); 5112 BNXT_HWRM_CMD_TO_FORWARD(HWRM_OEM_CMD); 5113 } 5114 5115 static void bnxt_alloc_error_recovery_info(struct bnxt *bp) 5116 { 5117 struct bnxt_error_recovery_info *info = bp->recovery_info; 5118 5119 if (info) { 5120 if (!(bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS)) 5121 memset(info, 0, sizeof(*info)); 5122 return; 5123 } 5124 5125 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 5126 return; 5127 5128 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg", 5129 sizeof(*info), 0); 5130 if (!info) 5131 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 5132 5133 bp->recovery_info = info; 5134 } 5135 5136 static void bnxt_check_fw_status(struct bnxt *bp) 5137 { 5138 uint32_t fw_status; 5139 5140 if (!(bp->recovery_info && 5141 (bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS))) 5142 return; 5143 5144 fw_status = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG); 5145 if (fw_status != BNXT_FW_STATUS_HEALTHY) 5146 PMD_DRV_LOG(ERR, "Firmware not responding, status: %#x\n", 5147 fw_status); 5148 } 5149 5150 static int bnxt_map_hcomm_fw_status_reg(struct bnxt *bp) 5151 { 5152 struct bnxt_error_recovery_info *info = bp->recovery_info; 5153 uint32_t status_loc; 5154 uint32_t sig_ver; 5155 5156 rte_write32(HCOMM_STATUS_STRUCT_LOC, (uint8_t *)bp->bar0 + 5157 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 5158 sig_ver = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 5159 BNXT_GRCP_WINDOW_2_BASE + 5160 offsetof(struct hcomm_status, 5161 sig_ver))); 5162 /* If the signature is absent, then FW does not support this feature */ 5163 if ((sig_ver & HCOMM_STATUS_SIGNATURE_MASK) != 5164 HCOMM_STATUS_SIGNATURE_VAL) 5165 return 0; 5166 5167 if (!info) { 5168 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg", 5169 sizeof(*info), 0); 5170 if (!info) 5171 return -ENOMEM; 5172 bp->recovery_info = info; 5173 } else { 5174 memset(info, 0, sizeof(*info)); 5175 } 5176 5177 status_loc = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 5178 BNXT_GRCP_WINDOW_2_BASE + 5179 offsetof(struct hcomm_status, 5180 fw_status_loc))); 5181 5182 /* Only pre-map the FW health status GRC register */ 5183 if (BNXT_FW_STATUS_REG_TYPE(status_loc) != BNXT_FW_STATUS_REG_TYPE_GRC) 5184 return 0; 5185 5186 info->status_regs[BNXT_FW_STATUS_REG] = status_loc; 5187 info->mapped_status_regs[BNXT_FW_STATUS_REG] = 5188 BNXT_GRCP_WINDOW_2_BASE + (status_loc & BNXT_GRCP_OFFSET_MASK); 5189 5190 rte_write32((status_loc & BNXT_GRCP_BASE_MASK), (uint8_t *)bp->bar0 + 5191 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 5192 5193 bp->fw_cap |= BNXT_FW_CAP_HCOMM_FW_STATUS; 5194 5195 return 0; 5196 } 5197 5198 /* This function gets the FW version along with the 5199 * capabilities(MAX and current) of the function, vnic, 5200 * error recovery, phy and other chip related info 5201 */ 5202 static int bnxt_get_config(struct bnxt *bp) 5203 { 5204 uint16_t mtu; 5205 int rc = 0; 5206 5207 bp->fw_cap = 0; 5208 5209 rc = bnxt_map_hcomm_fw_status_reg(bp); 5210 if (rc) 5211 return rc; 5212 5213 rc = bnxt_hwrm_ver_get(bp, DFLT_HWRM_CMD_TIMEOUT); 5214 if (rc) { 5215 bnxt_check_fw_status(bp); 5216 return rc; 5217 } 5218 5219 rc = bnxt_hwrm_func_reset(bp); 5220 if (rc) 5221 return -EIO; 5222 5223 rc = bnxt_hwrm_vnic_qcaps(bp); 5224 if (rc) 5225 return rc; 5226 5227 rc = bnxt_hwrm_queue_qportcfg(bp); 5228 if (rc) 5229 return rc; 5230 5231 /* Get the MAX capabilities for this function. 5232 * This function also allocates context memory for TQM rings and 5233 * informs the firmware about this allocated backing store memory. 5234 */ 5235 rc = bnxt_hwrm_func_qcaps(bp); 5236 if (rc) 5237 return rc; 5238 5239 rc = bnxt_hwrm_func_qcfg(bp, &mtu); 5240 if (rc) 5241 return rc; 5242 5243 bnxt_hwrm_port_mac_qcfg(bp); 5244 5245 bnxt_hwrm_parent_pf_qcfg(bp); 5246 5247 bnxt_hwrm_port_phy_qcaps(bp); 5248 5249 bnxt_alloc_error_recovery_info(bp); 5250 /* Get the adapter error recovery support info */ 5251 rc = bnxt_hwrm_error_recovery_qcfg(bp); 5252 if (rc) 5253 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 5254 5255 bnxt_hwrm_port_led_qcaps(bp); 5256 5257 return 0; 5258 } 5259 5260 static int 5261 bnxt_init_locks(struct bnxt *bp) 5262 { 5263 int err; 5264 5265 err = pthread_mutex_init(&bp->flow_lock, NULL); 5266 if (err) { 5267 PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n"); 5268 return err; 5269 } 5270 5271 err = pthread_mutex_init(&bp->def_cp_lock, NULL); 5272 if (err) { 5273 PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n"); 5274 return err; 5275 } 5276 5277 err = pthread_mutex_init(&bp->health_check_lock, NULL); 5278 if (err) { 5279 PMD_DRV_LOG(ERR, "Unable to initialize health_check_lock\n"); 5280 return err; 5281 } 5282 5283 err = pthread_mutex_init(&bp->err_recovery_lock, NULL); 5284 if (err) 5285 PMD_DRV_LOG(ERR, "Unable to initialize err_recovery_lock\n"); 5286 5287 return err; 5288 } 5289 5290 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev) 5291 { 5292 int rc = 0; 5293 5294 rc = bnxt_get_config(bp); 5295 if (rc) 5296 return rc; 5297 5298 if (!reconfig_dev) { 5299 rc = bnxt_setup_mac_addr(bp->eth_dev); 5300 if (rc) 5301 return rc; 5302 } else { 5303 rc = bnxt_restore_dflt_mac(bp); 5304 if (rc) 5305 return rc; 5306 } 5307 5308 bnxt_config_vf_req_fwd(bp); 5309 5310 rc = bnxt_hwrm_func_driver_register(bp); 5311 if (rc) { 5312 PMD_DRV_LOG(ERR, "Failed to register driver"); 5313 return -EBUSY; 5314 } 5315 5316 if (BNXT_PF(bp)) { 5317 if (bp->pdev->max_vfs) { 5318 rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs); 5319 if (rc) { 5320 PMD_DRV_LOG(ERR, "Failed to allocate VFs\n"); 5321 return rc; 5322 } 5323 } else { 5324 rc = bnxt_hwrm_allocate_pf_only(bp); 5325 if (rc) { 5326 PMD_DRV_LOG(ERR, 5327 "Failed to allocate PF resources"); 5328 return rc; 5329 } 5330 } 5331 } 5332 5333 if (!reconfig_dev) { 5334 bp->rss_conf.rss_key = rte_zmalloc("bnxt_rss_key", 5335 HW_HASH_KEY_SIZE, 0); 5336 if (bp->rss_conf.rss_key == NULL) { 5337 PMD_DRV_LOG(ERR, "port %u cannot allocate RSS hash key memory", 5338 bp->eth_dev->data->port_id); 5339 return -ENOMEM; 5340 } 5341 } 5342 5343 rc = bnxt_alloc_mem(bp, reconfig_dev); 5344 if (rc) 5345 return rc; 5346 5347 rc = bnxt_setup_int(bp); 5348 if (rc) 5349 return rc; 5350 5351 rc = bnxt_request_int(bp); 5352 if (rc) 5353 return rc; 5354 5355 rc = bnxt_init_ctx_mem(bp); 5356 if (rc) { 5357 PMD_DRV_LOG(ERR, "Failed to init adv_flow_counters\n"); 5358 return rc; 5359 } 5360 5361 return 0; 5362 } 5363 5364 static int 5365 bnxt_parse_devarg_flow_xstat(__rte_unused const char *key, 5366 const char *value, void *opaque_arg) 5367 { 5368 struct bnxt *bp = opaque_arg; 5369 unsigned long flow_xstat; 5370 char *end = NULL; 5371 5372 if (!value || !opaque_arg) { 5373 PMD_DRV_LOG(ERR, 5374 "Invalid parameter passed to flow_xstat devarg.\n"); 5375 return -EINVAL; 5376 } 5377 5378 flow_xstat = strtoul(value, &end, 10); 5379 if (end == NULL || *end != '\0' || 5380 (flow_xstat == ULONG_MAX && errno == ERANGE)) { 5381 PMD_DRV_LOG(ERR, 5382 "Invalid parameter passed to flow_xstat devarg.\n"); 5383 return -EINVAL; 5384 } 5385 5386 if (BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)) { 5387 PMD_DRV_LOG(ERR, 5388 "Invalid value passed to flow_xstat devarg.\n"); 5389 return -EINVAL; 5390 } 5391 5392 bp->flags |= BNXT_FLAG_FLOW_XSTATS_EN; 5393 if (BNXT_FLOW_XSTATS_EN(bp)) 5394 PMD_DRV_LOG(INFO, "flow_xstat feature enabled.\n"); 5395 5396 return 0; 5397 } 5398 5399 static int 5400 bnxt_parse_devarg_max_num_kflows(__rte_unused const char *key, 5401 const char *value, void *opaque_arg) 5402 { 5403 struct bnxt *bp = opaque_arg; 5404 unsigned long max_num_kflows; 5405 char *end = NULL; 5406 5407 if (!value || !opaque_arg) { 5408 PMD_DRV_LOG(ERR, 5409 "Invalid parameter passed to max_num_kflows devarg.\n"); 5410 return -EINVAL; 5411 } 5412 5413 max_num_kflows = strtoul(value, &end, 10); 5414 if (end == NULL || *end != '\0' || 5415 (max_num_kflows == ULONG_MAX && errno == ERANGE)) { 5416 PMD_DRV_LOG(ERR, 5417 "Invalid parameter passed to max_num_kflows devarg.\n"); 5418 return -EINVAL; 5419 } 5420 5421 if (bnxt_devarg_max_num_kflow_invalid(max_num_kflows)) { 5422 PMD_DRV_LOG(ERR, 5423 "Invalid value passed to max_num_kflows devarg.\n"); 5424 return -EINVAL; 5425 } 5426 5427 bp->max_num_kflows = max_num_kflows; 5428 if (bp->max_num_kflows) 5429 PMD_DRV_LOG(INFO, "max_num_kflows set as %ldK.\n", 5430 max_num_kflows); 5431 5432 return 0; 5433 } 5434 5435 static int 5436 bnxt_parse_devarg_app_id(__rte_unused const char *key, 5437 const char *value, void *opaque_arg) 5438 { 5439 struct bnxt *bp = opaque_arg; 5440 unsigned long app_id; 5441 char *end = NULL; 5442 5443 if (!value || !opaque_arg) { 5444 PMD_DRV_LOG(ERR, 5445 "Invalid parameter passed to app-id " 5446 "devargs.\n"); 5447 return -EINVAL; 5448 } 5449 5450 app_id = strtoul(value, &end, 10); 5451 if (end == NULL || *end != '\0' || 5452 (app_id == ULONG_MAX && errno == ERANGE)) { 5453 PMD_DRV_LOG(ERR, 5454 "Invalid parameter passed to app_id " 5455 "devargs.\n"); 5456 return -EINVAL; 5457 } 5458 5459 if (BNXT_DEVARG_APP_ID_INVALID(app_id)) { 5460 PMD_DRV_LOG(ERR, "Invalid app-id(%d) devargs.\n", 5461 (uint16_t)app_id); 5462 return -EINVAL; 5463 } 5464 5465 bp->app_id = app_id; 5466 PMD_DRV_LOG(INFO, "app-id=%d feature enabled.\n", (uint16_t)app_id); 5467 5468 return 0; 5469 } 5470 5471 static int 5472 bnxt_parse_devarg_rep_is_pf(__rte_unused const char *key, 5473 const char *value, void *opaque_arg) 5474 { 5475 struct bnxt_representor *vfr_bp = opaque_arg; 5476 unsigned long rep_is_pf; 5477 char *end = NULL; 5478 5479 if (!value || !opaque_arg) { 5480 PMD_DRV_LOG(ERR, 5481 "Invalid parameter passed to rep_is_pf devargs.\n"); 5482 return -EINVAL; 5483 } 5484 5485 rep_is_pf = strtoul(value, &end, 10); 5486 if (end == NULL || *end != '\0' || 5487 (rep_is_pf == ULONG_MAX && errno == ERANGE)) { 5488 PMD_DRV_LOG(ERR, 5489 "Invalid parameter passed to rep_is_pf devargs.\n"); 5490 return -EINVAL; 5491 } 5492 5493 if (BNXT_DEVARG_REP_IS_PF_INVALID(rep_is_pf)) { 5494 PMD_DRV_LOG(ERR, 5495 "Invalid value passed to rep_is_pf devargs.\n"); 5496 return -EINVAL; 5497 } 5498 5499 vfr_bp->flags |= rep_is_pf; 5500 if (BNXT_REP_PF(vfr_bp)) 5501 PMD_DRV_LOG(INFO, "PF representor\n"); 5502 else 5503 PMD_DRV_LOG(INFO, "VF representor\n"); 5504 5505 return 0; 5506 } 5507 5508 static int 5509 bnxt_parse_devarg_rep_based_pf(__rte_unused const char *key, 5510 const char *value, void *opaque_arg) 5511 { 5512 struct bnxt_representor *vfr_bp = opaque_arg; 5513 unsigned long rep_based_pf; 5514 char *end = NULL; 5515 5516 if (!value || !opaque_arg) { 5517 PMD_DRV_LOG(ERR, 5518 "Invalid parameter passed to rep_based_pf " 5519 "devargs.\n"); 5520 return -EINVAL; 5521 } 5522 5523 rep_based_pf = strtoul(value, &end, 10); 5524 if (end == NULL || *end != '\0' || 5525 (rep_based_pf == ULONG_MAX && errno == ERANGE)) { 5526 PMD_DRV_LOG(ERR, 5527 "Invalid parameter passed to rep_based_pf " 5528 "devargs.\n"); 5529 return -EINVAL; 5530 } 5531 5532 if (BNXT_DEVARG_REP_BASED_PF_INVALID(rep_based_pf)) { 5533 PMD_DRV_LOG(ERR, 5534 "Invalid value passed to rep_based_pf devargs.\n"); 5535 return -EINVAL; 5536 } 5537 5538 vfr_bp->rep_based_pf = rep_based_pf; 5539 vfr_bp->flags |= BNXT_REP_BASED_PF_VALID; 5540 5541 PMD_DRV_LOG(INFO, "rep-based-pf = %d\n", vfr_bp->rep_based_pf); 5542 5543 return 0; 5544 } 5545 5546 static int 5547 bnxt_parse_devarg_rep_q_r2f(__rte_unused const char *key, 5548 const char *value, void *opaque_arg) 5549 { 5550 struct bnxt_representor *vfr_bp = opaque_arg; 5551 unsigned long rep_q_r2f; 5552 char *end = NULL; 5553 5554 if (!value || !opaque_arg) { 5555 PMD_DRV_LOG(ERR, 5556 "Invalid parameter passed to rep_q_r2f " 5557 "devargs.\n"); 5558 return -EINVAL; 5559 } 5560 5561 rep_q_r2f = strtoul(value, &end, 10); 5562 if (end == NULL || *end != '\0' || 5563 (rep_q_r2f == ULONG_MAX && errno == ERANGE)) { 5564 PMD_DRV_LOG(ERR, 5565 "Invalid parameter passed to rep_q_r2f " 5566 "devargs.\n"); 5567 return -EINVAL; 5568 } 5569 5570 if (BNXT_DEVARG_REP_Q_R2F_INVALID(rep_q_r2f)) { 5571 PMD_DRV_LOG(ERR, 5572 "Invalid value passed to rep_q_r2f devargs.\n"); 5573 return -EINVAL; 5574 } 5575 5576 vfr_bp->rep_q_r2f = rep_q_r2f; 5577 vfr_bp->flags |= BNXT_REP_Q_R2F_VALID; 5578 PMD_DRV_LOG(INFO, "rep-q-r2f = %d\n", vfr_bp->rep_q_r2f); 5579 5580 return 0; 5581 } 5582 5583 static int 5584 bnxt_parse_devarg_rep_q_f2r(__rte_unused const char *key, 5585 const char *value, void *opaque_arg) 5586 { 5587 struct bnxt_representor *vfr_bp = opaque_arg; 5588 unsigned long rep_q_f2r; 5589 char *end = NULL; 5590 5591 if (!value || !opaque_arg) { 5592 PMD_DRV_LOG(ERR, 5593 "Invalid parameter passed to rep_q_f2r " 5594 "devargs.\n"); 5595 return -EINVAL; 5596 } 5597 5598 rep_q_f2r = strtoul(value, &end, 10); 5599 if (end == NULL || *end != '\0' || 5600 (rep_q_f2r == ULONG_MAX && errno == ERANGE)) { 5601 PMD_DRV_LOG(ERR, 5602 "Invalid parameter passed to rep_q_f2r " 5603 "devargs.\n"); 5604 return -EINVAL; 5605 } 5606 5607 if (BNXT_DEVARG_REP_Q_F2R_INVALID(rep_q_f2r)) { 5608 PMD_DRV_LOG(ERR, 5609 "Invalid value passed to rep_q_f2r devargs.\n"); 5610 return -EINVAL; 5611 } 5612 5613 vfr_bp->rep_q_f2r = rep_q_f2r; 5614 vfr_bp->flags |= BNXT_REP_Q_F2R_VALID; 5615 PMD_DRV_LOG(INFO, "rep-q-f2r = %d\n", vfr_bp->rep_q_f2r); 5616 5617 return 0; 5618 } 5619 5620 static int 5621 bnxt_parse_devarg_rep_fc_r2f(__rte_unused const char *key, 5622 const char *value, void *opaque_arg) 5623 { 5624 struct bnxt_representor *vfr_bp = opaque_arg; 5625 unsigned long rep_fc_r2f; 5626 char *end = NULL; 5627 5628 if (!value || !opaque_arg) { 5629 PMD_DRV_LOG(ERR, 5630 "Invalid parameter passed to rep_fc_r2f " 5631 "devargs.\n"); 5632 return -EINVAL; 5633 } 5634 5635 rep_fc_r2f = strtoul(value, &end, 10); 5636 if (end == NULL || *end != '\0' || 5637 (rep_fc_r2f == ULONG_MAX && errno == ERANGE)) { 5638 PMD_DRV_LOG(ERR, 5639 "Invalid parameter passed to rep_fc_r2f " 5640 "devargs.\n"); 5641 return -EINVAL; 5642 } 5643 5644 if (BNXT_DEVARG_REP_FC_R2F_INVALID(rep_fc_r2f)) { 5645 PMD_DRV_LOG(ERR, 5646 "Invalid value passed to rep_fc_r2f devargs.\n"); 5647 return -EINVAL; 5648 } 5649 5650 vfr_bp->flags |= BNXT_REP_FC_R2F_VALID; 5651 vfr_bp->rep_fc_r2f = rep_fc_r2f; 5652 PMD_DRV_LOG(INFO, "rep-fc-r2f = %lu\n", rep_fc_r2f); 5653 5654 return 0; 5655 } 5656 5657 static int 5658 bnxt_parse_devarg_rep_fc_f2r(__rte_unused const char *key, 5659 const char *value, void *opaque_arg) 5660 { 5661 struct bnxt_representor *vfr_bp = opaque_arg; 5662 unsigned long rep_fc_f2r; 5663 char *end = NULL; 5664 5665 if (!value || !opaque_arg) { 5666 PMD_DRV_LOG(ERR, 5667 "Invalid parameter passed to rep_fc_f2r " 5668 "devargs.\n"); 5669 return -EINVAL; 5670 } 5671 5672 rep_fc_f2r = strtoul(value, &end, 10); 5673 if (end == NULL || *end != '\0' || 5674 (rep_fc_f2r == ULONG_MAX && errno == ERANGE)) { 5675 PMD_DRV_LOG(ERR, 5676 "Invalid parameter passed to rep_fc_f2r " 5677 "devargs.\n"); 5678 return -EINVAL; 5679 } 5680 5681 if (BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r)) { 5682 PMD_DRV_LOG(ERR, 5683 "Invalid value passed to rep_fc_f2r devargs.\n"); 5684 return -EINVAL; 5685 } 5686 5687 vfr_bp->flags |= BNXT_REP_FC_F2R_VALID; 5688 vfr_bp->rep_fc_f2r = rep_fc_f2r; 5689 PMD_DRV_LOG(INFO, "rep-fc-f2r = %lu\n", rep_fc_f2r); 5690 5691 return 0; 5692 } 5693 5694 static int 5695 bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs) 5696 { 5697 struct rte_kvargs *kvlist; 5698 int ret; 5699 5700 if (devargs == NULL) 5701 return 0; 5702 5703 kvlist = rte_kvargs_parse(devargs->args, bnxt_dev_args); 5704 if (kvlist == NULL) 5705 return -EINVAL; 5706 5707 /* 5708 * Handler for "flow_xstat" devarg. 5709 * Invoked as for ex: "-a 0000:00:0d.0,flow_xstat=1" 5710 */ 5711 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_FLOW_XSTAT, 5712 bnxt_parse_devarg_flow_xstat, bp); 5713 if (ret) 5714 goto err; 5715 5716 /* 5717 * Handler for "max_num_kflows" devarg. 5718 * Invoked as for ex: "-a 000:00:0d.0,max_num_kflows=32" 5719 */ 5720 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_MAX_NUM_KFLOWS, 5721 bnxt_parse_devarg_max_num_kflows, bp); 5722 if (ret) 5723 goto err; 5724 5725 err: 5726 /* 5727 * Handler for "app-id" devarg. 5728 * Invoked as for ex: "-a 000:00:0d.0,app-id=1" 5729 */ 5730 rte_kvargs_process(kvlist, BNXT_DEVARG_APP_ID, 5731 bnxt_parse_devarg_app_id, bp); 5732 5733 rte_kvargs_free(kvlist); 5734 return ret; 5735 } 5736 5737 static int bnxt_alloc_switch_domain(struct bnxt *bp) 5738 { 5739 int rc = 0; 5740 5741 if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) { 5742 rc = rte_eth_switch_domain_alloc(&bp->switch_domain_id); 5743 if (rc) 5744 PMD_DRV_LOG(ERR, 5745 "Failed to alloc switch domain: %d\n", rc); 5746 else 5747 PMD_DRV_LOG(INFO, 5748 "Switch domain allocated %d\n", 5749 bp->switch_domain_id); 5750 } 5751 5752 return rc; 5753 } 5754 5755 /* Allocate and initialize various fields in bnxt struct that 5756 * need to be allocated/destroyed only once in the lifetime of the driver 5757 */ 5758 static int bnxt_drv_init(struct rte_eth_dev *eth_dev) 5759 { 5760 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 5761 struct bnxt *bp = eth_dev->data->dev_private; 5762 int rc = 0; 5763 5764 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 5765 5766 if (bnxt_vf_pciid(pci_dev->id.device_id)) 5767 bp->flags |= BNXT_FLAG_VF; 5768 5769 if (bnxt_p5_device(pci_dev->id.device_id)) 5770 bp->flags |= BNXT_FLAG_CHIP_P5; 5771 5772 if (pci_dev->id.device_id == BROADCOM_DEV_ID_58802 || 5773 pci_dev->id.device_id == BROADCOM_DEV_ID_58804 || 5774 pci_dev->id.device_id == BROADCOM_DEV_ID_58808 || 5775 pci_dev->id.device_id == BROADCOM_DEV_ID_58802_VF) 5776 bp->flags |= BNXT_FLAG_STINGRAY; 5777 5778 if (BNXT_TRUFLOW_EN(bp)) { 5779 /* extra mbuf field is required to store CFA code from mark */ 5780 static const struct rte_mbuf_dynfield bnxt_cfa_code_dynfield_desc = { 5781 .name = RTE_PMD_BNXT_CFA_CODE_DYNFIELD_NAME, 5782 .size = sizeof(bnxt_cfa_code_dynfield_t), 5783 .align = __alignof__(bnxt_cfa_code_dynfield_t), 5784 }; 5785 bnxt_cfa_code_dynfield_offset = 5786 rte_mbuf_dynfield_register(&bnxt_cfa_code_dynfield_desc); 5787 if (bnxt_cfa_code_dynfield_offset < 0) { 5788 PMD_DRV_LOG(ERR, 5789 "Failed to register mbuf field for TruFlow mark\n"); 5790 return -rte_errno; 5791 } 5792 } 5793 5794 rc = bnxt_map_pci_bars(eth_dev); 5795 if (rc) { 5796 PMD_DRV_LOG(ERR, 5797 "Failed to initialize board rc: %x\n", rc); 5798 return rc; 5799 } 5800 5801 rc = bnxt_alloc_pf_info(bp); 5802 if (rc) 5803 return rc; 5804 5805 rc = bnxt_alloc_link_info(bp); 5806 if (rc) 5807 return rc; 5808 5809 rc = bnxt_alloc_parent_info(bp); 5810 if (rc) 5811 return rc; 5812 5813 rc = bnxt_alloc_hwrm_resources(bp); 5814 if (rc) { 5815 PMD_DRV_LOG(ERR, 5816 "Failed to allocate response buffer rc: %x\n", rc); 5817 return rc; 5818 } 5819 rc = bnxt_alloc_leds_info(bp); 5820 if (rc) 5821 return rc; 5822 5823 rc = bnxt_alloc_cos_queues(bp); 5824 if (rc) 5825 return rc; 5826 5827 rc = bnxt_init_locks(bp); 5828 if (rc) 5829 return rc; 5830 5831 rc = bnxt_alloc_switch_domain(bp); 5832 if (rc) 5833 return rc; 5834 5835 return rc; 5836 } 5837 5838 static int 5839 bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused) 5840 { 5841 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 5842 static int version_printed; 5843 struct bnxt *bp; 5844 int rc; 5845 5846 if (version_printed++ == 0) 5847 PMD_DRV_LOG(INFO, "%s\n", bnxt_version); 5848 5849 eth_dev->dev_ops = &bnxt_dev_ops; 5850 eth_dev->rx_queue_count = bnxt_rx_queue_count_op; 5851 eth_dev->rx_descriptor_status = bnxt_rx_descriptor_status_op; 5852 eth_dev->tx_descriptor_status = bnxt_tx_descriptor_status_op; 5853 eth_dev->rx_pkt_burst = &bnxt_recv_pkts; 5854 eth_dev->tx_pkt_burst = &bnxt_xmit_pkts; 5855 5856 /* 5857 * For secondary processes, we don't initialise any further 5858 * as primary has already done this work. 5859 */ 5860 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 5861 return 0; 5862 5863 rte_eth_copy_pci_info(eth_dev, pci_dev); 5864 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 5865 5866 bp = eth_dev->data->dev_private; 5867 5868 /* Parse dev arguments passed on when starting the DPDK application. */ 5869 rc = bnxt_parse_dev_args(bp, pci_dev->device.devargs); 5870 if (rc) 5871 goto error_free; 5872 5873 rc = bnxt_drv_init(eth_dev); 5874 if (rc) 5875 goto error_free; 5876 5877 rc = bnxt_init_resources(bp, false); 5878 if (rc) 5879 goto error_free; 5880 5881 rc = bnxt_alloc_stats_mem(bp); 5882 if (rc) 5883 goto error_free; 5884 5885 PMD_DRV_LOG(INFO, 5886 "Found %s device at mem %" PRIX64 ", node addr %pM\n", 5887 DRV_MODULE_NAME, 5888 pci_dev->mem_resource[0].phys_addr, 5889 pci_dev->mem_resource[0].addr); 5890 5891 return 0; 5892 5893 error_free: 5894 bnxt_dev_uninit(eth_dev); 5895 return rc; 5896 } 5897 5898 5899 static void bnxt_free_ctx_mem_buf(struct bnxt_ctx_mem_buf_info *ctx) 5900 { 5901 if (!ctx) 5902 return; 5903 5904 rte_free(ctx->va); 5905 5906 ctx->va = NULL; 5907 ctx->dma = RTE_BAD_IOVA; 5908 ctx->ctx_id = BNXT_CTX_VAL_INVAL; 5909 } 5910 5911 static void bnxt_unregister_fc_ctx_mem(struct bnxt *bp) 5912 { 5913 bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX, 5914 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 5915 bp->flow_stat->rx_fc_out_tbl.ctx_id, 5916 bp->flow_stat->max_fc, 5917 false); 5918 5919 bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX, 5920 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 5921 bp->flow_stat->tx_fc_out_tbl.ctx_id, 5922 bp->flow_stat->max_fc, 5923 false); 5924 5925 if (bp->flow_stat->rx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 5926 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_in_tbl.ctx_id); 5927 bp->flow_stat->rx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 5928 5929 if (bp->flow_stat->rx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 5930 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_out_tbl.ctx_id); 5931 bp->flow_stat->rx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 5932 5933 if (bp->flow_stat->tx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 5934 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_in_tbl.ctx_id); 5935 bp->flow_stat->tx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 5936 5937 if (bp->flow_stat->tx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 5938 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_out_tbl.ctx_id); 5939 bp->flow_stat->tx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 5940 } 5941 5942 static void bnxt_uninit_fc_ctx_mem(struct bnxt *bp) 5943 { 5944 bnxt_unregister_fc_ctx_mem(bp); 5945 5946 bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_in_tbl); 5947 bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_out_tbl); 5948 bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_in_tbl); 5949 bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_out_tbl); 5950 } 5951 5952 static void bnxt_uninit_ctx_mem(struct bnxt *bp) 5953 { 5954 if (BNXT_FLOW_XSTATS_EN(bp)) 5955 bnxt_uninit_fc_ctx_mem(bp); 5956 } 5957 5958 static void 5959 bnxt_free_error_recovery_info(struct bnxt *bp) 5960 { 5961 rte_free(bp->recovery_info); 5962 bp->recovery_info = NULL; 5963 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 5964 } 5965 5966 static int 5967 bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev) 5968 { 5969 int rc; 5970 5971 bnxt_free_int(bp); 5972 bnxt_free_mem(bp, reconfig_dev); 5973 5974 bnxt_hwrm_func_buf_unrgtr(bp); 5975 if (bp->pf != NULL) { 5976 rte_free(bp->pf->vf_req_buf); 5977 bp->pf->vf_req_buf = NULL; 5978 } 5979 5980 rc = bnxt_hwrm_func_driver_unregister(bp); 5981 bp->flags &= ~BNXT_FLAG_REGISTERED; 5982 bnxt_free_ctx_mem(bp); 5983 if (!reconfig_dev) { 5984 bnxt_free_hwrm_resources(bp); 5985 bnxt_free_error_recovery_info(bp); 5986 rte_free(bp->mcast_addr_list); 5987 bp->mcast_addr_list = NULL; 5988 rte_free(bp->rss_conf.rss_key); 5989 bp->rss_conf.rss_key = NULL; 5990 } 5991 5992 bnxt_uninit_ctx_mem(bp); 5993 5994 bnxt_free_flow_stats_info(bp); 5995 if (bp->rep_info != NULL) 5996 bnxt_free_switch_domain(bp); 5997 bnxt_free_rep_info(bp); 5998 rte_free(bp->ptp_cfg); 5999 bp->ptp_cfg = NULL; 6000 return rc; 6001 } 6002 6003 static int 6004 bnxt_dev_uninit(struct rte_eth_dev *eth_dev) 6005 { 6006 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 6007 return -EPERM; 6008 6009 PMD_DRV_LOG(DEBUG, "Calling Device uninit\n"); 6010 6011 if (eth_dev->state != RTE_ETH_DEV_UNUSED) 6012 bnxt_dev_close_op(eth_dev); 6013 6014 return 0; 6015 } 6016 6017 static int bnxt_pci_remove_dev_with_reps(struct rte_eth_dev *eth_dev) 6018 { 6019 struct bnxt *bp = eth_dev->data->dev_private; 6020 struct rte_eth_dev *vf_rep_eth_dev; 6021 int ret = 0, i; 6022 6023 if (!bp) 6024 return -EINVAL; 6025 6026 for (i = 0; i < bp->num_reps; i++) { 6027 vf_rep_eth_dev = bp->rep_info[i].vfr_eth_dev; 6028 if (!vf_rep_eth_dev) 6029 continue; 6030 PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci remove\n", 6031 vf_rep_eth_dev->data->port_id); 6032 rte_eth_dev_destroy(vf_rep_eth_dev, bnxt_representor_uninit); 6033 } 6034 PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n", 6035 eth_dev->data->port_id); 6036 ret = rte_eth_dev_destroy(eth_dev, bnxt_dev_uninit); 6037 6038 return ret; 6039 } 6040 6041 static void bnxt_free_rep_info(struct bnxt *bp) 6042 { 6043 rte_free(bp->rep_info); 6044 bp->rep_info = NULL; 6045 rte_free(bp->cfa_code_map); 6046 bp->cfa_code_map = NULL; 6047 } 6048 6049 static int bnxt_init_rep_info(struct bnxt *bp) 6050 { 6051 int i = 0, rc; 6052 6053 if (bp->rep_info) 6054 return 0; 6055 6056 bp->rep_info = rte_zmalloc("bnxt_rep_info", 6057 sizeof(bp->rep_info[0]) * BNXT_MAX_VF_REPS(bp), 6058 0); 6059 if (!bp->rep_info) { 6060 PMD_DRV_LOG(ERR, "Failed to alloc memory for rep info\n"); 6061 return -ENOMEM; 6062 } 6063 bp->cfa_code_map = rte_zmalloc("bnxt_cfa_code_map", 6064 sizeof(*bp->cfa_code_map) * 6065 BNXT_MAX_CFA_CODE, 0); 6066 if (!bp->cfa_code_map) { 6067 PMD_DRV_LOG(ERR, "Failed to alloc memory for cfa_code_map\n"); 6068 bnxt_free_rep_info(bp); 6069 return -ENOMEM; 6070 } 6071 6072 for (i = 0; i < BNXT_MAX_CFA_CODE; i++) 6073 bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID; 6074 6075 rc = pthread_mutex_init(&bp->rep_info->vfr_lock, NULL); 6076 if (rc) { 6077 PMD_DRV_LOG(ERR, "Unable to initialize vfr_lock\n"); 6078 bnxt_free_rep_info(bp); 6079 return rc; 6080 } 6081 6082 rc = pthread_mutex_init(&bp->rep_info->vfr_start_lock, NULL); 6083 if (rc) { 6084 PMD_DRV_LOG(ERR, "Unable to initialize vfr_start_lock\n"); 6085 bnxt_free_rep_info(bp); 6086 return rc; 6087 } 6088 6089 return rc; 6090 } 6091 6092 static int bnxt_rep_port_probe(struct rte_pci_device *pci_dev, 6093 struct rte_eth_devargs *eth_da, 6094 struct rte_eth_dev *backing_eth_dev, 6095 const char *dev_args) 6096 { 6097 struct rte_eth_dev *vf_rep_eth_dev; 6098 char name[RTE_ETH_NAME_MAX_LEN]; 6099 struct bnxt *backing_bp = backing_eth_dev->data->dev_private; 6100 uint16_t max_vf_reps = BNXT_MAX_VF_REPS(backing_bp); 6101 6102 uint16_t num_rep; 6103 int i, ret = 0; 6104 struct rte_kvargs *kvlist = NULL; 6105 6106 if (eth_da->type == RTE_ETH_REPRESENTOR_NONE) 6107 return 0; 6108 if (eth_da->type != RTE_ETH_REPRESENTOR_VF) { 6109 PMD_DRV_LOG(ERR, "unsupported representor type %d\n", 6110 eth_da->type); 6111 return -ENOTSUP; 6112 } 6113 num_rep = eth_da->nb_representor_ports; 6114 if (num_rep > max_vf_reps) { 6115 PMD_DRV_LOG(ERR, "nb_representor_ports = %d > %d MAX VF REPS\n", 6116 num_rep, max_vf_reps); 6117 return -EINVAL; 6118 } 6119 6120 if (num_rep >= RTE_MAX_ETHPORTS) { 6121 PMD_DRV_LOG(ERR, 6122 "nb_representor_ports = %d > %d MAX ETHPORTS\n", 6123 num_rep, RTE_MAX_ETHPORTS); 6124 return -EINVAL; 6125 } 6126 6127 if (!(BNXT_PF(backing_bp) || BNXT_VF_IS_TRUSTED(backing_bp))) { 6128 PMD_DRV_LOG(ERR, 6129 "Not a PF or trusted VF. No Representor support\n"); 6130 /* Returning an error is not an option. 6131 * Applications are not handling this correctly 6132 */ 6133 return 0; 6134 } 6135 6136 if (bnxt_init_rep_info(backing_bp)) 6137 return 0; 6138 6139 for (i = 0; i < num_rep; i++) { 6140 struct bnxt_representor representor = { 6141 .vf_id = eth_da->representor_ports[i], 6142 .switch_domain_id = backing_bp->switch_domain_id, 6143 .parent_dev = backing_eth_dev 6144 }; 6145 6146 if (representor.vf_id >= max_vf_reps) { 6147 PMD_DRV_LOG(ERR, "VF-Rep id %d >= %d MAX VF ID\n", 6148 representor.vf_id, max_vf_reps); 6149 continue; 6150 } 6151 6152 /* representor port net_bdf_port */ 6153 snprintf(name, sizeof(name), "net_%s_representor_%d", 6154 pci_dev->device.name, eth_da->representor_ports[i]); 6155 6156 kvlist = rte_kvargs_parse(dev_args, bnxt_dev_args); 6157 if (kvlist) { 6158 /* 6159 * Handler for "rep_is_pf" devarg. 6160 * Invoked as for ex: "-a 000:00:0d.0, 6161 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>" 6162 */ 6163 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_IS_PF, 6164 bnxt_parse_devarg_rep_is_pf, 6165 (void *)&representor); 6166 if (ret) { 6167 ret = -EINVAL; 6168 goto err; 6169 } 6170 /* 6171 * Handler for "rep_based_pf" devarg. 6172 * Invoked as for ex: "-a 000:00:0d.0, 6173 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>" 6174 */ 6175 ret = rte_kvargs_process(kvlist, 6176 BNXT_DEVARG_REP_BASED_PF, 6177 bnxt_parse_devarg_rep_based_pf, 6178 (void *)&representor); 6179 if (ret) { 6180 ret = -EINVAL; 6181 goto err; 6182 } 6183 /* 6184 * Handler for "rep_based_pf" devarg. 6185 * Invoked as for ex: "-a 000:00:0d.0, 6186 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>" 6187 */ 6188 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_R2F, 6189 bnxt_parse_devarg_rep_q_r2f, 6190 (void *)&representor); 6191 if (ret) { 6192 ret = -EINVAL; 6193 goto err; 6194 } 6195 /* 6196 * Handler for "rep_based_pf" devarg. 6197 * Invoked as for ex: "-a 000:00:0d.0, 6198 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>" 6199 */ 6200 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_F2R, 6201 bnxt_parse_devarg_rep_q_f2r, 6202 (void *)&representor); 6203 if (ret) { 6204 ret = -EINVAL; 6205 goto err; 6206 } 6207 /* 6208 * Handler for "rep_based_pf" devarg. 6209 * Invoked as for ex: "-a 000:00:0d.0, 6210 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>" 6211 */ 6212 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_R2F, 6213 bnxt_parse_devarg_rep_fc_r2f, 6214 (void *)&representor); 6215 if (ret) { 6216 ret = -EINVAL; 6217 goto err; 6218 } 6219 /* 6220 * Handler for "rep_based_pf" devarg. 6221 * Invoked as for ex: "-a 000:00:0d.0, 6222 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>" 6223 */ 6224 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_F2R, 6225 bnxt_parse_devarg_rep_fc_f2r, 6226 (void *)&representor); 6227 if (ret) { 6228 ret = -EINVAL; 6229 goto err; 6230 } 6231 } 6232 6233 ret = rte_eth_dev_create(&pci_dev->device, name, 6234 sizeof(struct bnxt_representor), 6235 NULL, NULL, 6236 bnxt_representor_init, 6237 &representor); 6238 if (ret) { 6239 PMD_DRV_LOG(ERR, "failed to create bnxt vf " 6240 "representor %s.", name); 6241 goto err; 6242 } 6243 6244 vf_rep_eth_dev = rte_eth_dev_allocated(name); 6245 if (!vf_rep_eth_dev) { 6246 PMD_DRV_LOG(ERR, "Failed to find the eth_dev" 6247 " for VF-Rep: %s.", name); 6248 ret = -ENODEV; 6249 goto err; 6250 } 6251 6252 PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci probe\n", 6253 backing_eth_dev->data->port_id); 6254 backing_bp->rep_info[representor.vf_id].vfr_eth_dev = 6255 vf_rep_eth_dev; 6256 backing_bp->num_reps++; 6257 6258 } 6259 6260 rte_kvargs_free(kvlist); 6261 return 0; 6262 6263 err: 6264 /* If num_rep > 1, then rollback already created 6265 * ports, since we'll be failing the probe anyway 6266 */ 6267 if (num_rep > 1) 6268 bnxt_pci_remove_dev_with_reps(backing_eth_dev); 6269 rte_errno = -ret; 6270 rte_kvargs_free(kvlist); 6271 6272 return ret; 6273 } 6274 6275 static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 6276 struct rte_pci_device *pci_dev) 6277 { 6278 struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 }; 6279 struct rte_eth_dev *backing_eth_dev; 6280 uint16_t num_rep; 6281 int ret = 0; 6282 6283 if (pci_dev->device.devargs) { 6284 ret = rte_eth_devargs_parse(pci_dev->device.devargs->args, 6285 ð_da); 6286 if (ret) 6287 return ret; 6288 } 6289 6290 num_rep = eth_da.nb_representor_ports; 6291 PMD_DRV_LOG(DEBUG, "nb_representor_ports = %d\n", 6292 num_rep); 6293 6294 /* We could come here after first level of probe is already invoked 6295 * as part of an application bringup(OVS-DPDK vswitchd), so first check 6296 * for already allocated eth_dev for the backing device (PF/Trusted VF) 6297 */ 6298 backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name); 6299 if (backing_eth_dev == NULL) { 6300 ret = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name, 6301 sizeof(struct bnxt), 6302 eth_dev_pci_specific_init, pci_dev, 6303 bnxt_dev_init, NULL); 6304 6305 if (ret || !num_rep) 6306 return ret; 6307 6308 backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name); 6309 } 6310 PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci probe\n", 6311 backing_eth_dev->data->port_id); 6312 6313 if (!num_rep) 6314 return ret; 6315 6316 /* probe representor ports now */ 6317 ret = bnxt_rep_port_probe(pci_dev, ð_da, backing_eth_dev, 6318 pci_dev->device.devargs->args); 6319 6320 return ret; 6321 } 6322 6323 static int bnxt_pci_remove(struct rte_pci_device *pci_dev) 6324 { 6325 struct rte_eth_dev *eth_dev; 6326 6327 eth_dev = rte_eth_dev_allocated(pci_dev->device.name); 6328 if (!eth_dev) 6329 return 0; /* Invoked typically only by OVS-DPDK, by the 6330 * time it comes here the eth_dev is already 6331 * deleted by rte_eth_dev_close(), so returning 6332 * +ve value will at least help in proper cleanup 6333 */ 6334 6335 PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n", eth_dev->data->port_id); 6336 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 6337 if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) 6338 return rte_eth_dev_destroy(eth_dev, 6339 bnxt_representor_uninit); 6340 else 6341 return rte_eth_dev_destroy(eth_dev, 6342 bnxt_dev_uninit); 6343 } else { 6344 return rte_eth_dev_pci_generic_remove(pci_dev, NULL); 6345 } 6346 } 6347 6348 static struct rte_pci_driver bnxt_rte_pmd = { 6349 .id_table = bnxt_pci_id_map, 6350 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | 6351 RTE_PCI_DRV_INTR_RMV | 6352 RTE_PCI_DRV_PROBE_AGAIN, /* Needed in case of VF-REPs 6353 * and OVS-DPDK 6354 */ 6355 .probe = bnxt_pci_probe, 6356 .remove = bnxt_pci_remove, 6357 }; 6358 6359 static bool 6360 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv) 6361 { 6362 if (strcmp(dev->device->driver->name, drv->driver.name)) 6363 return false; 6364 6365 return true; 6366 } 6367 6368 bool is_bnxt_supported(struct rte_eth_dev *dev) 6369 { 6370 return is_device_supported(dev, &bnxt_rte_pmd); 6371 } 6372 6373 RTE_LOG_REGISTER_SUFFIX(bnxt_logtype_driver, driver, NOTICE); 6374 RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd); 6375 RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map); 6376 RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci"); 6377