1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2014-2021 Broadcom 3 * All rights reserved. 4 */ 5 6 #include <inttypes.h> 7 #include <stdbool.h> 8 9 #include <rte_dev.h> 10 #include <ethdev_driver.h> 11 #include <ethdev_pci.h> 12 #include <rte_malloc.h> 13 #include <rte_cycles.h> 14 #include <rte_alarm.h> 15 #include <rte_kvargs.h> 16 #include <rte_vect.h> 17 18 #include "bnxt.h" 19 #include "bnxt_filter.h" 20 #include "bnxt_hwrm.h" 21 #include "bnxt_irq.h" 22 #include "bnxt_reps.h" 23 #include "bnxt_ring.h" 24 #include "bnxt_rxq.h" 25 #include "bnxt_rxr.h" 26 #include "bnxt_stats.h" 27 #include "bnxt_txq.h" 28 #include "bnxt_txr.h" 29 #include "bnxt_vnic.h" 30 #include "hsi_struct_def_dpdk.h" 31 #include "bnxt_nvm_defs.h" 32 #include "bnxt_tf_common.h" 33 #include "ulp_flow_db.h" 34 #include "rte_pmd_bnxt.h" 35 36 #define DRV_MODULE_NAME "bnxt" 37 static const char bnxt_version[] = 38 "Broadcom NetXtreme driver " DRV_MODULE_NAME; 39 40 /* 41 * The set of PCI devices this driver supports 42 */ 43 static const struct rte_pci_id bnxt_pci_id_map[] = { 44 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 45 BROADCOM_DEV_ID_STRATUS_NIC_VF1) }, 46 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 47 BROADCOM_DEV_ID_STRATUS_NIC_VF2) }, 48 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) }, 49 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) }, 50 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) }, 51 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) }, 52 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) }, 53 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) }, 54 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) }, 55 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) }, 56 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) }, 57 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) }, 58 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) }, 59 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) }, 60 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) }, 61 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) }, 62 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) }, 63 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) }, 64 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) }, 65 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) }, 66 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) }, 67 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) }, 68 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) }, 69 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) }, 70 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) }, 71 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) }, 72 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508) }, 73 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504) }, 74 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502) }, 75 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF1) }, 76 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF2) }, 77 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF1) }, 78 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF1) }, 79 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 
BROADCOM_DEV_ID_57502_MF1) }, 80 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF2) }, 81 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF2) }, 82 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF2) }, 83 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58812) }, 84 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58814) }, 85 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58818) }, 86 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58818_VF) }, 87 { .vendor_id = 0, /* sentinel */ }, 88 }; 89 90 #define BNXT_DEVARG_FLOW_XSTAT "flow-xstat" 91 #define BNXT_DEVARG_MAX_NUM_KFLOWS "max-num-kflows" 92 #define BNXT_DEVARG_REPRESENTOR "representor" 93 #define BNXT_DEVARG_REP_BASED_PF "rep-based-pf" 94 #define BNXT_DEVARG_REP_IS_PF "rep-is-pf" 95 #define BNXT_DEVARG_REP_Q_R2F "rep-q-r2f" 96 #define BNXT_DEVARG_REP_Q_F2R "rep-q-f2r" 97 #define BNXT_DEVARG_REP_FC_R2F "rep-fc-r2f" 98 #define BNXT_DEVARG_REP_FC_F2R "rep-fc-f2r" 99 #define BNXT_DEVARG_APP_ID "app-id" 100 101 static const char *const bnxt_dev_args[] = { 102 BNXT_DEVARG_REPRESENTOR, 103 BNXT_DEVARG_FLOW_XSTAT, 104 BNXT_DEVARG_MAX_NUM_KFLOWS, 105 BNXT_DEVARG_REP_BASED_PF, 106 BNXT_DEVARG_REP_IS_PF, 107 BNXT_DEVARG_REP_Q_R2F, 108 BNXT_DEVARG_REP_Q_F2R, 109 BNXT_DEVARG_REP_FC_R2F, 110 BNXT_DEVARG_REP_FC_F2R, 111 BNXT_DEVARG_APP_ID, 112 NULL 113 }; 114 115 /* 116 * app-id = an non-negative 8-bit number 117 */ 118 #define BNXT_DEVARG_APP_ID_INVALID(val) ((val) > 255) 119 120 /* 121 * flow_xstat == false to disable the feature 122 * flow_xstat == true to enable the feature 123 */ 124 #define BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat) ((flow_xstat) > 1) 125 126 /* 127 * rep_is_pf == false to indicate VF representor 128 * rep_is_pf == true to indicate PF representor 129 */ 130 #define BNXT_DEVARG_REP_IS_PF_INVALID(rep_is_pf) ((rep_is_pf) > 1) 131 132 /* 133 * rep_based_pf == Physical index of the PF 134 */ 135 #define BNXT_DEVARG_REP_BASED_PF_INVALID(rep_based_pf) ((rep_based_pf) > 15) 136 /* 137 * rep_q_r2f == Logical COS Queue index for the rep to endpoint direction 138 */ 139 #define BNXT_DEVARG_REP_Q_R2F_INVALID(rep_q_r2f) ((rep_q_r2f) > 3) 140 141 /* 142 * rep_q_f2r == Logical COS Queue index for the endpoint to rep direction 143 */ 144 #define BNXT_DEVARG_REP_Q_F2R_INVALID(rep_q_f2r) ((rep_q_f2r) > 3) 145 146 /* 147 * rep_fc_r2f == Flow control for the representor to endpoint direction 148 */ 149 #define BNXT_DEVARG_REP_FC_R2F_INVALID(rep_fc_r2f) ((rep_fc_r2f) > 1) 150 151 /* 152 * rep_fc_f2r == Flow control for the endpoint to representor direction 153 */ 154 #define BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r) ((rep_fc_f2r) > 1) 155 156 int bnxt_cfa_code_dynfield_offset = -1; 157 158 /* 159 * max_num_kflows must be >= 32 160 * and must be a power-of-2 supported value 161 * return: 1 -> invalid 162 * 0 -> valid 163 */ 164 static int bnxt_devarg_max_num_kflow_invalid(uint16_t max_num_kflows) 165 { 166 if (max_num_kflows < 32 || !rte_is_power_of_2(max_num_kflows)) 167 return 1; 168 return 0; 169 } 170 171 static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask); 172 static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev); 173 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev); 174 static int bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev); 175 static void bnxt_cancel_fw_health_check(struct bnxt *bp); 176 static int bnxt_restore_vlan_filters(struct bnxt *bp); 177 static void bnxt_dev_recover(void *arg); 178 static void 
bnxt_free_error_recovery_info(struct bnxt *bp); 179 static void bnxt_free_rep_info(struct bnxt *bp); 180 181 int is_bnxt_in_error(struct bnxt *bp) 182 { 183 if (bp->flags & BNXT_FLAG_FATAL_ERROR) 184 return -EIO; 185 if (bp->flags & BNXT_FLAG_FW_RESET) 186 return -EBUSY; 187 188 return 0; 189 } 190 191 /***********************/ 192 193 /* 194 * High level utility functions 195 */ 196 197 static uint16_t bnxt_rss_ctxts(const struct bnxt *bp) 198 { 199 unsigned int num_rss_rings = RTE_MIN(bp->rx_nr_rings, 200 BNXT_RSS_TBL_SIZE_P5); 201 202 if (!BNXT_CHIP_P5(bp)) 203 return 1; 204 205 return RTE_ALIGN_MUL_CEIL(num_rss_rings, 206 BNXT_RSS_ENTRIES_PER_CTX_P5) / 207 BNXT_RSS_ENTRIES_PER_CTX_P5; 208 } 209 210 uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp) 211 { 212 if (!BNXT_CHIP_P5(bp)) 213 return HW_HASH_INDEX_SIZE; 214 215 return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_P5; 216 } 217 218 static void bnxt_free_parent_info(struct bnxt *bp) 219 { 220 rte_free(bp->parent); 221 bp->parent = NULL; 222 } 223 224 static void bnxt_free_pf_info(struct bnxt *bp) 225 { 226 rte_free(bp->pf); 227 bp->pf = NULL; 228 } 229 230 static void bnxt_free_link_info(struct bnxt *bp) 231 { 232 rte_free(bp->link_info); 233 bp->link_info = NULL; 234 } 235 236 static void bnxt_free_leds_info(struct bnxt *bp) 237 { 238 if (BNXT_VF(bp)) 239 return; 240 241 rte_free(bp->leds); 242 bp->leds = NULL; 243 } 244 245 static void bnxt_free_flow_stats_info(struct bnxt *bp) 246 { 247 rte_free(bp->flow_stat); 248 bp->flow_stat = NULL; 249 } 250 251 static void bnxt_free_cos_queues(struct bnxt *bp) 252 { 253 rte_free(bp->rx_cos_queue); 254 bp->rx_cos_queue = NULL; 255 rte_free(bp->tx_cos_queue); 256 bp->tx_cos_queue = NULL; 257 } 258 259 static void bnxt_free_mem(struct bnxt *bp, bool reconfig) 260 { 261 bnxt_free_filter_mem(bp); 262 bnxt_free_vnic_attributes(bp); 263 bnxt_free_vnic_mem(bp); 264 265 /* tx/rx rings are configured as part of *_queue_setup callbacks. 266 * If the number of rings change across fw update, 267 * we don't have much choice except to warn the user. 
268 */ 269 if (!reconfig) { 270 bnxt_free_stats(bp); 271 bnxt_free_tx_rings(bp); 272 bnxt_free_rx_rings(bp); 273 } 274 bnxt_free_async_cp_ring(bp); 275 bnxt_free_rxtx_nq_ring(bp); 276 277 rte_free(bp->grp_info); 278 bp->grp_info = NULL; 279 } 280 281 static int bnxt_alloc_parent_info(struct bnxt *bp) 282 { 283 bp->parent = rte_zmalloc("bnxt_parent_info", 284 sizeof(struct bnxt_parent_info), 0); 285 if (bp->parent == NULL) 286 return -ENOMEM; 287 288 return 0; 289 } 290 291 static int bnxt_alloc_pf_info(struct bnxt *bp) 292 { 293 bp->pf = rte_zmalloc("bnxt_pf_info", sizeof(struct bnxt_pf_info), 0); 294 if (bp->pf == NULL) 295 return -ENOMEM; 296 297 return 0; 298 } 299 300 static int bnxt_alloc_link_info(struct bnxt *bp) 301 { 302 bp->link_info = 303 rte_zmalloc("bnxt_link_info", sizeof(struct bnxt_link_info), 0); 304 if (bp->link_info == NULL) 305 return -ENOMEM; 306 307 return 0; 308 } 309 310 static int bnxt_alloc_leds_info(struct bnxt *bp) 311 { 312 if (BNXT_VF(bp)) 313 return 0; 314 315 bp->leds = rte_zmalloc("bnxt_leds", 316 BNXT_MAX_LED * sizeof(struct bnxt_led_info), 317 0); 318 if (bp->leds == NULL) 319 return -ENOMEM; 320 321 return 0; 322 } 323 324 static int bnxt_alloc_cos_queues(struct bnxt *bp) 325 { 326 bp->rx_cos_queue = 327 rte_zmalloc("bnxt_rx_cosq", 328 BNXT_COS_QUEUE_COUNT * 329 sizeof(struct bnxt_cos_queue_info), 330 0); 331 if (bp->rx_cos_queue == NULL) 332 return -ENOMEM; 333 334 bp->tx_cos_queue = 335 rte_zmalloc("bnxt_tx_cosq", 336 BNXT_COS_QUEUE_COUNT * 337 sizeof(struct bnxt_cos_queue_info), 338 0); 339 if (bp->tx_cos_queue == NULL) 340 return -ENOMEM; 341 342 return 0; 343 } 344 345 static int bnxt_alloc_flow_stats_info(struct bnxt *bp) 346 { 347 bp->flow_stat = rte_zmalloc("bnxt_flow_xstat", 348 sizeof(struct bnxt_flow_stat_info), 0); 349 if (bp->flow_stat == NULL) 350 return -ENOMEM; 351 352 return 0; 353 } 354 355 static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig) 356 { 357 int rc; 358 359 rc = bnxt_alloc_ring_grps(bp); 360 if (rc) 361 goto alloc_mem_err; 362 363 rc = bnxt_alloc_async_ring_struct(bp); 364 if (rc) 365 goto alloc_mem_err; 366 367 rc = bnxt_alloc_vnic_mem(bp); 368 if (rc) 369 goto alloc_mem_err; 370 371 rc = bnxt_alloc_vnic_attributes(bp); 372 if (rc) 373 goto alloc_mem_err; 374 375 rc = bnxt_alloc_filter_mem(bp); 376 if (rc) 377 goto alloc_mem_err; 378 379 rc = bnxt_alloc_async_cp_ring(bp); 380 if (rc) 381 goto alloc_mem_err; 382 383 rc = bnxt_alloc_rxtx_nq_ring(bp); 384 if (rc) 385 goto alloc_mem_err; 386 387 if (BNXT_FLOW_XSTATS_EN(bp)) { 388 rc = bnxt_alloc_flow_stats_info(bp); 389 if (rc) 390 goto alloc_mem_err; 391 } 392 393 return 0; 394 395 alloc_mem_err: 396 bnxt_free_mem(bp, reconfig); 397 return rc; 398 } 399 400 static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id) 401 { 402 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 403 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 404 uint64_t rx_offloads = dev_conf->rxmode.offloads; 405 struct bnxt_rx_queue *rxq; 406 unsigned int j; 407 int rc; 408 409 rc = bnxt_vnic_grp_alloc(bp, vnic); 410 if (rc) 411 goto err_out; 412 413 PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n", 414 vnic_id, vnic, vnic->fw_grp_ids); 415 416 rc = bnxt_hwrm_vnic_alloc(bp, vnic); 417 if (rc) 418 goto err_out; 419 420 /* Alloc RSS context only if RSS mode is enabled */ 421 if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) { 422 int j, nr_ctxs = bnxt_rss_ctxts(bp); 423 424 /* RSS table size in Thor is 512. 
425 * Cap max Rx rings to same value 426 */ 427 if (bp->rx_nr_rings > BNXT_RSS_TBL_SIZE_P5) { 428 PMD_DRV_LOG(ERR, "RxQ cnt %d > reta_size %d\n", 429 bp->rx_nr_rings, BNXT_RSS_TBL_SIZE_P5); 430 goto err_out; 431 } 432 433 rc = 0; 434 for (j = 0; j < nr_ctxs; j++) { 435 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j); 436 if (rc) 437 break; 438 } 439 if (rc) { 440 PMD_DRV_LOG(ERR, 441 "HWRM vnic %d ctx %d alloc failure rc: %x\n", 442 vnic_id, j, rc); 443 goto err_out; 444 } 445 vnic->num_lb_ctxts = nr_ctxs; 446 } 447 448 /* 449 * Firmware sets pf pair in default vnic cfg. If the VLAN strip 450 * setting is not available at this time, it will not be 451 * configured correctly in the CFA. 452 */ 453 if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 454 vnic->vlan_strip = true; 455 else 456 vnic->vlan_strip = false; 457 458 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 459 if (rc) 460 goto err_out; 461 462 rc = bnxt_set_hwrm_vnic_filters(bp, vnic); 463 if (rc) 464 goto err_out; 465 466 for (j = 0; j < bp->rx_num_qs_per_vnic; j++) { 467 rxq = bp->eth_dev->data->rx_queues[j]; 468 469 PMD_DRV_LOG(DEBUG, 470 "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n", 471 j, rxq->vnic, rxq->vnic->fw_grp_ids); 472 473 if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start) 474 rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID; 475 else 476 vnic->rx_queue_cnt++; 477 } 478 479 PMD_DRV_LOG(DEBUG, "vnic->rx_queue_cnt = %d\n", vnic->rx_queue_cnt); 480 481 rc = bnxt_vnic_rss_configure(bp, vnic); 482 if (rc) 483 goto err_out; 484 485 bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); 486 487 rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 488 (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) ? 489 true : false); 490 if (rc) 491 goto err_out; 492 493 return 0; 494 err_out: 495 PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n", 496 vnic_id, rc); 497 return rc; 498 } 499 500 static int bnxt_register_fc_ctx_mem(struct bnxt *bp) 501 { 502 int rc = 0; 503 504 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_in_tbl.dma, 505 &bp->flow_stat->rx_fc_in_tbl.ctx_id); 506 if (rc) 507 return rc; 508 509 PMD_DRV_LOG(DEBUG, 510 "rx_fc_in_tbl.va = %p rx_fc_in_tbl.dma = %p" 511 " rx_fc_in_tbl.ctx_id = %d\n", 512 bp->flow_stat->rx_fc_in_tbl.va, 513 (void *)((uintptr_t)bp->flow_stat->rx_fc_in_tbl.dma), 514 bp->flow_stat->rx_fc_in_tbl.ctx_id); 515 516 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_out_tbl.dma, 517 &bp->flow_stat->rx_fc_out_tbl.ctx_id); 518 if (rc) 519 return rc; 520 521 PMD_DRV_LOG(DEBUG, 522 "rx_fc_out_tbl.va = %p rx_fc_out_tbl.dma = %p" 523 " rx_fc_out_tbl.ctx_id = %d\n", 524 bp->flow_stat->rx_fc_out_tbl.va, 525 (void *)((uintptr_t)bp->flow_stat->rx_fc_out_tbl.dma), 526 bp->flow_stat->rx_fc_out_tbl.ctx_id); 527 528 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_in_tbl.dma, 529 &bp->flow_stat->tx_fc_in_tbl.ctx_id); 530 if (rc) 531 return rc; 532 533 PMD_DRV_LOG(DEBUG, 534 "tx_fc_in_tbl.va = %p tx_fc_in_tbl.dma = %p" 535 " tx_fc_in_tbl.ctx_id = %d\n", 536 bp->flow_stat->tx_fc_in_tbl.va, 537 (void *)((uintptr_t)bp->flow_stat->tx_fc_in_tbl.dma), 538 bp->flow_stat->tx_fc_in_tbl.ctx_id); 539 540 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_out_tbl.dma, 541 &bp->flow_stat->tx_fc_out_tbl.ctx_id); 542 if (rc) 543 return rc; 544 545 PMD_DRV_LOG(DEBUG, 546 "tx_fc_out_tbl.va = %p tx_fc_out_tbl.dma = %p" 547 " tx_fc_out_tbl.ctx_id = %d\n", 548 bp->flow_stat->tx_fc_out_tbl.va, 549 (void *)((uintptr_t)bp->flow_stat->tx_fc_out_tbl.dma), 550 bp->flow_stat->tx_fc_out_tbl.ctx_id); 551 552 memset(bp->flow_stat->rx_fc_out_tbl.va, 553 0, 554 bp->flow_stat->rx_fc_out_tbl.size); 555 rc = 
bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX, 556 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 557 bp->flow_stat->rx_fc_out_tbl.ctx_id, 558 bp->flow_stat->max_fc, 559 true); 560 if (rc) 561 return rc; 562 563 memset(bp->flow_stat->tx_fc_out_tbl.va, 564 0, 565 bp->flow_stat->tx_fc_out_tbl.size); 566 rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX, 567 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 568 bp->flow_stat->tx_fc_out_tbl.ctx_id, 569 bp->flow_stat->max_fc, 570 true); 571 572 return rc; 573 } 574 575 static int bnxt_alloc_ctx_mem_buf(struct bnxt *bp, char *type, size_t size, 576 struct bnxt_ctx_mem_buf_info *ctx) 577 { 578 if (!ctx) 579 return -EINVAL; 580 581 ctx->va = rte_zmalloc_socket(type, size, 0, 582 bp->eth_dev->device->numa_node); 583 if (ctx->va == NULL) 584 return -ENOMEM; 585 rte_mem_lock_page(ctx->va); 586 ctx->size = size; 587 ctx->dma = rte_mem_virt2iova(ctx->va); 588 if (ctx->dma == RTE_BAD_IOVA) 589 return -ENOMEM; 590 591 return 0; 592 } 593 594 static int bnxt_init_fc_ctx_mem(struct bnxt *bp) 595 { 596 struct rte_pci_device *pdev = bp->pdev; 597 char type[RTE_MEMZONE_NAMESIZE]; 598 uint16_t max_fc; 599 int rc = 0; 600 601 max_fc = bp->flow_stat->max_fc; 602 603 sprintf(type, "bnxt_rx_fc_in_" PCI_PRI_FMT, pdev->addr.domain, 604 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 605 /* 4 bytes for each counter-id */ 606 rc = bnxt_alloc_ctx_mem_buf(bp, type, 607 max_fc * 4, 608 &bp->flow_stat->rx_fc_in_tbl); 609 if (rc) 610 return rc; 611 612 sprintf(type, "bnxt_rx_fc_out_" PCI_PRI_FMT, pdev->addr.domain, 613 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 614 /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */ 615 rc = bnxt_alloc_ctx_mem_buf(bp, type, 616 max_fc * 16, 617 &bp->flow_stat->rx_fc_out_tbl); 618 if (rc) 619 return rc; 620 621 sprintf(type, "bnxt_tx_fc_in_" PCI_PRI_FMT, pdev->addr.domain, 622 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 623 /* 4 bytes for each counter-id */ 624 rc = bnxt_alloc_ctx_mem_buf(bp, type, 625 max_fc * 4, 626 &bp->flow_stat->tx_fc_in_tbl); 627 if (rc) 628 return rc; 629 630 sprintf(type, "bnxt_tx_fc_out_" PCI_PRI_FMT, pdev->addr.domain, 631 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 632 /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */ 633 rc = bnxt_alloc_ctx_mem_buf(bp, type, 634 max_fc * 16, 635 &bp->flow_stat->tx_fc_out_tbl); 636 if (rc) 637 return rc; 638 639 rc = bnxt_register_fc_ctx_mem(bp); 640 641 return rc; 642 } 643 644 static int bnxt_init_ctx_mem(struct bnxt *bp) 645 { 646 int rc = 0; 647 648 if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS) || 649 !(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) || 650 !BNXT_FLOW_XSTATS_EN(bp)) 651 return 0; 652 653 rc = bnxt_hwrm_cfa_counter_qcaps(bp, &bp->flow_stat->max_fc); 654 if (rc) 655 return rc; 656 657 rc = bnxt_init_fc_ctx_mem(bp); 658 659 return rc; 660 } 661 662 static int bnxt_update_phy_setting(struct bnxt *bp) 663 { 664 struct rte_eth_link new; 665 int rc; 666 667 rc = bnxt_get_hwrm_link_config(bp, &new); 668 if (rc) { 669 PMD_DRV_LOG(ERR, "Failed to get link settings\n"); 670 return rc; 671 } 672 673 /* 674 * On BCM957508-N2100 adapters, FW will not allow any user other 675 * than BMC to shutdown the port. bnxt_get_hwrm_link_config() call 676 * always returns link up. Force phy update always in that case. 
677 */ 678 if (!new.link_status || IS_BNXT_DEV_957508_N2100(bp)) { 679 rc = bnxt_set_hwrm_link_config(bp, true); 680 if (rc) { 681 PMD_DRV_LOG(ERR, "Failed to update PHY settings\n"); 682 return rc; 683 } 684 } 685 686 return rc; 687 } 688 689 static void bnxt_free_prev_ring_stats(struct bnxt *bp) 690 { 691 rte_free(bp->prev_rx_ring_stats); 692 rte_free(bp->prev_tx_ring_stats); 693 694 bp->prev_rx_ring_stats = NULL; 695 bp->prev_tx_ring_stats = NULL; 696 } 697 698 static int bnxt_alloc_prev_ring_stats(struct bnxt *bp) 699 { 700 bp->prev_rx_ring_stats = rte_zmalloc("bnxt_prev_rx_ring_stats", 701 sizeof(struct bnxt_ring_stats) * 702 bp->rx_cp_nr_rings, 703 0); 704 if (bp->prev_rx_ring_stats == NULL) 705 return -ENOMEM; 706 707 bp->prev_tx_ring_stats = rte_zmalloc("bnxt_prev_tx_ring_stats", 708 sizeof(struct bnxt_ring_stats) * 709 bp->tx_cp_nr_rings, 710 0); 711 if (bp->prev_tx_ring_stats == NULL) 712 goto error; 713 714 return 0; 715 716 error: 717 bnxt_free_prev_ring_stats(bp); 718 return -ENOMEM; 719 } 720 721 static int bnxt_start_nic(struct bnxt *bp) 722 { 723 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev); 724 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 725 uint32_t intr_vector = 0; 726 uint32_t queue_id, base = BNXT_MISC_VEC_ID; 727 uint32_t vec = BNXT_MISC_VEC_ID; 728 unsigned int i, j; 729 int rc; 730 731 if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) 732 bp->flags |= BNXT_FLAG_JUMBO; 733 else 734 bp->flags &= ~BNXT_FLAG_JUMBO; 735 736 /* THOR does not support ring groups. 737 * But we will use the array to save RSS context IDs. 738 */ 739 if (BNXT_CHIP_P5(bp)) 740 bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_P5; 741 742 rc = bnxt_alloc_hwrm_rings(bp); 743 if (rc) { 744 PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc); 745 goto err_out; 746 } 747 748 rc = bnxt_alloc_all_hwrm_ring_grps(bp); 749 if (rc) { 750 PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc); 751 goto err_out; 752 } 753 754 if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY)) 755 goto skip_cosq_cfg; 756 757 for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) { 758 if (bp->rx_cos_queue[i].id != 0xff) { 759 struct bnxt_vnic_info *vnic = &bp->vnic_info[j++]; 760 761 if (!vnic) { 762 PMD_DRV_LOG(ERR, 763 "Num pools more than FW profile\n"); 764 rc = -EINVAL; 765 goto err_out; 766 } 767 vnic->cos_queue_id = bp->rx_cos_queue[i].id; 768 bp->rx_cosq_cnt++; 769 } 770 } 771 772 skip_cosq_cfg: 773 rc = bnxt_mq_rx_configure(bp); 774 if (rc) { 775 PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc); 776 goto err_out; 777 } 778 779 for (j = 0; j < bp->rx_nr_rings; j++) { 780 struct bnxt_rx_queue *rxq = bp->rx_queues[j]; 781 782 if (!rxq->rx_deferred_start) { 783 bp->eth_dev->data->rx_queue_state[j] = 784 RTE_ETH_QUEUE_STATE_STARTED; 785 rxq->rx_started = true; 786 } 787 } 788 789 /* default vnic 0 */ 790 rc = bnxt_setup_one_vnic(bp, 0); 791 if (rc) 792 goto err_out; 793 /* VNIC configuration */ 794 if (BNXT_RFS_NEEDS_VNIC(bp)) { 795 for (i = 1; i < bp->nr_vnics; i++) { 796 rc = bnxt_setup_one_vnic(bp, i); 797 if (rc) 798 goto err_out; 799 } 800 } 801 802 for (j = 0; j < bp->tx_nr_rings; j++) { 803 struct bnxt_tx_queue *txq = bp->tx_queues[j]; 804 805 if (!txq->tx_deferred_start) { 806 bp->eth_dev->data->tx_queue_state[j] = 807 RTE_ETH_QUEUE_STATE_STARTED; 808 txq->tx_started = true; 809 } 810 } 811 812 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL); 813 if (rc) { 814 PMD_DRV_LOG(ERR, 815 "HWRM cfa l2 rx mask failure rc: %x\n", rc); 816 goto err_out; 817 } 818 819 /* 
check and configure queue intr-vector mapping */ 820 if ((rte_intr_cap_multiple(intr_handle) || 821 !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) && 822 bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) { 823 intr_vector = bp->eth_dev->data->nb_rx_queues; 824 PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector); 825 if (intr_vector > bp->rx_cp_nr_rings) { 826 PMD_DRV_LOG(ERR, "At most %d intr queues supported", 827 bp->rx_cp_nr_rings); 828 return -ENOTSUP; 829 } 830 rc = rte_intr_efd_enable(intr_handle, intr_vector); 831 if (rc) 832 return rc; 833 } 834 835 if (rte_intr_dp_is_en(intr_handle)) { 836 if (rte_intr_vec_list_alloc(intr_handle, "intr_vec", 837 bp->eth_dev->data->nb_rx_queues)) { 838 PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues" 839 " intr_vec", bp->eth_dev->data->nb_rx_queues); 840 rc = -ENOMEM; 841 goto err_out; 842 } 843 PMD_DRV_LOG(DEBUG, "intr_handle->nb_efd = %d " 844 "intr_handle->max_intr = %d\n", 845 rte_intr_nb_efd_get(intr_handle), 846 rte_intr_max_intr_get(intr_handle)); 847 for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues; 848 queue_id++) { 849 rte_intr_vec_list_index_set(intr_handle, 850 queue_id, vec + BNXT_RX_VEC_START); 851 if (vec < base + rte_intr_nb_efd_get(intr_handle) 852 - 1) 853 vec++; 854 } 855 } 856 857 /* enable uio/vfio intr/eventfd mapping */ 858 rc = rte_intr_enable(intr_handle); 859 #ifndef RTE_EXEC_ENV_FREEBSD 860 /* In FreeBSD OS, nic_uio driver does not support interrupts */ 861 if (rc) 862 goto err_out; 863 #endif 864 865 rc = bnxt_update_phy_setting(bp); 866 if (rc) 867 goto err_out; 868 869 bp->mark_table = rte_zmalloc("bnxt_mark_table", BNXT_MARK_TABLE_SZ, 0); 870 if (!bp->mark_table) 871 PMD_DRV_LOG(ERR, "Allocation of mark table failed\n"); 872 873 return 0; 874 875 err_out: 876 /* Some of the error status returned by FW may not be from errno.h */ 877 if (rc > 0) 878 rc = -EIO; 879 880 return rc; 881 } 882 883 static int bnxt_shutdown_nic(struct bnxt *bp) 884 { 885 bnxt_free_all_hwrm_resources(bp); 886 bnxt_free_all_filters(bp); 887 bnxt_free_all_vnics(bp); 888 return 0; 889 } 890 891 /* 892 * Device configuration and status function 893 */ 894 895 uint32_t bnxt_get_speed_capabilities(struct bnxt *bp) 896 { 897 uint32_t link_speed = 0; 898 uint32_t speed_capa = 0; 899 900 if (bp->link_info == NULL) 901 return 0; 902 903 link_speed = bp->link_info->support_speeds; 904 905 /* If PAM4 is configured, use PAM4 supported speed */ 906 if (link_speed == 0 && bp->link_info->support_pam4_speeds > 0) 907 link_speed = bp->link_info->support_pam4_speeds; 908 909 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB) 910 speed_capa |= RTE_ETH_LINK_SPEED_100M; 911 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD) 912 speed_capa |= RTE_ETH_LINK_SPEED_100M_HD; 913 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB) 914 speed_capa |= RTE_ETH_LINK_SPEED_1G; 915 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB) 916 speed_capa |= RTE_ETH_LINK_SPEED_2_5G; 917 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB) 918 speed_capa |= RTE_ETH_LINK_SPEED_10G; 919 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB) 920 speed_capa |= RTE_ETH_LINK_SPEED_20G; 921 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB) 922 speed_capa |= RTE_ETH_LINK_SPEED_25G; 923 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB) 924 speed_capa |= RTE_ETH_LINK_SPEED_40G; 925 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB) 926 speed_capa |= RTE_ETH_LINK_SPEED_50G; 927 if 
(link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB) 928 speed_capa |= RTE_ETH_LINK_SPEED_100G; 929 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G) 930 speed_capa |= RTE_ETH_LINK_SPEED_50G; 931 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G) 932 speed_capa |= RTE_ETH_LINK_SPEED_100G; 933 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G) 934 speed_capa |= RTE_ETH_LINK_SPEED_200G; 935 936 if (bp->link_info->auto_mode == 937 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE) 938 speed_capa |= RTE_ETH_LINK_SPEED_FIXED; 939 940 return speed_capa; 941 } 942 943 static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, 944 struct rte_eth_dev_info *dev_info) 945 { 946 struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device); 947 struct bnxt *bp = eth_dev->data->dev_private; 948 uint16_t max_vnics, i, j, vpool, vrxq; 949 unsigned int max_rx_rings; 950 int rc; 951 952 rc = is_bnxt_in_error(bp); 953 if (rc) 954 return rc; 955 956 /* MAC Specifics */ 957 dev_info->max_mac_addrs = bp->max_l2_ctx; 958 dev_info->max_hash_mac_addrs = 0; 959 960 /* PF/VF specifics */ 961 if (BNXT_PF(bp)) 962 dev_info->max_vfs = pdev->max_vfs; 963 964 max_rx_rings = bnxt_max_rings(bp); 965 /* For the sake of symmetry, max_rx_queues = max_tx_queues */ 966 dev_info->max_rx_queues = max_rx_rings; 967 dev_info->max_tx_queues = max_rx_rings; 968 dev_info->reta_size = bnxt_rss_hash_tbl_size(bp); 969 dev_info->hash_key_size = HW_HASH_KEY_SIZE; 970 max_vnics = bp->max_vnics; 971 972 /* MTU specifics */ 973 dev_info->min_mtu = RTE_ETHER_MIN_MTU; 974 dev_info->max_mtu = BNXT_MAX_MTU; 975 976 /* Fast path specifics */ 977 dev_info->min_rx_bufsize = 1; 978 dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN; 979 980 dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT; 981 if (bp->flags & BNXT_FLAG_PTP_SUPPORTED) 982 dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP; 983 if (bp->vnic_cap_flags & BNXT_VNIC_CAP_VLAN_RX_STRIP) 984 dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 985 dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; 986 dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT | 987 dev_info->tx_queue_offload_capa; 988 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT) 989 dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT; 990 dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT; 991 992 dev_info->speed_capa = bnxt_get_speed_capabilities(bp); 993 dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | 994 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; 995 dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP; 996 997 dev_info->default_rxconf = (struct rte_eth_rxconf) { 998 .rx_thresh = { 999 .pthresh = 8, 1000 .hthresh = 8, 1001 .wthresh = 0, 1002 }, 1003 .rx_free_thresh = 32, 1004 .rx_drop_en = BNXT_DEFAULT_RX_DROP_EN, 1005 }; 1006 1007 dev_info->default_txconf = (struct rte_eth_txconf) { 1008 .tx_thresh = { 1009 .pthresh = 32, 1010 .hthresh = 0, 1011 .wthresh = 0, 1012 }, 1013 .tx_free_thresh = 32, 1014 .tx_rs_thresh = 32, 1015 }; 1016 eth_dev->data->dev_conf.intr_conf.lsc = 1; 1017 1018 dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC; 1019 dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC; 1020 dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC; 1021 dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC; 1022 1023 if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) { 1024 dev_info->switch_info.name = eth_dev->device->name; 1025 dev_info->switch_info.domain_id = bp->switch_domain_id; 1026 dev_info->switch_info.port_id = 
1027 BNXT_PF(bp) ? BNXT_SWITCH_PORT_ID_PF : 1028 BNXT_SWITCH_PORT_ID_TRUSTED_VF; 1029 } 1030 1031 /* 1032 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim 1033 * need further investigation. 1034 */ 1035 1036 /* VMDq resources */ 1037 vpool = 64; /* RTE_ETH_64_POOLS */ 1038 vrxq = 128; /* RTE_ETH_VMDQ_DCB_NUM_QUEUES */ 1039 for (i = 0; i < 4; vpool >>= 1, i++) { 1040 if (max_vnics > vpool) { 1041 for (j = 0; j < 5; vrxq >>= 1, j++) { 1042 if (dev_info->max_rx_queues > vrxq) { 1043 if (vpool > vrxq) 1044 vpool = vrxq; 1045 goto found; 1046 } 1047 } 1048 /* Not enough resources to support VMDq */ 1049 break; 1050 } 1051 } 1052 /* Not enough resources to support VMDq */ 1053 vpool = 0; 1054 vrxq = 0; 1055 found: 1056 dev_info->max_vmdq_pools = vpool; 1057 dev_info->vmdq_queue_num = vrxq; 1058 1059 dev_info->vmdq_pool_base = 0; 1060 dev_info->vmdq_queue_base = 0; 1061 1062 return 0; 1063 } 1064 1065 /* Configure the device based on the configuration provided */ 1066 static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev) 1067 { 1068 struct bnxt *bp = eth_dev->data->dev_private; 1069 uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; 1070 int rc; 1071 1072 bp->rx_queues = (void *)eth_dev->data->rx_queues; 1073 bp->tx_queues = (void *)eth_dev->data->tx_queues; 1074 bp->tx_nr_rings = eth_dev->data->nb_tx_queues; 1075 bp->rx_nr_rings = eth_dev->data->nb_rx_queues; 1076 1077 rc = is_bnxt_in_error(bp); 1078 if (rc) 1079 return rc; 1080 1081 if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) { 1082 rc = bnxt_hwrm_check_vf_rings(bp); 1083 if (rc) { 1084 PMD_DRV_LOG(ERR, "HWRM insufficient resources\n"); 1085 return -ENOSPC; 1086 } 1087 1088 /* If a resource has already been allocated - in this case 1089 * it is the async completion ring, free it. Reallocate it after 1090 * resource reservation. This will ensure the resource counts 1091 * are calculated correctly. 
1092 */ 1093 1094 pthread_mutex_lock(&bp->def_cp_lock); 1095 1096 if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) { 1097 bnxt_disable_int(bp); 1098 bnxt_free_cp_ring(bp, bp->async_cp_ring); 1099 } 1100 1101 rc = bnxt_hwrm_func_reserve_vf_resc(bp, false); 1102 if (rc) { 1103 PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc); 1104 pthread_mutex_unlock(&bp->def_cp_lock); 1105 return -ENOSPC; 1106 } 1107 1108 if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) { 1109 rc = bnxt_alloc_async_cp_ring(bp); 1110 if (rc) { 1111 pthread_mutex_unlock(&bp->def_cp_lock); 1112 return rc; 1113 } 1114 bnxt_enable_int(bp); 1115 } 1116 1117 pthread_mutex_unlock(&bp->def_cp_lock); 1118 } 1119 1120 /* Inherit new configurations */ 1121 if (eth_dev->data->nb_rx_queues > bp->max_rx_rings || 1122 eth_dev->data->nb_tx_queues > bp->max_tx_rings || 1123 eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues 1124 + BNXT_NUM_ASYNC_CPR(bp) > bp->max_cp_rings || 1125 eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues > 1126 bp->max_stat_ctx) 1127 goto resource_error; 1128 1129 if (BNXT_HAS_RING_GRPS(bp) && 1130 (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps) 1131 goto resource_error; 1132 1133 if (!(eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) && 1134 bp->max_vnics < eth_dev->data->nb_rx_queues) 1135 goto resource_error; 1136 1137 bp->rx_cp_nr_rings = bp->rx_nr_rings; 1138 bp->tx_cp_nr_rings = bp->tx_nr_rings; 1139 1140 if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) 1141 rx_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; 1142 eth_dev->data->dev_conf.rxmode.offloads = rx_offloads; 1143 1144 bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu); 1145 1146 return 0; 1147 1148 resource_error: 1149 PMD_DRV_LOG(ERR, 1150 "Insufficient resources to support requested config\n"); 1151 PMD_DRV_LOG(ERR, 1152 "Num Queues Requested: Tx %d, Rx %d\n", 1153 eth_dev->data->nb_tx_queues, 1154 eth_dev->data->nb_rx_queues); 1155 PMD_DRV_LOG(ERR, 1156 "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n", 1157 bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings, 1158 bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics); 1159 return -ENOSPC; 1160 } 1161 1162 void bnxt_print_link_info(struct rte_eth_dev *eth_dev) 1163 { 1164 struct rte_eth_link *link = ð_dev->data->dev_link; 1165 1166 if (link->link_status) 1167 PMD_DRV_LOG(DEBUG, "Port %d Link Up - speed %u Mbps - %s\n", 1168 eth_dev->data->port_id, 1169 (uint32_t)link->link_speed, 1170 (link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 1171 ("full-duplex") : ("half-duplex\n")); 1172 else 1173 PMD_DRV_LOG(INFO, "Port %d Link Down\n", 1174 eth_dev->data->port_id); 1175 } 1176 1177 /* 1178 * Determine whether the current configuration requires support for scattered 1179 * receive; return 1 if scattered receive is required and 0 if not. 
1180 */ 1181 static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev) 1182 { 1183 uint32_t overhead = BNXT_MAX_PKT_LEN - BNXT_MAX_MTU; 1184 uint16_t buf_size; 1185 int i; 1186 1187 if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) 1188 return 1; 1189 1190 if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) 1191 return 1; 1192 1193 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { 1194 struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i]; 1195 1196 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) - 1197 RTE_PKTMBUF_HEADROOM); 1198 if (eth_dev->data->mtu + overhead > buf_size) 1199 return 1; 1200 } 1201 return 0; 1202 } 1203 1204 static eth_rx_burst_t 1205 bnxt_receive_function(struct rte_eth_dev *eth_dev) 1206 { 1207 struct bnxt *bp = eth_dev->data->dev_private; 1208 1209 /* Disable vector mode RX for Stingray2 for now */ 1210 if (BNXT_CHIP_SR2(bp)) { 1211 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 1212 return bnxt_recv_pkts; 1213 } 1214 1215 #if (defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)) && \ 1216 !defined(RTE_LIBRTE_IEEE1588) 1217 1218 /* Vector mode receive cannot be enabled if scattered rx is in use. */ 1219 if (eth_dev->data->scattered_rx) 1220 goto use_scalar_rx; 1221 1222 /* 1223 * Vector mode receive cannot be enabled if Truflow is enabled or if 1224 * asynchronous completions and receive completions can be placed in 1225 * the same completion ring. 1226 */ 1227 if (BNXT_TRUFLOW_EN(bp) || !BNXT_NUM_ASYNC_CPR(bp)) 1228 goto use_scalar_rx; 1229 1230 /* 1231 * Vector mode receive cannot be enabled if any receive offloads outside 1232 * a limited subset have been enabled. 1233 */ 1234 if (eth_dev->data->dev_conf.rxmode.offloads & 1235 ~(RTE_ETH_RX_OFFLOAD_VLAN_STRIP | 1236 RTE_ETH_RX_OFFLOAD_KEEP_CRC | 1237 RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | 1238 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | 1239 RTE_ETH_RX_OFFLOAD_TCP_CKSUM | 1240 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | 1241 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | 1242 RTE_ETH_RX_OFFLOAD_RSS_HASH | 1243 RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) 1244 goto use_scalar_rx; 1245 1246 #if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT) 1247 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 && 1248 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1) { 1249 PMD_DRV_LOG(INFO, 1250 "Using AVX2 vector mode receive for port %d\n", 1251 eth_dev->data->port_id); 1252 bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE; 1253 return bnxt_recv_pkts_vec_avx2; 1254 } 1255 #endif 1256 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) { 1257 PMD_DRV_LOG(INFO, 1258 "Using SSE vector mode receive for port %d\n", 1259 eth_dev->data->port_id); 1260 bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE; 1261 return bnxt_recv_pkts_vec; 1262 } 1263 1264 use_scalar_rx: 1265 PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n", 1266 eth_dev->data->port_id); 1267 PMD_DRV_LOG(INFO, 1268 "Port %d scatter: %d rx offload: %" PRIX64 "\n", 1269 eth_dev->data->port_id, 1270 eth_dev->data->scattered_rx, 1271 eth_dev->data->dev_conf.rxmode.offloads); 1272 #endif 1273 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 1274 return bnxt_recv_pkts; 1275 } 1276 1277 static eth_tx_burst_t 1278 bnxt_transmit_function(struct rte_eth_dev *eth_dev) 1279 { 1280 struct bnxt *bp = eth_dev->data->dev_private; 1281 1282 /* Disable vector mode TX for Stingray2 for now */ 1283 if (BNXT_CHIP_SR2(bp)) 1284 return bnxt_xmit_pkts; 1285 1286 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) && \ 1287 !defined(RTE_LIBRTE_IEEE1588) 1288 uint64_t offloads = 
eth_dev->data->dev_conf.txmode.offloads; 1289 1290 /* 1291 * Vector mode transmit can be enabled only if not using scatter rx 1292 * or tx offloads. 1293 */ 1294 if (eth_dev->data->scattered_rx || 1295 (offloads & ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) || 1296 BNXT_TRUFLOW_EN(bp)) 1297 goto use_scalar_tx; 1298 1299 #if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT) 1300 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 && 1301 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1) { 1302 PMD_DRV_LOG(INFO, 1303 "Using AVX2 vector mode transmit for port %d\n", 1304 eth_dev->data->port_id); 1305 return bnxt_xmit_pkts_vec_avx2; 1306 } 1307 #endif 1308 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) { 1309 PMD_DRV_LOG(INFO, 1310 "Using SSE vector mode transmit for port %d\n", 1311 eth_dev->data->port_id); 1312 return bnxt_xmit_pkts_vec; 1313 } 1314 1315 use_scalar_tx: 1316 PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n", 1317 eth_dev->data->port_id); 1318 PMD_DRV_LOG(INFO, 1319 "Port %d scatter: %d tx offload: %" PRIX64 "\n", 1320 eth_dev->data->port_id, 1321 eth_dev->data->scattered_rx, 1322 offloads); 1323 #endif 1324 return bnxt_xmit_pkts; 1325 } 1326 1327 static int bnxt_handle_if_change_status(struct bnxt *bp) 1328 { 1329 int rc; 1330 1331 /* Since fw has undergone a reset and lost all contexts, 1332 * set fatal flag to not issue hwrm during cleanup 1333 */ 1334 bp->flags |= BNXT_FLAG_FATAL_ERROR; 1335 bnxt_uninit_resources(bp, true); 1336 1337 /* clear fatal flag so that re-init happens */ 1338 bp->flags &= ~BNXT_FLAG_FATAL_ERROR; 1339 rc = bnxt_init_resources(bp, true); 1340 1341 bp->flags &= ~BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE; 1342 1343 return rc; 1344 } 1345 1346 static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev) 1347 { 1348 struct bnxt *bp = eth_dev->data->dev_private; 1349 int rc = 0; 1350 1351 if (!BNXT_SINGLE_PF(bp)) 1352 return -ENOTSUP; 1353 1354 if (!bp->link_info->link_up) 1355 rc = bnxt_set_hwrm_link_config(bp, true); 1356 if (!rc) 1357 eth_dev->data->dev_link.link_status = 1; 1358 1359 bnxt_print_link_info(eth_dev); 1360 return rc; 1361 } 1362 1363 static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev) 1364 { 1365 struct bnxt *bp = eth_dev->data->dev_private; 1366 1367 if (!BNXT_SINGLE_PF(bp)) 1368 return -ENOTSUP; 1369 1370 eth_dev->data->dev_link.link_status = 0; 1371 bnxt_set_hwrm_link_config(bp, false); 1372 bp->link_info->link_up = 0; 1373 1374 return 0; 1375 } 1376 1377 static void bnxt_free_switch_domain(struct bnxt *bp) 1378 { 1379 int rc = 0; 1380 1381 if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) 1382 return; 1383 1384 rc = rte_eth_switch_domain_free(bp->switch_domain_id); 1385 if (rc) 1386 PMD_DRV_LOG(ERR, "free switch domain:%d fail: %d\n", 1387 bp->switch_domain_id, rc); 1388 } 1389 1390 static void bnxt_ptp_get_current_time(void *arg) 1391 { 1392 struct bnxt *bp = arg; 1393 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 1394 int rc; 1395 1396 rc = is_bnxt_in_error(bp); 1397 if (rc) 1398 return; 1399 1400 if (!ptp) 1401 return; 1402 1403 bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, 1404 &ptp->current_time); 1405 1406 rc = rte_eal_alarm_set(US_PER_S, bnxt_ptp_get_current_time, (void *)bp); 1407 if (rc != 0) { 1408 PMD_DRV_LOG(ERR, "Failed to re-schedule PTP alarm\n"); 1409 bp->flags2 &= ~BNXT_FLAGS2_PTP_ALARM_SCHEDULED; 1410 } 1411 } 1412 1413 static int bnxt_schedule_ptp_alarm(struct bnxt *bp) 1414 { 1415 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 1416 int rc; 1417 1418 if (bp->flags2 & 
BNXT_FLAGS2_PTP_ALARM_SCHEDULED) 1419 return 0; 1420 1421 bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, 1422 &ptp->current_time); 1423 1424 rc = rte_eal_alarm_set(US_PER_S, bnxt_ptp_get_current_time, (void *)bp); 1425 return rc; 1426 } 1427 1428 static void bnxt_cancel_ptp_alarm(struct bnxt *bp) 1429 { 1430 if (bp->flags2 & BNXT_FLAGS2_PTP_ALARM_SCHEDULED) { 1431 rte_eal_alarm_cancel(bnxt_ptp_get_current_time, (void *)bp); 1432 bp->flags2 &= ~BNXT_FLAGS2_PTP_ALARM_SCHEDULED; 1433 } 1434 } 1435 1436 static void bnxt_ptp_stop(struct bnxt *bp) 1437 { 1438 bnxt_cancel_ptp_alarm(bp); 1439 bp->flags2 &= ~BNXT_FLAGS2_PTP_TIMESYNC_ENABLED; 1440 } 1441 1442 static int bnxt_ptp_start(struct bnxt *bp) 1443 { 1444 int rc; 1445 1446 rc = bnxt_schedule_ptp_alarm(bp); 1447 if (rc != 0) { 1448 PMD_DRV_LOG(ERR, "Failed to schedule PTP alarm\n"); 1449 } else { 1450 bp->flags2 |= BNXT_FLAGS2_PTP_TIMESYNC_ENABLED; 1451 bp->flags2 |= BNXT_FLAGS2_PTP_ALARM_SCHEDULED; 1452 } 1453 1454 return rc; 1455 } 1456 1457 static int bnxt_dev_stop(struct rte_eth_dev *eth_dev) 1458 { 1459 struct bnxt *bp = eth_dev->data->dev_private; 1460 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1461 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 1462 struct rte_eth_link link; 1463 int ret; 1464 1465 eth_dev->data->dev_started = 0; 1466 1467 /* Prevent crashes when queues are still in use */ 1468 eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts; 1469 eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts; 1470 1471 bnxt_disable_int(bp); 1472 1473 /* disable uio/vfio intr/eventfd mapping */ 1474 rte_intr_disable(intr_handle); 1475 1476 /* Stop the child representors for this device */ 1477 ret = bnxt_rep_stop_all(bp); 1478 if (ret != 0) 1479 return ret; 1480 1481 /* delete the bnxt ULP port details */ 1482 bnxt_ulp_port_deinit(bp); 1483 1484 bnxt_cancel_fw_health_check(bp); 1485 1486 if (BNXT_P5_PTP_TIMESYNC_ENABLED(bp)) 1487 bnxt_cancel_ptp_alarm(bp); 1488 1489 /* Do not bring link down during reset recovery */ 1490 if (!is_bnxt_in_error(bp)) { 1491 bnxt_dev_set_link_down_op(eth_dev); 1492 /* Wait for link to be reset */ 1493 if (BNXT_SINGLE_PF(bp)) 1494 rte_delay_ms(500); 1495 /* clear the recorded link status */ 1496 memset(&link, 0, sizeof(link)); 1497 rte_eth_linkstatus_set(eth_dev, &link); 1498 } 1499 1500 /* Clean queue intr-vector mapping */ 1501 rte_intr_efd_disable(intr_handle); 1502 rte_intr_vec_list_free(intr_handle); 1503 1504 bnxt_hwrm_port_clr_stats(bp); 1505 bnxt_free_tx_mbufs(bp); 1506 bnxt_free_rx_mbufs(bp); 1507 /* Process any remaining notifications in default completion queue */ 1508 bnxt_int_handler(eth_dev); 1509 bnxt_shutdown_nic(bp); 1510 bnxt_hwrm_if_change(bp, false); 1511 1512 bnxt_free_prev_ring_stats(bp); 1513 rte_free(bp->mark_table); 1514 bp->mark_table = NULL; 1515 1516 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 1517 bp->rx_cosq_cnt = 0; 1518 /* All filters are deleted on a port stop. 
*/ 1519 if (BNXT_FLOW_XSTATS_EN(bp)) 1520 bp->flow_stat->flow_count = 0; 1521 1522 eth_dev->data->scattered_rx = 0; 1523 1524 return 0; 1525 } 1526 1527 /* Unload the driver, release resources */ 1528 static int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev) 1529 { 1530 struct bnxt *bp = eth_dev->data->dev_private; 1531 1532 pthread_mutex_lock(&bp->err_recovery_lock); 1533 if (bp->flags & BNXT_FLAG_FW_RESET) { 1534 PMD_DRV_LOG(ERR, 1535 "Adapter recovering from error..Please retry\n"); 1536 pthread_mutex_unlock(&bp->err_recovery_lock); 1537 return -EAGAIN; 1538 } 1539 pthread_mutex_unlock(&bp->err_recovery_lock); 1540 1541 return bnxt_dev_stop(eth_dev); 1542 } 1543 1544 static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) 1545 { 1546 struct bnxt *bp = eth_dev->data->dev_private; 1547 uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; 1548 int vlan_mask = 0; 1549 int rc, retry_cnt = BNXT_IF_CHANGE_RETRY_COUNT; 1550 1551 if (!eth_dev->data->nb_tx_queues || !eth_dev->data->nb_rx_queues) { 1552 PMD_DRV_LOG(ERR, "Queues are not configured yet!\n"); 1553 return -EINVAL; 1554 } 1555 1556 if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) 1557 PMD_DRV_LOG(ERR, 1558 "RxQ cnt %d > RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n", 1559 bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS); 1560 1561 do { 1562 rc = bnxt_hwrm_if_change(bp, true); 1563 if (rc == 0 || rc != -EAGAIN) 1564 break; 1565 1566 rte_delay_ms(BNXT_IF_CHANGE_RETRY_INTERVAL); 1567 } while (retry_cnt--); 1568 1569 if (rc) 1570 return rc; 1571 1572 if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) { 1573 rc = bnxt_handle_if_change_status(bp); 1574 if (rc) 1575 return rc; 1576 } 1577 1578 bnxt_enable_int(bp); 1579 1580 eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev); 1581 1582 rc = bnxt_start_nic(bp); 1583 if (rc) 1584 goto error; 1585 1586 rc = bnxt_alloc_prev_ring_stats(bp); 1587 if (rc) 1588 goto error; 1589 1590 eth_dev->data->dev_started = 1; 1591 1592 bnxt_link_update_op(eth_dev, 1); 1593 1594 if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) 1595 vlan_mask |= RTE_ETH_VLAN_FILTER_MASK; 1596 if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 1597 vlan_mask |= RTE_ETH_VLAN_STRIP_MASK; 1598 rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask); 1599 if (rc) 1600 goto error; 1601 1602 /* Initialize bnxt ULP port details */ 1603 rc = bnxt_ulp_port_init(bp); 1604 if (rc) 1605 goto error; 1606 1607 eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev); 1608 eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev); 1609 1610 bnxt_schedule_fw_health_check(bp); 1611 1612 if (BNXT_P5_PTP_TIMESYNC_ENABLED(bp)) 1613 bnxt_schedule_ptp_alarm(bp); 1614 1615 return 0; 1616 1617 error: 1618 bnxt_dev_stop(eth_dev); 1619 return rc; 1620 } 1621 1622 static void 1623 bnxt_uninit_locks(struct bnxt *bp) 1624 { 1625 pthread_mutex_destroy(&bp->flow_lock); 1626 pthread_mutex_destroy(&bp->def_cp_lock); 1627 pthread_mutex_destroy(&bp->health_check_lock); 1628 pthread_mutex_destroy(&bp->err_recovery_lock); 1629 if (bp->rep_info) { 1630 pthread_mutex_destroy(&bp->rep_info->vfr_lock); 1631 pthread_mutex_destroy(&bp->rep_info->vfr_start_lock); 1632 } 1633 } 1634 1635 static void bnxt_drv_uninit(struct bnxt *bp) 1636 { 1637 bnxt_free_leds_info(bp); 1638 bnxt_free_cos_queues(bp); 1639 bnxt_free_link_info(bp); 1640 bnxt_free_parent_info(bp); 1641 bnxt_uninit_locks(bp); 1642 1643 rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone); 1644 bp->tx_mem_zone = NULL; 1645 rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone); 1646 bp->rx_mem_zone 
= NULL; 1647 1648 bnxt_free_vf_info(bp); 1649 bnxt_free_pf_info(bp); 1650 1651 rte_free(bp->grp_info); 1652 bp->grp_info = NULL; 1653 } 1654 1655 static int bnxt_dev_close_op(struct rte_eth_dev *eth_dev) 1656 { 1657 struct bnxt *bp = eth_dev->data->dev_private; 1658 int ret = 0; 1659 1660 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1661 return 0; 1662 1663 pthread_mutex_lock(&bp->err_recovery_lock); 1664 if (bp->flags & BNXT_FLAG_FW_RESET) { 1665 PMD_DRV_LOG(ERR, 1666 "Adapter recovering from error...Please retry\n"); 1667 pthread_mutex_unlock(&bp->err_recovery_lock); 1668 return -EAGAIN; 1669 } 1670 pthread_mutex_unlock(&bp->err_recovery_lock); 1671 1672 /* cancel the recovery handler before remove dev */ 1673 rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp); 1674 rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp); 1675 bnxt_cancel_fc_thread(bp); 1676 1677 if (eth_dev->data->dev_started) 1678 ret = bnxt_dev_stop(eth_dev); 1679 1680 bnxt_uninit_resources(bp, false); 1681 1682 bnxt_drv_uninit(bp); 1683 1684 return ret; 1685 } 1686 1687 static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev, 1688 uint32_t index) 1689 { 1690 struct bnxt *bp = eth_dev->data->dev_private; 1691 uint64_t pool_mask = eth_dev->data->mac_pool_sel[index]; 1692 struct bnxt_vnic_info *vnic; 1693 struct bnxt_filter_info *filter, *temp_filter; 1694 uint32_t i; 1695 1696 if (is_bnxt_in_error(bp)) 1697 return; 1698 1699 /* 1700 * Loop through all VNICs from the specified filter flow pools to 1701 * remove the corresponding MAC addr filter 1702 */ 1703 for (i = 0; i < bp->nr_vnics; i++) { 1704 if (!(pool_mask & (1ULL << i))) 1705 continue; 1706 1707 vnic = &bp->vnic_info[i]; 1708 filter = STAILQ_FIRST(&vnic->filter); 1709 while (filter) { 1710 temp_filter = STAILQ_NEXT(filter, next); 1711 if (filter->mac_index == index) { 1712 STAILQ_REMOVE(&vnic->filter, filter, 1713 bnxt_filter_info, next); 1714 bnxt_hwrm_clear_l2_filter(bp, filter); 1715 bnxt_free_filter(bp, filter); 1716 } 1717 filter = temp_filter; 1718 } 1719 } 1720 } 1721 1722 static int bnxt_add_mac_filter(struct bnxt *bp, struct bnxt_vnic_info *vnic, 1723 struct rte_ether_addr *mac_addr, uint32_t index, 1724 uint32_t pool) 1725 { 1726 struct bnxt_filter_info *filter; 1727 int rc = 0; 1728 1729 /* Attach requested MAC address to the new l2_filter */ 1730 STAILQ_FOREACH(filter, &vnic->filter, next) { 1731 if (filter->mac_index == index) { 1732 PMD_DRV_LOG(DEBUG, 1733 "MAC addr already existed for pool %d\n", 1734 pool); 1735 return 0; 1736 } 1737 } 1738 1739 filter = bnxt_alloc_filter(bp); 1740 if (!filter) { 1741 PMD_DRV_LOG(ERR, "L2 filter alloc failed\n"); 1742 return -ENODEV; 1743 } 1744 1745 /* bnxt_alloc_filter copies default MAC to filter->l2_addr. 
So, 1746 * if the MAC that's been programmed now is a different one, then, 1747 * copy that addr to filter->l2_addr 1748 */ 1749 if (mac_addr) 1750 memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN); 1751 filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; 1752 1753 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); 1754 if (!rc) { 1755 filter->mac_index = index; 1756 if (filter->mac_index == 0) 1757 STAILQ_INSERT_HEAD(&vnic->filter, filter, next); 1758 else 1759 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 1760 } else { 1761 bnxt_free_filter(bp, filter); 1762 } 1763 1764 return rc; 1765 } 1766 1767 static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev, 1768 struct rte_ether_addr *mac_addr, 1769 uint32_t index, uint32_t pool) 1770 { 1771 struct bnxt *bp = eth_dev->data->dev_private; 1772 struct bnxt_vnic_info *vnic = &bp->vnic_info[pool]; 1773 int rc = 0; 1774 1775 rc = is_bnxt_in_error(bp); 1776 if (rc) 1777 return rc; 1778 1779 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) { 1780 PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n"); 1781 return -ENOTSUP; 1782 } 1783 1784 if (!vnic) { 1785 PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool); 1786 return -EINVAL; 1787 } 1788 1789 /* Filter settings will get applied when port is started */ 1790 if (!eth_dev->data->dev_started) 1791 return 0; 1792 1793 rc = bnxt_add_mac_filter(bp, vnic, mac_addr, index, pool); 1794 1795 return rc; 1796 } 1797 1798 int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete) 1799 { 1800 int rc = 0; 1801 struct bnxt *bp = eth_dev->data->dev_private; 1802 struct rte_eth_link new; 1803 int cnt = wait_to_complete ? BNXT_MAX_LINK_WAIT_CNT : 1804 BNXT_MIN_LINK_WAIT_CNT; 1805 1806 rc = is_bnxt_in_error(bp); 1807 if (rc) 1808 return rc; 1809 1810 memset(&new, 0, sizeof(new)); 1811 1812 if (bp->link_info == NULL) 1813 goto out; 1814 1815 do { 1816 /* Retrieve link info from hardware */ 1817 rc = bnxt_get_hwrm_link_config(bp, &new); 1818 if (rc) { 1819 new.link_speed = RTE_ETH_LINK_SPEED_100M; 1820 new.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 1821 PMD_DRV_LOG(ERR, 1822 "Failed to retrieve link rc = 0x%x!\n", rc); 1823 goto out; 1824 } 1825 1826 if (!wait_to_complete || new.link_status) 1827 break; 1828 1829 rte_delay_ms(BNXT_LINK_WAIT_INTERVAL); 1830 } while (cnt--); 1831 1832 /* Only single function PF can bring phy down. 1833 * When port is stopped, report link down for VF/MH/NPAR functions. 
1834 */ 1835 if (!BNXT_SINGLE_PF(bp) && !eth_dev->data->dev_started) 1836 memset(&new, 0, sizeof(new)); 1837 1838 out: 1839 /* Timed out or success */ 1840 if (new.link_status != eth_dev->data->dev_link.link_status || 1841 new.link_speed != eth_dev->data->dev_link.link_speed) { 1842 rte_eth_linkstatus_set(eth_dev, &new); 1843 bnxt_print_link_info(eth_dev); 1844 } 1845 1846 return rc; 1847 } 1848 1849 static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev) 1850 { 1851 struct bnxt *bp = eth_dev->data->dev_private; 1852 struct bnxt_vnic_info *vnic; 1853 uint32_t old_flags; 1854 int rc; 1855 1856 rc = is_bnxt_in_error(bp); 1857 if (rc) 1858 return rc; 1859 1860 /* Filter settings will get applied when port is started */ 1861 if (!eth_dev->data->dev_started) 1862 return 0; 1863 1864 if (bp->vnic_info == NULL) 1865 return 0; 1866 1867 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1868 1869 old_flags = vnic->flags; 1870 vnic->flags |= BNXT_VNIC_INFO_PROMISC; 1871 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1872 if (rc != 0) 1873 vnic->flags = old_flags; 1874 1875 return rc; 1876 } 1877 1878 static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev) 1879 { 1880 struct bnxt *bp = eth_dev->data->dev_private; 1881 struct bnxt_vnic_info *vnic; 1882 uint32_t old_flags; 1883 int rc; 1884 1885 rc = is_bnxt_in_error(bp); 1886 if (rc) 1887 return rc; 1888 1889 /* Filter settings will get applied when port is started */ 1890 if (!eth_dev->data->dev_started) 1891 return 0; 1892 1893 if (bp->vnic_info == NULL) 1894 return 0; 1895 1896 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1897 1898 old_flags = vnic->flags; 1899 vnic->flags &= ~BNXT_VNIC_INFO_PROMISC; 1900 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1901 if (rc != 0) 1902 vnic->flags = old_flags; 1903 1904 return rc; 1905 } 1906 1907 static int bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev) 1908 { 1909 struct bnxt *bp = eth_dev->data->dev_private; 1910 struct bnxt_vnic_info *vnic; 1911 uint32_t old_flags; 1912 int rc; 1913 1914 rc = is_bnxt_in_error(bp); 1915 if (rc) 1916 return rc; 1917 1918 /* Filter settings will get applied when port is started */ 1919 if (!eth_dev->data->dev_started) 1920 return 0; 1921 1922 if (bp->vnic_info == NULL) 1923 return 0; 1924 1925 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1926 1927 old_flags = vnic->flags; 1928 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; 1929 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1930 if (rc != 0) 1931 vnic->flags = old_flags; 1932 1933 return rc; 1934 } 1935 1936 static int bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev) 1937 { 1938 struct bnxt *bp = eth_dev->data->dev_private; 1939 struct bnxt_vnic_info *vnic; 1940 uint32_t old_flags; 1941 int rc; 1942 1943 rc = is_bnxt_in_error(bp); 1944 if (rc) 1945 return rc; 1946 1947 /* Filter settings will get applied when port is started */ 1948 if (!eth_dev->data->dev_started) 1949 return 0; 1950 1951 if (bp->vnic_info == NULL) 1952 return 0; 1953 1954 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1955 1956 old_flags = vnic->flags; 1957 vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; 1958 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1959 if (rc != 0) 1960 vnic->flags = old_flags; 1961 1962 return rc; 1963 } 1964 1965 /* Return bnxt_rx_queue pointer corresponding to a given rxq. 
*/ 1966 static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid) 1967 { 1968 if (qid >= bp->rx_nr_rings) 1969 return NULL; 1970 1971 return bp->eth_dev->data->rx_queues[qid]; 1972 } 1973 1974 /* Return rxq corresponding to a given rss table ring/group ID. */ 1975 static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr) 1976 { 1977 struct bnxt_rx_queue *rxq; 1978 unsigned int i; 1979 1980 if (!BNXT_HAS_RING_GRPS(bp)) { 1981 for (i = 0; i < bp->rx_nr_rings; i++) { 1982 rxq = bp->eth_dev->data->rx_queues[i]; 1983 if (rxq->rx_ring->rx_ring_struct->fw_ring_id == fwr) 1984 return rxq->index; 1985 } 1986 } else { 1987 for (i = 0; i < bp->rx_nr_rings; i++) { 1988 if (bp->grp_info[i].fw_grp_id == fwr) 1989 return i; 1990 } 1991 } 1992 1993 return INVALID_HW_RING_ID; 1994 } 1995 1996 static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev, 1997 struct rte_eth_rss_reta_entry64 *reta_conf, 1998 uint16_t reta_size) 1999 { 2000 struct bnxt *bp = eth_dev->data->dev_private; 2001 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 2002 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2003 uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp); 2004 uint16_t idx, sft; 2005 int i, rc; 2006 2007 rc = is_bnxt_in_error(bp); 2008 if (rc) 2009 return rc; 2010 2011 if (!vnic->rss_table) 2012 return -EINVAL; 2013 2014 if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) 2015 return -EINVAL; 2016 2017 if (reta_size != tbl_size) { 2018 PMD_DRV_LOG(ERR, "The configured hash table lookup size " 2019 "(%d) must equal the size supported by the hardware " 2020 "(%d)\n", reta_size, tbl_size); 2021 return -EINVAL; 2022 } 2023 2024 for (i = 0; i < reta_size; i++) { 2025 struct bnxt_rx_queue *rxq; 2026 2027 idx = i / RTE_ETH_RETA_GROUP_SIZE; 2028 sft = i % RTE_ETH_RETA_GROUP_SIZE; 2029 2030 if (!(reta_conf[idx].mask & (1ULL << sft))) 2031 continue; 2032 2033 rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]); 2034 if (!rxq) { 2035 PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n"); 2036 return -EINVAL; 2037 } 2038 2039 if (BNXT_CHIP_P5(bp)) { 2040 vnic->rss_table[i * 2] = 2041 rxq->rx_ring->rx_ring_struct->fw_ring_id; 2042 vnic->rss_table[i * 2 + 1] = 2043 rxq->cp_ring->cp_ring_struct->fw_ring_id; 2044 } else { 2045 vnic->rss_table[i] = 2046 vnic->fw_grp_ids[reta_conf[idx].reta[sft]]; 2047 } 2048 } 2049 2050 rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic); 2051 return rc; 2052 } 2053 2054 static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev, 2055 struct rte_eth_rss_reta_entry64 *reta_conf, 2056 uint16_t reta_size) 2057 { 2058 struct bnxt *bp = eth_dev->data->dev_private; 2059 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2060 uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp); 2061 uint16_t idx, sft, i; 2062 int rc; 2063 2064 rc = is_bnxt_in_error(bp); 2065 if (rc) 2066 return rc; 2067 2068 if (!vnic) 2069 return -EINVAL; 2070 if (!vnic->rss_table) 2071 return -EINVAL; 2072 2073 if (reta_size != tbl_size) { 2074 PMD_DRV_LOG(ERR, "The configured hash table lookup size " 2075 "(%d) must equal the size supported by the hardware " 2076 "(%d)\n", reta_size, tbl_size); 2077 return -EINVAL; 2078 } 2079 2080 for (idx = 0, i = 0; i < reta_size; i++) { 2081 idx = i / RTE_ETH_RETA_GROUP_SIZE; 2082 sft = i % RTE_ETH_RETA_GROUP_SIZE; 2083 2084 if (reta_conf[idx].mask & (1ULL << sft)) { 2085 uint16_t qid; 2086 2087 if (BNXT_CHIP_P5(bp)) 2088 qid = bnxt_rss_to_qid(bp, 2089 vnic->rss_table[i * 2]); 2090 else 2091 qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]); 2092 2093 if (qid == 
INVALID_HW_RING_ID) {
2094 PMD_DRV_LOG(ERR, "Invalid entry in RSS table.\n");
2095 return -EINVAL;
2096 }
2097 reta_conf[idx].reta[sft] = qid;
2098 }
2099 }
2100
2101 return 0;
2102 }
2103
2104 static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
2105 struct rte_eth_rss_conf *rss_conf)
2106 {
2107 struct bnxt *bp = eth_dev->data->dev_private;
2108 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2109 struct bnxt_vnic_info *vnic;
2110 int rc;
2111
2112 rc = is_bnxt_in_error(bp);
2113 if (rc)
2114 return rc;
2115
2116 /*
2117 * If the requested RSS enablement differs from what was set in
2118 * dev_configure, return -EINVAL
2119 */
2120 if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
2121 if (!rss_conf->rss_hf)
2122 PMD_DRV_LOG(ERR, "Hash type NONE\n");
2123 } else {
2124 if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
2125 return -EINVAL;
2126 }
2127
2128 bp->flags |= BNXT_FLAG_UPDATE_HASH;
2129 memcpy(&eth_dev->data->dev_conf.rx_adv_conf.rss_conf,
2130 rss_conf,
2131 sizeof(*rss_conf));
2132
2133 /* Update the default RSS VNIC(s) */
2134 vnic = BNXT_GET_DEFAULT_VNIC(bp);
2135 vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf);
2136 vnic->hash_mode =
2137 bnxt_rte_to_hwrm_hash_level(bp, rss_conf->rss_hf,
2138 RTE_ETH_RSS_LEVEL(rss_conf->rss_hf));
2139
2140 /*
2141 * If hashkey is not specified, use the previously configured
2142 * hashkey
2143 */
2144 if (!rss_conf->rss_key)
2145 goto rss_config;
2146
2147 if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE) {
2148 PMD_DRV_LOG(ERR,
2149 "Invalid hashkey length, should be %d bytes\n",
2150 HW_HASH_KEY_SIZE);
2151 return -EINVAL;
2152 }
2153 memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len);
2154
2155 rss_config:
2156 rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
2157 return rc;
2158 }
2159
2160 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
2161 struct rte_eth_rss_conf *rss_conf)
2162 {
2163 struct bnxt *bp = eth_dev->data->dev_private;
2164 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
2165 int len, rc;
2166 uint32_t hash_types;
2167
2168 rc = is_bnxt_in_error(bp);
2169 if (rc)
2170 return rc;
2171
2172 /* RSS configuration is the same for all VNICs */
2173 if (vnic && vnic->rss_hash_key) {
2174 if (rss_conf->rss_key) {
2175 len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
2176 rss_conf->rss_key_len : HW_HASH_KEY_SIZE; 2177 memcpy(rss_conf->rss_key, vnic->rss_hash_key, len); 2178 } 2179 2180 hash_types = vnic->hash_type; 2181 rss_conf->rss_hf = 0; 2182 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) { 2183 rss_conf->rss_hf |= RTE_ETH_RSS_IPV4; 2184 hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4; 2185 } 2186 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) { 2187 rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP; 2188 hash_types &= 2189 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4; 2190 } 2191 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) { 2192 rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP; 2193 hash_types &= 2194 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4; 2195 } 2196 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) { 2197 rss_conf->rss_hf |= RTE_ETH_RSS_IPV6; 2198 hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6; 2199 } 2200 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) { 2201 rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP; 2202 hash_types &= 2203 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6; 2204 } 2205 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) { 2206 rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP; 2207 hash_types &= 2208 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6; 2209 } 2210 2211 rss_conf->rss_hf |= 2212 bnxt_hwrm_to_rte_rss_level(bp, vnic->hash_mode); 2213 2214 if (hash_types) { 2215 PMD_DRV_LOG(ERR, 2216 "Unknown RSS config from firmware (%08x), RSS disabled", 2217 vnic->hash_type); 2218 return -ENOTSUP; 2219 } 2220 } else { 2221 rss_conf->rss_hf = 0; 2222 } 2223 return 0; 2224 } 2225 2226 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev, 2227 struct rte_eth_fc_conf *fc_conf) 2228 { 2229 struct bnxt *bp = dev->data->dev_private; 2230 struct rte_eth_link link_info; 2231 int rc; 2232 2233 rc = is_bnxt_in_error(bp); 2234 if (rc) 2235 return rc; 2236 2237 rc = bnxt_get_hwrm_link_config(bp, &link_info); 2238 if (rc) 2239 return rc; 2240 2241 memset(fc_conf, 0, sizeof(*fc_conf)); 2242 if (bp->link_info->auto_pause) 2243 fc_conf->autoneg = 1; 2244 switch (bp->link_info->pause) { 2245 case 0: 2246 fc_conf->mode = RTE_ETH_FC_NONE; 2247 break; 2248 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX: 2249 fc_conf->mode = RTE_ETH_FC_TX_PAUSE; 2250 break; 2251 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX: 2252 fc_conf->mode = RTE_ETH_FC_RX_PAUSE; 2253 break; 2254 case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX | 2255 HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX): 2256 fc_conf->mode = RTE_ETH_FC_FULL; 2257 break; 2258 } 2259 return 0; 2260 } 2261 2262 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev, 2263 struct rte_eth_fc_conf *fc_conf) 2264 { 2265 struct bnxt *bp = dev->data->dev_private; 2266 int rc; 2267 2268 rc = is_bnxt_in_error(bp); 2269 if (rc) 2270 return rc; 2271 2272 if (!BNXT_SINGLE_PF(bp)) { 2273 PMD_DRV_LOG(ERR, 2274 "Flow Control Settings cannot be modified on VF or on shared PF\n"); 2275 return -ENOTSUP; 2276 } 2277 2278 switch (fc_conf->mode) { 2279 case RTE_ETH_FC_NONE: 2280 bp->link_info->auto_pause = 0; 2281 bp->link_info->force_pause = 0; 2282 break; 2283 case RTE_ETH_FC_RX_PAUSE: 2284 if (fc_conf->autoneg) { 2285 bp->link_info->auto_pause = 2286 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 2287 bp->link_info->force_pause = 0; 2288 } else { 2289 bp->link_info->auto_pause = 0; 2290 bp->link_info->force_pause = 2291 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 2292 } 2293 break; 2294 case RTE_ETH_FC_TX_PAUSE: 2295 if (fc_conf->autoneg) { 2296 bp->link_info->auto_pause = 2297 
HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX; 2298 bp->link_info->force_pause = 0; 2299 } else { 2300 bp->link_info->auto_pause = 0; 2301 bp->link_info->force_pause = 2302 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX; 2303 } 2304 break; 2305 case RTE_ETH_FC_FULL: 2306 if (fc_conf->autoneg) { 2307 bp->link_info->auto_pause = 2308 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX | 2309 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 2310 bp->link_info->force_pause = 0; 2311 } else { 2312 bp->link_info->auto_pause = 0; 2313 bp->link_info->force_pause = 2314 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX | 2315 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 2316 } 2317 break; 2318 } 2319 return bnxt_set_hwrm_link_config(bp, true); 2320 } 2321 2322 /* Add UDP tunneling port */ 2323 static int 2324 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev, 2325 struct rte_eth_udp_tunnel *udp_tunnel) 2326 { 2327 struct bnxt *bp = eth_dev->data->dev_private; 2328 uint16_t tunnel_type = 0; 2329 int rc = 0; 2330 2331 rc = is_bnxt_in_error(bp); 2332 if (rc) 2333 return rc; 2334 2335 switch (udp_tunnel->prot_type) { 2336 case RTE_ETH_TUNNEL_TYPE_VXLAN: 2337 if (bp->vxlan_port_cnt) { 2338 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 2339 udp_tunnel->udp_port); 2340 if (bp->vxlan_port != udp_tunnel->udp_port) { 2341 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 2342 return -ENOSPC; 2343 } 2344 bp->vxlan_port_cnt++; 2345 return 0; 2346 } 2347 tunnel_type = 2348 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN; 2349 break; 2350 case RTE_ETH_TUNNEL_TYPE_GENEVE: 2351 if (bp->geneve_port_cnt) { 2352 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 2353 udp_tunnel->udp_port); 2354 if (bp->geneve_port != udp_tunnel->udp_port) { 2355 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 2356 return -ENOSPC; 2357 } 2358 bp->geneve_port_cnt++; 2359 return 0; 2360 } 2361 tunnel_type = 2362 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE; 2363 break; 2364 default: 2365 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); 2366 return -ENOTSUP; 2367 } 2368 rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port, 2369 tunnel_type); 2370 2371 if (rc != 0) 2372 return rc; 2373 2374 if (tunnel_type == 2375 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN) 2376 bp->vxlan_port_cnt++; 2377 2378 if (tunnel_type == 2379 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE) 2380 bp->geneve_port_cnt++; 2381 2382 return rc; 2383 } 2384 2385 static int 2386 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev, 2387 struct rte_eth_udp_tunnel *udp_tunnel) 2388 { 2389 struct bnxt *bp = eth_dev->data->dev_private; 2390 uint16_t tunnel_type = 0; 2391 uint16_t port = 0; 2392 int rc = 0; 2393 2394 rc = is_bnxt_in_error(bp); 2395 if (rc) 2396 return rc; 2397 2398 switch (udp_tunnel->prot_type) { 2399 case RTE_ETH_TUNNEL_TYPE_VXLAN: 2400 if (!bp->vxlan_port_cnt) { 2401 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 2402 return -EINVAL; 2403 } 2404 if (bp->vxlan_port != udp_tunnel->udp_port) { 2405 PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n", 2406 udp_tunnel->udp_port, bp->vxlan_port); 2407 return -EINVAL; 2408 } 2409 if (--bp->vxlan_port_cnt) 2410 return 0; 2411 2412 tunnel_type = 2413 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN; 2414 port = bp->vxlan_fw_dst_port_id; 2415 break; 2416 case RTE_ETH_TUNNEL_TYPE_GENEVE: 2417 if (!bp->geneve_port_cnt) { 2418 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 2419 return -EINVAL; 2420 } 2421 if (bp->geneve_port != udp_tunnel->udp_port) { 2422 PMD_DRV_LOG(ERR, "Req Port: %d. 
Configured port: %d\n", 2423 udp_tunnel->udp_port, bp->geneve_port); 2424 return -EINVAL; 2425 } 2426 if (--bp->geneve_port_cnt) 2427 return 0; 2428 2429 tunnel_type = 2430 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE; 2431 port = bp->geneve_fw_dst_port_id; 2432 break; 2433 default: 2434 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); 2435 return -ENOTSUP; 2436 } 2437 2438 rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type); 2439 return rc; 2440 } 2441 2442 static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 2443 { 2444 struct bnxt_filter_info *filter; 2445 struct bnxt_vnic_info *vnic; 2446 int rc = 0; 2447 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; 2448 2449 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2450 filter = STAILQ_FIRST(&vnic->filter); 2451 while (filter) { 2452 /* Search for this matching MAC+VLAN filter */ 2453 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) { 2454 /* Delete the filter */ 2455 rc = bnxt_hwrm_clear_l2_filter(bp, filter); 2456 if (rc) 2457 return rc; 2458 STAILQ_REMOVE(&vnic->filter, filter, 2459 bnxt_filter_info, next); 2460 bnxt_free_filter(bp, filter); 2461 PMD_DRV_LOG(INFO, 2462 "Deleted vlan filter for %d\n", 2463 vlan_id); 2464 return 0; 2465 } 2466 filter = STAILQ_NEXT(filter, next); 2467 } 2468 return -ENOENT; 2469 } 2470 2471 static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 2472 { 2473 struct bnxt_filter_info *filter; 2474 struct bnxt_vnic_info *vnic; 2475 int rc = 0; 2476 uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN | 2477 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK; 2478 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; 2479 2480 /* Implementation notes on the use of VNIC in this command: 2481 * 2482 * By default, these filters belong to default vnic for the function. 2483 * Once these filters are set up, only destination VNIC can be modified. 2484 * If the destination VNIC is not specified in this command, 2485 * then the HWRM shall only create an l2 context id. 2486 */ 2487 2488 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2489 filter = STAILQ_FIRST(&vnic->filter); 2490 /* Check if the VLAN has already been added */ 2491 while (filter) { 2492 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) 2493 return -EEXIST; 2494 2495 filter = STAILQ_NEXT(filter, next); 2496 } 2497 2498 /* No match found. Alloc a fresh filter and issue the L2_FILTER_ALLOC 2499 * command to create MAC+VLAN filter with the right flags, enables set. 2500 */ 2501 filter = bnxt_alloc_filter(bp); 2502 if (!filter) { 2503 PMD_DRV_LOG(ERR, 2504 "MAC/VLAN filter alloc failed\n"); 2505 return -ENOMEM; 2506 } 2507 /* MAC + VLAN ID filter */ 2508 /* If l2_ivlan == 0 and l2_ivlan_mask != 0, only 2509 * untagged packets are received 2510 * 2511 * If l2_ivlan != 0 and l2_ivlan_mask != 0, untagged 2512 * packets and only the programmed vlan's packets are received 2513 */ 2514 filter->l2_ivlan = vlan_id; 2515 filter->l2_ivlan_mask = 0x0FFF; 2516 filter->enables |= en; 2517 filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; 2518 2519 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); 2520 if (rc) { 2521 /* Free the newly allocated filter as we were 2522 * not able to create the filter in hardware. 
2523 */ 2524 bnxt_free_filter(bp, filter); 2525 return rc; 2526 } 2527 2528 filter->mac_index = 0; 2529 /* Add this new filter to the list */ 2530 if (vlan_id == 0) 2531 STAILQ_INSERT_HEAD(&vnic->filter, filter, next); 2532 else 2533 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 2534 2535 PMD_DRV_LOG(INFO, 2536 "Added Vlan filter for %d\n", vlan_id); 2537 return rc; 2538 } 2539 2540 static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev, 2541 uint16_t vlan_id, int on) 2542 { 2543 struct bnxt *bp = eth_dev->data->dev_private; 2544 int rc; 2545 2546 rc = is_bnxt_in_error(bp); 2547 if (rc) 2548 return rc; 2549 2550 if (!eth_dev->data->dev_started) { 2551 PMD_DRV_LOG(ERR, "port must be started before setting vlan\n"); 2552 return -EINVAL; 2553 } 2554 2555 /* These operations apply to ALL existing MAC/VLAN filters */ 2556 if (on) 2557 return bnxt_add_vlan_filter(bp, vlan_id); 2558 else 2559 return bnxt_del_vlan_filter(bp, vlan_id); 2560 } 2561 2562 static int bnxt_del_dflt_mac_filter(struct bnxt *bp, 2563 struct bnxt_vnic_info *vnic) 2564 { 2565 struct bnxt_filter_info *filter; 2566 int rc; 2567 2568 filter = STAILQ_FIRST(&vnic->filter); 2569 while (filter) { 2570 if (filter->mac_index == 0 && 2571 !memcmp(filter->l2_addr, bp->mac_addr, 2572 RTE_ETHER_ADDR_LEN)) { 2573 rc = bnxt_hwrm_clear_l2_filter(bp, filter); 2574 if (!rc) { 2575 STAILQ_REMOVE(&vnic->filter, filter, 2576 bnxt_filter_info, next); 2577 bnxt_free_filter(bp, filter); 2578 } 2579 return rc; 2580 } 2581 filter = STAILQ_NEXT(filter, next); 2582 } 2583 return 0; 2584 } 2585 2586 static int 2587 bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads) 2588 { 2589 struct bnxt_vnic_info *vnic; 2590 unsigned int i; 2591 int rc; 2592 2593 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2594 if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) { 2595 /* Remove any VLAN filters programmed */ 2596 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++) 2597 bnxt_del_vlan_filter(bp, i); 2598 2599 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0); 2600 if (rc) 2601 return rc; 2602 } else { 2603 /* Default filter will allow packets that match the 2604 * dest mac. 
So, it has to be deleted, otherwise, we 2605 * will endup receiving vlan packets for which the 2606 * filter is not programmed, when hw-vlan-filter 2607 * configuration is ON 2608 */ 2609 bnxt_del_dflt_mac_filter(bp, vnic); 2610 /* This filter will allow only untagged packets */ 2611 bnxt_add_vlan_filter(bp, 0); 2612 } 2613 PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n", 2614 !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)); 2615 2616 return 0; 2617 } 2618 2619 static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id) 2620 { 2621 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 2622 unsigned int i; 2623 int rc; 2624 2625 /* Destroy vnic filters and vnic */ 2626 if (bp->eth_dev->data->dev_conf.rxmode.offloads & 2627 RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { 2628 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++) 2629 bnxt_del_vlan_filter(bp, i); 2630 } 2631 bnxt_del_dflt_mac_filter(bp, vnic); 2632 2633 rc = bnxt_hwrm_vnic_ctx_free(bp, vnic); 2634 if (rc) 2635 return rc; 2636 2637 rc = bnxt_hwrm_vnic_free(bp, vnic); 2638 if (rc) 2639 return rc; 2640 2641 rte_free(vnic->fw_grp_ids); 2642 vnic->fw_grp_ids = NULL; 2643 2644 vnic->rx_queue_cnt = 0; 2645 2646 return 0; 2647 } 2648 2649 static int 2650 bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads) 2651 { 2652 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2653 int rc; 2654 2655 /* Destroy, recreate and reconfigure the default vnic */ 2656 rc = bnxt_free_one_vnic(bp, 0); 2657 if (rc) 2658 return rc; 2659 2660 /* default vnic 0 */ 2661 rc = bnxt_setup_one_vnic(bp, 0); 2662 if (rc) 2663 return rc; 2664 2665 if (bp->eth_dev->data->dev_conf.rxmode.offloads & 2666 RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { 2667 rc = bnxt_add_vlan_filter(bp, 0); 2668 if (rc) 2669 return rc; 2670 rc = bnxt_restore_vlan_filters(bp); 2671 if (rc) 2672 return rc; 2673 } else { 2674 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0); 2675 if (rc) 2676 return rc; 2677 } 2678 2679 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 2680 if (rc) 2681 return rc; 2682 2683 PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n", 2684 !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)); 2685 2686 return rc; 2687 } 2688 2689 static int 2690 bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask) 2691 { 2692 uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; 2693 struct bnxt *bp = dev->data->dev_private; 2694 int rc; 2695 2696 rc = is_bnxt_in_error(bp); 2697 if (rc) 2698 return rc; 2699 2700 /* Filter settings will get applied when port is started */ 2701 if (!dev->data->dev_started) 2702 return 0; 2703 2704 if (mask & RTE_ETH_VLAN_FILTER_MASK) { 2705 /* Enable or disable VLAN filtering */ 2706 rc = bnxt_config_vlan_hw_filter(bp, rx_offloads); 2707 if (rc) 2708 return rc; 2709 } 2710 2711 if (mask & RTE_ETH_VLAN_STRIP_MASK) { 2712 /* Enable or disable VLAN stripping */ 2713 rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads); 2714 if (rc) 2715 return rc; 2716 } 2717 2718 if (mask & RTE_ETH_VLAN_EXTEND_MASK) { 2719 if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) 2720 PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n"); 2721 else 2722 PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n"); 2723 } 2724 2725 return 0; 2726 } 2727 2728 static int 2729 bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type, 2730 uint16_t tpid) 2731 { 2732 struct bnxt *bp = dev->data->dev_private; 2733 int qinq = dev->data->dev_conf.rxmode.offloads & 2734 RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 2735 2736 if (vlan_type != RTE_ETH_VLAN_TYPE_INNER && 2737 vlan_type != RTE_ETH_VLAN_TYPE_OUTER) 
{ 2738 PMD_DRV_LOG(ERR, 2739 "Unsupported vlan type."); 2740 return -EINVAL; 2741 } 2742 if (!qinq) { 2743 PMD_DRV_LOG(ERR, 2744 "QinQ not enabled. Needs to be ON as we can " 2745 "accelerate only outer vlan\n"); 2746 return -EINVAL; 2747 } 2748 2749 if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) { 2750 switch (tpid) { 2751 case RTE_ETHER_TYPE_QINQ: 2752 bp->outer_tpid_bd = 2753 TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8; 2754 break; 2755 case RTE_ETHER_TYPE_VLAN: 2756 bp->outer_tpid_bd = 2757 TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100; 2758 break; 2759 case RTE_ETHER_TYPE_QINQ1: 2760 bp->outer_tpid_bd = 2761 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100; 2762 break; 2763 case RTE_ETHER_TYPE_QINQ2: 2764 bp->outer_tpid_bd = 2765 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200; 2766 break; 2767 case RTE_ETHER_TYPE_QINQ3: 2768 bp->outer_tpid_bd = 2769 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300; 2770 break; 2771 default: 2772 PMD_DRV_LOG(ERR, "Invalid TPID: %x\n", tpid); 2773 return -EINVAL; 2774 } 2775 bp->outer_tpid_bd |= tpid; 2776 PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd); 2777 } else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) { 2778 PMD_DRV_LOG(ERR, 2779 "Can accelerate only outer vlan in QinQ\n"); 2780 return -EINVAL; 2781 } 2782 2783 return 0; 2784 } 2785 2786 static int 2787 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, 2788 struct rte_ether_addr *addr) 2789 { 2790 struct bnxt *bp = dev->data->dev_private; 2791 /* Default Filter is tied to VNIC 0 */ 2792 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2793 int rc; 2794 2795 rc = is_bnxt_in_error(bp); 2796 if (rc) 2797 return rc; 2798 2799 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 2800 return -EPERM; 2801 2802 if (rte_is_zero_ether_addr(addr)) 2803 return -EINVAL; 2804 2805 /* Filter settings will get applied when port is started */ 2806 if (!dev->data->dev_started) 2807 return 0; 2808 2809 /* Check if the requested MAC is already added */ 2810 if (memcmp(addr, bp->mac_addr, RTE_ETHER_ADDR_LEN) == 0) 2811 return 0; 2812 2813 /* Destroy filter and re-create it */ 2814 bnxt_del_dflt_mac_filter(bp, vnic); 2815 2816 memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN); 2817 if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { 2818 /* This filter will allow only untagged packets */ 2819 rc = bnxt_add_vlan_filter(bp, 0); 2820 } else { 2821 rc = bnxt_add_mac_filter(bp, vnic, addr, 0, 0); 2822 } 2823 2824 PMD_DRV_LOG(DEBUG, "Set MAC addr\n"); 2825 return rc; 2826 } 2827 2828 static int 2829 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, 2830 struct rte_ether_addr *mc_addr_set, 2831 uint32_t nb_mc_addr) 2832 { 2833 struct bnxt *bp = eth_dev->data->dev_private; 2834 struct bnxt_vnic_info *vnic; 2835 uint32_t i = 0; 2836 int rc; 2837 2838 rc = is_bnxt_in_error(bp); 2839 if (rc) 2840 return rc; 2841 2842 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2843 2844 bp->nb_mc_addr = nb_mc_addr; 2845 2846 if (nb_mc_addr > BNXT_MAX_MC_ADDRS) { 2847 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; 2848 goto allmulti; 2849 } 2850 2851 /* TODO Check for Duplicate mcast addresses */ 2852 vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; 2853 for (i = 0; i < nb_mc_addr; i++) 2854 rte_ether_addr_copy(&mc_addr_set[i], &bp->mcast_addr_list[i]); 2855 2856 if (bp->nb_mc_addr) 2857 vnic->flags |= BNXT_VNIC_INFO_MCAST; 2858 else 2859 vnic->flags &= ~BNXT_VNIC_INFO_MCAST; 2860 2861 allmulti: 2862 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 2863 } 2864 2865 static int 2866 bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) 2867 { 
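	/* bp->fw_ver packs the HWRM firmware version as four bytes:
	 * major (bits 31:24), minor (23:16), update (15:8), reserved (7:0).
	 */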
2868 struct bnxt *bp = dev->data->dev_private; 2869 uint8_t fw_major = (bp->fw_ver >> 24) & 0xff; 2870 uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff; 2871 uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff; 2872 uint8_t fw_rsvd = bp->fw_ver & 0xff; 2873 int ret; 2874 2875 ret = snprintf(fw_version, fw_size, "%d.%d.%d.%d", 2876 fw_major, fw_minor, fw_updt, fw_rsvd); 2877 if (ret < 0) 2878 return -EINVAL; 2879 2880 ret += 1; /* add the size of '\0' */ 2881 if (fw_size < (size_t)ret) 2882 return ret; 2883 else 2884 return 0; 2885 } 2886 2887 static void 2888 bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 2889 struct rte_eth_rxq_info *qinfo) 2890 { 2891 struct bnxt *bp = dev->data->dev_private; 2892 struct bnxt_rx_queue *rxq; 2893 2894 if (is_bnxt_in_error(bp)) 2895 return; 2896 2897 rxq = dev->data->rx_queues[queue_id]; 2898 2899 qinfo->mp = rxq->mb_pool; 2900 qinfo->scattered_rx = dev->data->scattered_rx; 2901 qinfo->nb_desc = rxq->nb_rx_desc; 2902 2903 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; 2904 qinfo->conf.rx_drop_en = rxq->drop_en; 2905 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start; 2906 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads; 2907 } 2908 2909 static void 2910 bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 2911 struct rte_eth_txq_info *qinfo) 2912 { 2913 struct bnxt *bp = dev->data->dev_private; 2914 struct bnxt_tx_queue *txq; 2915 2916 if (is_bnxt_in_error(bp)) 2917 return; 2918 2919 txq = dev->data->tx_queues[queue_id]; 2920 2921 qinfo->nb_desc = txq->nb_tx_desc; 2922 2923 qinfo->conf.tx_thresh.pthresh = txq->pthresh; 2924 qinfo->conf.tx_thresh.hthresh = txq->hthresh; 2925 qinfo->conf.tx_thresh.wthresh = txq->wthresh; 2926 2927 qinfo->conf.tx_free_thresh = txq->tx_free_thresh; 2928 qinfo->conf.tx_rs_thresh = 0; 2929 qinfo->conf.tx_deferred_start = txq->tx_deferred_start; 2930 qinfo->conf.offloads = txq->offloads; 2931 } 2932 2933 static const struct { 2934 eth_rx_burst_t pkt_burst; 2935 const char *info; 2936 } bnxt_rx_burst_info[] = { 2937 {bnxt_recv_pkts, "Scalar"}, 2938 #if defined(RTE_ARCH_X86) 2939 {bnxt_recv_pkts_vec, "Vector SSE"}, 2940 #endif 2941 #if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT) 2942 {bnxt_recv_pkts_vec_avx2, "Vector AVX2"}, 2943 #endif 2944 #if defined(RTE_ARCH_ARM64) 2945 {bnxt_recv_pkts_vec, "Vector Neon"}, 2946 #endif 2947 }; 2948 2949 static int 2950 bnxt_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 2951 struct rte_eth_burst_mode *mode) 2952 { 2953 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; 2954 size_t i; 2955 2956 for (i = 0; i < RTE_DIM(bnxt_rx_burst_info); i++) { 2957 if (pkt_burst == bnxt_rx_burst_info[i].pkt_burst) { 2958 snprintf(mode->info, sizeof(mode->info), "%s", 2959 bnxt_rx_burst_info[i].info); 2960 return 0; 2961 } 2962 } 2963 2964 return -EINVAL; 2965 } 2966 2967 static const struct { 2968 eth_tx_burst_t pkt_burst; 2969 const char *info; 2970 } bnxt_tx_burst_info[] = { 2971 {bnxt_xmit_pkts, "Scalar"}, 2972 #if defined(RTE_ARCH_X86) 2973 {bnxt_xmit_pkts_vec, "Vector SSE"}, 2974 #endif 2975 #if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT) 2976 {bnxt_xmit_pkts_vec_avx2, "Vector AVX2"}, 2977 #endif 2978 #if defined(RTE_ARCH_ARM64) 2979 {bnxt_xmit_pkts_vec, "Vector Neon"}, 2980 #endif 2981 }; 2982 2983 static int 2984 bnxt_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 2985 struct rte_eth_burst_mode *mode) 2986 { 2987 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst; 2988 size_t i; 2989 2990 for (i = 0; i < 
RTE_DIM(bnxt_tx_burst_info); i++) { 2991 if (pkt_burst == bnxt_tx_burst_info[i].pkt_burst) { 2992 snprintf(mode->info, sizeof(mode->info), "%s", 2993 bnxt_tx_burst_info[i].info); 2994 return 0; 2995 } 2996 } 2997 2998 return -EINVAL; 2999 } 3000 3001 int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) 3002 { 3003 uint32_t overhead = BNXT_MAX_PKT_LEN - BNXT_MAX_MTU; 3004 struct bnxt *bp = eth_dev->data->dev_private; 3005 uint32_t new_pkt_size; 3006 uint32_t rc; 3007 uint32_t i; 3008 3009 rc = is_bnxt_in_error(bp); 3010 if (rc) 3011 return rc; 3012 3013 /* Exit if receive queues are not configured yet */ 3014 if (!eth_dev->data->nb_rx_queues) 3015 return rc; 3016 3017 new_pkt_size = new_mtu + overhead; 3018 3019 /* 3020 * Disallow any MTU change that would require scattered receive support 3021 * if it is not already enabled. 3022 */ 3023 if (eth_dev->data->dev_started && 3024 !eth_dev->data->scattered_rx && 3025 (new_pkt_size > 3026 eth_dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { 3027 PMD_DRV_LOG(ERR, 3028 "MTU change would require scattered rx support. "); 3029 PMD_DRV_LOG(ERR, "Stop port before changing MTU.\n"); 3030 return -EINVAL; 3031 } 3032 3033 if (new_mtu > RTE_ETHER_MTU) 3034 bp->flags |= BNXT_FLAG_JUMBO; 3035 else 3036 bp->flags &= ~BNXT_FLAG_JUMBO; 3037 3038 /* Is there a change in mtu setting? */ 3039 if (eth_dev->data->mtu == new_mtu) 3040 return rc; 3041 3042 for (i = 0; i < bp->nr_vnics; i++) { 3043 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 3044 uint16_t size = 0; 3045 3046 vnic->mru = BNXT_VNIC_MRU(new_mtu); 3047 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 3048 if (rc) 3049 break; 3050 3051 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool); 3052 size -= RTE_PKTMBUF_HEADROOM; 3053 3054 if (size < new_mtu) { 3055 rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); 3056 if (rc) 3057 return rc; 3058 } 3059 } 3060 3061 if (bnxt_hwrm_config_host_mtu(bp)) 3062 PMD_DRV_LOG(WARNING, "Failed to configure host MTU\n"); 3063 3064 PMD_DRV_LOG(INFO, "New MTU is %d\n", new_mtu); 3065 3066 return rc; 3067 } 3068 3069 static int 3070 bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on) 3071 { 3072 struct bnxt *bp = dev->data->dev_private; 3073 uint16_t vlan = bp->vlan; 3074 int rc; 3075 3076 rc = is_bnxt_in_error(bp); 3077 if (rc) 3078 return rc; 3079 3080 if (!BNXT_SINGLE_PF(bp)) { 3081 PMD_DRV_LOG(ERR, "PVID cannot be modified on VF or on shared PF\n"); 3082 return -ENOTSUP; 3083 } 3084 bp->vlan = on ? 
pvid : 0; 3085 3086 rc = bnxt_hwrm_set_default_vlan(bp, 0, 0); 3087 if (rc) 3088 bp->vlan = vlan; 3089 return rc; 3090 } 3091 3092 static int 3093 bnxt_dev_led_on_op(struct rte_eth_dev *dev) 3094 { 3095 struct bnxt *bp = dev->data->dev_private; 3096 int rc; 3097 3098 rc = is_bnxt_in_error(bp); 3099 if (rc) 3100 return rc; 3101 3102 return bnxt_hwrm_port_led_cfg(bp, true); 3103 } 3104 3105 static int 3106 bnxt_dev_led_off_op(struct rte_eth_dev *dev) 3107 { 3108 struct bnxt *bp = dev->data->dev_private; 3109 int rc; 3110 3111 rc = is_bnxt_in_error(bp); 3112 if (rc) 3113 return rc; 3114 3115 return bnxt_hwrm_port_led_cfg(bp, false); 3116 } 3117 3118 static uint32_t 3119 bnxt_rx_queue_count_op(void *rx_queue) 3120 { 3121 struct bnxt *bp; 3122 struct bnxt_cp_ring_info *cpr; 3123 uint32_t desc = 0, raw_cons, cp_ring_size; 3124 struct bnxt_rx_queue *rxq; 3125 struct rx_pkt_cmpl *rxcmp; 3126 int rc; 3127 3128 rxq = rx_queue; 3129 bp = rxq->bp; 3130 3131 rc = is_bnxt_in_error(bp); 3132 if (rc) 3133 return rc; 3134 3135 cpr = rxq->cp_ring; 3136 raw_cons = cpr->cp_raw_cons; 3137 cp_ring_size = cpr->cp_ring_struct->ring_size; 3138 3139 while (1) { 3140 uint32_t agg_cnt, cons, cmpl_type; 3141 3142 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 3143 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 3144 3145 if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size)) 3146 break; 3147 3148 cmpl_type = CMP_TYPE(rxcmp); 3149 3150 switch (cmpl_type) { 3151 case CMPL_BASE_TYPE_RX_L2: 3152 case CMPL_BASE_TYPE_RX_L2_V2: 3153 agg_cnt = BNXT_RX_L2_AGG_BUFS(rxcmp); 3154 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3155 desc++; 3156 break; 3157 3158 case CMPL_BASE_TYPE_RX_TPA_END: 3159 if (BNXT_CHIP_P5(rxq->bp)) { 3160 struct rx_tpa_v2_end_cmpl_hi *p5_tpa_end; 3161 3162 p5_tpa_end = (void *)rxcmp; 3163 agg_cnt = BNXT_TPA_END_AGG_BUFS_TH(p5_tpa_end); 3164 } else { 3165 struct rx_tpa_end_cmpl *tpa_end; 3166 3167 tpa_end = (void *)rxcmp; 3168 agg_cnt = BNXT_TPA_END_AGG_BUFS(tpa_end); 3169 } 3170 3171 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3172 desc++; 3173 break; 3174 3175 default: 3176 raw_cons += CMP_LEN(cmpl_type); 3177 } 3178 } 3179 3180 return desc; 3181 } 3182 3183 static int 3184 bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset) 3185 { 3186 struct bnxt_rx_queue *rxq = rx_queue; 3187 struct bnxt_cp_ring_info *cpr; 3188 struct bnxt_rx_ring_info *rxr; 3189 uint32_t desc, raw_cons, cp_ring_size; 3190 struct bnxt *bp = rxq->bp; 3191 struct rx_pkt_cmpl *rxcmp; 3192 int rc; 3193 3194 rc = is_bnxt_in_error(bp); 3195 if (rc) 3196 return rc; 3197 3198 if (offset >= rxq->nb_rx_desc) 3199 return -EINVAL; 3200 3201 rxr = rxq->rx_ring; 3202 cpr = rxq->cp_ring; 3203 cp_ring_size = cpr->cp_ring_struct->ring_size; 3204 3205 /* 3206 * For the vector receive case, the completion at the requested 3207 * offset can be indexed directly. 3208 */ 3209 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) 3210 if (bp->flags & BNXT_FLAG_RX_VECTOR_PKT_MODE) { 3211 struct rx_pkt_cmpl *rxcmp; 3212 uint32_t cons; 3213 3214 /* Check status of completion descriptor. */ 3215 raw_cons = cpr->cp_raw_cons + 3216 offset * CMP_LEN(CMPL_BASE_TYPE_RX_L2); 3217 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 3218 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 3219 3220 if (bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size)) 3221 return RTE_ETH_RX_DESC_DONE; 3222 3223 /* Check whether rx desc has an mbuf attached. 
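 * Descriptors inside the rearm window have been consumed and not yet
 * replenished with new mbufs.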
*/ 3224 cons = RING_CMP(rxr->rx_ring_struct, raw_cons / 2); 3225 if (cons >= rxq->rxrearm_start && 3226 cons < rxq->rxrearm_start + rxq->rxrearm_nb) { 3227 return RTE_ETH_RX_DESC_UNAVAIL; 3228 } 3229 3230 return RTE_ETH_RX_DESC_AVAIL; 3231 } 3232 #endif 3233 3234 /* 3235 * For the non-vector receive case, scan the completion ring to 3236 * locate the completion descriptor for the requested offset. 3237 */ 3238 raw_cons = cpr->cp_raw_cons; 3239 desc = 0; 3240 while (1) { 3241 uint32_t agg_cnt, cons, cmpl_type; 3242 3243 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 3244 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 3245 3246 if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size)) 3247 break; 3248 3249 cmpl_type = CMP_TYPE(rxcmp); 3250 3251 switch (cmpl_type) { 3252 case CMPL_BASE_TYPE_RX_L2: 3253 case CMPL_BASE_TYPE_RX_L2_V2: 3254 if (desc == offset) { 3255 cons = rxcmp->opaque; 3256 if (rxr->rx_buf_ring[cons]) 3257 return RTE_ETH_RX_DESC_DONE; 3258 else 3259 return RTE_ETH_RX_DESC_UNAVAIL; 3260 } 3261 agg_cnt = BNXT_RX_L2_AGG_BUFS(rxcmp); 3262 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3263 desc++; 3264 break; 3265 3266 case CMPL_BASE_TYPE_RX_TPA_END: 3267 if (desc == offset) 3268 return RTE_ETH_RX_DESC_DONE; 3269 3270 if (BNXT_CHIP_P5(rxq->bp)) { 3271 struct rx_tpa_v2_end_cmpl_hi *p5_tpa_end; 3272 3273 p5_tpa_end = (void *)rxcmp; 3274 agg_cnt = BNXT_TPA_END_AGG_BUFS_TH(p5_tpa_end); 3275 } else { 3276 struct rx_tpa_end_cmpl *tpa_end; 3277 3278 tpa_end = (void *)rxcmp; 3279 agg_cnt = BNXT_TPA_END_AGG_BUFS(tpa_end); 3280 } 3281 3282 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3283 desc++; 3284 break; 3285 3286 default: 3287 raw_cons += CMP_LEN(cmpl_type); 3288 } 3289 } 3290 3291 return RTE_ETH_RX_DESC_AVAIL; 3292 } 3293 3294 static int 3295 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset) 3296 { 3297 struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue; 3298 struct bnxt_cp_ring_info *cpr = txq->cp_ring; 3299 uint32_t ring_mask, raw_cons, nb_tx_pkts = 0; 3300 struct cmpl_base *cp_desc_ring; 3301 int rc; 3302 3303 rc = is_bnxt_in_error(txq->bp); 3304 if (rc) 3305 return rc; 3306 3307 if (offset >= txq->nb_tx_desc) 3308 return -EINVAL; 3309 3310 /* Return "desc done" if descriptor is available for use. */ 3311 if (bnxt_tx_bds_in_hw(txq) <= offset) 3312 return RTE_ETH_TX_DESC_DONE; 3313 3314 raw_cons = cpr->cp_raw_cons; 3315 cp_desc_ring = cpr->cp_desc_ring; 3316 ring_mask = cpr->cp_ring_struct->ring_mask; 3317 3318 /* Check to see if hw has posted a completion for the descriptor. */ 3319 while (1) { 3320 struct tx_cmpl *txcmp; 3321 uint32_t cons; 3322 3323 cons = RING_CMPL(ring_mask, raw_cons); 3324 txcmp = (struct tx_cmpl *)&cp_desc_ring[cons]; 3325 3326 if (!bnxt_cpr_cmp_valid(txcmp, raw_cons, ring_mask + 1)) 3327 break; 3328 3329 if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2) 3330 nb_tx_pkts += rte_le_to_cpu_32(txcmp->opaque); 3331 3332 if (nb_tx_pkts > offset) 3333 return RTE_ETH_TX_DESC_DONE; 3334 3335 raw_cons = NEXT_RAW_CMP(raw_cons); 3336 } 3337 3338 /* Descriptor is pending transmit, not yet completed by hardware. 
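 * Report it as FULL rather than DONE.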
*/ 3339 return RTE_ETH_TX_DESC_FULL; 3340 } 3341 3342 int 3343 bnxt_flow_ops_get_op(struct rte_eth_dev *dev, 3344 const struct rte_flow_ops **ops) 3345 { 3346 struct bnxt *bp = dev->data->dev_private; 3347 int ret = 0; 3348 3349 if (!bp) 3350 return -EIO; 3351 3352 if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) { 3353 struct bnxt_representor *vfr = dev->data->dev_private; 3354 bp = vfr->parent_dev->data->dev_private; 3355 /* parent is deleted while children are still valid */ 3356 if (!bp) { 3357 PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR Error\n", 3358 dev->data->port_id); 3359 return -EIO; 3360 } 3361 } 3362 3363 ret = is_bnxt_in_error(bp); 3364 if (ret) 3365 return ret; 3366 3367 /* PMD supports thread-safe flow operations. rte_flow API 3368 * functions can avoid mutex for multi-thread safety. 3369 */ 3370 dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE; 3371 3372 if (BNXT_TRUFLOW_EN(bp)) 3373 *ops = &bnxt_ulp_rte_flow_ops; 3374 else 3375 *ops = &bnxt_flow_ops; 3376 3377 return ret; 3378 } 3379 3380 static const uint32_t * 3381 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev) 3382 { 3383 static const uint32_t ptypes[] = { 3384 RTE_PTYPE_L2_ETHER_VLAN, 3385 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 3386 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 3387 RTE_PTYPE_L4_ICMP, 3388 RTE_PTYPE_L4_TCP, 3389 RTE_PTYPE_L4_UDP, 3390 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, 3391 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, 3392 RTE_PTYPE_INNER_L4_ICMP, 3393 RTE_PTYPE_INNER_L4_TCP, 3394 RTE_PTYPE_INNER_L4_UDP, 3395 RTE_PTYPE_UNKNOWN 3396 }; 3397 3398 if (!dev->rx_pkt_burst) 3399 return NULL; 3400 3401 return ptypes; 3402 } 3403 3404 static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count, 3405 int reg_win) 3406 { 3407 uint32_t reg_base = *reg_arr & 0xfffff000; 3408 uint32_t win_off; 3409 int i; 3410 3411 for (i = 0; i < count; i++) { 3412 if ((reg_arr[i] & 0xfffff000) != reg_base) 3413 return -ERANGE; 3414 } 3415 win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4; 3416 rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off); 3417 return 0; 3418 } 3419 3420 static int bnxt_map_ptp_regs(struct bnxt *bp) 3421 { 3422 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3423 uint32_t *reg_arr; 3424 int rc, i; 3425 3426 reg_arr = ptp->rx_regs; 3427 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5); 3428 if (rc) 3429 return rc; 3430 3431 reg_arr = ptp->tx_regs; 3432 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6); 3433 if (rc) 3434 return rc; 3435 3436 for (i = 0; i < BNXT_PTP_RX_REGS; i++) 3437 ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff); 3438 3439 for (i = 0; i < BNXT_PTP_TX_REGS; i++) 3440 ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff); 3441 3442 return 0; 3443 } 3444 3445 static void bnxt_unmap_ptp_regs(struct bnxt *bp) 3446 { 3447 rte_write32(0, (uint8_t *)bp->bar0 + 3448 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16); 3449 rte_write32(0, (uint8_t *)bp->bar0 + 3450 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20); 3451 } 3452 3453 static uint64_t bnxt_cc_read(struct bnxt *bp) 3454 { 3455 uint64_t ns; 3456 3457 ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3458 BNXT_GRCPF_REG_SYNC_TIME)); 3459 ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3460 BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32; 3461 return ns; 3462 } 3463 3464 static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts) 3465 { 3466 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3467 uint32_t fifo; 3468 3469 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3470 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 3471 if (fifo & 
BNXT_PTP_TX_FIFO_EMPTY) 3472 return -EAGAIN; 3473 3474 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3475 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 3476 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3477 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L])); 3478 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3479 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32; 3480 rte_read32((uint8_t *)bp->bar0 + ptp->tx_mapped_regs[BNXT_PTP_TX_SEQ]); 3481 3482 return 0; 3483 } 3484 3485 static int bnxt_clr_rx_ts(struct bnxt *bp, uint64_t *last_ts) 3486 { 3487 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3488 struct bnxt_pf_info *pf = bp->pf; 3489 uint16_t port_id; 3490 int i = 0; 3491 uint32_t fifo; 3492 3493 if (!ptp || (bp->flags & BNXT_FLAG_CHIP_P5)) 3494 return -EINVAL; 3495 3496 port_id = pf->port_id; 3497 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3498 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3499 while ((fifo & BNXT_PTP_RX_FIFO_PENDING) && (i < BNXT_PTP_RX_PND_CNT)) { 3500 rte_write32(1 << port_id, (uint8_t *)bp->bar0 + 3501 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]); 3502 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3503 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3504 *last_ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3505 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L])); 3506 *last_ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3507 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32; 3508 i++; 3509 } 3510 3511 if (i >= BNXT_PTP_RX_PND_CNT) 3512 return -EBUSY; 3513 3514 return 0; 3515 } 3516 3517 static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts) 3518 { 3519 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3520 struct bnxt_pf_info *pf = bp->pf; 3521 uint16_t port_id; 3522 uint32_t fifo; 3523 3524 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3525 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3526 if (!(fifo & BNXT_PTP_RX_FIFO_PENDING)) 3527 return -EAGAIN; 3528 3529 port_id = pf->port_id; 3530 rte_write32(1 << port_id, (uint8_t *)bp->bar0 + 3531 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]); 3532 3533 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3534 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3535 if (fifo & BNXT_PTP_RX_FIFO_PENDING) 3536 return bnxt_clr_rx_ts(bp, ts); 3537 3538 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3539 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L])); 3540 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3541 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32; 3542 3543 return 0; 3544 } 3545 3546 static int 3547 bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 3548 { 3549 uint64_t ns; 3550 struct bnxt *bp = dev->data->dev_private; 3551 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3552 3553 if (!ptp) 3554 return -ENOTSUP; 3555 3556 ns = rte_timespec_to_ns(ts); 3557 /* Set the timecounters to a new value. 
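 * Only the nanosecond component is updated; the cycle counter state
 * is left unchanged.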
*/ 3558 ptp->tc.nsec = ns; 3559 ptp->tx_tstamp_tc.nsec = ns; 3560 ptp->rx_tstamp_tc.nsec = ns; 3561 3562 return 0; 3563 } 3564 3565 static int 3566 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 3567 { 3568 struct bnxt *bp = dev->data->dev_private; 3569 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3570 uint64_t ns, systime_cycles = 0; 3571 int rc = 0; 3572 3573 if (!ptp) 3574 return -ENOTSUP; 3575 3576 if (BNXT_CHIP_P5(bp)) 3577 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, 3578 &systime_cycles); 3579 else 3580 systime_cycles = bnxt_cc_read(bp); 3581 3582 ns = rte_timecounter_update(&ptp->tc, systime_cycles); 3583 *ts = rte_ns_to_timespec(ns); 3584 3585 return rc; 3586 } 3587 static int 3588 bnxt_timesync_enable(struct rte_eth_dev *dev) 3589 { 3590 struct bnxt *bp = dev->data->dev_private; 3591 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3592 uint32_t shift = 0; 3593 int rc; 3594 3595 if (!ptp) 3596 return -ENOTSUP; 3597 3598 ptp->rx_filter = 1; 3599 ptp->tx_tstamp_en = 1; 3600 ptp->rxctl = BNXT_PTP_MSG_EVENTS; 3601 3602 rc = bnxt_hwrm_ptp_cfg(bp); 3603 if (rc) 3604 return rc; 3605 3606 memset(&ptp->tc, 0, sizeof(struct rte_timecounter)); 3607 memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 3608 memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 3609 3610 ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3611 ptp->tc.cc_shift = shift; 3612 ptp->tc.nsec_mask = (1ULL << shift) - 1; 3613 3614 ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3615 ptp->rx_tstamp_tc.cc_shift = shift; 3616 ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 3617 3618 ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3619 ptp->tx_tstamp_tc.cc_shift = shift; 3620 ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 3621 3622 if (!BNXT_CHIP_P5(bp)) 3623 bnxt_map_ptp_regs(bp); 3624 else 3625 rc = bnxt_ptp_start(bp); 3626 3627 return rc; 3628 } 3629 3630 static int 3631 bnxt_timesync_disable(struct rte_eth_dev *dev) 3632 { 3633 struct bnxt *bp = dev->data->dev_private; 3634 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3635 3636 if (!ptp) 3637 return -ENOTSUP; 3638 3639 ptp->rx_filter = 0; 3640 ptp->tx_tstamp_en = 0; 3641 ptp->rxctl = 0; 3642 3643 bnxt_hwrm_ptp_cfg(bp); 3644 3645 if (!BNXT_CHIP_P5(bp)) 3646 bnxt_unmap_ptp_regs(bp); 3647 else 3648 bnxt_ptp_stop(bp); 3649 3650 return 0; 3651 } 3652 3653 static int 3654 bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 3655 struct timespec *timestamp, 3656 uint32_t flags __rte_unused) 3657 { 3658 struct bnxt *bp = dev->data->dev_private; 3659 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3660 uint64_t rx_tstamp_cycles = 0; 3661 uint64_t ns; 3662 3663 if (!ptp) 3664 return -ENOTSUP; 3665 3666 if (BNXT_CHIP_P5(bp)) 3667 rx_tstamp_cycles = ptp->rx_timestamp; 3668 else 3669 bnxt_get_rx_ts(bp, &rx_tstamp_cycles); 3670 3671 ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles); 3672 *timestamp = rte_ns_to_timespec(ns); 3673 return 0; 3674 } 3675 3676 static int 3677 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 3678 struct timespec *timestamp) 3679 { 3680 struct bnxt *bp = dev->data->dev_private; 3681 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3682 uint64_t tx_tstamp_cycles = 0; 3683 uint64_t ns; 3684 int rc = 0; 3685 3686 if (!ptp) 3687 return -ENOTSUP; 3688 3689 if (BNXT_CHIP_P5(bp)) 3690 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_PATH_TX, 3691 &tx_tstamp_cycles); 3692 else 3693 rc = bnxt_get_tx_ts(bp, &tx_tstamp_cycles); 3694 3695 ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles); 3696 
*timestamp = rte_ns_to_timespec(ns); 3697 3698 return rc; 3699 } 3700 3701 static int 3702 bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 3703 { 3704 struct bnxt *bp = dev->data->dev_private; 3705 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3706 3707 if (!ptp) 3708 return -ENOTSUP; 3709 3710 ptp->tc.nsec += delta; 3711 ptp->tx_tstamp_tc.nsec += delta; 3712 ptp->rx_tstamp_tc.nsec += delta; 3713 3714 return 0; 3715 } 3716 3717 static int 3718 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev) 3719 { 3720 struct bnxt *bp = dev->data->dev_private; 3721 int rc; 3722 uint32_t dir_entries; 3723 uint32_t entry_length; 3724 3725 rc = is_bnxt_in_error(bp); 3726 if (rc) 3727 return rc; 3728 3729 PMD_DRV_LOG(INFO, PCI_PRI_FMT "\n", 3730 bp->pdev->addr.domain, bp->pdev->addr.bus, 3731 bp->pdev->addr.devid, bp->pdev->addr.function); 3732 3733 rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length); 3734 if (rc != 0) 3735 return rc; 3736 3737 return dir_entries * entry_length; 3738 } 3739 3740 static int 3741 bnxt_get_eeprom_op(struct rte_eth_dev *dev, 3742 struct rte_dev_eeprom_info *in_eeprom) 3743 { 3744 struct bnxt *bp = dev->data->dev_private; 3745 uint32_t index; 3746 uint32_t offset; 3747 int rc; 3748 3749 rc = is_bnxt_in_error(bp); 3750 if (rc) 3751 return rc; 3752 3753 PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n", 3754 bp->pdev->addr.domain, bp->pdev->addr.bus, 3755 bp->pdev->addr.devid, bp->pdev->addr.function, 3756 in_eeprom->offset, in_eeprom->length); 3757 3758 if (in_eeprom->offset == 0) /* special offset value to get directory */ 3759 return bnxt_get_nvram_directory(bp, in_eeprom->length, 3760 in_eeprom->data); 3761 3762 index = in_eeprom->offset >> 24; 3763 offset = in_eeprom->offset & 0xffffff; 3764 3765 if (index != 0) 3766 return bnxt_hwrm_get_nvram_item(bp, index - 1, offset, 3767 in_eeprom->length, in_eeprom->data); 3768 3769 return 0; 3770 } 3771 3772 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type) 3773 { 3774 switch (dir_type) { 3775 case BNX_DIR_TYPE_CHIMP_PATCH: 3776 case BNX_DIR_TYPE_BOOTCODE: 3777 case BNX_DIR_TYPE_BOOTCODE_2: 3778 case BNX_DIR_TYPE_APE_FW: 3779 case BNX_DIR_TYPE_APE_PATCH: 3780 case BNX_DIR_TYPE_KONG_FW: 3781 case BNX_DIR_TYPE_KONG_PATCH: 3782 case BNX_DIR_TYPE_BONO_FW: 3783 case BNX_DIR_TYPE_BONO_PATCH: 3784 /* FALLTHROUGH */ 3785 return true; 3786 } 3787 3788 return false; 3789 } 3790 3791 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type) 3792 { 3793 switch (dir_type) { 3794 case BNX_DIR_TYPE_AVS: 3795 case BNX_DIR_TYPE_EXP_ROM_MBA: 3796 case BNX_DIR_TYPE_PCIE: 3797 case BNX_DIR_TYPE_TSCF_UCODE: 3798 case BNX_DIR_TYPE_EXT_PHY: 3799 case BNX_DIR_TYPE_CCM: 3800 case BNX_DIR_TYPE_ISCSI_BOOT: 3801 case BNX_DIR_TYPE_ISCSI_BOOT_IPV6: 3802 case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6: 3803 /* FALLTHROUGH */ 3804 return true; 3805 } 3806 3807 return false; 3808 } 3809 3810 static bool bnxt_dir_type_is_executable(uint16_t dir_type) 3811 { 3812 return bnxt_dir_type_is_ape_bin_format(dir_type) || 3813 bnxt_dir_type_is_other_exec_format(dir_type); 3814 } 3815 3816 static int 3817 bnxt_set_eeprom_op(struct rte_eth_dev *dev, 3818 struct rte_dev_eeprom_info *in_eeprom) 3819 { 3820 struct bnxt *bp = dev->data->dev_private; 3821 uint8_t index, dir_op; 3822 uint16_t type, ext, ordinal, attr; 3823 int rc; 3824 3825 rc = is_bnxt_in_error(bp); 3826 if (rc) 3827 return rc; 3828 3829 PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n", 3830 bp->pdev->addr.domain, bp->pdev->addr.bus, 3831 
bp->pdev->addr.devid, bp->pdev->addr.function, 3832 in_eeprom->offset, in_eeprom->length); 3833 3834 if (!BNXT_PF(bp)) { 3835 PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n"); 3836 return -EINVAL; 3837 } 3838 3839 type = in_eeprom->magic >> 16; 3840 3841 if (type == 0xffff) { /* special value for directory operations */ 3842 index = in_eeprom->magic & 0xff; 3843 dir_op = in_eeprom->magic >> 8; 3844 if (index == 0) 3845 return -EINVAL; 3846 switch (dir_op) { 3847 case 0x0e: /* erase */ 3848 if (in_eeprom->offset != ~in_eeprom->magic) 3849 return -EINVAL; 3850 return bnxt_hwrm_erase_nvram_directory(bp, index - 1); 3851 default: 3852 return -EINVAL; 3853 } 3854 } 3855 3856 /* Create or re-write an NVM item: */ 3857 if (bnxt_dir_type_is_executable(type) == true) 3858 return -EOPNOTSUPP; 3859 ext = in_eeprom->magic & 0xffff; 3860 ordinal = in_eeprom->offset >> 16; 3861 attr = in_eeprom->offset & 0xffff; 3862 3863 return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr, 3864 in_eeprom->data, in_eeprom->length); 3865 } 3866 3867 static int bnxt_get_module_info(struct rte_eth_dev *dev, 3868 struct rte_eth_dev_module_info *modinfo) 3869 { 3870 uint8_t module_info[SFF_DIAG_SUPPORT_OFFSET + 1]; 3871 struct bnxt *bp = dev->data->dev_private; 3872 int rc; 3873 3874 /* No point in going further if phy status indicates 3875 * module is not inserted or if it is powered down or 3876 * if it is of type 10GBase-T 3877 */ 3878 if (bp->link_info->module_status > 3879 HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG) { 3880 PMD_DRV_LOG(NOTICE, "Port %u : Module is not inserted or is powered down\n", 3881 dev->data->port_id); 3882 return -ENOTSUP; 3883 } 3884 3885 /* This feature is not supported in older firmware versions */ 3886 if (bp->hwrm_spec_code < 0x10202) { 3887 PMD_DRV_LOG(NOTICE, "Port %u : Feature is not supported in older firmware\n", 3888 dev->data->port_id); 3889 return -ENOTSUP; 3890 } 3891 3892 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 3893 SFF_DIAG_SUPPORT_OFFSET + 1, 3894 module_info); 3895 3896 if (rc) 3897 return rc; 3898 3899 switch (module_info[0]) { 3900 case SFF_MODULE_ID_SFP: 3901 modinfo->type = RTE_ETH_MODULE_SFF_8472; 3902 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; 3903 if (module_info[SFF_DIAG_SUPPORT_OFFSET] == 0) 3904 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_LEN; 3905 break; 3906 case SFF_MODULE_ID_QSFP: 3907 case SFF_MODULE_ID_QSFP_PLUS: 3908 modinfo->type = RTE_ETH_MODULE_SFF_8436; 3909 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_LEN; 3910 break; 3911 case SFF_MODULE_ID_QSFP28: 3912 modinfo->type = RTE_ETH_MODULE_SFF_8636; 3913 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; 3914 if (module_info[SFF8636_FLATMEM_OFFSET] & SFF8636_FLATMEM_MASK) 3915 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_LEN; 3916 break; 3917 default: 3918 PMD_DRV_LOG(NOTICE, "Port %u : Unsupported module\n", dev->data->port_id); 3919 return -ENOTSUP; 3920 } 3921 3922 PMD_DRV_LOG(INFO, "Port %u : modinfo->type = %d modinfo->eeprom_len = %d\n", 3923 dev->data->port_id, modinfo->type, modinfo->eeprom_len); 3924 3925 return 0; 3926 } 3927 3928 static int bnxt_get_module_eeprom(struct rte_eth_dev *dev, 3929 struct rte_dev_eeprom_info *info) 3930 { 3931 uint8_t pg_addr[5] = { I2C_DEV_ADDR_A0, I2C_DEV_ADDR_A0 }; 3932 uint32_t offset = info->offset, length = info->length; 3933 uint8_t module_info[SFF_DIAG_SUPPORT_OFFSET + 1]; 3934 struct bnxt *bp = dev->data->dev_private; 3935 uint8_t *data = info->data; 3936 uint8_t page = offset >> 7; 3937 uint8_t 
max_pages = 2; 3938 uint8_t opt_pages; 3939 int rc; 3940 3941 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 3942 SFF_DIAG_SUPPORT_OFFSET + 1, 3943 module_info); 3944 if (rc) 3945 return rc; 3946 3947 switch (module_info[0]) { 3948 case SFF_MODULE_ID_SFP: 3949 module_info[SFF_DIAG_SUPPORT_OFFSET] = 0; 3950 if (module_info[SFF_DIAG_SUPPORT_OFFSET]) { 3951 pg_addr[2] = I2C_DEV_ADDR_A2; 3952 pg_addr[3] = I2C_DEV_ADDR_A2; 3953 max_pages = 4; 3954 } 3955 break; 3956 case SFF_MODULE_ID_QSFP28: 3957 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 3958 SFF8636_OPT_PAGES_OFFSET, 3959 1, &opt_pages); 3960 if (rc) 3961 return rc; 3962 3963 if (opt_pages & SFF8636_PAGE1_MASK) { 3964 pg_addr[2] = I2C_DEV_ADDR_A0; 3965 max_pages = 3; 3966 } 3967 if (opt_pages & SFF8636_PAGE2_MASK) { 3968 pg_addr[3] = I2C_DEV_ADDR_A0; 3969 max_pages = 4; 3970 } 3971 if (~module_info[SFF8636_FLATMEM_OFFSET] & SFF8636_FLATMEM_MASK) { 3972 pg_addr[4] = I2C_DEV_ADDR_A0; 3973 max_pages = 5; 3974 } 3975 break; 3976 default: 3977 break; 3978 } 3979 3980 memset(data, 0, length); 3981 3982 offset &= 0xff; 3983 while (length && page < max_pages) { 3984 uint8_t raw_page = page ? page - 1 : 0; 3985 uint16_t chunk; 3986 3987 if (pg_addr[page] == I2C_DEV_ADDR_A2) 3988 raw_page = 0; 3989 else if (page) 3990 offset |= 0x80; 3991 chunk = RTE_MIN(length, 256 - offset); 3992 3993 if (pg_addr[page]) { 3994 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, pg_addr[page], 3995 raw_page, offset, 3996 chunk, data); 3997 if (rc) 3998 return rc; 3999 } 4000 4001 data += chunk; 4002 length -= chunk; 4003 offset = 0; 4004 page += 1 + (chunk > 128); 4005 } 4006 4007 return length ? -EINVAL : 0; 4008 } 4009 4010 /* 4011 * Initialization 4012 */ 4013 4014 static const struct eth_dev_ops bnxt_dev_ops = { 4015 .dev_infos_get = bnxt_dev_info_get_op, 4016 .dev_close = bnxt_dev_close_op, 4017 .dev_configure = bnxt_dev_configure_op, 4018 .dev_start = bnxt_dev_start_op, 4019 .dev_stop = bnxt_dev_stop_op, 4020 .dev_set_link_up = bnxt_dev_set_link_up_op, 4021 .dev_set_link_down = bnxt_dev_set_link_down_op, 4022 .stats_get = bnxt_stats_get_op, 4023 .stats_reset = bnxt_stats_reset_op, 4024 .rx_queue_setup = bnxt_rx_queue_setup_op, 4025 .rx_queue_release = bnxt_rx_queue_release_op, 4026 .tx_queue_setup = bnxt_tx_queue_setup_op, 4027 .tx_queue_release = bnxt_tx_queue_release_op, 4028 .rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op, 4029 .rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op, 4030 .reta_update = bnxt_reta_update_op, 4031 .reta_query = bnxt_reta_query_op, 4032 .rss_hash_update = bnxt_rss_hash_update_op, 4033 .rss_hash_conf_get = bnxt_rss_hash_conf_get_op, 4034 .link_update = bnxt_link_update_op, 4035 .promiscuous_enable = bnxt_promiscuous_enable_op, 4036 .promiscuous_disable = bnxt_promiscuous_disable_op, 4037 .allmulticast_enable = bnxt_allmulticast_enable_op, 4038 .allmulticast_disable = bnxt_allmulticast_disable_op, 4039 .mac_addr_add = bnxt_mac_addr_add_op, 4040 .mac_addr_remove = bnxt_mac_addr_remove_op, 4041 .flow_ctrl_get = bnxt_flow_ctrl_get_op, 4042 .flow_ctrl_set = bnxt_flow_ctrl_set_op, 4043 .udp_tunnel_port_add = bnxt_udp_tunnel_port_add_op, 4044 .udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op, 4045 .vlan_filter_set = bnxt_vlan_filter_set_op, 4046 .vlan_offload_set = bnxt_vlan_offload_set_op, 4047 .vlan_tpid_set = bnxt_vlan_tpid_set_op, 4048 .vlan_pvid_set = bnxt_vlan_pvid_set_op, 4049 .mtu_set = bnxt_mtu_set_op, 4050 .mac_addr_set = bnxt_set_default_mac_addr_op, 4051 .xstats_get = 
bnxt_dev_xstats_get_op, 4052 .xstats_get_names = bnxt_dev_xstats_get_names_op, 4053 .xstats_reset = bnxt_dev_xstats_reset_op, 4054 .fw_version_get = bnxt_fw_version_get, 4055 .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op, 4056 .rxq_info_get = bnxt_rxq_info_get_op, 4057 .txq_info_get = bnxt_txq_info_get_op, 4058 .rx_burst_mode_get = bnxt_rx_burst_mode_get, 4059 .tx_burst_mode_get = bnxt_tx_burst_mode_get, 4060 .dev_led_on = bnxt_dev_led_on_op, 4061 .dev_led_off = bnxt_dev_led_off_op, 4062 .rx_queue_start = bnxt_rx_queue_start, 4063 .rx_queue_stop = bnxt_rx_queue_stop, 4064 .tx_queue_start = bnxt_tx_queue_start, 4065 .tx_queue_stop = bnxt_tx_queue_stop, 4066 .flow_ops_get = bnxt_flow_ops_get_op, 4067 .dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op, 4068 .get_eeprom_length = bnxt_get_eeprom_length_op, 4069 .get_eeprom = bnxt_get_eeprom_op, 4070 .set_eeprom = bnxt_set_eeprom_op, 4071 .get_module_info = bnxt_get_module_info, 4072 .get_module_eeprom = bnxt_get_module_eeprom, 4073 .timesync_enable = bnxt_timesync_enable, 4074 .timesync_disable = bnxt_timesync_disable, 4075 .timesync_read_time = bnxt_timesync_read_time, 4076 .timesync_write_time = bnxt_timesync_write_time, 4077 .timesync_adjust_time = bnxt_timesync_adjust_time, 4078 .timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp, 4079 .timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp, 4080 }; 4081 4082 static uint32_t bnxt_map_reset_regs(struct bnxt *bp, uint32_t reg) 4083 { 4084 uint32_t offset; 4085 4086 /* Only pre-map the reset GRC registers using window 3 */ 4087 rte_write32(reg & 0xfffff000, (uint8_t *)bp->bar0 + 4088 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 8); 4089 4090 offset = BNXT_GRCP_WINDOW_3_BASE + (reg & 0xffc); 4091 4092 return offset; 4093 } 4094 4095 int bnxt_map_fw_health_status_regs(struct bnxt *bp) 4096 { 4097 struct bnxt_error_recovery_info *info = bp->recovery_info; 4098 uint32_t reg_base = 0xffffffff; 4099 int i; 4100 4101 /* Only pre-map the monitoring GRC registers using window 2 */ 4102 for (i = 0; i < BNXT_FW_STATUS_REG_CNT; i++) { 4103 uint32_t reg = info->status_regs[i]; 4104 4105 if (BNXT_FW_STATUS_REG_TYPE(reg) != BNXT_FW_STATUS_REG_TYPE_GRC) 4106 continue; 4107 4108 if (reg_base == 0xffffffff) 4109 reg_base = reg & 0xfffff000; 4110 if ((reg & 0xfffff000) != reg_base) 4111 return -ERANGE; 4112 4113 /* Use mask 0xffc as the Lower 2 bits indicates 4114 * address space location 4115 */ 4116 info->mapped_status_regs[i] = BNXT_GRCP_WINDOW_2_BASE + 4117 (reg & 0xffc); 4118 } 4119 4120 if (reg_base == 0xffffffff) 4121 return 0; 4122 4123 rte_write32(reg_base, (uint8_t *)bp->bar0 + 4124 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 4125 4126 return 0; 4127 } 4128 4129 static void bnxt_write_fw_reset_reg(struct bnxt *bp, uint32_t index) 4130 { 4131 struct bnxt_error_recovery_info *info = bp->recovery_info; 4132 uint32_t delay = info->delay_after_reset[index]; 4133 uint32_t val = info->reset_reg_val[index]; 4134 uint32_t reg = info->reset_reg[index]; 4135 uint32_t type, offset; 4136 int ret; 4137 4138 type = BNXT_FW_STATUS_REG_TYPE(reg); 4139 offset = BNXT_FW_STATUS_REG_OFF(reg); 4140 4141 switch (type) { 4142 case BNXT_FW_STATUS_REG_TYPE_CFG: 4143 ret = rte_pci_write_config(bp->pdev, &val, sizeof(val), offset); 4144 if (ret < 0) { 4145 PMD_DRV_LOG(ERR, "Failed to write %#x at PCI offset %#x", 4146 val, offset); 4147 return; 4148 } 4149 break; 4150 case BNXT_FW_STATUS_REG_TYPE_GRC: 4151 offset = bnxt_map_reset_regs(bp, offset); 4152 rte_write32(val, (uint8_t *)bp->bar0 + offset); 4153 break; 4154 
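    /* BAR0-type reset registers need no GRC window remapping; the value is
     * written directly at the offset encoded in the reset register entry.
     */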
case BNXT_FW_STATUS_REG_TYPE_BAR0: 4155 rte_write32(val, (uint8_t *)bp->bar0 + offset); 4156 break; 4157 } 4158 /* wait on a specific interval of time until core reset is complete */ 4159 if (delay) 4160 rte_delay_ms(delay); 4161 } 4162 4163 static void bnxt_dev_cleanup(struct bnxt *bp) 4164 { 4165 bp->eth_dev->data->dev_link.link_status = 0; 4166 bp->link_info->link_up = 0; 4167 if (bp->eth_dev->data->dev_started) 4168 bnxt_dev_stop(bp->eth_dev); 4169 4170 bnxt_uninit_resources(bp, true); 4171 } 4172 4173 static int 4174 bnxt_check_fw_reset_done(struct bnxt *bp) 4175 { 4176 int timeout = bp->fw_reset_max_msecs; 4177 uint16_t val = 0; 4178 int rc; 4179 4180 do { 4181 rc = rte_pci_read_config(bp->pdev, &val, sizeof(val), PCI_SUBSYSTEM_ID_OFFSET); 4182 if (rc < 0) { 4183 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", PCI_SUBSYSTEM_ID_OFFSET); 4184 return rc; 4185 } 4186 if (val != 0xffff) 4187 break; 4188 rte_delay_ms(1); 4189 } while (timeout--); 4190 4191 if (val == 0xffff) { 4192 PMD_DRV_LOG(ERR, "Firmware reset aborted, PCI config space invalid\n"); 4193 return -1; 4194 } 4195 4196 return 0; 4197 } 4198 4199 static int bnxt_restore_vlan_filters(struct bnxt *bp) 4200 { 4201 struct rte_eth_dev *dev = bp->eth_dev; 4202 struct rte_vlan_filter_conf *vfc; 4203 int vidx, vbit, rc; 4204 uint16_t vlan_id; 4205 4206 for (vlan_id = 1; vlan_id <= RTE_ETHER_MAX_VLAN_ID; vlan_id++) { 4207 vfc = &dev->data->vlan_filter_conf; 4208 vidx = vlan_id / 64; 4209 vbit = vlan_id % 64; 4210 4211 /* Each bit corresponds to a VLAN id */ 4212 if (vfc->ids[vidx] & (UINT64_C(1) << vbit)) { 4213 rc = bnxt_add_vlan_filter(bp, vlan_id); 4214 if (rc) 4215 return rc; 4216 } 4217 } 4218 4219 return 0; 4220 } 4221 4222 static int bnxt_restore_mac_filters(struct bnxt *bp) 4223 { 4224 struct rte_eth_dev *dev = bp->eth_dev; 4225 struct rte_eth_dev_info dev_info; 4226 struct rte_ether_addr *addr; 4227 uint64_t pool_mask; 4228 uint32_t pool = 0; 4229 uint32_t i; 4230 int rc; 4231 4232 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 4233 return 0; 4234 4235 rc = bnxt_dev_info_get_op(dev, &dev_info); 4236 if (rc) 4237 return rc; 4238 4239 /* replay MAC address configuration */ 4240 for (i = 1; i < dev_info.max_mac_addrs; i++) { 4241 addr = &dev->data->mac_addrs[i]; 4242 4243 /* skip zero address */ 4244 if (rte_is_zero_ether_addr(addr)) 4245 continue; 4246 4247 pool = 0; 4248 pool_mask = dev->data->mac_pool_sel[i]; 4249 4250 do { 4251 if (pool_mask & 1ULL) { 4252 rc = bnxt_mac_addr_add_op(dev, addr, i, pool); 4253 if (rc) 4254 return rc; 4255 } 4256 pool_mask >>= 1; 4257 pool++; 4258 } while (pool_mask); 4259 } 4260 4261 return 0; 4262 } 4263 4264 static int bnxt_restore_filters(struct bnxt *bp) 4265 { 4266 struct rte_eth_dev *dev = bp->eth_dev; 4267 int ret = 0; 4268 4269 if (dev->data->all_multicast) { 4270 ret = bnxt_allmulticast_enable_op(dev); 4271 if (ret) 4272 return ret; 4273 } 4274 if (dev->data->promiscuous) { 4275 ret = bnxt_promiscuous_enable_op(dev); 4276 if (ret) 4277 return ret; 4278 } 4279 4280 ret = bnxt_restore_mac_filters(bp); 4281 if (ret) 4282 return ret; 4283 4284 ret = bnxt_restore_vlan_filters(bp); 4285 /* TODO restore other filters as well */ 4286 return ret; 4287 } 4288 4289 static int bnxt_check_fw_ready(struct bnxt *bp) 4290 { 4291 int timeout = bp->fw_reset_max_msecs; 4292 int rc = 0; 4293 4294 do { 4295 rc = bnxt_hwrm_poll_ver_get(bp); 4296 if (rc == 0) 4297 break; 4298 rte_delay_ms(BNXT_FW_READY_WAIT_INTERVAL); 4299 timeout -= BNXT_FW_READY_WAIT_INTERVAL; 4300 } while (rc && timeout > 0); 4301 4302 
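    /* A non-zero rc here means firmware did not answer HWRM_VER_GET within
     * the fw_reset_max_msecs budget.
     */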
if (rc) 4303 PMD_DRV_LOG(ERR, "FW is not Ready after reset\n"); 4304 4305 return rc; 4306 } 4307 4308 static void bnxt_dev_recover(void *arg) 4309 { 4310 struct bnxt *bp = arg; 4311 int rc = 0; 4312 4313 pthread_mutex_lock(&bp->err_recovery_lock); 4314 4315 if (!bp->fw_reset_min_msecs) { 4316 rc = bnxt_check_fw_reset_done(bp); 4317 if (rc) 4318 goto err; 4319 } 4320 4321 /* Clear Error flag so that device re-init should happen */ 4322 bp->flags &= ~BNXT_FLAG_FATAL_ERROR; 4323 PMD_DRV_LOG(INFO, "Port: %u Starting recovery...\n", 4324 bp->eth_dev->data->port_id); 4325 4326 rc = bnxt_check_fw_ready(bp); 4327 if (rc) 4328 goto err; 4329 4330 rc = bnxt_init_resources(bp, true); 4331 if (rc) { 4332 PMD_DRV_LOG(ERR, 4333 "Failed to initialize resources after reset\n"); 4334 goto err; 4335 } 4336 /* clear reset flag as the device is initialized now */ 4337 bp->flags &= ~BNXT_FLAG_FW_RESET; 4338 4339 rc = bnxt_dev_start_op(bp->eth_dev); 4340 if (rc) { 4341 PMD_DRV_LOG(ERR, "Failed to start port after reset\n"); 4342 goto err_start; 4343 } 4344 4345 rte_eth_fp_ops[bp->eth_dev->data->port_id].rx_pkt_burst = 4346 bp->eth_dev->rx_pkt_burst; 4347 rte_eth_fp_ops[bp->eth_dev->data->port_id].tx_pkt_burst = 4348 bp->eth_dev->tx_pkt_burst; 4349 rte_mb(); 4350 4351 rc = bnxt_restore_filters(bp); 4352 if (rc) 4353 goto err_start; 4354 4355 PMD_DRV_LOG(INFO, "Port: %u Recovered from FW reset\n", 4356 bp->eth_dev->data->port_id); 4357 pthread_mutex_unlock(&bp->err_recovery_lock); 4358 4359 return; 4360 err_start: 4361 bnxt_dev_stop(bp->eth_dev); 4362 err: 4363 bp->flags |= BNXT_FLAG_FATAL_ERROR; 4364 bnxt_uninit_resources(bp, false); 4365 if (bp->eth_dev->data->dev_conf.intr_conf.rmv) 4366 rte_eth_dev_callback_process(bp->eth_dev, 4367 RTE_ETH_EVENT_INTR_RMV, 4368 NULL); 4369 pthread_mutex_unlock(&bp->err_recovery_lock); 4370 PMD_DRV_LOG(ERR, "Failed to recover from FW reset\n"); 4371 } 4372 4373 void bnxt_dev_reset_and_resume(void *arg) 4374 { 4375 struct bnxt *bp = arg; 4376 uint32_t us = US_PER_MS * bp->fw_reset_min_msecs; 4377 uint16_t val = 0; 4378 int rc; 4379 4380 bnxt_dev_cleanup(bp); 4381 PMD_DRV_LOG(INFO, "Port: %u Finished bnxt_dev_cleanup\n", 4382 bp->eth_dev->data->port_id); 4383 4384 bnxt_wait_for_device_shutdown(bp); 4385 4386 /* During some fatal firmware error conditions, the PCI config space 4387 * register 0x2e which normally contains the subsystem ID will become 4388 * 0xffff. This register will revert back to the normal value after 4389 * the chip has completed core reset. If we detect this condition, 4390 * we can poll this config register immediately for the value to revert. 
4391 */ 4392 if (bp->flags & BNXT_FLAG_FATAL_ERROR) { 4393 rc = rte_pci_read_config(bp->pdev, &val, sizeof(val), PCI_SUBSYSTEM_ID_OFFSET); 4394 if (rc < 0) { 4395 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", PCI_SUBSYSTEM_ID_OFFSET); 4396 return; 4397 } 4398 if (val == 0xffff) { 4399 bp->fw_reset_min_msecs = 0; 4400 us = 1; 4401 } 4402 } 4403 4404 rc = rte_eal_alarm_set(us, bnxt_dev_recover, (void *)bp); 4405 if (rc) 4406 PMD_DRV_LOG(ERR, "Error setting recovery alarm"); 4407 } 4408 4409 uint32_t bnxt_read_fw_status_reg(struct bnxt *bp, uint32_t index) 4410 { 4411 struct bnxt_error_recovery_info *info = bp->recovery_info; 4412 uint32_t reg = info->status_regs[index]; 4413 uint32_t type, offset, val = 0; 4414 int ret = 0; 4415 4416 type = BNXT_FW_STATUS_REG_TYPE(reg); 4417 offset = BNXT_FW_STATUS_REG_OFF(reg); 4418 4419 switch (type) { 4420 case BNXT_FW_STATUS_REG_TYPE_CFG: 4421 ret = rte_pci_read_config(bp->pdev, &val, sizeof(val), offset); 4422 if (ret < 0) 4423 PMD_DRV_LOG(ERR, "Failed to read PCI offset %#x", 4424 offset); 4425 break; 4426 case BNXT_FW_STATUS_REG_TYPE_GRC: 4427 offset = info->mapped_status_regs[index]; 4428 /* FALLTHROUGH */ 4429 case BNXT_FW_STATUS_REG_TYPE_BAR0: 4430 val = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 4431 offset)); 4432 break; 4433 } 4434 4435 return val; 4436 } 4437 4438 static int bnxt_fw_reset_all(struct bnxt *bp) 4439 { 4440 struct bnxt_error_recovery_info *info = bp->recovery_info; 4441 uint32_t i; 4442 int rc = 0; 4443 4444 if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) { 4445 /* Reset through primary function driver */ 4446 for (i = 0; i < info->reg_array_cnt; i++) 4447 bnxt_write_fw_reset_reg(bp, i); 4448 /* Wait for time specified by FW after triggering reset */ 4449 rte_delay_ms(info->primary_func_wait_period_after_reset); 4450 } else if (info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) { 4451 /* Reset with the help of Kong processor */ 4452 rc = bnxt_hwrm_fw_reset(bp); 4453 if (rc) 4454 PMD_DRV_LOG(ERR, "Failed to reset FW\n"); 4455 } 4456 4457 return rc; 4458 } 4459 4460 static void bnxt_fw_reset_cb(void *arg) 4461 { 4462 struct bnxt *bp = arg; 4463 struct bnxt_error_recovery_info *info = bp->recovery_info; 4464 int rc = 0; 4465 4466 /* Only Primary function can do FW reset */ 4467 if (bnxt_is_primary_func(bp) && 4468 bnxt_is_recovery_enabled(bp)) { 4469 rc = bnxt_fw_reset_all(bp); 4470 if (rc) { 4471 PMD_DRV_LOG(ERR, "Adapter recovery failed\n"); 4472 return; 4473 } 4474 } 4475 4476 /* if recovery method is ERROR_RECOVERY_CO_CPU, KONG will send 4477 * EXCEPTION_FATAL_ASYNC event to all the functions 4478 * (including MASTER FUNC). After receiving this Async, all the active 4479 * drivers should treat this case as FW initiated recovery 4480 */ 4481 if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) { 4482 bp->fw_reset_min_msecs = BNXT_MIN_FW_READY_TIMEOUT; 4483 bp->fw_reset_max_msecs = BNXT_MAX_FW_RESET_TIMEOUT; 4484 4485 /* To recover from error */ 4486 rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume, 4487 (void *)bp); 4488 } 4489 } 4490 4491 /* Driver should poll FW heartbeat, reset_counter with the frequency 4492 * advertised by FW in HWRM_ERROR_RECOVERY_QCFG. 4493 * When the driver detects heartbeat stop or change in reset_counter, 4494 * it has to trigger a reset to recover from the error condition. 4495 * A “primary function” is the function who will have the privilege to 4496 * initiate the chimp reset. The primary function will be elected by the 4497 * firmware and will be notified through async message. 
4498 */ 4499 static void bnxt_check_fw_health(void *arg) 4500 { 4501 struct bnxt *bp = arg; 4502 struct bnxt_error_recovery_info *info = bp->recovery_info; 4503 uint32_t val = 0, wait_msec; 4504 4505 if (!info || !bnxt_is_recovery_enabled(bp) || 4506 is_bnxt_in_error(bp)) 4507 return; 4508 4509 val = bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG); 4510 if (val == info->last_heart_beat) 4511 goto reset; 4512 4513 info->last_heart_beat = val; 4514 4515 val = bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG); 4516 if (val != info->last_reset_counter) 4517 goto reset; 4518 4519 info->last_reset_counter = val; 4520 4521 rte_eal_alarm_set(US_PER_MS * info->driver_polling_freq, 4522 bnxt_check_fw_health, (void *)bp); 4523 4524 return; 4525 reset: 4526 /* Stop DMA to/from device */ 4527 bp->flags |= BNXT_FLAG_FATAL_ERROR; 4528 bp->flags |= BNXT_FLAG_FW_RESET; 4529 4530 bnxt_stop_rxtx(bp); 4531 4532 PMD_DRV_LOG(ERR, "Detected FW dead condition\n"); 4533 4534 if (bnxt_is_primary_func(bp)) 4535 wait_msec = info->primary_func_wait_period; 4536 else 4537 wait_msec = info->normal_func_wait_period; 4538 4539 rte_eal_alarm_set(US_PER_MS * wait_msec, 4540 bnxt_fw_reset_cb, (void *)bp); 4541 } 4542 4543 void bnxt_schedule_fw_health_check(struct bnxt *bp) 4544 { 4545 uint32_t polling_freq; 4546 4547 pthread_mutex_lock(&bp->health_check_lock); 4548 4549 if (!bnxt_is_recovery_enabled(bp)) 4550 goto done; 4551 4552 if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED) 4553 goto done; 4554 4555 polling_freq = bp->recovery_info->driver_polling_freq; 4556 4557 rte_eal_alarm_set(US_PER_MS * polling_freq, 4558 bnxt_check_fw_health, (void *)bp); 4559 bp->flags |= BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED; 4560 4561 done: 4562 pthread_mutex_unlock(&bp->health_check_lock); 4563 } 4564 4565 static void bnxt_cancel_fw_health_check(struct bnxt *bp) 4566 { 4567 rte_eal_alarm_cancel(bnxt_check_fw_health, (void *)bp); 4568 bp->flags &= ~BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED; 4569 } 4570 4571 static bool bnxt_vf_pciid(uint16_t device_id) 4572 { 4573 switch (device_id) { 4574 case BROADCOM_DEV_ID_57304_VF: 4575 case BROADCOM_DEV_ID_57406_VF: 4576 case BROADCOM_DEV_ID_5731X_VF: 4577 case BROADCOM_DEV_ID_5741X_VF: 4578 case BROADCOM_DEV_ID_57414_VF: 4579 case BROADCOM_DEV_ID_STRATUS_NIC_VF1: 4580 case BROADCOM_DEV_ID_STRATUS_NIC_VF2: 4581 case BROADCOM_DEV_ID_58802_VF: 4582 case BROADCOM_DEV_ID_57500_VF1: 4583 case BROADCOM_DEV_ID_57500_VF2: 4584 case BROADCOM_DEV_ID_58818_VF: 4585 /* FALLTHROUGH */ 4586 return true; 4587 default: 4588 return false; 4589 } 4590 } 4591 4592 /* Phase 5 device */ 4593 static bool bnxt_p5_device(uint16_t device_id) 4594 { 4595 switch (device_id) { 4596 case BROADCOM_DEV_ID_57508: 4597 case BROADCOM_DEV_ID_57504: 4598 case BROADCOM_DEV_ID_57502: 4599 case BROADCOM_DEV_ID_57508_MF1: 4600 case BROADCOM_DEV_ID_57504_MF1: 4601 case BROADCOM_DEV_ID_57502_MF1: 4602 case BROADCOM_DEV_ID_57508_MF2: 4603 case BROADCOM_DEV_ID_57504_MF2: 4604 case BROADCOM_DEV_ID_57502_MF2: 4605 case BROADCOM_DEV_ID_57500_VF1: 4606 case BROADCOM_DEV_ID_57500_VF2: 4607 case BROADCOM_DEV_ID_58812: 4608 case BROADCOM_DEV_ID_58814: 4609 case BROADCOM_DEV_ID_58818: 4610 case BROADCOM_DEV_ID_58818_VF: 4611 /* FALLTHROUGH */ 4612 return true; 4613 default: 4614 return false; 4615 } 4616 } 4617 4618 bool bnxt_stratus_device(struct bnxt *bp) 4619 { 4620 uint16_t device_id = bp->pdev->id.device_id; 4621 4622 switch (device_id) { 4623 case BROADCOM_DEV_ID_STRATUS_NIC: 4624 case BROADCOM_DEV_ID_STRATUS_NIC_VF1: 4625 case 
BROADCOM_DEV_ID_STRATUS_NIC_VF2: 4626 /* FALLTHROUGH */ 4627 return true; 4628 default: 4629 return false; 4630 } 4631 } 4632 4633 static int bnxt_map_pci_bars(struct rte_eth_dev *eth_dev) 4634 { 4635 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 4636 struct bnxt *bp = eth_dev->data->dev_private; 4637 4638 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 4639 bp->bar0 = (void *)pci_dev->mem_resource[0].addr; 4640 bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr; 4641 if (!bp->bar0 || !bp->doorbell_base) { 4642 PMD_DRV_LOG(ERR, "Unable to access Hardware\n"); 4643 return -ENODEV; 4644 } 4645 4646 bp->eth_dev = eth_dev; 4647 bp->pdev = pci_dev; 4648 4649 return 0; 4650 } 4651 4652 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, 4653 struct bnxt_ctx_pg_info *ctx_pg, 4654 uint32_t mem_size, 4655 const char *suffix, 4656 uint16_t idx) 4657 { 4658 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 4659 const struct rte_memzone *mz = NULL; 4660 char mz_name[RTE_MEMZONE_NAMESIZE]; 4661 rte_iova_t mz_phys_addr; 4662 uint64_t valid_bits = 0; 4663 uint32_t sz; 4664 int i; 4665 4666 if (!mem_size) 4667 return 0; 4668 4669 rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) / 4670 BNXT_PAGE_SIZE; 4671 rmem->page_size = BNXT_PAGE_SIZE; 4672 rmem->pg_arr = ctx_pg->ctx_pg_arr; 4673 rmem->dma_arr = ctx_pg->ctx_dma_arr; 4674 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; 4675 4676 valid_bits = PTU_PTE_VALID; 4677 4678 if (rmem->nr_pages > 1) { 4679 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 4680 "bnxt_ctx_pg_tbl%s_%x_%d", 4681 suffix, idx, bp->eth_dev->data->port_id); 4682 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4683 mz = rte_memzone_lookup(mz_name); 4684 if (!mz) { 4685 mz = rte_memzone_reserve_aligned(mz_name, 4686 rmem->nr_pages * 8, 4687 bp->eth_dev->device->numa_node, 4688 RTE_MEMZONE_2MB | 4689 RTE_MEMZONE_SIZE_HINT_ONLY | 4690 RTE_MEMZONE_IOVA_CONTIG, 4691 BNXT_PAGE_SIZE); 4692 if (mz == NULL) 4693 return -ENOMEM; 4694 } 4695 4696 memset(mz->addr, 0, mz->len); 4697 mz_phys_addr = mz->iova; 4698 4699 rmem->pg_tbl = mz->addr; 4700 rmem->pg_tbl_map = mz_phys_addr; 4701 rmem->pg_tbl_mz = mz; 4702 } 4703 4704 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d", 4705 suffix, idx, bp->eth_dev->data->port_id); 4706 mz = rte_memzone_lookup(mz_name); 4707 if (!mz) { 4708 mz = rte_memzone_reserve_aligned(mz_name, 4709 mem_size, 4710 bp->eth_dev->device->numa_node, 4711 RTE_MEMZONE_1GB | 4712 RTE_MEMZONE_SIZE_HINT_ONLY | 4713 RTE_MEMZONE_IOVA_CONTIG, 4714 BNXT_PAGE_SIZE); 4715 if (mz == NULL) 4716 return -ENOMEM; 4717 } 4718 4719 memset(mz->addr, 0, mz->len); 4720 mz_phys_addr = mz->iova; 4721 4722 for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) { 4723 rmem->pg_arr[i] = ((char *)mz->addr) + sz; 4724 rmem->dma_arr[i] = mz_phys_addr + sz; 4725 4726 if (rmem->nr_pages > 1) { 4727 if (i == rmem->nr_pages - 2 && 4728 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 4729 valid_bits |= PTU_PTE_NEXT_TO_LAST; 4730 else if (i == rmem->nr_pages - 1 && 4731 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 4732 valid_bits |= PTU_PTE_LAST; 4733 4734 rmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] | 4735 valid_bits); 4736 } 4737 } 4738 4739 rmem->mz = mz; 4740 if (rmem->vmem_size) 4741 rmem->vmem = (void **)mz->addr; 4742 rmem->dma_arr[0] = mz_phys_addr; 4743 return 0; 4744 } 4745 4746 static void bnxt_free_ctx_mem(struct bnxt *bp) 4747 { 4748 int i; 4749 4750 if (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED)) 4751 return; 4752 4753 bp->ctx->flags &= 
~BNXT_CTX_FLAG_INITED; 4754 rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz); 4755 rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz); 4756 rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz); 4757 rte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz); 4758 rte_memzone_free(bp->ctx->stat_mem.ring_mem.mz); 4759 rte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz); 4760 rte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz); 4761 rte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz); 4762 rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz); 4763 rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz); 4764 4765 for (i = 0; i < bp->ctx->tqm_fp_rings_count + 1; i++) { 4766 if (bp->ctx->tqm_mem[i]) 4767 rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz); 4768 } 4769 4770 rte_free(bp->ctx); 4771 bp->ctx = NULL; 4772 } 4773 4774 #define bnxt_roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) 4775 4776 #define min_t(type, x, y) ({ \ 4777 type __min1 = (x); \ 4778 type __min2 = (y); \ 4779 __min1 < __min2 ? __min1 : __min2; }) 4780 4781 #define max_t(type, x, y) ({ \ 4782 type __max1 = (x); \ 4783 type __max2 = (y); \ 4784 __max1 > __max2 ? __max1 : __max2; }) 4785 4786 #define clamp_t(type, _x, min, max) min_t(type, max_t(type, _x, min), max) 4787 4788 int bnxt_alloc_ctx_mem(struct bnxt *bp) 4789 { 4790 struct bnxt_ctx_pg_info *ctx_pg; 4791 struct bnxt_ctx_mem_info *ctx; 4792 uint32_t mem_size, ena, entries; 4793 uint32_t entries_sp, min; 4794 int i, rc; 4795 4796 rc = bnxt_hwrm_func_backing_store_qcaps(bp); 4797 if (rc) { 4798 PMD_DRV_LOG(ERR, "Query context mem capability failed\n"); 4799 return rc; 4800 } 4801 ctx = bp->ctx; 4802 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) 4803 return 0; 4804 4805 ctx_pg = &ctx->qp_mem; 4806 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries; 4807 if (ctx->qp_entry_size) { 4808 mem_size = ctx->qp_entry_size * ctx_pg->entries; 4809 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0); 4810 if (rc) 4811 return rc; 4812 } 4813 4814 ctx_pg = &ctx->srq_mem; 4815 ctx_pg->entries = ctx->srq_max_l2_entries; 4816 if (ctx->srq_entry_size) { 4817 mem_size = ctx->srq_entry_size * ctx_pg->entries; 4818 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0); 4819 if (rc) 4820 return rc; 4821 } 4822 4823 ctx_pg = &ctx->cq_mem; 4824 ctx_pg->entries = ctx->cq_max_l2_entries; 4825 if (ctx->cq_entry_size) { 4826 mem_size = ctx->cq_entry_size * ctx_pg->entries; 4827 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0); 4828 if (rc) 4829 return rc; 4830 } 4831 4832 ctx_pg = &ctx->vnic_mem; 4833 ctx_pg->entries = ctx->vnic_max_vnic_entries + 4834 ctx->vnic_max_ring_table_entries; 4835 if (ctx->vnic_entry_size) { 4836 mem_size = ctx->vnic_entry_size * ctx_pg->entries; 4837 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0); 4838 if (rc) 4839 return rc; 4840 } 4841 4842 ctx_pg = &ctx->stat_mem; 4843 ctx_pg->entries = ctx->stat_max_entries; 4844 if (ctx->stat_entry_size) { 4845 mem_size = ctx->stat_entry_size * ctx_pg->entries; 4846 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0); 4847 if (rc) 4848 return rc; 4849 } 4850 4851 min = ctx->tqm_min_entries_per_ring; 4852 4853 entries_sp = ctx->qp_max_l2_entries + 4854 ctx->vnic_max_vnic_entries + 4855 2 * ctx->qp_min_qp1_entries + min; 4856 entries_sp = bnxt_roundup(entries_sp, ctx->tqm_entries_multiple); 4857 4858 entries = ctx->qp_max_l2_entries + ctx->qp_min_qp1_entries; 4859 entries = bnxt_roundup(entries, ctx->tqm_entries_multiple); 4860 entries = clamp_t(uint32_t, entries, 
min, 4861 ctx->tqm_max_entries_per_ring); 4862 for (i = 0, ena = 0; i < ctx->tqm_fp_rings_count + 1; i++) { 4863 /* i=0 is for TQM_SP. i=1 to i=8 applies to RING0 to RING7. 4864 * i > 8 is other ext rings. 4865 */ 4866 ctx_pg = ctx->tqm_mem[i]; 4867 ctx_pg->entries = i ? entries : entries_sp; 4868 if (ctx->tqm_entry_size) { 4869 mem_size = ctx->tqm_entry_size * ctx_pg->entries; 4870 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, 4871 "tqm_mem", i); 4872 if (rc) 4873 return rc; 4874 } 4875 if (i < BNXT_MAX_TQM_LEGACY_RINGS) 4876 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i; 4877 else 4878 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8; 4879 } 4880 4881 ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES; 4882 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); 4883 if (rc) 4884 PMD_DRV_LOG(ERR, 4885 "Failed to configure context mem: rc = %d\n", rc); 4886 else 4887 ctx->flags |= BNXT_CTX_FLAG_INITED; 4888 4889 return rc; 4890 } 4891 4892 static int bnxt_alloc_stats_mem(struct bnxt *bp) 4893 { 4894 struct rte_pci_device *pci_dev = bp->pdev; 4895 char mz_name[RTE_MEMZONE_NAMESIZE]; 4896 const struct rte_memzone *mz = NULL; 4897 uint32_t total_alloc_len; 4898 rte_iova_t mz_phys_addr; 4899 4900 if (pci_dev->id.device_id == BROADCOM_DEV_ID_NS2) 4901 return 0; 4902 4903 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 4904 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 4905 pci_dev->addr.bus, pci_dev->addr.devid, 4906 pci_dev->addr.function, "rx_port_stats"); 4907 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4908 mz = rte_memzone_lookup(mz_name); 4909 total_alloc_len = 4910 RTE_CACHE_LINE_ROUNDUP(sizeof(struct rx_port_stats) + 4911 sizeof(struct rx_port_stats_ext) + 512); 4912 if (!mz) { 4913 mz = rte_memzone_reserve(mz_name, total_alloc_len, 4914 SOCKET_ID_ANY, 4915 RTE_MEMZONE_2MB | 4916 RTE_MEMZONE_SIZE_HINT_ONLY | 4917 RTE_MEMZONE_IOVA_CONTIG); 4918 if (mz == NULL) 4919 return -ENOMEM; 4920 } 4921 memset(mz->addr, 0, mz->len); 4922 mz_phys_addr = mz->iova; 4923 4924 bp->rx_mem_zone = (const void *)mz; 4925 bp->hw_rx_port_stats = mz->addr; 4926 bp->hw_rx_port_stats_map = mz_phys_addr; 4927 4928 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 4929 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 4930 pci_dev->addr.bus, pci_dev->addr.devid, 4931 pci_dev->addr.function, "tx_port_stats"); 4932 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4933 mz = rte_memzone_lookup(mz_name); 4934 total_alloc_len = 4935 RTE_CACHE_LINE_ROUNDUP(sizeof(struct tx_port_stats) + 4936 sizeof(struct tx_port_stats_ext) + 512); 4937 if (!mz) { 4938 mz = rte_memzone_reserve(mz_name, 4939 total_alloc_len, 4940 SOCKET_ID_ANY, 4941 RTE_MEMZONE_2MB | 4942 RTE_MEMZONE_SIZE_HINT_ONLY | 4943 RTE_MEMZONE_IOVA_CONTIG); 4944 if (mz == NULL) 4945 return -ENOMEM; 4946 } 4947 memset(mz->addr, 0, mz->len); 4948 mz_phys_addr = mz->iova; 4949 4950 bp->tx_mem_zone = (const void *)mz; 4951 bp->hw_tx_port_stats = mz->addr; 4952 bp->hw_tx_port_stats_map = mz_phys_addr; 4953 bp->flags |= BNXT_FLAG_PORT_STATS; 4954 4955 /* Display extended statistics if FW supports it */ 4956 if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 || 4957 bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0 || 4958 !(bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED)) 4959 return 0; 4960 4961 bp->hw_rx_port_stats_ext = (void *) 4962 ((uint8_t *)bp->hw_rx_port_stats + 4963 sizeof(struct rx_port_stats)); 4964 bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map + 4965 sizeof(struct rx_port_stats); 4966 bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS; 4967 4968 if (bp->hwrm_spec_code < 
HWRM_SPEC_CODE_1_9_2 ||
        bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED) {
        bp->hw_tx_port_stats_ext = (void *)
            ((uint8_t *)bp->hw_tx_port_stats +
             sizeof(struct tx_port_stats));
        bp->hw_tx_port_stats_ext_map =
            bp->hw_tx_port_stats_map +
            sizeof(struct tx_port_stats);
        bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS;
    }

    return 0;
}

static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev)
{
    struct bnxt *bp = eth_dev->data->dev_private;
    int rc = 0;

    eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
                                           RTE_ETHER_ADDR_LEN *
                                           bp->max_l2_ctx,
                                           0);
    if (eth_dev->data->mac_addrs == NULL) {
        PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n");
        return -ENOMEM;
    }

    if (!BNXT_HAS_DFLT_MAC_SET(bp)) {
        if (BNXT_PF(bp))
            return -EINVAL;

        /* Generate a random MAC address, if none was assigned by PF */
        PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n");
        bnxt_eth_hw_addr_random(bp->mac_addr);
        PMD_DRV_LOG(INFO,
                    "Assign random MAC:" RTE_ETHER_ADDR_PRT_FMT "\n",
                    bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2],
                    bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]);

        rc = bnxt_hwrm_set_mac(bp);
        if (rc)
            return rc;
    }

    /* Copy the permanent MAC from the FUNC_QCAPS response */
    memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN);

    /*
     * Allocate memory to hold multicast MAC addresses added.
     * Used to restore them during reset recovery
     */
    bp->mcast_addr_list = rte_zmalloc("bnxt_mcast_addr_tbl",
                                      sizeof(struct rte_ether_addr) *
                                      BNXT_MAX_MC_ADDRS, 0);
    if (bp->mcast_addr_list == NULL) {
        PMD_DRV_LOG(ERR, "Failed to allocate multicast addr table\n");
        return -ENOMEM;
    }
    bp->mc_list_dma_addr = rte_malloc_virt2iova(bp->mcast_addr_list);
    if (bp->mc_list_dma_addr == RTE_BAD_IOVA) {
        PMD_DRV_LOG(ERR, "Failed to map mcast_addr_list to physical memory\n");
        return -ENOMEM;
    }

    return rc;
}

static int bnxt_restore_dflt_mac(struct bnxt *bp)
{
    int rc = 0;

    /* MAC is already configured in FW */
    if (BNXT_HAS_DFLT_MAC_SET(bp))
        return 0;

    /* Restore the old MAC configured */
    rc = bnxt_hwrm_set_mac(bp);
    if (rc)
        PMD_DRV_LOG(ERR, "Failed to restore MAC address\n");

    return rc;
}

static void bnxt_config_vf_req_fwd(struct bnxt *bp)
{
    if (!BNXT_PF(bp))
        return;

    memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd));

    if (!(bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN))
        BNXT_HWRM_CMD_TO_FORWARD(HWRM_PORT_PHY_QCFG);
    BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_CFG);
    BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_VF_CFG);
    BNXT_HWRM_CMD_TO_FORWARD(HWRM_CFA_L2_FILTER_ALLOC);
    BNXT_HWRM_CMD_TO_FORWARD(HWRM_OEM_CMD);
}

static void bnxt_alloc_error_recovery_info(struct bnxt *bp)
{
    struct bnxt_error_recovery_info *info = bp->recovery_info;

    if (info) {
        if (!(bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS))
            memset(info, 0, sizeof(*info));
        return;
    }

    if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
        return;

    info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg",
                       sizeof(*info), 0);
    if (!info)
        bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;

    bp->recovery_info = info;
}

static void bnxt_check_fw_status(struct bnxt *bp)
{
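    /* Invoked when HWRM_VER_GET fails during bnxt_get_config(): report the
     * firmware status word from the pre-mapped hcomm status area, if one
     * was found.
     */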
5090 uint32_t fw_status; 5091 5092 if (!(bp->recovery_info && 5093 (bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS))) 5094 return; 5095 5096 fw_status = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG); 5097 if (fw_status != BNXT_FW_STATUS_HEALTHY) 5098 PMD_DRV_LOG(ERR, "Firmware not responding, status: %#x\n", 5099 fw_status); 5100 } 5101 5102 static int bnxt_map_hcomm_fw_status_reg(struct bnxt *bp) 5103 { 5104 struct bnxt_error_recovery_info *info = bp->recovery_info; 5105 uint32_t status_loc; 5106 uint32_t sig_ver; 5107 5108 rte_write32(HCOMM_STATUS_STRUCT_LOC, (uint8_t *)bp->bar0 + 5109 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 5110 sig_ver = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 5111 BNXT_GRCP_WINDOW_2_BASE + 5112 offsetof(struct hcomm_status, 5113 sig_ver))); 5114 /* If the signature is absent, then FW does not support this feature */ 5115 if ((sig_ver & HCOMM_STATUS_SIGNATURE_MASK) != 5116 HCOMM_STATUS_SIGNATURE_VAL) 5117 return 0; 5118 5119 if (!info) { 5120 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg", 5121 sizeof(*info), 0); 5122 if (!info) 5123 return -ENOMEM; 5124 bp->recovery_info = info; 5125 } else { 5126 memset(info, 0, sizeof(*info)); 5127 } 5128 5129 status_loc = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 5130 BNXT_GRCP_WINDOW_2_BASE + 5131 offsetof(struct hcomm_status, 5132 fw_status_loc))); 5133 5134 /* Only pre-map the FW health status GRC register */ 5135 if (BNXT_FW_STATUS_REG_TYPE(status_loc) != BNXT_FW_STATUS_REG_TYPE_GRC) 5136 return 0; 5137 5138 info->status_regs[BNXT_FW_STATUS_REG] = status_loc; 5139 info->mapped_status_regs[BNXT_FW_STATUS_REG] = 5140 BNXT_GRCP_WINDOW_2_BASE + (status_loc & BNXT_GRCP_OFFSET_MASK); 5141 5142 rte_write32((status_loc & BNXT_GRCP_BASE_MASK), (uint8_t *)bp->bar0 + 5143 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 5144 5145 bp->fw_cap |= BNXT_FW_CAP_HCOMM_FW_STATUS; 5146 5147 return 0; 5148 } 5149 5150 /* This function gets the FW version along with the 5151 * capabilities(MAX and current) of the function, vnic, 5152 * error recovery, phy and other chip related info 5153 */ 5154 static int bnxt_get_config(struct bnxt *bp) 5155 { 5156 uint16_t mtu; 5157 int rc = 0; 5158 5159 bp->fw_cap = 0; 5160 5161 rc = bnxt_map_hcomm_fw_status_reg(bp); 5162 if (rc) 5163 return rc; 5164 5165 rc = bnxt_hwrm_ver_get(bp, DFLT_HWRM_CMD_TIMEOUT); 5166 if (rc) { 5167 bnxt_check_fw_status(bp); 5168 return rc; 5169 } 5170 5171 rc = bnxt_hwrm_func_reset(bp); 5172 if (rc) 5173 return -EIO; 5174 5175 rc = bnxt_hwrm_vnic_qcaps(bp); 5176 if (rc) 5177 return rc; 5178 5179 rc = bnxt_hwrm_queue_qportcfg(bp); 5180 if (rc) 5181 return rc; 5182 5183 /* Get the MAX capabilities for this function. 5184 * This function also allocates context memory for TQM rings and 5185 * informs the firmware about this allocated backing store memory. 
5186 */ 5187 rc = bnxt_hwrm_func_qcaps(bp); 5188 if (rc) 5189 return rc; 5190 5191 rc = bnxt_hwrm_func_qcfg(bp, &mtu); 5192 if (rc) 5193 return rc; 5194 5195 rc = bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(bp); 5196 if (rc) 5197 return rc; 5198 5199 bnxt_hwrm_port_mac_qcfg(bp); 5200 5201 bnxt_hwrm_parent_pf_qcfg(bp); 5202 5203 bnxt_hwrm_port_phy_qcaps(bp); 5204 5205 bnxt_alloc_error_recovery_info(bp); 5206 /* Get the adapter error recovery support info */ 5207 rc = bnxt_hwrm_error_recovery_qcfg(bp); 5208 if (rc) 5209 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 5210 5211 bnxt_hwrm_port_led_qcaps(bp); 5212 5213 return 0; 5214 } 5215 5216 static int 5217 bnxt_init_locks(struct bnxt *bp) 5218 { 5219 int err; 5220 5221 err = pthread_mutex_init(&bp->flow_lock, NULL); 5222 if (err) { 5223 PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n"); 5224 return err; 5225 } 5226 5227 err = pthread_mutex_init(&bp->def_cp_lock, NULL); 5228 if (err) { 5229 PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n"); 5230 return err; 5231 } 5232 5233 err = pthread_mutex_init(&bp->health_check_lock, NULL); 5234 if (err) { 5235 PMD_DRV_LOG(ERR, "Unable to initialize health_check_lock\n"); 5236 return err; 5237 } 5238 5239 err = pthread_mutex_init(&bp->err_recovery_lock, NULL); 5240 if (err) 5241 PMD_DRV_LOG(ERR, "Unable to initialize err_recovery_lock\n"); 5242 5243 return err; 5244 } 5245 5246 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev) 5247 { 5248 int rc = 0; 5249 5250 rc = bnxt_get_config(bp); 5251 if (rc) 5252 return rc; 5253 5254 if (!reconfig_dev) { 5255 rc = bnxt_setup_mac_addr(bp->eth_dev); 5256 if (rc) 5257 return rc; 5258 } else { 5259 rc = bnxt_restore_dflt_mac(bp); 5260 if (rc) 5261 return rc; 5262 } 5263 5264 bnxt_config_vf_req_fwd(bp); 5265 5266 rc = bnxt_hwrm_func_driver_register(bp); 5267 if (rc) { 5268 PMD_DRV_LOG(ERR, "Failed to register driver"); 5269 return -EBUSY; 5270 } 5271 5272 if (BNXT_PF(bp)) { 5273 if (bp->pdev->max_vfs) { 5274 rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs); 5275 if (rc) { 5276 PMD_DRV_LOG(ERR, "Failed to allocate VFs\n"); 5277 return rc; 5278 } 5279 } else { 5280 rc = bnxt_hwrm_allocate_pf_only(bp); 5281 if (rc) { 5282 PMD_DRV_LOG(ERR, 5283 "Failed to allocate PF resources"); 5284 return rc; 5285 } 5286 } 5287 } 5288 5289 rc = bnxt_alloc_mem(bp, reconfig_dev); 5290 if (rc) 5291 return rc; 5292 5293 rc = bnxt_setup_int(bp); 5294 if (rc) 5295 return rc; 5296 5297 rc = bnxt_request_int(bp); 5298 if (rc) 5299 return rc; 5300 5301 rc = bnxt_init_ctx_mem(bp); 5302 if (rc) { 5303 PMD_DRV_LOG(ERR, "Failed to init adv_flow_counters\n"); 5304 return rc; 5305 } 5306 5307 return 0; 5308 } 5309 5310 static int 5311 bnxt_parse_devarg_flow_xstat(__rte_unused const char *key, 5312 const char *value, void *opaque_arg) 5313 { 5314 struct bnxt *bp = opaque_arg; 5315 unsigned long flow_xstat; 5316 char *end = NULL; 5317 5318 if (!value || !opaque_arg) { 5319 PMD_DRV_LOG(ERR, 5320 "Invalid parameter passed to flow_xstat devarg.\n"); 5321 return -EINVAL; 5322 } 5323 5324 flow_xstat = strtoul(value, &end, 10); 5325 if (end == NULL || *end != '\0' || 5326 (flow_xstat == ULONG_MAX && errno == ERANGE)) { 5327 PMD_DRV_LOG(ERR, 5328 "Invalid parameter passed to flow_xstat devarg.\n"); 5329 return -EINVAL; 5330 } 5331 5332 if (BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)) { 5333 PMD_DRV_LOG(ERR, 5334 "Invalid value passed to flow_xstat devarg.\n"); 5335 return -EINVAL; 5336 } 5337 5338 bp->flags |= BNXT_FLAG_FLOW_XSTATS_EN; 5339 if (BNXT_FLOW_XSTATS_EN(bp)) 5340 PMD_DRV_LOG(INFO, 
"flow_xstat feature enabled.\n"); 5341 5342 return 0; 5343 } 5344 5345 static int 5346 bnxt_parse_devarg_max_num_kflows(__rte_unused const char *key, 5347 const char *value, void *opaque_arg) 5348 { 5349 struct bnxt *bp = opaque_arg; 5350 unsigned long max_num_kflows; 5351 char *end = NULL; 5352 5353 if (!value || !opaque_arg) { 5354 PMD_DRV_LOG(ERR, 5355 "Invalid parameter passed to max_num_kflows devarg.\n"); 5356 return -EINVAL; 5357 } 5358 5359 max_num_kflows = strtoul(value, &end, 10); 5360 if (end == NULL || *end != '\0' || 5361 (max_num_kflows == ULONG_MAX && errno == ERANGE)) { 5362 PMD_DRV_LOG(ERR, 5363 "Invalid parameter passed to max_num_kflows devarg.\n"); 5364 return -EINVAL; 5365 } 5366 5367 if (bnxt_devarg_max_num_kflow_invalid(max_num_kflows)) { 5368 PMD_DRV_LOG(ERR, 5369 "Invalid value passed to max_num_kflows devarg.\n"); 5370 return -EINVAL; 5371 } 5372 5373 bp->max_num_kflows = max_num_kflows; 5374 if (bp->max_num_kflows) 5375 PMD_DRV_LOG(INFO, "max_num_kflows set as %ldK.\n", 5376 max_num_kflows); 5377 5378 return 0; 5379 } 5380 5381 static int 5382 bnxt_parse_devarg_app_id(__rte_unused const char *key, 5383 const char *value, void *opaque_arg) 5384 { 5385 struct bnxt *bp = opaque_arg; 5386 unsigned long app_id; 5387 char *end = NULL; 5388 5389 if (!value || !opaque_arg) { 5390 PMD_DRV_LOG(ERR, 5391 "Invalid parameter passed to app-id " 5392 "devargs.\n"); 5393 return -EINVAL; 5394 } 5395 5396 app_id = strtoul(value, &end, 10); 5397 if (end == NULL || *end != '\0' || 5398 (app_id == ULONG_MAX && errno == ERANGE)) { 5399 PMD_DRV_LOG(ERR, 5400 "Invalid parameter passed to app_id " 5401 "devargs.\n"); 5402 return -EINVAL; 5403 } 5404 5405 if (BNXT_DEVARG_APP_ID_INVALID(app_id)) { 5406 PMD_DRV_LOG(ERR, "Invalid app-id(%d) devargs.\n", 5407 (uint16_t)app_id); 5408 return -EINVAL; 5409 } 5410 5411 bp->app_id = app_id; 5412 PMD_DRV_LOG(INFO, "app-id=%d feature enabled.\n", (uint16_t)app_id); 5413 5414 return 0; 5415 } 5416 5417 static int 5418 bnxt_parse_devarg_rep_is_pf(__rte_unused const char *key, 5419 const char *value, void *opaque_arg) 5420 { 5421 struct bnxt_representor *vfr_bp = opaque_arg; 5422 unsigned long rep_is_pf; 5423 char *end = NULL; 5424 5425 if (!value || !opaque_arg) { 5426 PMD_DRV_LOG(ERR, 5427 "Invalid parameter passed to rep_is_pf devargs.\n"); 5428 return -EINVAL; 5429 } 5430 5431 rep_is_pf = strtoul(value, &end, 10); 5432 if (end == NULL || *end != '\0' || 5433 (rep_is_pf == ULONG_MAX && errno == ERANGE)) { 5434 PMD_DRV_LOG(ERR, 5435 "Invalid parameter passed to rep_is_pf devargs.\n"); 5436 return -EINVAL; 5437 } 5438 5439 if (BNXT_DEVARG_REP_IS_PF_INVALID(rep_is_pf)) { 5440 PMD_DRV_LOG(ERR, 5441 "Invalid value passed to rep_is_pf devargs.\n"); 5442 return -EINVAL; 5443 } 5444 5445 vfr_bp->flags |= rep_is_pf; 5446 if (BNXT_REP_PF(vfr_bp)) 5447 PMD_DRV_LOG(INFO, "PF representor\n"); 5448 else 5449 PMD_DRV_LOG(INFO, "VF representor\n"); 5450 5451 return 0; 5452 } 5453 5454 static int 5455 bnxt_parse_devarg_rep_based_pf(__rte_unused const char *key, 5456 const char *value, void *opaque_arg) 5457 { 5458 struct bnxt_representor *vfr_bp = opaque_arg; 5459 unsigned long rep_based_pf; 5460 char *end = NULL; 5461 5462 if (!value || !opaque_arg) { 5463 PMD_DRV_LOG(ERR, 5464 "Invalid parameter passed to rep_based_pf " 5465 "devargs.\n"); 5466 return -EINVAL; 5467 } 5468 5469 rep_based_pf = strtoul(value, &end, 10); 5470 if (end == NULL || *end != '\0' || 5471 (rep_based_pf == ULONG_MAX && errno == ERANGE)) { 5472 PMD_DRV_LOG(ERR, 5473 "Invalid parameter passed 
to rep_based_pf " 5474 "devargs.\n"); 5475 return -EINVAL; 5476 } 5477 5478 if (BNXT_DEVARG_REP_BASED_PF_INVALID(rep_based_pf)) { 5479 PMD_DRV_LOG(ERR, 5480 "Invalid value passed to rep_based_pf devargs.\n"); 5481 return -EINVAL; 5482 } 5483 5484 vfr_bp->rep_based_pf = rep_based_pf; 5485 vfr_bp->flags |= BNXT_REP_BASED_PF_VALID; 5486 5487 PMD_DRV_LOG(INFO, "rep-based-pf = %d\n", vfr_bp->rep_based_pf); 5488 5489 return 0; 5490 } 5491 5492 static int 5493 bnxt_parse_devarg_rep_q_r2f(__rte_unused const char *key, 5494 const char *value, void *opaque_arg) 5495 { 5496 struct bnxt_representor *vfr_bp = opaque_arg; 5497 unsigned long rep_q_r2f; 5498 char *end = NULL; 5499 5500 if (!value || !opaque_arg) { 5501 PMD_DRV_LOG(ERR, 5502 "Invalid parameter passed to rep_q_r2f " 5503 "devargs.\n"); 5504 return -EINVAL; 5505 } 5506 5507 rep_q_r2f = strtoul(value, &end, 10); 5508 if (end == NULL || *end != '\0' || 5509 (rep_q_r2f == ULONG_MAX && errno == ERANGE)) { 5510 PMD_DRV_LOG(ERR, 5511 "Invalid parameter passed to rep_q_r2f " 5512 "devargs.\n"); 5513 return -EINVAL; 5514 } 5515 5516 if (BNXT_DEVARG_REP_Q_R2F_INVALID(rep_q_r2f)) { 5517 PMD_DRV_LOG(ERR, 5518 "Invalid value passed to rep_q_r2f devargs.\n"); 5519 return -EINVAL; 5520 } 5521 5522 vfr_bp->rep_q_r2f = rep_q_r2f; 5523 vfr_bp->flags |= BNXT_REP_Q_R2F_VALID; 5524 PMD_DRV_LOG(INFO, "rep-q-r2f = %d\n", vfr_bp->rep_q_r2f); 5525 5526 return 0; 5527 } 5528 5529 static int 5530 bnxt_parse_devarg_rep_q_f2r(__rte_unused const char *key, 5531 const char *value, void *opaque_arg) 5532 { 5533 struct bnxt_representor *vfr_bp = opaque_arg; 5534 unsigned long rep_q_f2r; 5535 char *end = NULL; 5536 5537 if (!value || !opaque_arg) { 5538 PMD_DRV_LOG(ERR, 5539 "Invalid parameter passed to rep_q_f2r " 5540 "devargs.\n"); 5541 return -EINVAL; 5542 } 5543 5544 rep_q_f2r = strtoul(value, &end, 10); 5545 if (end == NULL || *end != '\0' || 5546 (rep_q_f2r == ULONG_MAX && errno == ERANGE)) { 5547 PMD_DRV_LOG(ERR, 5548 "Invalid parameter passed to rep_q_f2r " 5549 "devargs.\n"); 5550 return -EINVAL; 5551 } 5552 5553 if (BNXT_DEVARG_REP_Q_F2R_INVALID(rep_q_f2r)) { 5554 PMD_DRV_LOG(ERR, 5555 "Invalid value passed to rep_q_f2r devargs.\n"); 5556 return -EINVAL; 5557 } 5558 5559 vfr_bp->rep_q_f2r = rep_q_f2r; 5560 vfr_bp->flags |= BNXT_REP_Q_F2R_VALID; 5561 PMD_DRV_LOG(INFO, "rep-q-f2r = %d\n", vfr_bp->rep_q_f2r); 5562 5563 return 0; 5564 } 5565 5566 static int 5567 bnxt_parse_devarg_rep_fc_r2f(__rte_unused const char *key, 5568 const char *value, void *opaque_arg) 5569 { 5570 struct bnxt_representor *vfr_bp = opaque_arg; 5571 unsigned long rep_fc_r2f; 5572 char *end = NULL; 5573 5574 if (!value || !opaque_arg) { 5575 PMD_DRV_LOG(ERR, 5576 "Invalid parameter passed to rep_fc_r2f " 5577 "devargs.\n"); 5578 return -EINVAL; 5579 } 5580 5581 rep_fc_r2f = strtoul(value, &end, 10); 5582 if (end == NULL || *end != '\0' || 5583 (rep_fc_r2f == ULONG_MAX && errno == ERANGE)) { 5584 PMD_DRV_LOG(ERR, 5585 "Invalid parameter passed to rep_fc_r2f " 5586 "devargs.\n"); 5587 return -EINVAL; 5588 } 5589 5590 if (BNXT_DEVARG_REP_FC_R2F_INVALID(rep_fc_r2f)) { 5591 PMD_DRV_LOG(ERR, 5592 "Invalid value passed to rep_fc_r2f devargs.\n"); 5593 return -EINVAL; 5594 } 5595 5596 vfr_bp->flags |= BNXT_REP_FC_R2F_VALID; 5597 vfr_bp->rep_fc_r2f = rep_fc_r2f; 5598 PMD_DRV_LOG(INFO, "rep-fc-r2f = %lu\n", rep_fc_r2f); 5599 5600 return 0; 5601 } 5602 5603 static int 5604 bnxt_parse_devarg_rep_fc_f2r(__rte_unused const char *key, 5605 const char *value, void *opaque_arg) 5606 { 5607 struct 
bnxt_representor *vfr_bp = opaque_arg; 5608 unsigned long rep_fc_f2r; 5609 char *end = NULL; 5610 5611 if (!value || !opaque_arg) { 5612 PMD_DRV_LOG(ERR, 5613 "Invalid parameter passed to rep_fc_f2r " 5614 "devargs.\n"); 5615 return -EINVAL; 5616 } 5617 5618 rep_fc_f2r = strtoul(value, &end, 10); 5619 if (end == NULL || *end != '\0' || 5620 (rep_fc_f2r == ULONG_MAX && errno == ERANGE)) { 5621 PMD_DRV_LOG(ERR, 5622 "Invalid parameter passed to rep_fc_f2r " 5623 "devargs.\n"); 5624 return -EINVAL; 5625 } 5626 5627 if (BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r)) { 5628 PMD_DRV_LOG(ERR, 5629 "Invalid value passed to rep_fc_f2r devargs.\n"); 5630 return -EINVAL; 5631 } 5632 5633 vfr_bp->flags |= BNXT_REP_FC_F2R_VALID; 5634 vfr_bp->rep_fc_f2r = rep_fc_f2r; 5635 PMD_DRV_LOG(INFO, "rep-fc-f2r = %lu\n", rep_fc_f2r); 5636 5637 return 0; 5638 } 5639 5640 static int 5641 bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs) 5642 { 5643 struct rte_kvargs *kvlist; 5644 int ret; 5645 5646 if (devargs == NULL) 5647 return 0; 5648 5649 kvlist = rte_kvargs_parse(devargs->args, bnxt_dev_args); 5650 if (kvlist == NULL) 5651 return -EINVAL; 5652 5653 /* 5654 * Handler for "flow_xstat" devarg. 5655 * Invoked as for ex: "-a 0000:00:0d.0,flow_xstat=1" 5656 */ 5657 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_FLOW_XSTAT, 5658 bnxt_parse_devarg_flow_xstat, bp); 5659 if (ret) 5660 goto err; 5661 5662 /* 5663 * Handler for "max_num_kflows" devarg. 5664 * Invoked as for ex: "-a 000:00:0d.0,max_num_kflows=32" 5665 */ 5666 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_MAX_NUM_KFLOWS, 5667 bnxt_parse_devarg_max_num_kflows, bp); 5668 if (ret) 5669 goto err; 5670 5671 err: 5672 /* 5673 * Handler for "app-id" devarg. 5674 * Invoked as for ex: "-a 000:00:0d.0,app-id=1" 5675 */ 5676 rte_kvargs_process(kvlist, BNXT_DEVARG_APP_ID, 5677 bnxt_parse_devarg_app_id, bp); 5678 5679 rte_kvargs_free(kvlist); 5680 return ret; 5681 } 5682 5683 static int bnxt_alloc_switch_domain(struct bnxt *bp) 5684 { 5685 int rc = 0; 5686 5687 if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) { 5688 rc = rte_eth_switch_domain_alloc(&bp->switch_domain_id); 5689 if (rc) 5690 PMD_DRV_LOG(ERR, 5691 "Failed to alloc switch domain: %d\n", rc); 5692 else 5693 PMD_DRV_LOG(INFO, 5694 "Switch domain allocated %d\n", 5695 bp->switch_domain_id); 5696 } 5697 5698 return rc; 5699 } 5700 5701 /* Allocate and initialize various fields in bnxt struct that 5702 * need to be allocated/destroyed only once in the lifetime of the driver 5703 */ 5704 static int bnxt_drv_init(struct rte_eth_dev *eth_dev) 5705 { 5706 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 5707 struct bnxt *bp = eth_dev->data->dev_private; 5708 int rc = 0; 5709 5710 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 5711 5712 if (bnxt_vf_pciid(pci_dev->id.device_id)) 5713 bp->flags |= BNXT_FLAG_VF; 5714 5715 if (bnxt_p5_device(pci_dev->id.device_id)) 5716 bp->flags |= BNXT_FLAG_CHIP_P5; 5717 5718 if (pci_dev->id.device_id == BROADCOM_DEV_ID_58802 || 5719 pci_dev->id.device_id == BROADCOM_DEV_ID_58804 || 5720 pci_dev->id.device_id == BROADCOM_DEV_ID_58808 || 5721 pci_dev->id.device_id == BROADCOM_DEV_ID_58802_VF) 5722 bp->flags |= BNXT_FLAG_STINGRAY; 5723 5724 if (BNXT_TRUFLOW_EN(bp)) { 5725 /* extra mbuf field is required to store CFA code from mark */ 5726 static const struct rte_mbuf_dynfield bnxt_cfa_code_dynfield_desc = { 5727 .name = RTE_PMD_BNXT_CFA_CODE_DYNFIELD_NAME, 5728 .size = sizeof(bnxt_cfa_code_dynfield_t), 5729 .align = __alignof__(bnxt_cfa_code_dynfield_t), 5730 }; 5731 
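        /* Register the dynamic mbuf field; the returned offset is where the
         * Rx path stores the CFA code carried in the mark, or a negative
         * value on failure.
         */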
bnxt_cfa_code_dynfield_offset = 5732 rte_mbuf_dynfield_register(&bnxt_cfa_code_dynfield_desc); 5733 if (bnxt_cfa_code_dynfield_offset < 0) { 5734 PMD_DRV_LOG(ERR, 5735 "Failed to register mbuf field for TruFlow mark\n"); 5736 return -rte_errno; 5737 } 5738 } 5739 5740 rc = bnxt_map_pci_bars(eth_dev); 5741 if (rc) { 5742 PMD_DRV_LOG(ERR, 5743 "Failed to initialize board rc: %x\n", rc); 5744 return rc; 5745 } 5746 5747 rc = bnxt_alloc_pf_info(bp); 5748 if (rc) 5749 return rc; 5750 5751 rc = bnxt_alloc_link_info(bp); 5752 if (rc) 5753 return rc; 5754 5755 rc = bnxt_alloc_parent_info(bp); 5756 if (rc) 5757 return rc; 5758 5759 rc = bnxt_alloc_hwrm_resources(bp); 5760 if (rc) { 5761 PMD_DRV_LOG(ERR, 5762 "Failed to allocate response buffer rc: %x\n", rc); 5763 return rc; 5764 } 5765 rc = bnxt_alloc_leds_info(bp); 5766 if (rc) 5767 return rc; 5768 5769 rc = bnxt_alloc_cos_queues(bp); 5770 if (rc) 5771 return rc; 5772 5773 rc = bnxt_init_locks(bp); 5774 if (rc) 5775 return rc; 5776 5777 rc = bnxt_alloc_switch_domain(bp); 5778 if (rc) 5779 return rc; 5780 5781 return rc; 5782 } 5783 5784 static int 5785 bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused) 5786 { 5787 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 5788 static int version_printed; 5789 struct bnxt *bp; 5790 int rc; 5791 5792 if (version_printed++ == 0) 5793 PMD_DRV_LOG(INFO, "%s\n", bnxt_version); 5794 5795 eth_dev->dev_ops = &bnxt_dev_ops; 5796 eth_dev->rx_queue_count = bnxt_rx_queue_count_op; 5797 eth_dev->rx_descriptor_status = bnxt_rx_descriptor_status_op; 5798 eth_dev->tx_descriptor_status = bnxt_tx_descriptor_status_op; 5799 eth_dev->rx_pkt_burst = &bnxt_recv_pkts; 5800 eth_dev->tx_pkt_burst = &bnxt_xmit_pkts; 5801 5802 /* 5803 * For secondary processes, we don't initialise any further 5804 * as primary has already done this work. 5805 */ 5806 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 5807 return 0; 5808 5809 rte_eth_copy_pci_info(eth_dev, pci_dev); 5810 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 5811 5812 bp = eth_dev->data->dev_private; 5813 5814 /* Parse dev arguments passed on when starting the DPDK application. 
*/ 5815 rc = bnxt_parse_dev_args(bp, pci_dev->device.devargs); 5816 if (rc) 5817 goto error_free; 5818 5819 rc = bnxt_drv_init(eth_dev); 5820 if (rc) 5821 goto error_free; 5822 5823 rc = bnxt_init_resources(bp, false); 5824 if (rc) 5825 goto error_free; 5826 5827 rc = bnxt_alloc_stats_mem(bp); 5828 if (rc) 5829 goto error_free; 5830 5831 PMD_DRV_LOG(INFO, 5832 "Found %s device at mem %" PRIX64 ", node addr %pM\n", 5833 DRV_MODULE_NAME, 5834 pci_dev->mem_resource[0].phys_addr, 5835 pci_dev->mem_resource[0].addr); 5836 5837 return 0; 5838 5839 error_free: 5840 bnxt_dev_uninit(eth_dev); 5841 return rc; 5842 } 5843 5844 5845 static void bnxt_free_ctx_mem_buf(struct bnxt_ctx_mem_buf_info *ctx) 5846 { 5847 if (!ctx) 5848 return; 5849 5850 if (ctx->va) 5851 rte_free(ctx->va); 5852 5853 ctx->va = NULL; 5854 ctx->dma = RTE_BAD_IOVA; 5855 ctx->ctx_id = BNXT_CTX_VAL_INVAL; 5856 } 5857 5858 static void bnxt_unregister_fc_ctx_mem(struct bnxt *bp) 5859 { 5860 bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX, 5861 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 5862 bp->flow_stat->rx_fc_out_tbl.ctx_id, 5863 bp->flow_stat->max_fc, 5864 false); 5865 5866 bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX, 5867 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 5868 bp->flow_stat->tx_fc_out_tbl.ctx_id, 5869 bp->flow_stat->max_fc, 5870 false); 5871 5872 if (bp->flow_stat->rx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 5873 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_in_tbl.ctx_id); 5874 bp->flow_stat->rx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 5875 5876 if (bp->flow_stat->rx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 5877 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_out_tbl.ctx_id); 5878 bp->flow_stat->rx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 5879 5880 if (bp->flow_stat->tx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 5881 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_in_tbl.ctx_id); 5882 bp->flow_stat->tx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 5883 5884 if (bp->flow_stat->tx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 5885 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_out_tbl.ctx_id); 5886 bp->flow_stat->tx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 5887 } 5888 5889 static void bnxt_uninit_fc_ctx_mem(struct bnxt *bp) 5890 { 5891 bnxt_unregister_fc_ctx_mem(bp); 5892 5893 bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_in_tbl); 5894 bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_out_tbl); 5895 bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_in_tbl); 5896 bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_out_tbl); 5897 } 5898 5899 static void bnxt_uninit_ctx_mem(struct bnxt *bp) 5900 { 5901 if (BNXT_FLOW_XSTATS_EN(bp)) 5902 bnxt_uninit_fc_ctx_mem(bp); 5903 } 5904 5905 static void 5906 bnxt_free_error_recovery_info(struct bnxt *bp) 5907 { 5908 rte_free(bp->recovery_info); 5909 bp->recovery_info = NULL; 5910 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 5911 } 5912 5913 static int 5914 bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev) 5915 { 5916 int rc; 5917 5918 bnxt_free_int(bp); 5919 bnxt_free_mem(bp, reconfig_dev); 5920 5921 bnxt_hwrm_func_buf_unrgtr(bp); 5922 if (bp->pf != NULL) { 5923 rte_free(bp->pf->vf_req_buf); 5924 bp->pf->vf_req_buf = NULL; 5925 } 5926 5927 rc = bnxt_hwrm_func_driver_unregister(bp); 5928 bp->flags &= ~BNXT_FLAG_REGISTERED; 5929 bnxt_free_ctx_mem(bp); 5930 if (!reconfig_dev) { 5931 bnxt_free_hwrm_resources(bp); 5932 bnxt_free_error_recovery_info(bp); 5933 rte_free(bp->mcast_addr_list); 5934 bp->mcast_addr_list = NULL; 5935 } 5936 5937 bnxt_uninit_ctx_mem(bp); 5938 5939 bnxt_free_flow_stats_info(bp); 5940 if (bp->rep_info != NULL) 5941 
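        /* The switch domain is released only when representor info was set
         * up; bnxt_free_rep_info() below is called unconditionally.
         */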
bnxt_free_switch_domain(bp); 5942 bnxt_free_rep_info(bp); 5943 rte_free(bp->ptp_cfg); 5944 bp->ptp_cfg = NULL; 5945 return rc; 5946 } 5947 5948 static int 5949 bnxt_dev_uninit(struct rte_eth_dev *eth_dev) 5950 { 5951 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 5952 return -EPERM; 5953 5954 PMD_DRV_LOG(DEBUG, "Calling Device uninit\n"); 5955 5956 if (eth_dev->state != RTE_ETH_DEV_UNUSED) 5957 bnxt_dev_close_op(eth_dev); 5958 5959 return 0; 5960 } 5961 5962 static int bnxt_pci_remove_dev_with_reps(struct rte_eth_dev *eth_dev) 5963 { 5964 struct bnxt *bp = eth_dev->data->dev_private; 5965 struct rte_eth_dev *vf_rep_eth_dev; 5966 int ret = 0, i; 5967 5968 if (!bp) 5969 return -EINVAL; 5970 5971 for (i = 0; i < bp->num_reps; i++) { 5972 vf_rep_eth_dev = bp->rep_info[i].vfr_eth_dev; 5973 if (!vf_rep_eth_dev) 5974 continue; 5975 PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci remove\n", 5976 vf_rep_eth_dev->data->port_id); 5977 rte_eth_dev_destroy(vf_rep_eth_dev, bnxt_representor_uninit); 5978 } 5979 PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n", 5980 eth_dev->data->port_id); 5981 ret = rte_eth_dev_destroy(eth_dev, bnxt_dev_uninit); 5982 5983 return ret; 5984 } 5985 5986 static void bnxt_free_rep_info(struct bnxt *bp) 5987 { 5988 rte_free(bp->rep_info); 5989 bp->rep_info = NULL; 5990 rte_free(bp->cfa_code_map); 5991 bp->cfa_code_map = NULL; 5992 } 5993 5994 static int bnxt_init_rep_info(struct bnxt *bp) 5995 { 5996 int i = 0, rc; 5997 5998 if (bp->rep_info) 5999 return 0; 6000 6001 bp->rep_info = rte_zmalloc("bnxt_rep_info", 6002 sizeof(bp->rep_info[0]) * BNXT_MAX_VF_REPS(bp), 6003 0); 6004 if (!bp->rep_info) { 6005 PMD_DRV_LOG(ERR, "Failed to alloc memory for rep info\n"); 6006 return -ENOMEM; 6007 } 6008 bp->cfa_code_map = rte_zmalloc("bnxt_cfa_code_map", 6009 sizeof(*bp->cfa_code_map) * 6010 BNXT_MAX_CFA_CODE, 0); 6011 if (!bp->cfa_code_map) { 6012 PMD_DRV_LOG(ERR, "Failed to alloc memory for cfa_code_map\n"); 6013 bnxt_free_rep_info(bp); 6014 return -ENOMEM; 6015 } 6016 6017 for (i = 0; i < BNXT_MAX_CFA_CODE; i++) 6018 bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID; 6019 6020 rc = pthread_mutex_init(&bp->rep_info->vfr_lock, NULL); 6021 if (rc) { 6022 PMD_DRV_LOG(ERR, "Unable to initialize vfr_lock\n"); 6023 bnxt_free_rep_info(bp); 6024 return rc; 6025 } 6026 6027 rc = pthread_mutex_init(&bp->rep_info->vfr_start_lock, NULL); 6028 if (rc) { 6029 PMD_DRV_LOG(ERR, "Unable to initialize vfr_start_lock\n"); 6030 bnxt_free_rep_info(bp); 6031 return rc; 6032 } 6033 6034 return rc; 6035 } 6036 6037 static int bnxt_rep_port_probe(struct rte_pci_device *pci_dev, 6038 struct rte_eth_devargs *eth_da, 6039 struct rte_eth_dev *backing_eth_dev, 6040 const char *dev_args) 6041 { 6042 struct rte_eth_dev *vf_rep_eth_dev; 6043 char name[RTE_ETH_NAME_MAX_LEN]; 6044 struct bnxt *backing_bp = backing_eth_dev->data->dev_private; 6045 uint16_t max_vf_reps = BNXT_MAX_VF_REPS(backing_bp); 6046 6047 uint16_t num_rep; 6048 int i, ret = 0; 6049 struct rte_kvargs *kvlist = NULL; 6050 6051 if (eth_da->type == RTE_ETH_REPRESENTOR_NONE) 6052 return 0; 6053 if (eth_da->type != RTE_ETH_REPRESENTOR_VF) { 6054 PMD_DRV_LOG(ERR, "unsupported representor type %d\n", 6055 eth_da->type); 6056 return -ENOTSUP; 6057 } 6058 num_rep = eth_da->nb_representor_ports; 6059 if (num_rep > max_vf_reps) { 6060 PMD_DRV_LOG(ERR, "nb_representor_ports = %d > %d MAX VF REPS\n", 6061 num_rep, max_vf_reps); 6062 return -EINVAL; 6063 } 6064 6065 if (num_rep >= RTE_MAX_ETHPORTS) { 6066 PMD_DRV_LOG(ERR, 6067 "nb_representor_ports = %d > %d MAX ETHPORTS\n", 6068 
static int bnxt_rep_port_probe(struct rte_pci_device *pci_dev,
			       struct rte_eth_devargs *eth_da,
			       struct rte_eth_dev *backing_eth_dev,
			       const char *dev_args)
{
	struct rte_eth_dev *vf_rep_eth_dev;
	char name[RTE_ETH_NAME_MAX_LEN];
	struct bnxt *backing_bp = backing_eth_dev->data->dev_private;
	uint16_t max_vf_reps = BNXT_MAX_VF_REPS(backing_bp);
	uint16_t num_rep;
	int i, ret = 0;
	struct rte_kvargs *kvlist = NULL;

	if (eth_da->type == RTE_ETH_REPRESENTOR_NONE)
		return 0;
	if (eth_da->type != RTE_ETH_REPRESENTOR_VF) {
		PMD_DRV_LOG(ERR, "unsupported representor type %d\n",
			    eth_da->type);
		return -ENOTSUP;
	}
	num_rep = eth_da->nb_representor_ports;
	if (num_rep > max_vf_reps) {
		PMD_DRV_LOG(ERR, "nb_representor_ports = %d > %d MAX VF REPS\n",
			    num_rep, max_vf_reps);
		return -EINVAL;
	}

	if (num_rep >= RTE_MAX_ETHPORTS) {
		PMD_DRV_LOG(ERR,
			    "nb_representor_ports = %d > %d MAX ETHPORTS\n",
			    num_rep, RTE_MAX_ETHPORTS);
		return -EINVAL;
	}

	if (!(BNXT_PF(backing_bp) || BNXT_VF_IS_TRUSTED(backing_bp))) {
		PMD_DRV_LOG(ERR,
			    "Not a PF or trusted VF. No Representor support\n");
		/* Returning an error is not an option.
		 * Applications are not handling this correctly
		 */
		return 0;
	}

	if (bnxt_init_rep_info(backing_bp))
		return 0;

	for (i = 0; i < num_rep; i++) {
		struct bnxt_representor representor = {
			.vf_id = eth_da->representor_ports[i],
			.switch_domain_id = backing_bp->switch_domain_id,
			.parent_dev = backing_eth_dev
		};

		if (representor.vf_id >= max_vf_reps) {
			PMD_DRV_LOG(ERR, "VF-Rep id %d >= %d MAX VF ID\n",
				    representor.vf_id, max_vf_reps);
			continue;
		}

		/* representor port net_bdf_port */
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			 pci_dev->device.name, eth_da->representor_ports[i]);

		kvlist = rte_kvargs_parse(dev_args, bnxt_dev_args);
		if (kvlist) {
			/*
			 * Handler for the "rep-is-pf" devarg.
			 * Invoked as for ex: "-a 0000:00:0d.0,
			 * rep-based-pf=<pf index>,rep-is-pf=<VF=0 or PF=1>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_IS_PF,
						 bnxt_parse_devarg_rep_is_pf,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for the "rep-based-pf" devarg.
			 * Invoked as for ex: "-a 0000:00:0d.0,
			 * rep-based-pf=<pf index>,rep-is-pf=<VF=0 or PF=1>"
			 */
			ret = rte_kvargs_process(kvlist,
						 BNXT_DEVARG_REP_BASED_PF,
						 bnxt_parse_devarg_rep_based_pf,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for the "rep-q-r2f" devarg.
			 * Invoked as for ex: "-a 0000:00:0d.0,
			 * rep-q-r2f=<COS queue index>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_R2F,
						 bnxt_parse_devarg_rep_q_r2f,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for the "rep-q-f2r" devarg.
			 * Invoked as for ex: "-a 0000:00:0d.0,
			 * rep-q-f2r=<COS queue index>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_F2R,
						 bnxt_parse_devarg_rep_q_f2r,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for the "rep-fc-r2f" devarg.
			 * Invoked as for ex: "-a 0000:00:0d.0,
			 * rep-fc-r2f=<0 or 1>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_R2F,
						 bnxt_parse_devarg_rep_fc_r2f,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for the "rep-fc-f2r" devarg.
			 * Invoked as for ex: "-a 0000:00:0d.0,
			 * rep-fc-f2r=<0 or 1>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_F2R,
						 bnxt_parse_devarg_rep_fc_f2r,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
		}

		ret = rte_eth_dev_create(&pci_dev->device, name,
					 sizeof(struct bnxt_representor),
					 NULL, NULL,
					 bnxt_representor_init,
					 &representor);
		if (ret) {
			PMD_DRV_LOG(ERR, "failed to create bnxt vf "
				    "representor %s.", name);
			goto err;
		}

		vf_rep_eth_dev = rte_eth_dev_allocated(name);
		if (!vf_rep_eth_dev) {
			PMD_DRV_LOG(ERR, "Failed to find the eth_dev"
				    " for VF-Rep: %s.", name);
			ret = -ENODEV;
			goto err;
		}

		PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci probe\n",
			    backing_eth_dev->data->port_id);
		backing_bp->rep_info[representor.vf_id].vfr_eth_dev =
			vf_rep_eth_dev;
		backing_bp->num_reps++;
	}

	rte_kvargs_free(kvlist);
	return 0;

err:
	/* If num_rep > 1, then rollback already created
	 * ports, since we'll be failing the probe anyway
	 */
	if (num_rep > 1)
		bnxt_pci_remove_dev_with_reps(backing_eth_dev);
	rte_errno = -ret;
	rte_kvargs_free(kvlist);

	return ret;
}
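
/*
 * Note on the probe flow (informational): bnxt_pci_probe() below may run
 * more than once for the same PCI device. The first pass creates the
 * backing PF/trusted-VF ethdev; a later pass, typically triggered when an
 * application such as OVS-DPDK re-probes the device with "representor=..."
 * devargs (permitted by RTE_PCI_DRV_PROBE_AGAIN in bnxt_rte_pmd), finds the
 * already-allocated backing ethdev and only adds the representor ports.
 * Such a re-probe could, for example, be issued from an application as
 * rte_dev_probe("0000:82:00.0,representor=[0-1]"), where the BDF and the
 * representor range are placeholders.
 */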
static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			  struct rte_pci_device *pci_dev)
{
	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
	struct rte_eth_dev *backing_eth_dev;
	uint16_t num_rep;
	int ret = 0;

	if (pci_dev->device.devargs) {
		ret = rte_eth_devargs_parse(pci_dev->device.devargs->args,
					    &eth_da);
		if (ret)
			return ret;
	}

	num_rep = eth_da.nb_representor_ports;
	PMD_DRV_LOG(DEBUG, "nb_representor_ports = %d\n",
		    num_rep);

	/* We could come here after the first level of probe has already been
	 * invoked as part of an application bringup (OVS-DPDK vswitchd), so
	 * first check for an already allocated eth_dev for the backing
	 * device (PF/Trusted VF).
	 */
	backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (backing_eth_dev == NULL) {
		ret = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
					 sizeof(struct bnxt),
					 eth_dev_pci_specific_init, pci_dev,
					 bnxt_dev_init, NULL);

		if (ret || !num_rep)
			return ret;

		backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	}
	PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci probe\n",
		    backing_eth_dev->data->port_id);

	if (!num_rep)
		return ret;

	/* probe representor ports now */
	ret = bnxt_rep_port_probe(pci_dev, &eth_da, backing_eth_dev,
				  pci_dev->device.devargs->args);

	return ret;
}

static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;

	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!eth_dev)
		return 0; /* Invoked typically only by OVS-DPDK; by the
			   * time it comes here the eth_dev has already
			   * been deleted by rte_eth_dev_close(), so
			   * returning a non-negative value at least
			   * allows cleanup to proceed.
			   */

	PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n", eth_dev->data->port_id);
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
			return rte_eth_dev_destroy(eth_dev,
						   bnxt_representor_uninit);
		else
			return rte_eth_dev_destroy(eth_dev,
						   bnxt_dev_uninit);
	} else {
		return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
	}
}
static struct rte_pci_driver bnxt_rte_pmd = {
	.id_table = bnxt_pci_id_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_INTR_RMV |
		     RTE_PCI_DRV_PROBE_AGAIN, /* Needed in case of VF-REPs
					       * and OVS-DPDK
					       */
	.probe = bnxt_pci_probe,
	.remove = bnxt_pci_remove,
};

static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
	if (strcmp(dev->device->driver->name, drv->driver.name))
		return false;

	return true;
}

bool is_bnxt_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &bnxt_rte_pmd);
}

RTE_LOG_REGISTER_SUFFIX(bnxt_logtype_driver, driver, NOTICE);
RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
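
/*
 * Illustrative sketch only (not compiled as driver logic): within the PMD,
 * is_bnxt_supported() above guards bnxt-specific entry points by comparing
 * the ethdev's driver name against bnxt_rte_pmd. An application using the
 * rte_pmd_bnxt.h API could apply an equivalent caller-side guard built only
 * on public ethdev calls, where "port_id" is a placeholder variable:
 *
 *   struct rte_eth_dev_info dev_info;
 *
 *   if (rte_eth_dev_info_get(port_id, &dev_info) == 0 &&
 *       strcmp(dev_info.driver_name, "net_bnxt") == 0) {
 *           // port is driven by this PMD; rte_pmd_bnxt_* calls apply
 *   }
 */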