/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_dev.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_alarm.h>
#include <rte_kvargs.h>
#include <rte_vect.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_irq.h"
#include "bnxt_reps.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_stats.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#include "bnxt_nvm_defs.h"
#include "bnxt_tf_common.h"
#include "ulp_flow_db.h"
#include "rte_pmd_bnxt.h"

#define DRV_MODULE_NAME		"bnxt"
static const char bnxt_version[] =
	"Broadcom NetXtreme driver " DRV_MODULE_NAME;

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id bnxt_pci_id_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC_VF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC_VF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58812) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58814) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58818) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58818_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

#define BNXT_DEVARG_FLOW_XSTAT		"flow-xstat"
#define BNXT_DEVARG_MAX_NUM_KFLOWS	"max-num-kflows"
#define BNXT_DEVARG_REPRESENTOR		"representor"
#define BNXT_DEVARG_REP_BASED_PF	"rep-based-pf"
#define BNXT_DEVARG_REP_IS_PF		"rep-is-pf"
#define BNXT_DEVARG_REP_Q_R2F		"rep-q-r2f"
#define BNXT_DEVARG_REP_Q_F2R		"rep-q-f2r"
#define BNXT_DEVARG_REP_FC_R2F		"rep-fc-r2f"
#define BNXT_DEVARG_REP_FC_F2R		"rep-fc-f2r"
#define BNXT_DEVARG_APP_ID		"app-id"

static const char *const bnxt_dev_args[] = {
	BNXT_DEVARG_REPRESENTOR,
	BNXT_DEVARG_FLOW_XSTAT,
	BNXT_DEVARG_MAX_NUM_KFLOWS,
	BNXT_DEVARG_REP_BASED_PF,
	BNXT_DEVARG_REP_IS_PF,
	BNXT_DEVARG_REP_Q_R2F,
	BNXT_DEVARG_REP_Q_F2R,
	BNXT_DEVARG_REP_FC_R2F,
	BNXT_DEVARG_REP_FC_F2R,
	BNXT_DEVARG_APP_ID,
	NULL
};

/*
 * app-id = a non-negative 8-bit number
 */
#define BNXT_DEVARG_APP_ID_INVALID(val)			((val) > 255)

/*
 * flow_xstat == false to disable the feature
 * flow_xstat == true to enable the feature
 */
#define BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)	((flow_xstat) > 1)

/*
 * rep_is_pf == false to indicate VF representor
 * rep_is_pf == true to indicate PF representor
 */
#define BNXT_DEVARG_REP_IS_PF_INVALID(rep_is_pf)	((rep_is_pf) > 1)

/*
 * rep_based_pf == Physical index of the PF
 */
#define BNXT_DEVARG_REP_BASED_PF_INVALID(rep_based_pf)	((rep_based_pf) > 15)
/*
 * rep_q_r2f == Logical COS Queue index for the rep to endpoint direction
 */
#define BNXT_DEVARG_REP_Q_R2F_INVALID(rep_q_r2f)	((rep_q_r2f) > 3)

/*
 * rep_q_f2r == Logical COS Queue index for the endpoint to rep direction
 */
#define BNXT_DEVARG_REP_Q_F2R_INVALID(rep_q_f2r)	((rep_q_f2r) > 3)

/*
 * rep_fc_r2f == Flow control for the representor to endpoint direction
 */
#define BNXT_DEVARG_REP_FC_R2F_INVALID(rep_fc_r2f)	((rep_fc_r2f) > 1)

/*
 * rep_fc_f2r == Flow control for the endpoint to representor direction
 */
#define BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r)	((rep_fc_f2r) > 1)

int bnxt_cfa_code_dynfield_offset = -1;

/*
 * max_num_kflows must be >= 32
 * and must be a power-of-2 supported value
 * return: 1 -> invalid
 *	   0 -> valid
 */
static int bnxt_devarg_max_num_kflow_invalid(uint16_t max_num_kflows)
{
	if (max_num_kflows < 32 || !rte_is_power_of_2(max_num_kflows))
		return 1;
	return 0;
}
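/*
 * Illustrative example (PCI address is hypothetical): the device arguments
 * above are supplied through the standard EAL allow option, e.g.
 *     -a 0000:0d:00.0,flow-xstat=1,max-num-kflows=64,app-id=0
 * Each value is range-checked at probe time by the corresponding
 * BNXT_DEVARG_*_INVALID() macro or helper above.
 */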
static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev);
static int bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev);
static void bnxt_cancel_fw_health_check(struct bnxt *bp);
static int bnxt_restore_vlan_filters(struct bnxt *bp);
static void bnxt_dev_recover(void *arg);
static void bnxt_free_error_recovery_info(struct bnxt *bp);
static void bnxt_free_rep_info(struct bnxt *bp);

int is_bnxt_in_error(struct bnxt *bp)
{
	if (bp->flags & BNXT_FLAG_FATAL_ERROR)
		return -EIO;
	if (bp->flags & BNXT_FLAG_FW_RESET)
		return -EBUSY;

	return 0;
}

/***********************/

/*
 * High level utility functions
 */

static uint16_t bnxt_rss_ctxts(const struct bnxt *bp)
{
	unsigned int num_rss_rings = RTE_MIN(bp->rx_nr_rings,
					     BNXT_RSS_TBL_SIZE_P5);

	if (!BNXT_CHIP_P5(bp))
		return 1;

	return RTE_ALIGN_MUL_CEIL(num_rss_rings,
				  BNXT_RSS_ENTRIES_PER_CTX_P5) /
	       BNXT_RSS_ENTRIES_PER_CTX_P5;
}

uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp)
{
	if (!BNXT_CHIP_P5(bp))
		return HW_HASH_INDEX_SIZE;

	return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_P5;
}
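/*
 * Worked example (illustrative, assuming BNXT_RSS_ENTRIES_PER_CTX_P5 is 64):
 * on a P5 device with 100 Rx rings, bnxt_rss_ctxts() yields
 * ceil(100 / 64) = 2 RSS contexts, so bnxt_rss_hash_tbl_size() reports a
 * redirection table of 2 * 64 = 128 entries.
 */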
static void bnxt_free_parent_info(struct bnxt *bp)
{
	rte_free(bp->parent);
	bp->parent = NULL;
}

static void bnxt_free_pf_info(struct bnxt *bp)
{
	rte_free(bp->pf);
	bp->pf = NULL;
}

static void bnxt_free_link_info(struct bnxt *bp)
{
	rte_free(bp->link_info);
	bp->link_info = NULL;
}

static void bnxt_free_leds_info(struct bnxt *bp)
{
	if (BNXT_VF(bp))
		return;

	rte_free(bp->leds);
	bp->leds = NULL;
}

static void bnxt_free_flow_stats_info(struct bnxt *bp)
{
	rte_free(bp->flow_stat);
	bp->flow_stat = NULL;
}

static void bnxt_free_cos_queues(struct bnxt *bp)
{
	rte_free(bp->rx_cos_queue);
	bp->rx_cos_queue = NULL;
	rte_free(bp->tx_cos_queue);
	bp->tx_cos_queue = NULL;
}

static void bnxt_free_mem(struct bnxt *bp, bool reconfig)
{
	bnxt_free_filter_mem(bp);
	bnxt_free_vnic_attributes(bp);
	bnxt_free_vnic_mem(bp);

	/* tx/rx rings are configured as part of *_queue_setup callbacks.
	 * If the number of rings changes across a fw update,
	 * we don't have much choice except to warn the user.
	 */
	if (!reconfig) {
		bnxt_free_stats(bp);
		bnxt_free_tx_rings(bp);
		bnxt_free_rx_rings(bp);
	}
	bnxt_free_async_cp_ring(bp);
	bnxt_free_rxtx_nq_ring(bp);

	rte_free(bp->grp_info);
	bp->grp_info = NULL;
}

static int bnxt_alloc_parent_info(struct bnxt *bp)
{
	bp->parent = rte_zmalloc("bnxt_parent_info",
				 sizeof(struct bnxt_parent_info), 0);
	if (bp->parent == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_pf_info(struct bnxt *bp)
{
	bp->pf = rte_zmalloc("bnxt_pf_info", sizeof(struct bnxt_pf_info), 0);
	if (bp->pf == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_link_info(struct bnxt *bp)
{
	bp->link_info =
		rte_zmalloc("bnxt_link_info", sizeof(struct bnxt_link_info), 0);
	if (bp->link_info == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_leds_info(struct bnxt *bp)
{
	if (BNXT_VF(bp))
		return 0;

	bp->leds = rte_zmalloc("bnxt_leds",
			       BNXT_MAX_LED * sizeof(struct bnxt_led_info),
			       0);
	if (bp->leds == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_cos_queues(struct bnxt *bp)
{
	bp->rx_cos_queue =
		rte_zmalloc("bnxt_rx_cosq",
			    BNXT_COS_QUEUE_COUNT *
			    sizeof(struct bnxt_cos_queue_info),
			    0);
	if (bp->rx_cos_queue == NULL)
		return -ENOMEM;

	bp->tx_cos_queue =
		rte_zmalloc("bnxt_tx_cosq",
			    BNXT_COS_QUEUE_COUNT *
			    sizeof(struct bnxt_cos_queue_info),
			    0);
	if (bp->tx_cos_queue == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_flow_stats_info(struct bnxt *bp)
{
	bp->flow_stat = rte_zmalloc("bnxt_flow_xstat",
				    sizeof(struct bnxt_flow_stat_info), 0);
	if (bp->flow_stat == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig)
{
	int rc;

	rc = bnxt_alloc_ring_grps(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_async_ring_struct(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_mem(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_attributes(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_filter_mem(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_async_cp_ring(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_rxtx_nq_ring(bp);
	if (rc)
		goto alloc_mem_err;

	if (BNXT_FLOW_XSTATS_EN(bp)) {
		rc = bnxt_alloc_flow_stats_info(bp);
		if (rc)
			goto alloc_mem_err;
	}

	return 0;

alloc_mem_err:
	bnxt_free_mem(bp, reconfig);
	return rc;
}
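/*
 * Set up one VNIC: allocate its ring group and firmware VNIC, allocate RSS
 * contexts when RSS is enabled, apply the VLAN strip setting, program the
 * L2 filters, account the attached Rx queues, and configure RSS, buffer
 * placement and TPA (LRO) for the VNIC.
 */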
static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
{
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	uint64_t rx_offloads = dev_conf->rxmode.offloads;
	struct bnxt_rx_queue *rxq;
	unsigned int j;
	int rc;

	rc = bnxt_vnic_grp_alloc(bp, vnic);
	if (rc)
		goto err_out;

	PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
		    vnic_id, vnic, vnic->fw_grp_ids);

	rc = bnxt_hwrm_vnic_alloc(bp, vnic);
	if (rc)
		goto err_out;

	/* Alloc RSS context only if RSS mode is enabled */
	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
		int j, nr_ctxs = bnxt_rss_ctxts(bp);

		/* RSS table size in Thor is 512.
		 * Cap max Rx rings to same value
		 */
		if (bp->rx_nr_rings > BNXT_RSS_TBL_SIZE_P5) {
			PMD_DRV_LOG(ERR, "RxQ cnt %d > reta_size %d\n",
				    bp->rx_nr_rings, BNXT_RSS_TBL_SIZE_P5);
			goto err_out;
		}

		rc = 0;
		for (j = 0; j < nr_ctxs; j++) {
			rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j);
			if (rc)
				break;
		}
		if (rc) {
			PMD_DRV_LOG(ERR,
				    "HWRM vnic %d ctx %d alloc failure rc: %x\n",
				    vnic_id, j, rc);
			goto err_out;
		}
		vnic->num_lb_ctxts = nr_ctxs;
	}

	/*
	 * Firmware sets pf pair in default vnic cfg. If the VLAN strip
	 * setting is not available at this time, it will not be
	 * configured correctly in the CFA.
	 */
	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
		vnic->vlan_strip = true;
	else
		vnic->vlan_strip = false;

	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
	if (rc)
		goto err_out;

	rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
	if (rc)
		goto err_out;

	for (j = 0; j < bp->rx_num_qs_per_vnic; j++) {
		rxq = bp->eth_dev->data->rx_queues[j];

		PMD_DRV_LOG(DEBUG,
			    "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n",
			    j, rxq->vnic, rxq->vnic->fw_grp_ids);

		if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start)
			rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
		else
			vnic->rx_queue_cnt++;
	}

	PMD_DRV_LOG(DEBUG, "vnic->rx_queue_cnt = %d\n", vnic->rx_queue_cnt);

	rc = bnxt_vnic_rss_configure(bp, vnic);
	if (rc)
		goto err_out;

	bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);

	rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic,
				    (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) ?
				    true : false);
	if (rc)
		goto err_out;

	return 0;
err_out:
	PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n",
		    vnic_id, rc);
	return rc;
}
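/*
 * Register the four flow-counter DMA tables (Rx/Tx, in/out) with the
 * firmware and enable CFA counter DMA into the zeroed "out" tables.
 */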
static int bnxt_register_fc_ctx_mem(struct bnxt *bp)
{
	int rc = 0;

	rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_in_tbl.dma,
				&bp->flow_stat->rx_fc_in_tbl.ctx_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(DEBUG,
		    "rx_fc_in_tbl.va = %p rx_fc_in_tbl.dma = %p"
		    " rx_fc_in_tbl.ctx_id = %d\n",
		    bp->flow_stat->rx_fc_in_tbl.va,
		    (void *)((uintptr_t)bp->flow_stat->rx_fc_in_tbl.dma),
		    bp->flow_stat->rx_fc_in_tbl.ctx_id);

	rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_out_tbl.dma,
				&bp->flow_stat->rx_fc_out_tbl.ctx_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(DEBUG,
		    "rx_fc_out_tbl.va = %p rx_fc_out_tbl.dma = %p"
		    " rx_fc_out_tbl.ctx_id = %d\n",
		    bp->flow_stat->rx_fc_out_tbl.va,
		    (void *)((uintptr_t)bp->flow_stat->rx_fc_out_tbl.dma),
		    bp->flow_stat->rx_fc_out_tbl.ctx_id);

	rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_in_tbl.dma,
				&bp->flow_stat->tx_fc_in_tbl.ctx_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(DEBUG,
		    "tx_fc_in_tbl.va = %p tx_fc_in_tbl.dma = %p"
		    " tx_fc_in_tbl.ctx_id = %d\n",
		    bp->flow_stat->tx_fc_in_tbl.va,
		    (void *)((uintptr_t)bp->flow_stat->tx_fc_in_tbl.dma),
		    bp->flow_stat->tx_fc_in_tbl.ctx_id);

	rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_out_tbl.dma,
				&bp->flow_stat->tx_fc_out_tbl.ctx_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(DEBUG,
		    "tx_fc_out_tbl.va = %p tx_fc_out_tbl.dma = %p"
		    " tx_fc_out_tbl.ctx_id = %d\n",
		    bp->flow_stat->tx_fc_out_tbl.va,
		    (void *)((uintptr_t)bp->flow_stat->tx_fc_out_tbl.dma),
		    bp->flow_stat->tx_fc_out_tbl.ctx_id);

	memset(bp->flow_stat->rx_fc_out_tbl.va,
	       0,
	       bp->flow_stat->rx_fc_out_tbl.size);
	rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX,
				       CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
				       bp->flow_stat->rx_fc_out_tbl.ctx_id,
				       bp->flow_stat->max_fc,
				       true);
	if (rc)
		return rc;

	memset(bp->flow_stat->tx_fc_out_tbl.va,
	       0,
	       bp->flow_stat->tx_fc_out_tbl.size);
	rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX,
				       CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
				       bp->flow_stat->tx_fc_out_tbl.ctx_id,
				       bp->flow_stat->max_fc,
				       true);

	return rc;
}

static int bnxt_alloc_ctx_mem_buf(struct bnxt *bp, char *type, size_t size,
				  struct bnxt_ctx_mem_buf_info *ctx)
{
	if (!ctx)
		return -EINVAL;

	ctx->va = rte_zmalloc_socket(type, size, 0,
				     bp->eth_dev->device->numa_node);
	if (ctx->va == NULL)
		return -ENOMEM;
	rte_mem_lock_page(ctx->va);
	ctx->size = size;
	ctx->dma = rte_mem_virt2iova(ctx->va);
	if (ctx->dma == RTE_BAD_IOVA)
		return -ENOMEM;

	return 0;
}

static int bnxt_init_fc_ctx_mem(struct bnxt *bp)
{
	struct rte_pci_device *pdev = bp->pdev;
	char type[RTE_MEMZONE_NAMESIZE];
	uint16_t max_fc;
	int rc = 0;

	max_fc = bp->flow_stat->max_fc;

	sprintf(type, "bnxt_rx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	/* 4 bytes for each counter-id */
	rc = bnxt_alloc_ctx_mem_buf(bp, type,
				    max_fc * 4,
				    &bp->flow_stat->rx_fc_in_tbl);
	if (rc)
		return rc;

	sprintf(type, "bnxt_rx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	/* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
	rc = bnxt_alloc_ctx_mem_buf(bp, type,
				    max_fc * 16,
				    &bp->flow_stat->rx_fc_out_tbl);
	if (rc)
		return rc;

	sprintf(type, "bnxt_tx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	/* 4 bytes for each counter-id */
	rc = bnxt_alloc_ctx_mem_buf(bp, type,
				    max_fc * 4,
				    &bp->flow_stat->tx_fc_in_tbl);
	if (rc)
		return rc;

	sprintf(type, "bnxt_tx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	/* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
	rc = bnxt_alloc_ctx_mem_buf(bp, type,
				    max_fc * 16,
				    &bp->flow_stat->tx_fc_out_tbl);
	if (rc)
		return rc;

	rc = bnxt_register_fc_ctx_mem(bp);

	return rc;
}

static int bnxt_init_ctx_mem(struct bnxt *bp)
{
	int rc = 0;

	if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS) ||
	    !(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) ||
	    !BNXT_FLOW_XSTATS_EN(bp))
		return 0;

	rc = bnxt_hwrm_cfa_counter_qcaps(bp, &bp->flow_stat->max_fc);
	if (rc)
		return rc;

	rc = bnxt_init_fc_ctx_mem(bp);

	return rc;
}
static int bnxt_update_phy_setting(struct bnxt *bp)
{
	struct rte_eth_link new;
	int rc;

	rc = bnxt_get_hwrm_link_config(bp, &new);
	if (rc) {
		PMD_DRV_LOG(ERR, "Failed to get link settings\n");
		return rc;
	}

	/*
	 * On BCM957508-N2100 adapters, FW will not allow any user other
	 * than BMC to shutdown the port. bnxt_get_hwrm_link_config() call
	 * always returns link up. Force phy update always in that case.
	 */
	if (!new.link_status || IS_BNXT_DEV_957508_N2100(bp)) {
		rc = bnxt_set_hwrm_link_config(bp, true);
		if (rc) {
			PMD_DRV_LOG(ERR, "Failed to update PHY settings\n");
			return rc;
		}
	}

	return rc;
}

static void bnxt_free_prev_ring_stats(struct bnxt *bp)
{
	rte_free(bp->prev_rx_ring_stats);
	rte_free(bp->prev_tx_ring_stats);

	bp->prev_rx_ring_stats = NULL;
	bp->prev_tx_ring_stats = NULL;
}

static int bnxt_alloc_prev_ring_stats(struct bnxt *bp)
{
	bp->prev_rx_ring_stats = rte_zmalloc("bnxt_prev_rx_ring_stats",
					     sizeof(struct bnxt_ring_stats) *
					     bp->rx_cp_nr_rings,
					     0);
	if (bp->prev_rx_ring_stats == NULL)
		return -ENOMEM;

	bp->prev_tx_ring_stats = rte_zmalloc("bnxt_prev_tx_ring_stats",
					     sizeof(struct bnxt_ring_stats) *
					     bp->tx_cp_nr_rings,
					     0);
	if (bp->prev_tx_ring_stats == NULL)
		goto error;

	return 0;

error:
	bnxt_free_prev_ring_stats(bp);
	return -ENOMEM;
}

static int bnxt_start_nic(struct bnxt *bp)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	uint32_t queue_id, base = BNXT_MISC_VEC_ID;
	uint32_t vec = BNXT_MISC_VEC_ID;
	unsigned int i, j;
	int rc;

	if (bp->eth_dev->data->mtu > RTE_ETHER_MTU)
		bp->flags |= BNXT_FLAG_JUMBO;
	else
		bp->flags &= ~BNXT_FLAG_JUMBO;

	/* THOR does not support ring groups.
	 * But we will use the array to save RSS context IDs.
	 */
	if (BNXT_CHIP_P5(bp))
		bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_P5;

	rc = bnxt_alloc_hwrm_rings(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_all_hwrm_ring_grps(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc);
		goto err_out;
	}

	if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))
		goto skip_cosq_cfg;

	for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
		if (bp->rx_cos_queue[i].id != 0xff) {
			struct bnxt_vnic_info *vnic = &bp->vnic_info[j++];

			if (!vnic) {
				PMD_DRV_LOG(ERR,
					    "Num pools more than FW profile\n");
				rc = -EINVAL;
				goto err_out;
			}
			vnic->cos_queue_id = bp->rx_cos_queue[i].id;
			bp->rx_cosq_cnt++;
		}
	}

skip_cosq_cfg:
	rc = bnxt_mq_rx_configure(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc);
		goto err_out;
	}

	for (j = 0; j < bp->rx_nr_rings; j++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[j];

		if (!rxq->rx_deferred_start) {
			bp->eth_dev->data->rx_queue_state[j] =
				RTE_ETH_QUEUE_STATE_STARTED;
			rxq->rx_started = true;
		}
	}

	/* default vnic 0 */
	rc = bnxt_setup_one_vnic(bp, 0);
	if (rc)
		goto err_out;
	/* VNIC configuration */
	if (BNXT_RFS_NEEDS_VNIC(bp)) {
		for (i = 1; i < bp->nr_vnics; i++) {
			rc = bnxt_setup_one_vnic(bp, i);
			if (rc)
				goto err_out;
		}
	}

	for (j = 0; j < bp->tx_nr_rings; j++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[j];

		if (!txq->tx_deferred_start) {
			bp->eth_dev->data->tx_queue_state[j] =
				RTE_ETH_QUEUE_STATE_STARTED;
			txq->tx_started = true;
		}
	}

	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "HWRM cfa l2 rx mask failure rc: %x\n", rc);
		goto err_out;
	}
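	/*
	 * Rx queue interrupt setup below: one event fd is requested per Rx
	 * queue (configurations with more Rx queues than completion rings are
	 * rejected), and each queue is mapped to an MSI-X vector offset by
	 * BNXT_RX_VEC_START; once the enabled event fds are exhausted, the
	 * remaining queues share the last vector.
	 */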
	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
	    bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = bp->eth_dev->data->nb_rx_queues;
		PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector);
		if (intr_vector > bp->rx_cp_nr_rings) {
			PMD_DRV_LOG(ERR, "At most %d intr queues supported",
				    bp->rx_cp_nr_rings);
			return -ENOTSUP;
		}
		rc = rte_intr_efd_enable(intr_handle, intr_vector);
		if (rc)
			return rc;
	}

	if (rte_intr_dp_is_en(intr_handle)) {
		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
					    bp->eth_dev->data->nb_rx_queues)) {
			PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues"
				    " intr_vec", bp->eth_dev->data->nb_rx_queues);
			rc = -ENOMEM;
			goto err_out;
		}
		PMD_DRV_LOG(DEBUG, "intr_handle->nb_efd = %d "
			    "intr_handle->max_intr = %d\n",
			    rte_intr_nb_efd_get(intr_handle),
			    rte_intr_max_intr_get(intr_handle));
		for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
		     queue_id++) {
			rte_intr_vec_list_index_set(intr_handle,
						    queue_id, vec + BNXT_RX_VEC_START);
			if (vec < base + rte_intr_nb_efd_get(intr_handle)
			    - 1)
				vec++;
		}
	}

	/* enable uio/vfio intr/eventfd mapping */
	rc = rte_intr_enable(intr_handle);
#ifndef RTE_EXEC_ENV_FREEBSD
	/* In FreeBSD OS, nic_uio driver does not support interrupts */
	if (rc)
		goto err_out;
#endif

	rc = bnxt_update_phy_setting(bp);
	if (rc)
		goto err_out;

	bp->mark_table = rte_zmalloc("bnxt_mark_table", BNXT_MARK_TABLE_SZ, 0);
	if (!bp->mark_table)
		PMD_DRV_LOG(ERR, "Allocation of mark table failed\n");

	return 0;

err_out:
	/* Some of the error status returned by FW may not be from errno.h */
	if (rc > 0)
		rc = -EIO;

	return rc;
}

static int bnxt_shutdown_nic(struct bnxt *bp)
{
	bnxt_free_all_hwrm_resources(bp);
	bnxt_free_all_filters(bp);
	bnxt_free_all_vnics(bp);
	return 0;
}

/*
 * Device configuration and status function
 */

uint32_t bnxt_get_speed_capabilities(struct bnxt *bp)
{
	uint32_t link_speed = 0;
	uint32_t speed_capa = 0;

	if (bp->link_info == NULL)
		return 0;

	link_speed = bp->link_info->support_speeds;

	/* If PAM4 is configured, use PAM4 supported speed */
	if (link_speed == 0 && bp->link_info->support_pam4_speeds > 0)
		link_speed = bp->link_info->support_pam4_speeds;

	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB)
		speed_capa |= RTE_ETH_LINK_SPEED_100M;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD)
		speed_capa |= RTE_ETH_LINK_SPEED_100M_HD;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB)
		speed_capa |= RTE_ETH_LINK_SPEED_1G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB)
		speed_capa |= RTE_ETH_LINK_SPEED_2_5G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB)
		speed_capa |= RTE_ETH_LINK_SPEED_10G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
		speed_capa |= RTE_ETH_LINK_SPEED_20G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB)
		speed_capa |= RTE_ETH_LINK_SPEED_25G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB)
		speed_capa |= RTE_ETH_LINK_SPEED_40G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB)
		speed_capa |= RTE_ETH_LINK_SPEED_50G;
	if
(link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB) 928 speed_capa |= RTE_ETH_LINK_SPEED_100G; 929 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G) 930 speed_capa |= RTE_ETH_LINK_SPEED_50G; 931 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G) 932 speed_capa |= RTE_ETH_LINK_SPEED_100G; 933 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G) 934 speed_capa |= RTE_ETH_LINK_SPEED_200G; 935 936 if (bp->link_info->auto_mode == 937 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE) 938 speed_capa |= RTE_ETH_LINK_SPEED_FIXED; 939 940 return speed_capa; 941 } 942 943 static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, 944 struct rte_eth_dev_info *dev_info) 945 { 946 struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device); 947 struct bnxt *bp = eth_dev->data->dev_private; 948 uint16_t max_vnics, i, j, vpool, vrxq; 949 unsigned int max_rx_rings; 950 int rc; 951 952 rc = is_bnxt_in_error(bp); 953 if (rc) 954 return rc; 955 956 /* MAC Specifics */ 957 dev_info->max_mac_addrs = bp->max_l2_ctx; 958 dev_info->max_hash_mac_addrs = 0; 959 960 /* PF/VF specifics */ 961 if (BNXT_PF(bp)) 962 dev_info->max_vfs = pdev->max_vfs; 963 964 max_rx_rings = bnxt_max_rings(bp); 965 /* For the sake of symmetry, max_rx_queues = max_tx_queues */ 966 dev_info->max_rx_queues = max_rx_rings; 967 dev_info->max_tx_queues = max_rx_rings; 968 dev_info->reta_size = bnxt_rss_hash_tbl_size(bp); 969 dev_info->hash_key_size = HW_HASH_KEY_SIZE; 970 max_vnics = bp->max_vnics; 971 972 /* MTU specifics */ 973 dev_info->min_mtu = RTE_ETHER_MIN_MTU; 974 dev_info->max_mtu = BNXT_MAX_MTU; 975 976 /* Fast path specifics */ 977 dev_info->min_rx_bufsize = 1; 978 dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN; 979 980 dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT; 981 if (bp->flags & BNXT_FLAG_PTP_SUPPORTED) 982 dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP; 983 if (bp->vnic_cap_flags & BNXT_VNIC_CAP_VLAN_RX_STRIP) 984 dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 985 dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; 986 dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT | 987 dev_info->tx_queue_offload_capa; 988 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT) 989 dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT; 990 dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT; 991 992 dev_info->speed_capa = bnxt_get_speed_capabilities(bp); 993 dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | 994 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; 995 dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP; 996 997 dev_info->default_rxconf = (struct rte_eth_rxconf) { 998 .rx_thresh = { 999 .pthresh = 8, 1000 .hthresh = 8, 1001 .wthresh = 0, 1002 }, 1003 .rx_free_thresh = 32, 1004 .rx_drop_en = BNXT_DEFAULT_RX_DROP_EN, 1005 }; 1006 1007 dev_info->default_txconf = (struct rte_eth_txconf) { 1008 .tx_thresh = { 1009 .pthresh = 32, 1010 .hthresh = 0, 1011 .wthresh = 0, 1012 }, 1013 .tx_free_thresh = 32, 1014 .tx_rs_thresh = 32, 1015 }; 1016 eth_dev->data->dev_conf.intr_conf.lsc = 1; 1017 1018 dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC; 1019 dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC; 1020 dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC; 1021 dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC; 1022 1023 if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) { 1024 dev_info->switch_info.name = eth_dev->device->name; 1025 dev_info->switch_info.domain_id = bp->switch_domain_id; 1026 dev_info->switch_info.port_id = 
1027 BNXT_PF(bp) ? BNXT_SWITCH_PORT_ID_PF : 1028 BNXT_SWITCH_PORT_ID_TRUSTED_VF; 1029 } 1030 1031 /* 1032 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim 1033 * need further investigation. 1034 */ 1035 1036 /* VMDq resources */ 1037 vpool = 64; /* RTE_ETH_64_POOLS */ 1038 vrxq = 128; /* RTE_ETH_VMDQ_DCB_NUM_QUEUES */ 1039 for (i = 0; i < 4; vpool >>= 1, i++) { 1040 if (max_vnics > vpool) { 1041 for (j = 0; j < 5; vrxq >>= 1, j++) { 1042 if (dev_info->max_rx_queues > vrxq) { 1043 if (vpool > vrxq) 1044 vpool = vrxq; 1045 goto found; 1046 } 1047 } 1048 /* Not enough resources to support VMDq */ 1049 break; 1050 } 1051 } 1052 /* Not enough resources to support VMDq */ 1053 vpool = 0; 1054 vrxq = 0; 1055 found: 1056 dev_info->max_vmdq_pools = vpool; 1057 dev_info->vmdq_queue_num = vrxq; 1058 1059 dev_info->vmdq_pool_base = 0; 1060 dev_info->vmdq_queue_base = 0; 1061 1062 return 0; 1063 } 1064 1065 /* Configure the device based on the configuration provided */ 1066 static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev) 1067 { 1068 struct bnxt *bp = eth_dev->data->dev_private; 1069 uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; 1070 int rc; 1071 1072 bp->rx_queues = (void *)eth_dev->data->rx_queues; 1073 bp->tx_queues = (void *)eth_dev->data->tx_queues; 1074 bp->tx_nr_rings = eth_dev->data->nb_tx_queues; 1075 bp->rx_nr_rings = eth_dev->data->nb_rx_queues; 1076 1077 rc = is_bnxt_in_error(bp); 1078 if (rc) 1079 return rc; 1080 1081 if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) { 1082 rc = bnxt_hwrm_check_vf_rings(bp); 1083 if (rc) { 1084 PMD_DRV_LOG(ERR, "HWRM insufficient resources\n"); 1085 return -ENOSPC; 1086 } 1087 1088 /* If a resource has already been allocated - in this case 1089 * it is the async completion ring, free it. Reallocate it after 1090 * resource reservation. This will ensure the resource counts 1091 * are calculated correctly. 
1092 */ 1093 1094 pthread_mutex_lock(&bp->def_cp_lock); 1095 1096 if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) { 1097 bnxt_disable_int(bp); 1098 bnxt_free_cp_ring(bp, bp->async_cp_ring); 1099 } 1100 1101 rc = bnxt_hwrm_func_reserve_vf_resc(bp, false); 1102 if (rc) { 1103 PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc); 1104 pthread_mutex_unlock(&bp->def_cp_lock); 1105 return -ENOSPC; 1106 } 1107 1108 if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) { 1109 rc = bnxt_alloc_async_cp_ring(bp); 1110 if (rc) { 1111 pthread_mutex_unlock(&bp->def_cp_lock); 1112 return rc; 1113 } 1114 bnxt_enable_int(bp); 1115 } 1116 1117 pthread_mutex_unlock(&bp->def_cp_lock); 1118 } 1119 1120 /* Inherit new configurations */ 1121 if (eth_dev->data->nb_rx_queues > bp->max_rx_rings || 1122 eth_dev->data->nb_tx_queues > bp->max_tx_rings || 1123 eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues 1124 + BNXT_NUM_ASYNC_CPR(bp) > bp->max_cp_rings || 1125 eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues > 1126 bp->max_stat_ctx) 1127 goto resource_error; 1128 1129 if (BNXT_HAS_RING_GRPS(bp) && 1130 (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps) 1131 goto resource_error; 1132 1133 if (!(eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) && 1134 bp->max_vnics < eth_dev->data->nb_rx_queues) 1135 goto resource_error; 1136 1137 bp->rx_cp_nr_rings = bp->rx_nr_rings; 1138 bp->tx_cp_nr_rings = bp->tx_nr_rings; 1139 1140 if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) 1141 rx_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; 1142 eth_dev->data->dev_conf.rxmode.offloads = rx_offloads; 1143 1144 bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu); 1145 1146 return 0; 1147 1148 resource_error: 1149 PMD_DRV_LOG(ERR, 1150 "Insufficient resources to support requested config\n"); 1151 PMD_DRV_LOG(ERR, 1152 "Num Queues Requested: Tx %d, Rx %d\n", 1153 eth_dev->data->nb_tx_queues, 1154 eth_dev->data->nb_rx_queues); 1155 PMD_DRV_LOG(ERR, 1156 "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n", 1157 bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings, 1158 bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics); 1159 return -ENOSPC; 1160 } 1161 1162 void bnxt_print_link_info(struct rte_eth_dev *eth_dev) 1163 { 1164 struct rte_eth_link *link = ð_dev->data->dev_link; 1165 1166 if (link->link_status) 1167 PMD_DRV_LOG(DEBUG, "Port %d Link Up - speed %u Mbps - %s\n", 1168 eth_dev->data->port_id, 1169 (uint32_t)link->link_speed, 1170 (link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 1171 ("full-duplex") : ("half-duplex\n")); 1172 else 1173 PMD_DRV_LOG(INFO, "Port %d Link Down\n", 1174 eth_dev->data->port_id); 1175 } 1176 1177 /* 1178 * Determine whether the current configuration requires support for scattered 1179 * receive; return 1 if scattered receive is required and 0 if not. 
1180 */ 1181 static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev) 1182 { 1183 uint32_t overhead = BNXT_MAX_PKT_LEN - BNXT_MAX_MTU; 1184 uint16_t buf_size; 1185 int i; 1186 1187 if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) 1188 return 1; 1189 1190 if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) 1191 return 1; 1192 1193 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { 1194 struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i]; 1195 1196 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) - 1197 RTE_PKTMBUF_HEADROOM); 1198 if (eth_dev->data->mtu + overhead > buf_size) 1199 return 1; 1200 } 1201 return 0; 1202 } 1203 1204 static eth_rx_burst_t 1205 bnxt_receive_function(struct rte_eth_dev *eth_dev) 1206 { 1207 struct bnxt *bp = eth_dev->data->dev_private; 1208 1209 /* Disable vector mode RX for Stingray2 for now */ 1210 if (BNXT_CHIP_SR2(bp)) { 1211 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 1212 return bnxt_recv_pkts; 1213 } 1214 1215 #if (defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)) && \ 1216 !defined(RTE_LIBRTE_IEEE1588) 1217 1218 /* Vector mode receive cannot be enabled if scattered rx is in use. */ 1219 if (eth_dev->data->scattered_rx) 1220 goto use_scalar_rx; 1221 1222 /* 1223 * Vector mode receive cannot be enabled if Truflow is enabled or if 1224 * asynchronous completions and receive completions can be placed in 1225 * the same completion ring. 1226 */ 1227 if (BNXT_TRUFLOW_EN(bp) || !BNXT_NUM_ASYNC_CPR(bp)) 1228 goto use_scalar_rx; 1229 1230 /* 1231 * Vector mode receive cannot be enabled if any receive offloads outside 1232 * a limited subset have been enabled. 1233 */ 1234 if (eth_dev->data->dev_conf.rxmode.offloads & 1235 ~(RTE_ETH_RX_OFFLOAD_VLAN_STRIP | 1236 RTE_ETH_RX_OFFLOAD_KEEP_CRC | 1237 RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | 1238 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | 1239 RTE_ETH_RX_OFFLOAD_TCP_CKSUM | 1240 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | 1241 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | 1242 RTE_ETH_RX_OFFLOAD_RSS_HASH | 1243 RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) 1244 goto use_scalar_rx; 1245 1246 #if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT) 1247 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 && 1248 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1) { 1249 PMD_DRV_LOG(INFO, 1250 "Using AVX2 vector mode receive for port %d\n", 1251 eth_dev->data->port_id); 1252 bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE; 1253 return bnxt_recv_pkts_vec_avx2; 1254 } 1255 #endif 1256 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) { 1257 PMD_DRV_LOG(INFO, 1258 "Using SSE vector mode receive for port %d\n", 1259 eth_dev->data->port_id); 1260 bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE; 1261 return bnxt_recv_pkts_vec; 1262 } 1263 1264 use_scalar_rx: 1265 PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n", 1266 eth_dev->data->port_id); 1267 PMD_DRV_LOG(INFO, 1268 "Port %d scatter: %d rx offload: %" PRIX64 "\n", 1269 eth_dev->data->port_id, 1270 eth_dev->data->scattered_rx, 1271 eth_dev->data->dev_conf.rxmode.offloads); 1272 #endif 1273 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 1274 return bnxt_recv_pkts; 1275 } 1276 1277 static eth_tx_burst_t 1278 bnxt_transmit_function(struct rte_eth_dev *eth_dev) 1279 { 1280 struct bnxt *bp = eth_dev->data->dev_private; 1281 1282 /* Disable vector mode TX for Stingray2 for now */ 1283 if (BNXT_CHIP_SR2(bp)) 1284 return bnxt_xmit_pkts; 1285 1286 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) && \ 1287 !defined(RTE_LIBRTE_IEEE1588) 1288 uint64_t offloads = 
eth_dev->data->dev_conf.txmode.offloads; 1289 1290 /* 1291 * Vector mode transmit can be enabled only if not using scatter rx 1292 * or tx offloads. 1293 */ 1294 if (eth_dev->data->scattered_rx || 1295 (offloads & ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) || 1296 BNXT_TRUFLOW_EN(bp)) 1297 goto use_scalar_tx; 1298 1299 #if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT) 1300 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 && 1301 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1) { 1302 PMD_DRV_LOG(INFO, 1303 "Using AVX2 vector mode transmit for port %d\n", 1304 eth_dev->data->port_id); 1305 return bnxt_xmit_pkts_vec_avx2; 1306 } 1307 #endif 1308 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) { 1309 PMD_DRV_LOG(INFO, 1310 "Using SSE vector mode transmit for port %d\n", 1311 eth_dev->data->port_id); 1312 return bnxt_xmit_pkts_vec; 1313 } 1314 1315 use_scalar_tx: 1316 PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n", 1317 eth_dev->data->port_id); 1318 PMD_DRV_LOG(INFO, 1319 "Port %d scatter: %d tx offload: %" PRIX64 "\n", 1320 eth_dev->data->port_id, 1321 eth_dev->data->scattered_rx, 1322 offloads); 1323 #endif 1324 return bnxt_xmit_pkts; 1325 } 1326 1327 static int bnxt_handle_if_change_status(struct bnxt *bp) 1328 { 1329 int rc; 1330 1331 /* Since fw has undergone a reset and lost all contexts, 1332 * set fatal flag to not issue hwrm during cleanup 1333 */ 1334 bp->flags |= BNXT_FLAG_FATAL_ERROR; 1335 bnxt_uninit_resources(bp, true); 1336 1337 /* clear fatal flag so that re-init happens */ 1338 bp->flags &= ~BNXT_FLAG_FATAL_ERROR; 1339 rc = bnxt_init_resources(bp, true); 1340 1341 bp->flags &= ~BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE; 1342 1343 return rc; 1344 } 1345 1346 static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev) 1347 { 1348 struct bnxt *bp = eth_dev->data->dev_private; 1349 int rc = 0; 1350 1351 if (!BNXT_SINGLE_PF(bp)) 1352 return -ENOTSUP; 1353 1354 if (!bp->link_info->link_up) 1355 rc = bnxt_set_hwrm_link_config(bp, true); 1356 if (!rc) 1357 eth_dev->data->dev_link.link_status = 1; 1358 1359 bnxt_print_link_info(eth_dev); 1360 return rc; 1361 } 1362 1363 static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev) 1364 { 1365 struct bnxt *bp = eth_dev->data->dev_private; 1366 1367 if (!BNXT_SINGLE_PF(bp)) 1368 return -ENOTSUP; 1369 1370 eth_dev->data->dev_link.link_status = 0; 1371 bnxt_set_hwrm_link_config(bp, false); 1372 bp->link_info->link_up = 0; 1373 1374 return 0; 1375 } 1376 1377 static void bnxt_free_switch_domain(struct bnxt *bp) 1378 { 1379 int rc = 0; 1380 1381 if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) 1382 return; 1383 1384 rc = rte_eth_switch_domain_free(bp->switch_domain_id); 1385 if (rc) 1386 PMD_DRV_LOG(ERR, "free switch domain:%d fail: %d\n", 1387 bp->switch_domain_id, rc); 1388 } 1389 1390 static void bnxt_ptp_get_current_time(void *arg) 1391 { 1392 struct bnxt *bp = arg; 1393 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 1394 int rc; 1395 1396 rc = is_bnxt_in_error(bp); 1397 if (rc) 1398 return; 1399 1400 if (!ptp) 1401 return; 1402 1403 bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, 1404 &ptp->current_time); 1405 1406 rc = rte_eal_alarm_set(US_PER_S, bnxt_ptp_get_current_time, (void *)bp); 1407 if (rc != 0) { 1408 PMD_DRV_LOG(ERR, "Failed to re-schedule PTP alarm\n"); 1409 bp->flags2 &= ~BNXT_FLAGS2_PTP_ALARM_SCHEDULED; 1410 } 1411 } 1412 1413 static int bnxt_schedule_ptp_alarm(struct bnxt *bp) 1414 { 1415 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 1416 int rc; 1417 1418 if (bp->flags2 & 
BNXT_FLAGS2_PTP_ALARM_SCHEDULED) 1419 return 0; 1420 1421 bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, 1422 &ptp->current_time); 1423 1424 rc = rte_eal_alarm_set(US_PER_S, bnxt_ptp_get_current_time, (void *)bp); 1425 return rc; 1426 } 1427 1428 static void bnxt_cancel_ptp_alarm(struct bnxt *bp) 1429 { 1430 if (bp->flags2 & BNXT_FLAGS2_PTP_ALARM_SCHEDULED) { 1431 rte_eal_alarm_cancel(bnxt_ptp_get_current_time, (void *)bp); 1432 bp->flags2 &= ~BNXT_FLAGS2_PTP_ALARM_SCHEDULED; 1433 } 1434 } 1435 1436 static void bnxt_ptp_stop(struct bnxt *bp) 1437 { 1438 bnxt_cancel_ptp_alarm(bp); 1439 bp->flags2 &= ~BNXT_FLAGS2_PTP_TIMESYNC_ENABLED; 1440 } 1441 1442 static int bnxt_ptp_start(struct bnxt *bp) 1443 { 1444 int rc; 1445 1446 rc = bnxt_schedule_ptp_alarm(bp); 1447 if (rc != 0) { 1448 PMD_DRV_LOG(ERR, "Failed to schedule PTP alarm\n"); 1449 } else { 1450 bp->flags2 |= BNXT_FLAGS2_PTP_TIMESYNC_ENABLED; 1451 bp->flags2 |= BNXT_FLAGS2_PTP_ALARM_SCHEDULED; 1452 } 1453 1454 return rc; 1455 } 1456 1457 static int bnxt_dev_stop(struct rte_eth_dev *eth_dev) 1458 { 1459 struct bnxt *bp = eth_dev->data->dev_private; 1460 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1461 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 1462 struct rte_eth_link link; 1463 int ret; 1464 1465 eth_dev->data->dev_started = 0; 1466 1467 /* Prevent crashes when queues are still in use */ 1468 eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts; 1469 eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts; 1470 1471 bnxt_disable_int(bp); 1472 1473 /* disable uio/vfio intr/eventfd mapping */ 1474 rte_intr_disable(intr_handle); 1475 1476 /* Stop the child representors for this device */ 1477 ret = bnxt_rep_stop_all(bp); 1478 if (ret != 0) 1479 return ret; 1480 1481 /* delete the bnxt ULP port details */ 1482 bnxt_ulp_port_deinit(bp); 1483 1484 bnxt_cancel_fw_health_check(bp); 1485 1486 if (BNXT_P5_PTP_TIMESYNC_ENABLED(bp)) 1487 bnxt_cancel_ptp_alarm(bp); 1488 1489 /* Do not bring link down during reset recovery */ 1490 if (!is_bnxt_in_error(bp)) { 1491 bnxt_dev_set_link_down_op(eth_dev); 1492 /* Wait for link to be reset */ 1493 if (BNXT_SINGLE_PF(bp)) 1494 rte_delay_ms(500); 1495 /* clear the recorded link status */ 1496 memset(&link, 0, sizeof(link)); 1497 rte_eth_linkstatus_set(eth_dev, &link); 1498 } 1499 1500 /* Clean queue intr-vector mapping */ 1501 rte_intr_efd_disable(intr_handle); 1502 rte_intr_vec_list_free(intr_handle); 1503 1504 bnxt_hwrm_port_clr_stats(bp); 1505 bnxt_free_tx_mbufs(bp); 1506 bnxt_free_rx_mbufs(bp); 1507 /* Process any remaining notifications in default completion queue */ 1508 bnxt_int_handler(eth_dev); 1509 bnxt_shutdown_nic(bp); 1510 bnxt_hwrm_if_change(bp, false); 1511 1512 bnxt_free_prev_ring_stats(bp); 1513 rte_free(bp->mark_table); 1514 bp->mark_table = NULL; 1515 1516 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 1517 bp->rx_cosq_cnt = 0; 1518 /* All filters are deleted on a port stop. 
*/ 1519 if (BNXT_FLOW_XSTATS_EN(bp)) 1520 bp->flow_stat->flow_count = 0; 1521 1522 eth_dev->data->scattered_rx = 0; 1523 1524 return 0; 1525 } 1526 1527 /* Unload the driver, release resources */ 1528 static int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev) 1529 { 1530 struct bnxt *bp = eth_dev->data->dev_private; 1531 1532 pthread_mutex_lock(&bp->err_recovery_lock); 1533 if (bp->flags & BNXT_FLAG_FW_RESET) { 1534 PMD_DRV_LOG(ERR, 1535 "Adapter recovering from error..Please retry\n"); 1536 pthread_mutex_unlock(&bp->err_recovery_lock); 1537 return -EAGAIN; 1538 } 1539 pthread_mutex_unlock(&bp->err_recovery_lock); 1540 1541 return bnxt_dev_stop(eth_dev); 1542 } 1543 1544 static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) 1545 { 1546 struct bnxt *bp = eth_dev->data->dev_private; 1547 uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; 1548 int vlan_mask = 0; 1549 int rc, retry_cnt = BNXT_IF_CHANGE_RETRY_COUNT; 1550 1551 if (!eth_dev->data->nb_tx_queues || !eth_dev->data->nb_rx_queues) { 1552 PMD_DRV_LOG(ERR, "Queues are not configured yet!\n"); 1553 return -EINVAL; 1554 } 1555 1556 if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) 1557 PMD_DRV_LOG(ERR, 1558 "RxQ cnt %d > RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n", 1559 bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS); 1560 1561 do { 1562 rc = bnxt_hwrm_if_change(bp, true); 1563 if (rc == 0 || rc != -EAGAIN) 1564 break; 1565 1566 rte_delay_ms(BNXT_IF_CHANGE_RETRY_INTERVAL); 1567 } while (retry_cnt--); 1568 1569 if (rc) 1570 return rc; 1571 1572 if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) { 1573 rc = bnxt_handle_if_change_status(bp); 1574 if (rc) 1575 return rc; 1576 } 1577 1578 bnxt_enable_int(bp); 1579 1580 eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev); 1581 1582 rc = bnxt_start_nic(bp); 1583 if (rc) 1584 goto error; 1585 1586 rc = bnxt_alloc_prev_ring_stats(bp); 1587 if (rc) 1588 goto error; 1589 1590 eth_dev->data->dev_started = 1; 1591 1592 bnxt_link_update_op(eth_dev, 1); 1593 1594 if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) 1595 vlan_mask |= RTE_ETH_VLAN_FILTER_MASK; 1596 if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 1597 vlan_mask |= RTE_ETH_VLAN_STRIP_MASK; 1598 rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask); 1599 if (rc) 1600 goto error; 1601 1602 /* Initialize bnxt ULP port details */ 1603 rc = bnxt_ulp_port_init(bp); 1604 if (rc) 1605 goto error; 1606 1607 eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev); 1608 eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev); 1609 1610 bnxt_schedule_fw_health_check(bp); 1611 1612 if (BNXT_P5_PTP_TIMESYNC_ENABLED(bp)) 1613 bnxt_schedule_ptp_alarm(bp); 1614 1615 return 0; 1616 1617 error: 1618 bnxt_dev_stop(eth_dev); 1619 return rc; 1620 } 1621 1622 static void 1623 bnxt_uninit_locks(struct bnxt *bp) 1624 { 1625 pthread_mutex_destroy(&bp->flow_lock); 1626 pthread_mutex_destroy(&bp->def_cp_lock); 1627 pthread_mutex_destroy(&bp->health_check_lock); 1628 pthread_mutex_destroy(&bp->err_recovery_lock); 1629 if (bp->rep_info) { 1630 pthread_mutex_destroy(&bp->rep_info->vfr_lock); 1631 pthread_mutex_destroy(&bp->rep_info->vfr_start_lock); 1632 } 1633 } 1634 1635 static void bnxt_drv_uninit(struct bnxt *bp) 1636 { 1637 bnxt_free_leds_info(bp); 1638 bnxt_free_cos_queues(bp); 1639 bnxt_free_link_info(bp); 1640 bnxt_free_parent_info(bp); 1641 bnxt_uninit_locks(bp); 1642 1643 rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone); 1644 bp->tx_mem_zone = NULL; 1645 rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone); 1646 bp->rx_mem_zone 
= NULL; 1647 1648 bnxt_free_vf_info(bp); 1649 bnxt_free_pf_info(bp); 1650 1651 rte_free(bp->grp_info); 1652 bp->grp_info = NULL; 1653 } 1654 1655 static int bnxt_dev_close_op(struct rte_eth_dev *eth_dev) 1656 { 1657 struct bnxt *bp = eth_dev->data->dev_private; 1658 int ret = 0; 1659 1660 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1661 return 0; 1662 1663 pthread_mutex_lock(&bp->err_recovery_lock); 1664 if (bp->flags & BNXT_FLAG_FW_RESET) { 1665 PMD_DRV_LOG(ERR, 1666 "Adapter recovering from error...Please retry\n"); 1667 pthread_mutex_unlock(&bp->err_recovery_lock); 1668 return -EAGAIN; 1669 } 1670 pthread_mutex_unlock(&bp->err_recovery_lock); 1671 1672 /* cancel the recovery handler before remove dev */ 1673 rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp); 1674 rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp); 1675 bnxt_cancel_fc_thread(bp); 1676 1677 if (eth_dev->data->dev_started) 1678 ret = bnxt_dev_stop(eth_dev); 1679 1680 bnxt_uninit_resources(bp, false); 1681 1682 bnxt_drv_uninit(bp); 1683 1684 return ret; 1685 } 1686 1687 static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev, 1688 uint32_t index) 1689 { 1690 struct bnxt *bp = eth_dev->data->dev_private; 1691 uint64_t pool_mask = eth_dev->data->mac_pool_sel[index]; 1692 struct bnxt_vnic_info *vnic; 1693 struct bnxt_filter_info *filter, *temp_filter; 1694 uint32_t i; 1695 1696 if (is_bnxt_in_error(bp)) 1697 return; 1698 1699 /* 1700 * Loop through all VNICs from the specified filter flow pools to 1701 * remove the corresponding MAC addr filter 1702 */ 1703 for (i = 0; i < bp->nr_vnics; i++) { 1704 if (!(pool_mask & (1ULL << i))) 1705 continue; 1706 1707 vnic = &bp->vnic_info[i]; 1708 filter = STAILQ_FIRST(&vnic->filter); 1709 while (filter) { 1710 temp_filter = STAILQ_NEXT(filter, next); 1711 if (filter->mac_index == index) { 1712 STAILQ_REMOVE(&vnic->filter, filter, 1713 bnxt_filter_info, next); 1714 bnxt_hwrm_clear_l2_filter(bp, filter); 1715 bnxt_free_filter(bp, filter); 1716 } 1717 filter = temp_filter; 1718 } 1719 } 1720 } 1721 1722 static int bnxt_add_mac_filter(struct bnxt *bp, struct bnxt_vnic_info *vnic, 1723 struct rte_ether_addr *mac_addr, uint32_t index, 1724 uint32_t pool) 1725 { 1726 struct bnxt_filter_info *filter; 1727 int rc = 0; 1728 1729 /* Attach requested MAC address to the new l2_filter */ 1730 STAILQ_FOREACH(filter, &vnic->filter, next) { 1731 if (filter->mac_index == index) { 1732 PMD_DRV_LOG(DEBUG, 1733 "MAC addr already existed for pool %d\n", 1734 pool); 1735 return 0; 1736 } 1737 } 1738 1739 filter = bnxt_alloc_filter(bp); 1740 if (!filter) { 1741 PMD_DRV_LOG(ERR, "L2 filter alloc failed\n"); 1742 return -ENODEV; 1743 } 1744 1745 /* bnxt_alloc_filter copies default MAC to filter->l2_addr. 
So, 1746 * if the MAC that's been programmed now is a different one, then, 1747 * copy that addr to filter->l2_addr 1748 */ 1749 if (mac_addr) 1750 memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN); 1751 filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; 1752 1753 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); 1754 if (!rc) { 1755 filter->mac_index = index; 1756 if (filter->mac_index == 0) 1757 STAILQ_INSERT_HEAD(&vnic->filter, filter, next); 1758 else 1759 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 1760 } else { 1761 bnxt_free_filter(bp, filter); 1762 } 1763 1764 return rc; 1765 } 1766 1767 static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev, 1768 struct rte_ether_addr *mac_addr, 1769 uint32_t index, uint32_t pool) 1770 { 1771 struct bnxt *bp = eth_dev->data->dev_private; 1772 struct bnxt_vnic_info *vnic = &bp->vnic_info[pool]; 1773 int rc = 0; 1774 1775 rc = is_bnxt_in_error(bp); 1776 if (rc) 1777 return rc; 1778 1779 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) { 1780 PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n"); 1781 return -ENOTSUP; 1782 } 1783 1784 if (!vnic) { 1785 PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool); 1786 return -EINVAL; 1787 } 1788 1789 /* Filter settings will get applied when port is started */ 1790 if (!eth_dev->data->dev_started) 1791 return 0; 1792 1793 rc = bnxt_add_mac_filter(bp, vnic, mac_addr, index, pool); 1794 1795 return rc; 1796 } 1797 1798 int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete) 1799 { 1800 int rc = 0; 1801 struct bnxt *bp = eth_dev->data->dev_private; 1802 struct rte_eth_link new; 1803 int cnt = wait_to_complete ? BNXT_MAX_LINK_WAIT_CNT : 1804 BNXT_MIN_LINK_WAIT_CNT; 1805 1806 rc = is_bnxt_in_error(bp); 1807 if (rc) 1808 return rc; 1809 1810 memset(&new, 0, sizeof(new)); 1811 1812 if (bp->link_info == NULL) 1813 goto out; 1814 1815 do { 1816 /* Retrieve link info from hardware */ 1817 rc = bnxt_get_hwrm_link_config(bp, &new); 1818 if (rc) { 1819 new.link_speed = RTE_ETH_LINK_SPEED_100M; 1820 new.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 1821 PMD_DRV_LOG(ERR, 1822 "Failed to retrieve link rc = 0x%x!\n", rc); 1823 goto out; 1824 } 1825 1826 if (!wait_to_complete || new.link_status) 1827 break; 1828 1829 rte_delay_ms(BNXT_LINK_WAIT_INTERVAL); 1830 } while (cnt--); 1831 1832 /* Only single function PF can bring phy down. 1833 * When port is stopped, report link down for VF/MH/NPAR functions. 
1834 */ 1835 if (!BNXT_SINGLE_PF(bp) && !eth_dev->data->dev_started) 1836 memset(&new, 0, sizeof(new)); 1837 1838 out: 1839 /* Timed out or success */ 1840 if (new.link_status != eth_dev->data->dev_link.link_status || 1841 new.link_speed != eth_dev->data->dev_link.link_speed) { 1842 rte_eth_linkstatus_set(eth_dev, &new); 1843 bnxt_print_link_info(eth_dev); 1844 } 1845 1846 return rc; 1847 } 1848 1849 static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev) 1850 { 1851 struct bnxt *bp = eth_dev->data->dev_private; 1852 struct bnxt_vnic_info *vnic; 1853 uint32_t old_flags; 1854 int rc; 1855 1856 rc = is_bnxt_in_error(bp); 1857 if (rc) 1858 return rc; 1859 1860 /* Filter settings will get applied when port is started */ 1861 if (!eth_dev->data->dev_started) 1862 return 0; 1863 1864 if (bp->vnic_info == NULL) 1865 return 0; 1866 1867 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1868 1869 old_flags = vnic->flags; 1870 vnic->flags |= BNXT_VNIC_INFO_PROMISC; 1871 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1872 if (rc != 0) 1873 vnic->flags = old_flags; 1874 1875 return rc; 1876 } 1877 1878 static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev) 1879 { 1880 struct bnxt *bp = eth_dev->data->dev_private; 1881 struct bnxt_vnic_info *vnic; 1882 uint32_t old_flags; 1883 int rc; 1884 1885 rc = is_bnxt_in_error(bp); 1886 if (rc) 1887 return rc; 1888 1889 /* Filter settings will get applied when port is started */ 1890 if (!eth_dev->data->dev_started) 1891 return 0; 1892 1893 if (bp->vnic_info == NULL) 1894 return 0; 1895 1896 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1897 1898 old_flags = vnic->flags; 1899 vnic->flags &= ~BNXT_VNIC_INFO_PROMISC; 1900 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1901 if (rc != 0) 1902 vnic->flags = old_flags; 1903 1904 return rc; 1905 } 1906 1907 static int bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev) 1908 { 1909 struct bnxt *bp = eth_dev->data->dev_private; 1910 struct bnxt_vnic_info *vnic; 1911 uint32_t old_flags; 1912 int rc; 1913 1914 rc = is_bnxt_in_error(bp); 1915 if (rc) 1916 return rc; 1917 1918 /* Filter settings will get applied when port is started */ 1919 if (!eth_dev->data->dev_started) 1920 return 0; 1921 1922 if (bp->vnic_info == NULL) 1923 return 0; 1924 1925 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1926 1927 old_flags = vnic->flags; 1928 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; 1929 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1930 if (rc != 0) 1931 vnic->flags = old_flags; 1932 1933 return rc; 1934 } 1935 1936 static int bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev) 1937 { 1938 struct bnxt *bp = eth_dev->data->dev_private; 1939 struct bnxt_vnic_info *vnic; 1940 uint32_t old_flags; 1941 int rc; 1942 1943 rc = is_bnxt_in_error(bp); 1944 if (rc) 1945 return rc; 1946 1947 /* Filter settings will get applied when port is started */ 1948 if (!eth_dev->data->dev_started) 1949 return 0; 1950 1951 if (bp->vnic_info == NULL) 1952 return 0; 1953 1954 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1955 1956 old_flags = vnic->flags; 1957 vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; 1958 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1959 if (rc != 0) 1960 vnic->flags = old_flags; 1961 1962 return rc; 1963 } 1964 1965 /* Return bnxt_rx_queue pointer corresponding to a given rxq. 
*/ 1966 static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid) 1967 { 1968 if (qid >= bp->rx_nr_rings) 1969 return NULL; 1970 1971 return bp->eth_dev->data->rx_queues[qid]; 1972 } 1973 1974 /* Return rxq corresponding to a given rss table ring/group ID. */ 1975 static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr) 1976 { 1977 struct bnxt_rx_queue *rxq; 1978 unsigned int i; 1979 1980 if (!BNXT_HAS_RING_GRPS(bp)) { 1981 for (i = 0; i < bp->rx_nr_rings; i++) { 1982 rxq = bp->eth_dev->data->rx_queues[i]; 1983 if (rxq->rx_ring->rx_ring_struct->fw_ring_id == fwr) 1984 return rxq->index; 1985 } 1986 } else { 1987 for (i = 0; i < bp->rx_nr_rings; i++) { 1988 if (bp->grp_info[i].fw_grp_id == fwr) 1989 return i; 1990 } 1991 } 1992 1993 return INVALID_HW_RING_ID; 1994 } 1995 1996 static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev, 1997 struct rte_eth_rss_reta_entry64 *reta_conf, 1998 uint16_t reta_size) 1999 { 2000 struct bnxt *bp = eth_dev->data->dev_private; 2001 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 2002 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2003 uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp); 2004 uint16_t idx, sft; 2005 int i, rc; 2006 2007 rc = is_bnxt_in_error(bp); 2008 if (rc) 2009 return rc; 2010 2011 if (!vnic->rss_table) 2012 return -EINVAL; 2013 2014 if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) 2015 return -EINVAL; 2016 2017 if (reta_size != tbl_size) { 2018 PMD_DRV_LOG(ERR, "The configured hash table lookup size " 2019 "(%d) must equal the size supported by the hardware " 2020 "(%d)\n", reta_size, tbl_size); 2021 return -EINVAL; 2022 } 2023 2024 for (i = 0; i < reta_size; i++) { 2025 struct bnxt_rx_queue *rxq; 2026 2027 idx = i / RTE_ETH_RETA_GROUP_SIZE; 2028 sft = i % RTE_ETH_RETA_GROUP_SIZE; 2029 2030 if (!(reta_conf[idx].mask & (1ULL << sft))) 2031 continue; 2032 2033 rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]); 2034 if (!rxq) { 2035 PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n"); 2036 return -EINVAL; 2037 } 2038 2039 if (BNXT_CHIP_P5(bp)) { 2040 vnic->rss_table[i * 2] = 2041 rxq->rx_ring->rx_ring_struct->fw_ring_id; 2042 vnic->rss_table[i * 2 + 1] = 2043 rxq->cp_ring->cp_ring_struct->fw_ring_id; 2044 } else { 2045 vnic->rss_table[i] = 2046 vnic->fw_grp_ids[reta_conf[idx].reta[sft]]; 2047 } 2048 } 2049 2050 rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic); 2051 return rc; 2052 } 2053 2054 static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev, 2055 struct rte_eth_rss_reta_entry64 *reta_conf, 2056 uint16_t reta_size) 2057 { 2058 struct bnxt *bp = eth_dev->data->dev_private; 2059 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2060 uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp); 2061 uint16_t idx, sft, i; 2062 int rc; 2063 2064 rc = is_bnxt_in_error(bp); 2065 if (rc) 2066 return rc; 2067 2068 if (!vnic) 2069 return -EINVAL; 2070 if (!vnic->rss_table) 2071 return -EINVAL; 2072 2073 if (reta_size != tbl_size) { 2074 PMD_DRV_LOG(ERR, "The configured hash table lookup size " 2075 "(%d) must equal the size supported by the hardware " 2076 "(%d)\n", reta_size, tbl_size); 2077 return -EINVAL; 2078 } 2079 2080 for (idx = 0, i = 0; i < reta_size; i++) { 2081 idx = i / RTE_ETH_RETA_GROUP_SIZE; 2082 sft = i % RTE_ETH_RETA_GROUP_SIZE; 2083 2084 if (reta_conf[idx].mask & (1ULL << sft)) { 2085 uint16_t qid; 2086 2087 if (BNXT_CHIP_P5(bp)) 2088 qid = bnxt_rss_to_qid(bp, 2089 vnic->rss_table[i * 2]); 2090 else 2091 qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]); 2092 2093 if (qid == 
INVALID_HW_RING_ID) { 2094 PMD_DRV_LOG(ERR, "Inv. entry in rss table.\n"); 2095 return -EINVAL; 2096 } 2097 reta_conf[idx].reta[sft] = qid; 2098 } 2099 } 2100 2101 return 0; 2102 } 2103 2104 static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, 2105 struct rte_eth_rss_conf *rss_conf) 2106 { 2107 struct bnxt *bp = eth_dev->data->dev_private; 2108 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 2109 struct bnxt_vnic_info *vnic; 2110 int rc; 2111 2112 rc = is_bnxt_in_error(bp); 2113 if (rc) 2114 return rc; 2115 2116 /* 2117 * If the requested RSS enablement differs from what was set at 2118 * dev_configure time, return -EINVAL. 2119 */ 2120 if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) { 2121 if (!rss_conf->rss_hf) 2122 PMD_DRV_LOG(ERR, "Hash type NONE\n"); 2123 } else { 2124 if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT) 2125 return -EINVAL; 2126 } 2127 2128 bp->flags |= BNXT_FLAG_UPDATE_HASH; 2129 memcpy(&eth_dev->data->dev_conf.rx_adv_conf.rss_conf, 2130 rss_conf, 2131 sizeof(*rss_conf)); 2132 2133 /* Update the default RSS VNIC(s) */ 2134 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2135 vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf); 2136 vnic->hash_mode = 2137 bnxt_rte_to_hwrm_hash_level(bp, rss_conf->rss_hf, 2138 RTE_ETH_RSS_LEVEL(rss_conf->rss_hf)); 2139 2140 /* 2141 * If hashkey is not specified, use the previously configured 2142 * hashkey 2143 */ 2144 if (!rss_conf->rss_key) 2145 goto rss_config; 2146 2147 if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE) { 2148 PMD_DRV_LOG(ERR, 2149 "Invalid hashkey length, should be %d bytes\n", 2150 HW_HASH_KEY_SIZE); 2151 return -EINVAL; 2152 } 2153 memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len); 2154 2155 rss_config: 2156 rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic); 2157 return rc; 2158 } 2159 2160 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev, 2161 struct rte_eth_rss_conf *rss_conf) 2162 { 2163 struct bnxt *bp = eth_dev->data->dev_private; 2164 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2165 int len, rc; 2166 uint32_t hash_types; 2167 2168 rc = is_bnxt_in_error(bp); 2169 if (rc) 2170 return rc; 2171 2172 /* RSS configuration is the same for all VNICs */ 2173 if (vnic && vnic->rss_hash_key) { 2174 if (rss_conf->rss_key) { 2175 len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
2176 rss_conf->rss_key_len : HW_HASH_KEY_SIZE; 2177 memcpy(rss_conf->rss_key, vnic->rss_hash_key, len); 2178 } 2179 2180 hash_types = vnic->hash_type; 2181 rss_conf->rss_hf = 0; 2182 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) { 2183 rss_conf->rss_hf |= RTE_ETH_RSS_IPV4; 2184 hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4; 2185 } 2186 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) { 2187 rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP; 2188 hash_types &= 2189 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4; 2190 } 2191 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) { 2192 rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP; 2193 hash_types &= 2194 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4; 2195 } 2196 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) { 2197 rss_conf->rss_hf |= RTE_ETH_RSS_IPV6; 2198 hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6; 2199 } 2200 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) { 2201 rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP; 2202 hash_types &= 2203 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6; 2204 } 2205 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) { 2206 rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP; 2207 hash_types &= 2208 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6; 2209 } 2210 2211 rss_conf->rss_hf |= 2212 bnxt_hwrm_to_rte_rss_level(bp, vnic->hash_mode); 2213 2214 if (hash_types) { 2215 PMD_DRV_LOG(ERR, 2216 "Unknown RSS config from firmware (%08x), RSS disabled", 2217 vnic->hash_type); 2218 return -ENOTSUP; 2219 } 2220 } else { 2221 rss_conf->rss_hf = 0; 2222 } 2223 return 0; 2224 } 2225 2226 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev, 2227 struct rte_eth_fc_conf *fc_conf) 2228 { 2229 struct bnxt *bp = dev->data->dev_private; 2230 struct rte_eth_link link_info; 2231 int rc; 2232 2233 rc = is_bnxt_in_error(bp); 2234 if (rc) 2235 return rc; 2236 2237 rc = bnxt_get_hwrm_link_config(bp, &link_info); 2238 if (rc) 2239 return rc; 2240 2241 memset(fc_conf, 0, sizeof(*fc_conf)); 2242 if (bp->link_info->auto_pause) 2243 fc_conf->autoneg = 1; 2244 switch (bp->link_info->pause) { 2245 case 0: 2246 fc_conf->mode = RTE_ETH_FC_NONE; 2247 break; 2248 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX: 2249 fc_conf->mode = RTE_ETH_FC_TX_PAUSE; 2250 break; 2251 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX: 2252 fc_conf->mode = RTE_ETH_FC_RX_PAUSE; 2253 break; 2254 case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX | 2255 HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX): 2256 fc_conf->mode = RTE_ETH_FC_FULL; 2257 break; 2258 } 2259 return 0; 2260 } 2261 2262 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev, 2263 struct rte_eth_fc_conf *fc_conf) 2264 { 2265 struct bnxt *bp = dev->data->dev_private; 2266 int rc; 2267 2268 rc = is_bnxt_in_error(bp); 2269 if (rc) 2270 return rc; 2271 2272 if (!BNXT_SINGLE_PF(bp)) { 2273 PMD_DRV_LOG(ERR, 2274 "Flow Control Settings cannot be modified on VF or on shared PF\n"); 2275 return -ENOTSUP; 2276 } 2277 2278 switch (fc_conf->mode) { 2279 case RTE_ETH_FC_NONE: 2280 bp->link_info->auto_pause = 0; 2281 bp->link_info->force_pause = 0; 2282 break; 2283 case RTE_ETH_FC_RX_PAUSE: 2284 if (fc_conf->autoneg) { 2285 bp->link_info->auto_pause = 2286 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 2287 bp->link_info->force_pause = 0; 2288 } else { 2289 bp->link_info->auto_pause = 0; 2290 bp->link_info->force_pause = 2291 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 2292 } 2293 break; 2294 case RTE_ETH_FC_TX_PAUSE: 2295 if (fc_conf->autoneg) { 2296 bp->link_info->auto_pause = 2297 
HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX; 2298 bp->link_info->force_pause = 0; 2299 } else { 2300 bp->link_info->auto_pause = 0; 2301 bp->link_info->force_pause = 2302 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX; 2303 } 2304 break; 2305 case RTE_ETH_FC_FULL: 2306 if (fc_conf->autoneg) { 2307 bp->link_info->auto_pause = 2308 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX | 2309 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 2310 bp->link_info->force_pause = 0; 2311 } else { 2312 bp->link_info->auto_pause = 0; 2313 bp->link_info->force_pause = 2314 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX | 2315 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 2316 } 2317 break; 2318 } 2319 return bnxt_set_hwrm_link_config(bp, true); 2320 } 2321 2322 /* Add UDP tunneling port */ 2323 static int 2324 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev, 2325 struct rte_eth_udp_tunnel *udp_tunnel) 2326 { 2327 struct bnxt *bp = eth_dev->data->dev_private; 2328 uint16_t tunnel_type = 0; 2329 int rc = 0; 2330 2331 rc = is_bnxt_in_error(bp); 2332 if (rc) 2333 return rc; 2334 2335 switch (udp_tunnel->prot_type) { 2336 case RTE_ETH_TUNNEL_TYPE_VXLAN: 2337 if (bp->vxlan_port_cnt) { 2338 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 2339 udp_tunnel->udp_port); 2340 if (bp->vxlan_port != udp_tunnel->udp_port) { 2341 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 2342 return -ENOSPC; 2343 } 2344 bp->vxlan_port_cnt++; 2345 return 0; 2346 } 2347 tunnel_type = 2348 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN; 2349 break; 2350 case RTE_ETH_TUNNEL_TYPE_GENEVE: 2351 if (bp->geneve_port_cnt) { 2352 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 2353 udp_tunnel->udp_port); 2354 if (bp->geneve_port != udp_tunnel->udp_port) { 2355 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 2356 return -ENOSPC; 2357 } 2358 bp->geneve_port_cnt++; 2359 return 0; 2360 } 2361 tunnel_type = 2362 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE; 2363 break; 2364 default: 2365 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); 2366 return -ENOTSUP; 2367 } 2368 rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port, 2369 tunnel_type); 2370 2371 if (rc != 0) 2372 return rc; 2373 2374 if (tunnel_type == 2375 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN) 2376 bp->vxlan_port_cnt++; 2377 2378 if (tunnel_type == 2379 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE) 2380 bp->geneve_port_cnt++; 2381 2382 return rc; 2383 } 2384 2385 static int 2386 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev, 2387 struct rte_eth_udp_tunnel *udp_tunnel) 2388 { 2389 struct bnxt *bp = eth_dev->data->dev_private; 2390 uint16_t tunnel_type = 0; 2391 uint16_t port = 0; 2392 int rc = 0; 2393 2394 rc = is_bnxt_in_error(bp); 2395 if (rc) 2396 return rc; 2397 2398 switch (udp_tunnel->prot_type) { 2399 case RTE_ETH_TUNNEL_TYPE_VXLAN: 2400 if (!bp->vxlan_port_cnt) { 2401 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 2402 return -EINVAL; 2403 } 2404 if (bp->vxlan_port != udp_tunnel->udp_port) { 2405 PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n", 2406 udp_tunnel->udp_port, bp->vxlan_port); 2407 return -EINVAL; 2408 } 2409 if (--bp->vxlan_port_cnt) 2410 return 0; 2411 2412 tunnel_type = 2413 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN; 2414 port = bp->vxlan_fw_dst_port_id; 2415 break; 2416 case RTE_ETH_TUNNEL_TYPE_GENEVE: 2417 if (!bp->geneve_port_cnt) { 2418 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 2419 return -EINVAL; 2420 } 2421 if (bp->geneve_port != udp_tunnel->udp_port) { 2422 PMD_DRV_LOG(ERR, "Req Port: %d. 
Configured port: %d\n", 2423 udp_tunnel->udp_port, bp->geneve_port); 2424 return -EINVAL; 2425 } 2426 if (--bp->geneve_port_cnt) 2427 return 0; 2428 2429 tunnel_type = 2430 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE; 2431 port = bp->geneve_fw_dst_port_id; 2432 break; 2433 default: 2434 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); 2435 return -ENOTSUP; 2436 } 2437 2438 rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type); 2439 return rc; 2440 } 2441 2442 static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 2443 { 2444 struct bnxt_filter_info *filter; 2445 struct bnxt_vnic_info *vnic; 2446 int rc = 0; 2447 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; 2448 2449 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2450 filter = STAILQ_FIRST(&vnic->filter); 2451 while (filter) { 2452 /* Search for this matching MAC+VLAN filter */ 2453 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) { 2454 /* Delete the filter */ 2455 rc = bnxt_hwrm_clear_l2_filter(bp, filter); 2456 if (rc) 2457 return rc; 2458 STAILQ_REMOVE(&vnic->filter, filter, 2459 bnxt_filter_info, next); 2460 bnxt_free_filter(bp, filter); 2461 PMD_DRV_LOG(INFO, 2462 "Deleted vlan filter for %d\n", 2463 vlan_id); 2464 return 0; 2465 } 2466 filter = STAILQ_NEXT(filter, next); 2467 } 2468 return -ENOENT; 2469 } 2470 2471 static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 2472 { 2473 struct bnxt_filter_info *filter; 2474 struct bnxt_vnic_info *vnic; 2475 int rc = 0; 2476 uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN | 2477 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK; 2478 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; 2479 2480 /* Implementation notes on the use of VNIC in this command: 2481 * 2482 * By default, these filters belong to default vnic for the function. 2483 * Once these filters are set up, only destination VNIC can be modified. 2484 * If the destination VNIC is not specified in this command, 2485 * then the HWRM shall only create an l2 context id. 2486 */ 2487 2488 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2489 filter = STAILQ_FIRST(&vnic->filter); 2490 /* Check if the VLAN has already been added */ 2491 while (filter) { 2492 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) 2493 return -EEXIST; 2494 2495 filter = STAILQ_NEXT(filter, next); 2496 } 2497 2498 /* No match found. Alloc a fresh filter and issue the L2_FILTER_ALLOC 2499 * command to create MAC+VLAN filter with the right flags, enables set. 2500 */ 2501 filter = bnxt_alloc_filter(bp); 2502 if (!filter) { 2503 PMD_DRV_LOG(ERR, 2504 "MAC/VLAN filter alloc failed\n"); 2505 return -ENOMEM; 2506 } 2507 /* MAC + VLAN ID filter */ 2508 /* If l2_ivlan == 0 and l2_ivlan_mask != 0, only 2509 * untagged packets are received 2510 * 2511 * If l2_ivlan != 0 and l2_ivlan_mask != 0, untagged 2512 * packets and only the programmed vlan's packets are received 2513 */ 2514 filter->l2_ivlan = vlan_id; 2515 filter->l2_ivlan_mask = 0x0FFF; 2516 filter->enables |= en; 2517 filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; 2518 2519 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); 2520 if (rc) { 2521 /* Free the newly allocated filter as we were 2522 * not able to create the filter in hardware. 
2523 */ 2524 bnxt_free_filter(bp, filter); 2525 return rc; 2526 } 2527 2528 filter->mac_index = 0; 2529 /* Add this new filter to the list */ 2530 if (vlan_id == 0) 2531 STAILQ_INSERT_HEAD(&vnic->filter, filter, next); 2532 else 2533 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 2534 2535 PMD_DRV_LOG(INFO, 2536 "Added Vlan filter for %d\n", vlan_id); 2537 return rc; 2538 } 2539 2540 static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev, 2541 uint16_t vlan_id, int on) 2542 { 2543 struct bnxt *bp = eth_dev->data->dev_private; 2544 int rc; 2545 2546 rc = is_bnxt_in_error(bp); 2547 if (rc) 2548 return rc; 2549 2550 if (!eth_dev->data->dev_started) { 2551 PMD_DRV_LOG(ERR, "port must be started before setting vlan\n"); 2552 return -EINVAL; 2553 } 2554 2555 /* These operations apply to ALL existing MAC/VLAN filters */ 2556 if (on) 2557 return bnxt_add_vlan_filter(bp, vlan_id); 2558 else 2559 return bnxt_del_vlan_filter(bp, vlan_id); 2560 } 2561 2562 static int bnxt_del_dflt_mac_filter(struct bnxt *bp, 2563 struct bnxt_vnic_info *vnic) 2564 { 2565 struct bnxt_filter_info *filter; 2566 int rc; 2567 2568 filter = STAILQ_FIRST(&vnic->filter); 2569 while (filter) { 2570 if (filter->mac_index == 0 && 2571 !memcmp(filter->l2_addr, bp->mac_addr, 2572 RTE_ETHER_ADDR_LEN)) { 2573 rc = bnxt_hwrm_clear_l2_filter(bp, filter); 2574 if (!rc) { 2575 STAILQ_REMOVE(&vnic->filter, filter, 2576 bnxt_filter_info, next); 2577 bnxt_free_filter(bp, filter); 2578 } 2579 return rc; 2580 } 2581 filter = STAILQ_NEXT(filter, next); 2582 } 2583 return 0; 2584 } 2585 2586 static int 2587 bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads) 2588 { 2589 struct bnxt_vnic_info *vnic; 2590 unsigned int i; 2591 int rc; 2592 2593 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2594 if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) { 2595 /* Remove any VLAN filters programmed */ 2596 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++) 2597 bnxt_del_vlan_filter(bp, i); 2598 2599 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0); 2600 if (rc) 2601 return rc; 2602 } else { 2603 /* Default filter will allow packets that match the 2604 * dest mac. 
So, it has to be deleted, otherwise, we 2605 * will endup receiving vlan packets for which the 2606 * filter is not programmed, when hw-vlan-filter 2607 * configuration is ON 2608 */ 2609 bnxt_del_dflt_mac_filter(bp, vnic); 2610 /* This filter will allow only untagged packets */ 2611 bnxt_add_vlan_filter(bp, 0); 2612 } 2613 PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n", 2614 !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)); 2615 2616 return 0; 2617 } 2618 2619 static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id) 2620 { 2621 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 2622 unsigned int i; 2623 int rc; 2624 2625 /* Destroy vnic filters and vnic */ 2626 if (bp->eth_dev->data->dev_conf.rxmode.offloads & 2627 RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { 2628 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++) 2629 bnxt_del_vlan_filter(bp, i); 2630 } 2631 bnxt_del_dflt_mac_filter(bp, vnic); 2632 2633 rc = bnxt_hwrm_vnic_ctx_free(bp, vnic); 2634 if (rc) 2635 return rc; 2636 2637 rc = bnxt_hwrm_vnic_free(bp, vnic); 2638 if (rc) 2639 return rc; 2640 2641 rte_free(vnic->fw_grp_ids); 2642 vnic->fw_grp_ids = NULL; 2643 2644 vnic->rx_queue_cnt = 0; 2645 2646 return 0; 2647 } 2648 2649 static int 2650 bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads) 2651 { 2652 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2653 int rc; 2654 2655 /* Destroy, recreate and reconfigure the default vnic */ 2656 rc = bnxt_free_one_vnic(bp, 0); 2657 if (rc) 2658 return rc; 2659 2660 /* default vnic 0 */ 2661 rc = bnxt_setup_one_vnic(bp, 0); 2662 if (rc) 2663 return rc; 2664 2665 if (bp->eth_dev->data->dev_conf.rxmode.offloads & 2666 RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { 2667 rc = bnxt_add_vlan_filter(bp, 0); 2668 if (rc) 2669 return rc; 2670 rc = bnxt_restore_vlan_filters(bp); 2671 if (rc) 2672 return rc; 2673 } else { 2674 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0); 2675 if (rc) 2676 return rc; 2677 } 2678 2679 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 2680 if (rc) 2681 return rc; 2682 2683 PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n", 2684 !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)); 2685 2686 return rc; 2687 } 2688 2689 static int 2690 bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask) 2691 { 2692 uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; 2693 struct bnxt *bp = dev->data->dev_private; 2694 int rc; 2695 2696 rc = is_bnxt_in_error(bp); 2697 if (rc) 2698 return rc; 2699 2700 /* Filter settings will get applied when port is started */ 2701 if (!dev->data->dev_started) 2702 return 0; 2703 2704 if (mask & RTE_ETH_VLAN_FILTER_MASK) { 2705 /* Enable or disable VLAN filtering */ 2706 rc = bnxt_config_vlan_hw_filter(bp, rx_offloads); 2707 if (rc) 2708 return rc; 2709 } 2710 2711 if (mask & RTE_ETH_VLAN_STRIP_MASK) { 2712 /* Enable or disable VLAN stripping */ 2713 rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads); 2714 if (rc) 2715 return rc; 2716 } 2717 2718 if (mask & RTE_ETH_VLAN_EXTEND_MASK) { 2719 if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) 2720 PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n"); 2721 else 2722 PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n"); 2723 } 2724 2725 return 0; 2726 } 2727 2728 static int 2729 bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type, 2730 uint16_t tpid) 2731 { 2732 struct bnxt *bp = dev->data->dev_private; 2733 int qinq = dev->data->dev_conf.rxmode.offloads & 2734 RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 2735 2736 if (vlan_type != RTE_ETH_VLAN_TYPE_INNER && 2737 vlan_type != RTE_ETH_VLAN_TYPE_OUTER) 
{ 2738 PMD_DRV_LOG(ERR, 2739 "Unsupported vlan type."); 2740 return -EINVAL; 2741 } 2742 if (!qinq) { 2743 PMD_DRV_LOG(ERR, 2744 "QinQ not enabled. Needs to be ON as we can " 2745 "accelerate only outer vlan\n"); 2746 return -EINVAL; 2747 } 2748 2749 if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) { 2750 switch (tpid) { 2751 case RTE_ETHER_TYPE_QINQ: 2752 bp->outer_tpid_bd = 2753 TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8; 2754 break; 2755 case RTE_ETHER_TYPE_VLAN: 2756 bp->outer_tpid_bd = 2757 TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100; 2758 break; 2759 case RTE_ETHER_TYPE_QINQ1: 2760 bp->outer_tpid_bd = 2761 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100; 2762 break; 2763 case RTE_ETHER_TYPE_QINQ2: 2764 bp->outer_tpid_bd = 2765 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200; 2766 break; 2767 case RTE_ETHER_TYPE_QINQ3: 2768 bp->outer_tpid_bd = 2769 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300; 2770 break; 2771 default: 2772 PMD_DRV_LOG(ERR, "Invalid TPID: %x\n", tpid); 2773 return -EINVAL; 2774 } 2775 bp->outer_tpid_bd |= tpid; 2776 PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd); 2777 } else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) { 2778 PMD_DRV_LOG(ERR, 2779 "Can accelerate only outer vlan in QinQ\n"); 2780 return -EINVAL; 2781 } 2782 2783 return 0; 2784 } 2785 2786 static int 2787 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, 2788 struct rte_ether_addr *addr) 2789 { 2790 struct bnxt *bp = dev->data->dev_private; 2791 /* Default Filter is tied to VNIC 0 */ 2792 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2793 int rc; 2794 2795 rc = is_bnxt_in_error(bp); 2796 if (rc) 2797 return rc; 2798 2799 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 2800 return -EPERM; 2801 2802 if (rte_is_zero_ether_addr(addr)) 2803 return -EINVAL; 2804 2805 /* Filter settings will get applied when port is started */ 2806 if (!dev->data->dev_started) 2807 return 0; 2808 2809 /* Check if the requested MAC is already added */ 2810 if (memcmp(addr, bp->mac_addr, RTE_ETHER_ADDR_LEN) == 0) 2811 return 0; 2812 2813 /* Destroy filter and re-create it */ 2814 bnxt_del_dflt_mac_filter(bp, vnic); 2815 2816 memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN); 2817 if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { 2818 /* This filter will allow only untagged packets */ 2819 rc = bnxt_add_vlan_filter(bp, 0); 2820 } else { 2821 rc = bnxt_add_mac_filter(bp, vnic, addr, 0, 0); 2822 } 2823 2824 PMD_DRV_LOG(DEBUG, "Set MAC addr\n"); 2825 return rc; 2826 } 2827 2828 static int 2829 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, 2830 struct rte_ether_addr *mc_addr_set, 2831 uint32_t nb_mc_addr) 2832 { 2833 struct bnxt *bp = eth_dev->data->dev_private; 2834 char *mc_addr_list = (char *)mc_addr_set; 2835 struct bnxt_vnic_info *vnic; 2836 uint32_t off = 0, i = 0; 2837 int rc; 2838 2839 rc = is_bnxt_in_error(bp); 2840 if (rc) 2841 return rc; 2842 2843 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2844 2845 if (nb_mc_addr > BNXT_MAX_MC_ADDRS) { 2846 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; 2847 goto allmulti; 2848 } 2849 2850 /* TODO Check for Duplicate mcast addresses */ 2851 vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; 2852 for (i = 0; i < nb_mc_addr; i++) { 2853 memcpy(vnic->mc_list + off, &mc_addr_list[i], 2854 RTE_ETHER_ADDR_LEN); 2855 off += RTE_ETHER_ADDR_LEN; 2856 } 2857 2858 vnic->mc_addr_cnt = i; 2859 if (vnic->mc_addr_cnt) 2860 vnic->flags |= BNXT_VNIC_INFO_MCAST; 2861 else 2862 vnic->flags &= ~BNXT_VNIC_INFO_MCAST; 2863 2864 allmulti: 2865 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 2866 } 2867 2868 
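/*
 * bp->fw_ver packs the firmware version as major.minor.update.reserved in
 * the four bytes from most to least significant; a fw_ver of 0x14010203,
 * for example, is reported as "20.1.2.3". Per the ethdev fw_version_get
 * convention, the helper below returns 0 on success and the required buffer
 * length (including the terminating NUL) when fw_size is too small.
 */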
static int 2869 bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) 2870 { 2871 struct bnxt *bp = dev->data->dev_private; 2872 uint8_t fw_major = (bp->fw_ver >> 24) & 0xff; 2873 uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff; 2874 uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff; 2875 uint8_t fw_rsvd = bp->fw_ver & 0xff; 2876 int ret; 2877 2878 ret = snprintf(fw_version, fw_size, "%d.%d.%d.%d", 2879 fw_major, fw_minor, fw_updt, fw_rsvd); 2880 if (ret < 0) 2881 return -EINVAL; 2882 2883 ret += 1; /* add the size of '\0' */ 2884 if (fw_size < (size_t)ret) 2885 return ret; 2886 else 2887 return 0; 2888 } 2889 2890 static void 2891 bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 2892 struct rte_eth_rxq_info *qinfo) 2893 { 2894 struct bnxt *bp = dev->data->dev_private; 2895 struct bnxt_rx_queue *rxq; 2896 2897 if (is_bnxt_in_error(bp)) 2898 return; 2899 2900 rxq = dev->data->rx_queues[queue_id]; 2901 2902 qinfo->mp = rxq->mb_pool; 2903 qinfo->scattered_rx = dev->data->scattered_rx; 2904 qinfo->nb_desc = rxq->nb_rx_desc; 2905 2906 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; 2907 qinfo->conf.rx_drop_en = rxq->drop_en; 2908 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start; 2909 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads; 2910 } 2911 2912 static void 2913 bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 2914 struct rte_eth_txq_info *qinfo) 2915 { 2916 struct bnxt *bp = dev->data->dev_private; 2917 struct bnxt_tx_queue *txq; 2918 2919 if (is_bnxt_in_error(bp)) 2920 return; 2921 2922 txq = dev->data->tx_queues[queue_id]; 2923 2924 qinfo->nb_desc = txq->nb_tx_desc; 2925 2926 qinfo->conf.tx_thresh.pthresh = txq->pthresh; 2927 qinfo->conf.tx_thresh.hthresh = txq->hthresh; 2928 qinfo->conf.tx_thresh.wthresh = txq->wthresh; 2929 2930 qinfo->conf.tx_free_thresh = txq->tx_free_thresh; 2931 qinfo->conf.tx_rs_thresh = 0; 2932 qinfo->conf.tx_deferred_start = txq->tx_deferred_start; 2933 qinfo->conf.offloads = txq->offloads; 2934 } 2935 2936 static const struct { 2937 eth_rx_burst_t pkt_burst; 2938 const char *info; 2939 } bnxt_rx_burst_info[] = { 2940 {bnxt_recv_pkts, "Scalar"}, 2941 #if defined(RTE_ARCH_X86) 2942 {bnxt_recv_pkts_vec, "Vector SSE"}, 2943 #endif 2944 #if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT) 2945 {bnxt_recv_pkts_vec_avx2, "Vector AVX2"}, 2946 #endif 2947 #if defined(RTE_ARCH_ARM64) 2948 {bnxt_recv_pkts_vec, "Vector Neon"}, 2949 #endif 2950 }; 2951 2952 static int 2953 bnxt_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 2954 struct rte_eth_burst_mode *mode) 2955 { 2956 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; 2957 size_t i; 2958 2959 for (i = 0; i < RTE_DIM(bnxt_rx_burst_info); i++) { 2960 if (pkt_burst == bnxt_rx_burst_info[i].pkt_burst) { 2961 snprintf(mode->info, sizeof(mode->info), "%s", 2962 bnxt_rx_burst_info[i].info); 2963 return 0; 2964 } 2965 } 2966 2967 return -EINVAL; 2968 } 2969 2970 static const struct { 2971 eth_tx_burst_t pkt_burst; 2972 const char *info; 2973 } bnxt_tx_burst_info[] = { 2974 {bnxt_xmit_pkts, "Scalar"}, 2975 #if defined(RTE_ARCH_X86) 2976 {bnxt_xmit_pkts_vec, "Vector SSE"}, 2977 #endif 2978 #if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT) 2979 {bnxt_xmit_pkts_vec_avx2, "Vector AVX2"}, 2980 #endif 2981 #if defined(RTE_ARCH_ARM64) 2982 {bnxt_xmit_pkts_vec, "Vector Neon"}, 2983 #endif 2984 }; 2985 2986 static int 2987 bnxt_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 2988 struct rte_eth_burst_mode *mode) 2989 { 
2990 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst; 2991 size_t i; 2992 2993 for (i = 0; i < RTE_DIM(bnxt_tx_burst_info); i++) { 2994 if (pkt_burst == bnxt_tx_burst_info[i].pkt_burst) { 2995 snprintf(mode->info, sizeof(mode->info), "%s", 2996 bnxt_tx_burst_info[i].info); 2997 return 0; 2998 } 2999 } 3000 3001 return -EINVAL; 3002 } 3003 3004 int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) 3005 { 3006 uint32_t overhead = BNXT_MAX_PKT_LEN - BNXT_MAX_MTU; 3007 struct bnxt *bp = eth_dev->data->dev_private; 3008 uint32_t new_pkt_size; 3009 uint32_t rc; 3010 uint32_t i; 3011 3012 rc = is_bnxt_in_error(bp); 3013 if (rc) 3014 return rc; 3015 3016 /* Exit if receive queues are not configured yet */ 3017 if (!eth_dev->data->nb_rx_queues) 3018 return rc; 3019 3020 new_pkt_size = new_mtu + overhead; 3021 3022 /* 3023 * Disallow any MTU change that would require scattered receive support 3024 * if it is not already enabled. 3025 */ 3026 if (eth_dev->data->dev_started && 3027 !eth_dev->data->scattered_rx && 3028 (new_pkt_size > 3029 eth_dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { 3030 PMD_DRV_LOG(ERR, 3031 "MTU change would require scattered rx support. "); 3032 PMD_DRV_LOG(ERR, "Stop port before changing MTU.\n"); 3033 return -EINVAL; 3034 } 3035 3036 if (new_mtu > RTE_ETHER_MTU) 3037 bp->flags |= BNXT_FLAG_JUMBO; 3038 else 3039 bp->flags &= ~BNXT_FLAG_JUMBO; 3040 3041 /* Is there a change in mtu setting? */ 3042 if (eth_dev->data->mtu == new_mtu) 3043 return rc; 3044 3045 for (i = 0; i < bp->nr_vnics; i++) { 3046 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 3047 uint16_t size = 0; 3048 3049 vnic->mru = BNXT_VNIC_MRU(new_mtu); 3050 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 3051 if (rc) 3052 break; 3053 3054 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool); 3055 size -= RTE_PKTMBUF_HEADROOM; 3056 3057 if (size < new_mtu) { 3058 rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); 3059 if (rc) 3060 return rc; 3061 } 3062 } 3063 3064 if (bnxt_hwrm_config_host_mtu(bp)) 3065 PMD_DRV_LOG(WARNING, "Failed to configure host MTU\n"); 3066 3067 PMD_DRV_LOG(INFO, "New MTU is %d\n", new_mtu); 3068 3069 return rc; 3070 } 3071 3072 static int 3073 bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on) 3074 { 3075 struct bnxt *bp = dev->data->dev_private; 3076 uint16_t vlan = bp->vlan; 3077 int rc; 3078 3079 rc = is_bnxt_in_error(bp); 3080 if (rc) 3081 return rc; 3082 3083 if (!BNXT_SINGLE_PF(bp)) { 3084 PMD_DRV_LOG(ERR, "PVID cannot be modified on VF or on shared PF\n"); 3085 return -ENOTSUP; 3086 } 3087 bp->vlan = on ? 
pvid : 0; 3088 3089 rc = bnxt_hwrm_set_default_vlan(bp, 0, 0); 3090 if (rc) 3091 bp->vlan = vlan; 3092 return rc; 3093 } 3094 3095 static int 3096 bnxt_dev_led_on_op(struct rte_eth_dev *dev) 3097 { 3098 struct bnxt *bp = dev->data->dev_private; 3099 int rc; 3100 3101 rc = is_bnxt_in_error(bp); 3102 if (rc) 3103 return rc; 3104 3105 return bnxt_hwrm_port_led_cfg(bp, true); 3106 } 3107 3108 static int 3109 bnxt_dev_led_off_op(struct rte_eth_dev *dev) 3110 { 3111 struct bnxt *bp = dev->data->dev_private; 3112 int rc; 3113 3114 rc = is_bnxt_in_error(bp); 3115 if (rc) 3116 return rc; 3117 3118 return bnxt_hwrm_port_led_cfg(bp, false); 3119 } 3120 3121 static uint32_t 3122 bnxt_rx_queue_count_op(void *rx_queue) 3123 { 3124 struct bnxt *bp; 3125 struct bnxt_cp_ring_info *cpr; 3126 uint32_t desc = 0, raw_cons, cp_ring_size; 3127 struct bnxt_rx_queue *rxq; 3128 struct rx_pkt_cmpl *rxcmp; 3129 int rc; 3130 3131 rxq = rx_queue; 3132 bp = rxq->bp; 3133 3134 rc = is_bnxt_in_error(bp); 3135 if (rc) 3136 return rc; 3137 3138 cpr = rxq->cp_ring; 3139 raw_cons = cpr->cp_raw_cons; 3140 cp_ring_size = cpr->cp_ring_struct->ring_size; 3141 3142 while (1) { 3143 uint32_t agg_cnt, cons, cmpl_type; 3144 3145 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 3146 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 3147 3148 if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size)) 3149 break; 3150 3151 cmpl_type = CMP_TYPE(rxcmp); 3152 3153 switch (cmpl_type) { 3154 case CMPL_BASE_TYPE_RX_L2: 3155 case CMPL_BASE_TYPE_RX_L2_V2: 3156 agg_cnt = BNXT_RX_L2_AGG_BUFS(rxcmp); 3157 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3158 desc++; 3159 break; 3160 3161 case CMPL_BASE_TYPE_RX_TPA_END: 3162 if (BNXT_CHIP_P5(rxq->bp)) { 3163 struct rx_tpa_v2_end_cmpl_hi *p5_tpa_end; 3164 3165 p5_tpa_end = (void *)rxcmp; 3166 agg_cnt = BNXT_TPA_END_AGG_BUFS_TH(p5_tpa_end); 3167 } else { 3168 struct rx_tpa_end_cmpl *tpa_end; 3169 3170 tpa_end = (void *)rxcmp; 3171 agg_cnt = BNXT_TPA_END_AGG_BUFS(tpa_end); 3172 } 3173 3174 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3175 desc++; 3176 break; 3177 3178 default: 3179 raw_cons += CMP_LEN(cmpl_type); 3180 } 3181 } 3182 3183 return desc; 3184 } 3185 3186 static int 3187 bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset) 3188 { 3189 struct bnxt_rx_queue *rxq = rx_queue; 3190 struct bnxt_cp_ring_info *cpr; 3191 struct bnxt_rx_ring_info *rxr; 3192 uint32_t desc, raw_cons, cp_ring_size; 3193 struct bnxt *bp = rxq->bp; 3194 struct rx_pkt_cmpl *rxcmp; 3195 int rc; 3196 3197 rc = is_bnxt_in_error(bp); 3198 if (rc) 3199 return rc; 3200 3201 if (offset >= rxq->nb_rx_desc) 3202 return -EINVAL; 3203 3204 rxr = rxq->rx_ring; 3205 cpr = rxq->cp_ring; 3206 cp_ring_size = cpr->cp_ring_struct->ring_size; 3207 3208 /* 3209 * For the vector receive case, the completion at the requested 3210 * offset can be indexed directly. 3211 */ 3212 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) 3213 if (bp->flags & BNXT_FLAG_RX_VECTOR_PKT_MODE) { 3214 struct rx_pkt_cmpl *rxcmp; 3215 uint32_t cons; 3216 3217 /* Check status of completion descriptor. */ 3218 raw_cons = cpr->cp_raw_cons + 3219 offset * CMP_LEN(CMPL_BASE_TYPE_RX_L2); 3220 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 3221 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 3222 3223 if (bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size)) 3224 return RTE_ETH_RX_DESC_DONE; 3225 3226 /* Check whether rx desc has an mbuf attached. 
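 * The descriptor is reported unavailable while it falls inside the rxrearm
 * window, i.e. it has been consumed but not yet refilled with a new mbuf.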
*/ 3227 cons = RING_CMP(rxr->rx_ring_struct, raw_cons / 2); 3228 if (cons >= rxq->rxrearm_start && 3229 cons < rxq->rxrearm_start + rxq->rxrearm_nb) { 3230 return RTE_ETH_RX_DESC_UNAVAIL; 3231 } 3232 3233 return RTE_ETH_RX_DESC_AVAIL; 3234 } 3235 #endif 3236 3237 /* 3238 * For the non-vector receive case, scan the completion ring to 3239 * locate the completion descriptor for the requested offset. 3240 */ 3241 raw_cons = cpr->cp_raw_cons; 3242 desc = 0; 3243 while (1) { 3244 uint32_t agg_cnt, cons, cmpl_type; 3245 3246 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 3247 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 3248 3249 if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size)) 3250 break; 3251 3252 cmpl_type = CMP_TYPE(rxcmp); 3253 3254 switch (cmpl_type) { 3255 case CMPL_BASE_TYPE_RX_L2: 3256 case CMPL_BASE_TYPE_RX_L2_V2: 3257 if (desc == offset) { 3258 cons = rxcmp->opaque; 3259 if (rxr->rx_buf_ring[cons]) 3260 return RTE_ETH_RX_DESC_DONE; 3261 else 3262 return RTE_ETH_RX_DESC_UNAVAIL; 3263 } 3264 agg_cnt = BNXT_RX_L2_AGG_BUFS(rxcmp); 3265 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3266 desc++; 3267 break; 3268 3269 case CMPL_BASE_TYPE_RX_TPA_END: 3270 if (desc == offset) 3271 return RTE_ETH_RX_DESC_DONE; 3272 3273 if (BNXT_CHIP_P5(rxq->bp)) { 3274 struct rx_tpa_v2_end_cmpl_hi *p5_tpa_end; 3275 3276 p5_tpa_end = (void *)rxcmp; 3277 agg_cnt = BNXT_TPA_END_AGG_BUFS_TH(p5_tpa_end); 3278 } else { 3279 struct rx_tpa_end_cmpl *tpa_end; 3280 3281 tpa_end = (void *)rxcmp; 3282 agg_cnt = BNXT_TPA_END_AGG_BUFS(tpa_end); 3283 } 3284 3285 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3286 desc++; 3287 break; 3288 3289 default: 3290 raw_cons += CMP_LEN(cmpl_type); 3291 } 3292 } 3293 3294 return RTE_ETH_RX_DESC_AVAIL; 3295 } 3296 3297 static int 3298 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset) 3299 { 3300 struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue; 3301 struct bnxt_cp_ring_info *cpr = txq->cp_ring; 3302 uint32_t ring_mask, raw_cons, nb_tx_pkts = 0; 3303 struct cmpl_base *cp_desc_ring; 3304 int rc; 3305 3306 rc = is_bnxt_in_error(txq->bp); 3307 if (rc) 3308 return rc; 3309 3310 if (offset >= txq->nb_tx_desc) 3311 return -EINVAL; 3312 3313 /* Return "desc done" if descriptor is available for use. */ 3314 if (bnxt_tx_bds_in_hw(txq) <= offset) 3315 return RTE_ETH_TX_DESC_DONE; 3316 3317 raw_cons = cpr->cp_raw_cons; 3318 cp_desc_ring = cpr->cp_desc_ring; 3319 ring_mask = cpr->cp_ring_struct->ring_mask; 3320 3321 /* Check to see if hw has posted a completion for the descriptor. */ 3322 while (1) { 3323 struct tx_cmpl *txcmp; 3324 uint32_t cons; 3325 3326 cons = RING_CMPL(ring_mask, raw_cons); 3327 txcmp = (struct tx_cmpl *)&cp_desc_ring[cons]; 3328 3329 if (!bnxt_cpr_cmp_valid(txcmp, raw_cons, ring_mask + 1)) 3330 break; 3331 3332 if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2) 3333 nb_tx_pkts += rte_le_to_cpu_32(txcmp->opaque); 3334 3335 if (nb_tx_pkts > offset) 3336 return RTE_ETH_TX_DESC_DONE; 3337 3338 raw_cons = NEXT_RAW_CMP(raw_cons); 3339 } 3340 3341 /* Descriptor is pending transmit, not yet completed by hardware. 
*/ 3342 return RTE_ETH_TX_DESC_FULL; 3343 } 3344 3345 int 3346 bnxt_flow_ops_get_op(struct rte_eth_dev *dev, 3347 const struct rte_flow_ops **ops) 3348 { 3349 struct bnxt *bp = dev->data->dev_private; 3350 int ret = 0; 3351 3352 if (!bp) 3353 return -EIO; 3354 3355 if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) { 3356 struct bnxt_representor *vfr = dev->data->dev_private; 3357 bp = vfr->parent_dev->data->dev_private; 3358 /* parent is deleted while children are still valid */ 3359 if (!bp) { 3360 PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR Error\n", 3361 dev->data->port_id); 3362 return -EIO; 3363 } 3364 } 3365 3366 ret = is_bnxt_in_error(bp); 3367 if (ret) 3368 return ret; 3369 3370 /* PMD supports thread-safe flow operations. rte_flow API 3371 * functions can avoid mutex for multi-thread safety. 3372 */ 3373 dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE; 3374 3375 if (BNXT_TRUFLOW_EN(bp)) 3376 *ops = &bnxt_ulp_rte_flow_ops; 3377 else 3378 *ops = &bnxt_flow_ops; 3379 3380 return ret; 3381 } 3382 3383 static const uint32_t * 3384 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev) 3385 { 3386 static const uint32_t ptypes[] = { 3387 RTE_PTYPE_L2_ETHER_VLAN, 3388 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 3389 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 3390 RTE_PTYPE_L4_ICMP, 3391 RTE_PTYPE_L4_TCP, 3392 RTE_PTYPE_L4_UDP, 3393 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, 3394 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, 3395 RTE_PTYPE_INNER_L4_ICMP, 3396 RTE_PTYPE_INNER_L4_TCP, 3397 RTE_PTYPE_INNER_L4_UDP, 3398 RTE_PTYPE_UNKNOWN 3399 }; 3400 3401 if (!dev->rx_pkt_burst) 3402 return NULL; 3403 3404 return ptypes; 3405 } 3406 3407 static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count, 3408 int reg_win) 3409 { 3410 uint32_t reg_base = *reg_arr & 0xfffff000; 3411 uint32_t win_off; 3412 int i; 3413 3414 for (i = 0; i < count; i++) { 3415 if ((reg_arr[i] & 0xfffff000) != reg_base) 3416 return -ERANGE; 3417 } 3418 win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4; 3419 rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off); 3420 return 0; 3421 } 3422 3423 static int bnxt_map_ptp_regs(struct bnxt *bp) 3424 { 3425 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3426 uint32_t *reg_arr; 3427 int rc, i; 3428 3429 reg_arr = ptp->rx_regs; 3430 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5); 3431 if (rc) 3432 return rc; 3433 3434 reg_arr = ptp->tx_regs; 3435 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6); 3436 if (rc) 3437 return rc; 3438 3439 for (i = 0; i < BNXT_PTP_RX_REGS; i++) 3440 ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff); 3441 3442 for (i = 0; i < BNXT_PTP_TX_REGS; i++) 3443 ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff); 3444 3445 return 0; 3446 } 3447 3448 static void bnxt_unmap_ptp_regs(struct bnxt *bp) 3449 { 3450 rte_write32(0, (uint8_t *)bp->bar0 + 3451 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16); 3452 rte_write32(0, (uint8_t *)bp->bar0 + 3453 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20); 3454 } 3455 3456 static uint64_t bnxt_cc_read(struct bnxt *bp) 3457 { 3458 uint64_t ns; 3459 3460 ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3461 BNXT_GRCPF_REG_SYNC_TIME)); 3462 ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3463 BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32; 3464 return ns; 3465 } 3466 3467 static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts) 3468 { 3469 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3470 uint32_t fifo; 3471 3472 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3473 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 3474 if (fifo & 
BNXT_PTP_TX_FIFO_EMPTY) 3475 return -EAGAIN; 3476 3477 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3478 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 3479 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3480 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L])); 3481 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3482 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32; 3483 rte_read32((uint8_t *)bp->bar0 + ptp->tx_mapped_regs[BNXT_PTP_TX_SEQ]); 3484 3485 return 0; 3486 } 3487 3488 static int bnxt_clr_rx_ts(struct bnxt *bp, uint64_t *last_ts) 3489 { 3490 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3491 struct bnxt_pf_info *pf = bp->pf; 3492 uint16_t port_id; 3493 int i = 0; 3494 uint32_t fifo; 3495 3496 if (!ptp || (bp->flags & BNXT_FLAG_CHIP_P5)) 3497 return -EINVAL; 3498 3499 port_id = pf->port_id; 3500 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3501 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3502 while ((fifo & BNXT_PTP_RX_FIFO_PENDING) && (i < BNXT_PTP_RX_PND_CNT)) { 3503 rte_write32(1 << port_id, (uint8_t *)bp->bar0 + 3504 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]); 3505 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3506 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3507 *last_ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3508 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L])); 3509 *last_ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3510 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32; 3511 i++; 3512 } 3513 3514 if (i >= BNXT_PTP_RX_PND_CNT) 3515 return -EBUSY; 3516 3517 return 0; 3518 } 3519 3520 static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts) 3521 { 3522 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3523 struct bnxt_pf_info *pf = bp->pf; 3524 uint16_t port_id; 3525 uint32_t fifo; 3526 3527 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3528 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3529 if (!(fifo & BNXT_PTP_RX_FIFO_PENDING)) 3530 return -EAGAIN; 3531 3532 port_id = pf->port_id; 3533 rte_write32(1 << port_id, (uint8_t *)bp->bar0 + 3534 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]); 3535 3536 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3537 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3538 if (fifo & BNXT_PTP_RX_FIFO_PENDING) 3539 return bnxt_clr_rx_ts(bp, ts); 3540 3541 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3542 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L])); 3543 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3544 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32; 3545 3546 return 0; 3547 } 3548 3549 static int 3550 bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 3551 { 3552 uint64_t ns; 3553 struct bnxt *bp = dev->data->dev_private; 3554 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3555 3556 if (!ptp) 3557 return -ENOTSUP; 3558 3559 ns = rte_timespec_to_ns(ts); 3560 /* Set the timecounters to a new value. 
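 * Only the software timecounters are updated here; the underlying
 * free-running hardware counter is left untouched.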
*/ 3561 ptp->tc.nsec = ns; 3562 ptp->tx_tstamp_tc.nsec = ns; 3563 ptp->rx_tstamp_tc.nsec = ns; 3564 3565 return 0; 3566 } 3567 3568 static int 3569 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 3570 { 3571 struct bnxt *bp = dev->data->dev_private; 3572 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3573 uint64_t ns, systime_cycles = 0; 3574 int rc = 0; 3575 3576 if (!ptp) 3577 return -ENOTSUP; 3578 3579 if (BNXT_CHIP_P5(bp)) 3580 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, 3581 &systime_cycles); 3582 else 3583 systime_cycles = bnxt_cc_read(bp); 3584 3585 ns = rte_timecounter_update(&ptp->tc, systime_cycles); 3586 *ts = rte_ns_to_timespec(ns); 3587 3588 return rc; 3589 } 3590 static int 3591 bnxt_timesync_enable(struct rte_eth_dev *dev) 3592 { 3593 struct bnxt *bp = dev->data->dev_private; 3594 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3595 uint32_t shift = 0; 3596 int rc; 3597 3598 if (!ptp) 3599 return -ENOTSUP; 3600 3601 ptp->rx_filter = 1; 3602 ptp->tx_tstamp_en = 1; 3603 ptp->rxctl = BNXT_PTP_MSG_EVENTS; 3604 3605 rc = bnxt_hwrm_ptp_cfg(bp); 3606 if (rc) 3607 return rc; 3608 3609 memset(&ptp->tc, 0, sizeof(struct rte_timecounter)); 3610 memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 3611 memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 3612 3613 ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3614 ptp->tc.cc_shift = shift; 3615 ptp->tc.nsec_mask = (1ULL << shift) - 1; 3616 3617 ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3618 ptp->rx_tstamp_tc.cc_shift = shift; 3619 ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 3620 3621 ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3622 ptp->tx_tstamp_tc.cc_shift = shift; 3623 ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 3624 3625 if (!BNXT_CHIP_P5(bp)) 3626 bnxt_map_ptp_regs(bp); 3627 else 3628 rc = bnxt_ptp_start(bp); 3629 3630 return rc; 3631 } 3632 3633 static int 3634 bnxt_timesync_disable(struct rte_eth_dev *dev) 3635 { 3636 struct bnxt *bp = dev->data->dev_private; 3637 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3638 3639 if (!ptp) 3640 return -ENOTSUP; 3641 3642 ptp->rx_filter = 0; 3643 ptp->tx_tstamp_en = 0; 3644 ptp->rxctl = 0; 3645 3646 bnxt_hwrm_ptp_cfg(bp); 3647 3648 if (!BNXT_CHIP_P5(bp)) 3649 bnxt_unmap_ptp_regs(bp); 3650 else 3651 bnxt_ptp_stop(bp); 3652 3653 return 0; 3654 } 3655 3656 static int 3657 bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 3658 struct timespec *timestamp, 3659 uint32_t flags __rte_unused) 3660 { 3661 struct bnxt *bp = dev->data->dev_private; 3662 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3663 uint64_t rx_tstamp_cycles = 0; 3664 uint64_t ns; 3665 3666 if (!ptp) 3667 return -ENOTSUP; 3668 3669 if (BNXT_CHIP_P5(bp)) 3670 rx_tstamp_cycles = ptp->rx_timestamp; 3671 else 3672 bnxt_get_rx_ts(bp, &rx_tstamp_cycles); 3673 3674 ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles); 3675 *timestamp = rte_ns_to_timespec(ns); 3676 return 0; 3677 } 3678 3679 static int 3680 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 3681 struct timespec *timestamp) 3682 { 3683 struct bnxt *bp = dev->data->dev_private; 3684 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3685 uint64_t tx_tstamp_cycles = 0; 3686 uint64_t ns; 3687 int rc = 0; 3688 3689 if (!ptp) 3690 return -ENOTSUP; 3691 3692 if (BNXT_CHIP_P5(bp)) 3693 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_PATH_TX, 3694 &tx_tstamp_cycles); 3695 else 3696 rc = bnxt_get_tx_ts(bp, &tx_tstamp_cycles); 3697 3698 ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles); 3699 
*timestamp = rte_ns_to_timespec(ns); 3700 3701 return rc; 3702 } 3703 3704 static int 3705 bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 3706 { 3707 struct bnxt *bp = dev->data->dev_private; 3708 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3709 3710 if (!ptp) 3711 return -ENOTSUP; 3712 3713 ptp->tc.nsec += delta; 3714 ptp->tx_tstamp_tc.nsec += delta; 3715 ptp->rx_tstamp_tc.nsec += delta; 3716 3717 return 0; 3718 } 3719 3720 static int 3721 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev) 3722 { 3723 struct bnxt *bp = dev->data->dev_private; 3724 int rc; 3725 uint32_t dir_entries; 3726 uint32_t entry_length; 3727 3728 rc = is_bnxt_in_error(bp); 3729 if (rc) 3730 return rc; 3731 3732 PMD_DRV_LOG(INFO, PCI_PRI_FMT "\n", 3733 bp->pdev->addr.domain, bp->pdev->addr.bus, 3734 bp->pdev->addr.devid, bp->pdev->addr.function); 3735 3736 rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length); 3737 if (rc != 0) 3738 return rc; 3739 3740 return dir_entries * entry_length; 3741 } 3742 3743 static int 3744 bnxt_get_eeprom_op(struct rte_eth_dev *dev, 3745 struct rte_dev_eeprom_info *in_eeprom) 3746 { 3747 struct bnxt *bp = dev->data->dev_private; 3748 uint32_t index; 3749 uint32_t offset; 3750 int rc; 3751 3752 rc = is_bnxt_in_error(bp); 3753 if (rc) 3754 return rc; 3755 3756 PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n", 3757 bp->pdev->addr.domain, bp->pdev->addr.bus, 3758 bp->pdev->addr.devid, bp->pdev->addr.function, 3759 in_eeprom->offset, in_eeprom->length); 3760 3761 if (in_eeprom->offset == 0) /* special offset value to get directory */ 3762 return bnxt_get_nvram_directory(bp, in_eeprom->length, 3763 in_eeprom->data); 3764 3765 index = in_eeprom->offset >> 24; 3766 offset = in_eeprom->offset & 0xffffff; 3767 3768 if (index != 0) 3769 return bnxt_hwrm_get_nvram_item(bp, index - 1, offset, 3770 in_eeprom->length, in_eeprom->data); 3771 3772 return 0; 3773 } 3774 3775 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type) 3776 { 3777 switch (dir_type) { 3778 case BNX_DIR_TYPE_CHIMP_PATCH: 3779 case BNX_DIR_TYPE_BOOTCODE: 3780 case BNX_DIR_TYPE_BOOTCODE_2: 3781 case BNX_DIR_TYPE_APE_FW: 3782 case BNX_DIR_TYPE_APE_PATCH: 3783 case BNX_DIR_TYPE_KONG_FW: 3784 case BNX_DIR_TYPE_KONG_PATCH: 3785 case BNX_DIR_TYPE_BONO_FW: 3786 case BNX_DIR_TYPE_BONO_PATCH: 3787 /* FALLTHROUGH */ 3788 return true; 3789 } 3790 3791 return false; 3792 } 3793 3794 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type) 3795 { 3796 switch (dir_type) { 3797 case BNX_DIR_TYPE_AVS: 3798 case BNX_DIR_TYPE_EXP_ROM_MBA: 3799 case BNX_DIR_TYPE_PCIE: 3800 case BNX_DIR_TYPE_TSCF_UCODE: 3801 case BNX_DIR_TYPE_EXT_PHY: 3802 case BNX_DIR_TYPE_CCM: 3803 case BNX_DIR_TYPE_ISCSI_BOOT: 3804 case BNX_DIR_TYPE_ISCSI_BOOT_IPV6: 3805 case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6: 3806 /* FALLTHROUGH */ 3807 return true; 3808 } 3809 3810 return false; 3811 } 3812 3813 static bool bnxt_dir_type_is_executable(uint16_t dir_type) 3814 { 3815 return bnxt_dir_type_is_ape_bin_format(dir_type) || 3816 bnxt_dir_type_is_other_exec_format(dir_type); 3817 } 3818 3819 static int 3820 bnxt_set_eeprom_op(struct rte_eth_dev *dev, 3821 struct rte_dev_eeprom_info *in_eeprom) 3822 { 3823 struct bnxt *bp = dev->data->dev_private; 3824 uint8_t index, dir_op; 3825 uint16_t type, ext, ordinal, attr; 3826 int rc; 3827 3828 rc = is_bnxt_in_error(bp); 3829 if (rc) 3830 return rc; 3831 3832 PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n", 3833 bp->pdev->addr.domain, bp->pdev->addr.bus, 3834 
bp->pdev->addr.devid, bp->pdev->addr.function, 3835 in_eeprom->offset, in_eeprom->length); 3836 3837 if (!BNXT_PF(bp)) { 3838 PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n"); 3839 return -EINVAL; 3840 } 3841 3842 type = in_eeprom->magic >> 16; 3843 3844 if (type == 0xffff) { /* special value for directory operations */ 3845 index = in_eeprom->magic & 0xff; 3846 dir_op = in_eeprom->magic >> 8; 3847 if (index == 0) 3848 return -EINVAL; 3849 switch (dir_op) { 3850 case 0x0e: /* erase */ 3851 if (in_eeprom->offset != ~in_eeprom->magic) 3852 return -EINVAL; 3853 return bnxt_hwrm_erase_nvram_directory(bp, index - 1); 3854 default: 3855 return -EINVAL; 3856 } 3857 } 3858 3859 /* Create or re-write an NVM item: */ 3860 if (bnxt_dir_type_is_executable(type) == true) 3861 return -EOPNOTSUPP; 3862 ext = in_eeprom->magic & 0xffff; 3863 ordinal = in_eeprom->offset >> 16; 3864 attr = in_eeprom->offset & 0xffff; 3865 3866 return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr, 3867 in_eeprom->data, in_eeprom->length); 3868 } 3869 3870 static int bnxt_get_module_info(struct rte_eth_dev *dev, 3871 struct rte_eth_dev_module_info *modinfo) 3872 { 3873 uint8_t module_info[SFF_DIAG_SUPPORT_OFFSET + 1]; 3874 struct bnxt *bp = dev->data->dev_private; 3875 int rc; 3876 3877 /* No point in going further if phy status indicates 3878 * module is not inserted or if it is powered down or 3879 * if it is of type 10GBase-T 3880 */ 3881 if (bp->link_info->module_status > 3882 HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG) { 3883 PMD_DRV_LOG(NOTICE, "Port %u : Module is not inserted or is powered down\n", 3884 dev->data->port_id); 3885 return -ENOTSUP; 3886 } 3887 3888 /* This feature is not supported in older firmware versions */ 3889 if (bp->hwrm_spec_code < 0x10202) { 3890 PMD_DRV_LOG(NOTICE, "Port %u : Feature is not supported in older firmware\n", 3891 dev->data->port_id); 3892 return -ENOTSUP; 3893 } 3894 3895 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 3896 SFF_DIAG_SUPPORT_OFFSET + 1, 3897 module_info); 3898 3899 if (rc) 3900 return rc; 3901 3902 switch (module_info[0]) { 3903 case SFF_MODULE_ID_SFP: 3904 modinfo->type = RTE_ETH_MODULE_SFF_8472; 3905 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; 3906 if (module_info[SFF_DIAG_SUPPORT_OFFSET] == 0) 3907 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_LEN; 3908 break; 3909 case SFF_MODULE_ID_QSFP: 3910 case SFF_MODULE_ID_QSFP_PLUS: 3911 modinfo->type = RTE_ETH_MODULE_SFF_8436; 3912 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_LEN; 3913 break; 3914 case SFF_MODULE_ID_QSFP28: 3915 modinfo->type = RTE_ETH_MODULE_SFF_8636; 3916 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; 3917 if (module_info[SFF8636_FLATMEM_OFFSET] & SFF8636_FLATMEM_MASK) 3918 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_LEN; 3919 break; 3920 default: 3921 PMD_DRV_LOG(NOTICE, "Port %u : Unsupported module\n", dev->data->port_id); 3922 return -ENOTSUP; 3923 } 3924 3925 PMD_DRV_LOG(INFO, "Port %u : modinfo->type = %d modinfo->eeprom_len = %d\n", 3926 dev->data->port_id, modinfo->type, modinfo->eeprom_len); 3927 3928 return 0; 3929 } 3930 3931 static int bnxt_get_module_eeprom(struct rte_eth_dev *dev, 3932 struct rte_dev_eeprom_info *info) 3933 { 3934 uint8_t pg_addr[5] = { I2C_DEV_ADDR_A0, I2C_DEV_ADDR_A0 }; 3935 uint32_t offset = info->offset, length = info->length; 3936 uint8_t module_info[SFF_DIAG_SUPPORT_OFFSET + 1]; 3937 struct bnxt *bp = dev->data->dev_private; 3938 uint8_t *data = info->data; 3939 uint8_t page = offset >> 7; 3940 uint8_t 
max_pages = 2; 3941 uint8_t opt_pages; 3942 int rc; 3943 3944 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 3945 SFF_DIAG_SUPPORT_OFFSET + 1, 3946 module_info); 3947 if (rc) 3948 return rc; 3949 3950 switch (module_info[0]) { 3951 case SFF_MODULE_ID_SFP: 3952 module_info[SFF_DIAG_SUPPORT_OFFSET] = 0; 3953 if (module_info[SFF_DIAG_SUPPORT_OFFSET]) { 3954 pg_addr[2] = I2C_DEV_ADDR_A2; 3955 pg_addr[3] = I2C_DEV_ADDR_A2; 3956 max_pages = 4; 3957 } 3958 break; 3959 case SFF_MODULE_ID_QSFP28: 3960 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 3961 SFF8636_OPT_PAGES_OFFSET, 3962 1, &opt_pages); 3963 if (rc) 3964 return rc; 3965 3966 if (opt_pages & SFF8636_PAGE1_MASK) { 3967 pg_addr[2] = I2C_DEV_ADDR_A0; 3968 max_pages = 3; 3969 } 3970 if (opt_pages & SFF8636_PAGE2_MASK) { 3971 pg_addr[3] = I2C_DEV_ADDR_A0; 3972 max_pages = 4; 3973 } 3974 if (~module_info[SFF8636_FLATMEM_OFFSET] & SFF8636_FLATMEM_MASK) { 3975 pg_addr[4] = I2C_DEV_ADDR_A0; 3976 max_pages = 5; 3977 } 3978 break; 3979 default: 3980 break; 3981 } 3982 3983 memset(data, 0, length); 3984 3985 offset &= 0xff; 3986 while (length && page < max_pages) { 3987 uint8_t raw_page = page ? page - 1 : 0; 3988 uint16_t chunk; 3989 3990 if (pg_addr[page] == I2C_DEV_ADDR_A2) 3991 raw_page = 0; 3992 else if (page) 3993 offset |= 0x80; 3994 chunk = RTE_MIN(length, 256 - offset); 3995 3996 if (pg_addr[page]) { 3997 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, pg_addr[page], 3998 raw_page, offset, 3999 chunk, data); 4000 if (rc) 4001 return rc; 4002 } 4003 4004 data += chunk; 4005 length -= chunk; 4006 offset = 0; 4007 page += 1 + (chunk > 128); 4008 } 4009 4010 return length ? -EINVAL : 0; 4011 } 4012 4013 /* 4014 * Initialization 4015 */ 4016 4017 static const struct eth_dev_ops bnxt_dev_ops = { 4018 .dev_infos_get = bnxt_dev_info_get_op, 4019 .dev_close = bnxt_dev_close_op, 4020 .dev_configure = bnxt_dev_configure_op, 4021 .dev_start = bnxt_dev_start_op, 4022 .dev_stop = bnxt_dev_stop_op, 4023 .dev_set_link_up = bnxt_dev_set_link_up_op, 4024 .dev_set_link_down = bnxt_dev_set_link_down_op, 4025 .stats_get = bnxt_stats_get_op, 4026 .stats_reset = bnxt_stats_reset_op, 4027 .rx_queue_setup = bnxt_rx_queue_setup_op, 4028 .rx_queue_release = bnxt_rx_queue_release_op, 4029 .tx_queue_setup = bnxt_tx_queue_setup_op, 4030 .tx_queue_release = bnxt_tx_queue_release_op, 4031 .rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op, 4032 .rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op, 4033 .reta_update = bnxt_reta_update_op, 4034 .reta_query = bnxt_reta_query_op, 4035 .rss_hash_update = bnxt_rss_hash_update_op, 4036 .rss_hash_conf_get = bnxt_rss_hash_conf_get_op, 4037 .link_update = bnxt_link_update_op, 4038 .promiscuous_enable = bnxt_promiscuous_enable_op, 4039 .promiscuous_disable = bnxt_promiscuous_disable_op, 4040 .allmulticast_enable = bnxt_allmulticast_enable_op, 4041 .allmulticast_disable = bnxt_allmulticast_disable_op, 4042 .mac_addr_add = bnxt_mac_addr_add_op, 4043 .mac_addr_remove = bnxt_mac_addr_remove_op, 4044 .flow_ctrl_get = bnxt_flow_ctrl_get_op, 4045 .flow_ctrl_set = bnxt_flow_ctrl_set_op, 4046 .udp_tunnel_port_add = bnxt_udp_tunnel_port_add_op, 4047 .udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op, 4048 .vlan_filter_set = bnxt_vlan_filter_set_op, 4049 .vlan_offload_set = bnxt_vlan_offload_set_op, 4050 .vlan_tpid_set = bnxt_vlan_tpid_set_op, 4051 .vlan_pvid_set = bnxt_vlan_pvid_set_op, 4052 .mtu_set = bnxt_mtu_set_op, 4053 .mac_addr_set = bnxt_set_default_mac_addr_op, 4054 .xstats_get = 
bnxt_dev_xstats_get_op, 4055 .xstats_get_names = bnxt_dev_xstats_get_names_op, 4056 .xstats_reset = bnxt_dev_xstats_reset_op, 4057 .fw_version_get = bnxt_fw_version_get, 4058 .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op, 4059 .rxq_info_get = bnxt_rxq_info_get_op, 4060 .txq_info_get = bnxt_txq_info_get_op, 4061 .rx_burst_mode_get = bnxt_rx_burst_mode_get, 4062 .tx_burst_mode_get = bnxt_tx_burst_mode_get, 4063 .dev_led_on = bnxt_dev_led_on_op, 4064 .dev_led_off = bnxt_dev_led_off_op, 4065 .rx_queue_start = bnxt_rx_queue_start, 4066 .rx_queue_stop = bnxt_rx_queue_stop, 4067 .tx_queue_start = bnxt_tx_queue_start, 4068 .tx_queue_stop = bnxt_tx_queue_stop, 4069 .flow_ops_get = bnxt_flow_ops_get_op, 4070 .dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op, 4071 .get_eeprom_length = bnxt_get_eeprom_length_op, 4072 .get_eeprom = bnxt_get_eeprom_op, 4073 .set_eeprom = bnxt_set_eeprom_op, 4074 .get_module_info = bnxt_get_module_info, 4075 .get_module_eeprom = bnxt_get_module_eeprom, 4076 .timesync_enable = bnxt_timesync_enable, 4077 .timesync_disable = bnxt_timesync_disable, 4078 .timesync_read_time = bnxt_timesync_read_time, 4079 .timesync_write_time = bnxt_timesync_write_time, 4080 .timesync_adjust_time = bnxt_timesync_adjust_time, 4081 .timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp, 4082 .timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp, 4083 }; 4084 4085 static uint32_t bnxt_map_reset_regs(struct bnxt *bp, uint32_t reg) 4086 { 4087 uint32_t offset; 4088 4089 /* Only pre-map the reset GRC registers using window 3 */ 4090 rte_write32(reg & 0xfffff000, (uint8_t *)bp->bar0 + 4091 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 8); 4092 4093 offset = BNXT_GRCP_WINDOW_3_BASE + (reg & 0xffc); 4094 4095 return offset; 4096 } 4097 4098 int bnxt_map_fw_health_status_regs(struct bnxt *bp) 4099 { 4100 struct bnxt_error_recovery_info *info = bp->recovery_info; 4101 uint32_t reg_base = 0xffffffff; 4102 int i; 4103 4104 /* Only pre-map the monitoring GRC registers using window 2 */ 4105 for (i = 0; i < BNXT_FW_STATUS_REG_CNT; i++) { 4106 uint32_t reg = info->status_regs[i]; 4107 4108 if (BNXT_FW_STATUS_REG_TYPE(reg) != BNXT_FW_STATUS_REG_TYPE_GRC) 4109 continue; 4110 4111 if (reg_base == 0xffffffff) 4112 reg_base = reg & 0xfffff000; 4113 if ((reg & 0xfffff000) != reg_base) 4114 return -ERANGE; 4115 4116 /* Use mask 0xffc as the Lower 2 bits indicates 4117 * address space location 4118 */ 4119 info->mapped_status_regs[i] = BNXT_GRCP_WINDOW_2_BASE + 4120 (reg & 0xffc); 4121 } 4122 4123 if (reg_base == 0xffffffff) 4124 return 0; 4125 4126 rte_write32(reg_base, (uint8_t *)bp->bar0 + 4127 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 4128 4129 return 0; 4130 } 4131 4132 static void bnxt_write_fw_reset_reg(struct bnxt *bp, uint32_t index) 4133 { 4134 struct bnxt_error_recovery_info *info = bp->recovery_info; 4135 uint32_t delay = info->delay_after_reset[index]; 4136 uint32_t val = info->reset_reg_val[index]; 4137 uint32_t reg = info->reset_reg[index]; 4138 uint32_t type, offset; 4139 int ret; 4140 4141 type = BNXT_FW_STATUS_REG_TYPE(reg); 4142 offset = BNXT_FW_STATUS_REG_OFF(reg); 4143 4144 switch (type) { 4145 case BNXT_FW_STATUS_REG_TYPE_CFG: 4146 ret = rte_pci_write_config(bp->pdev, &val, sizeof(val), offset); 4147 if (ret < 0) { 4148 PMD_DRV_LOG(ERR, "Failed to write %#x at PCI offset %#x", 4149 val, offset); 4150 return; 4151 } 4152 break; 4153 case BNXT_FW_STATUS_REG_TYPE_GRC: 4154 offset = bnxt_map_reset_regs(bp, offset); 4155 rte_write32(val, (uint8_t *)bp->bar0 + offset); 4156 break; 4157 
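	/* For BAR0-relative reset registers no window remapping is needed;
	 * the value is written directly at the given offset from BAR0.
	 */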
case BNXT_FW_STATUS_REG_TYPE_BAR0: 4158 rte_write32(val, (uint8_t *)bp->bar0 + offset); 4159 break; 4160 } 4161 /* wait on a specific interval of time until core reset is complete */ 4162 if (delay) 4163 rte_delay_ms(delay); 4164 } 4165 4166 static void bnxt_dev_cleanup(struct bnxt *bp) 4167 { 4168 bp->eth_dev->data->dev_link.link_status = 0; 4169 bp->link_info->link_up = 0; 4170 if (bp->eth_dev->data->dev_started) 4171 bnxt_dev_stop(bp->eth_dev); 4172 4173 bnxt_uninit_resources(bp, true); 4174 } 4175 4176 static int 4177 bnxt_check_fw_reset_done(struct bnxt *bp) 4178 { 4179 int timeout = bp->fw_reset_max_msecs; 4180 uint16_t val = 0; 4181 int rc; 4182 4183 do { 4184 rc = rte_pci_read_config(bp->pdev, &val, sizeof(val), PCI_SUBSYSTEM_ID_OFFSET); 4185 if (rc < 0) { 4186 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", PCI_SUBSYSTEM_ID_OFFSET); 4187 return rc; 4188 } 4189 if (val != 0xffff) 4190 break; 4191 rte_delay_ms(1); 4192 } while (timeout--); 4193 4194 if (val == 0xffff) { 4195 PMD_DRV_LOG(ERR, "Firmware reset aborted, PCI config space invalid\n"); 4196 return -1; 4197 } 4198 4199 return 0; 4200 } 4201 4202 static int bnxt_restore_vlan_filters(struct bnxt *bp) 4203 { 4204 struct rte_eth_dev *dev = bp->eth_dev; 4205 struct rte_vlan_filter_conf *vfc; 4206 int vidx, vbit, rc; 4207 uint16_t vlan_id; 4208 4209 for (vlan_id = 1; vlan_id <= RTE_ETHER_MAX_VLAN_ID; vlan_id++) { 4210 vfc = &dev->data->vlan_filter_conf; 4211 vidx = vlan_id / 64; 4212 vbit = vlan_id % 64; 4213 4214 /* Each bit corresponds to a VLAN id */ 4215 if (vfc->ids[vidx] & (UINT64_C(1) << vbit)) { 4216 rc = bnxt_add_vlan_filter(bp, vlan_id); 4217 if (rc) 4218 return rc; 4219 } 4220 } 4221 4222 return 0; 4223 } 4224 4225 static int bnxt_restore_mac_filters(struct bnxt *bp) 4226 { 4227 struct rte_eth_dev *dev = bp->eth_dev; 4228 struct rte_eth_dev_info dev_info; 4229 struct rte_ether_addr *addr; 4230 uint64_t pool_mask; 4231 uint32_t pool = 0; 4232 uint32_t i; 4233 int rc; 4234 4235 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 4236 return 0; 4237 4238 rc = bnxt_dev_info_get_op(dev, &dev_info); 4239 if (rc) 4240 return rc; 4241 4242 /* replay MAC address configuration */ 4243 for (i = 1; i < dev_info.max_mac_addrs; i++) { 4244 addr = &dev->data->mac_addrs[i]; 4245 4246 /* skip zero address */ 4247 if (rte_is_zero_ether_addr(addr)) 4248 continue; 4249 4250 pool = 0; 4251 pool_mask = dev->data->mac_pool_sel[i]; 4252 4253 do { 4254 if (pool_mask & 1ULL) { 4255 rc = bnxt_mac_addr_add_op(dev, addr, i, pool); 4256 if (rc) 4257 return rc; 4258 } 4259 pool_mask >>= 1; 4260 pool++; 4261 } while (pool_mask); 4262 } 4263 4264 return 0; 4265 } 4266 4267 static int bnxt_restore_filters(struct bnxt *bp) 4268 { 4269 struct rte_eth_dev *dev = bp->eth_dev; 4270 int ret = 0; 4271 4272 if (dev->data->all_multicast) { 4273 ret = bnxt_allmulticast_enable_op(dev); 4274 if (ret) 4275 return ret; 4276 } 4277 if (dev->data->promiscuous) { 4278 ret = bnxt_promiscuous_enable_op(dev); 4279 if (ret) 4280 return ret; 4281 } 4282 4283 ret = bnxt_restore_mac_filters(bp); 4284 if (ret) 4285 return ret; 4286 4287 ret = bnxt_restore_vlan_filters(bp); 4288 /* TODO restore other filters as well */ 4289 return ret; 4290 } 4291 4292 static int bnxt_check_fw_ready(struct bnxt *bp) 4293 { 4294 int timeout = bp->fw_reset_max_msecs; 4295 int rc = 0; 4296 4297 do { 4298 rc = bnxt_hwrm_poll_ver_get(bp); 4299 if (rc == 0) 4300 break; 4301 rte_delay_ms(BNXT_FW_READY_WAIT_INTERVAL); 4302 timeout -= BNXT_FW_READY_WAIT_INTERVAL; 4303 } while (rc && timeout > 0); 4304 4305 
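	/* rc is still non-zero here only if HWRM_VER_GET kept failing for the
	 * whole fw_reset_max_msecs window, i.e. firmware never became ready.
	 */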
if (rc) 4306 PMD_DRV_LOG(ERR, "FW is not Ready after reset\n"); 4307 4308 return rc; 4309 } 4310 4311 static void bnxt_dev_recover(void *arg) 4312 { 4313 struct bnxt *bp = arg; 4314 int rc = 0; 4315 4316 pthread_mutex_lock(&bp->err_recovery_lock); 4317 4318 if (!bp->fw_reset_min_msecs) { 4319 rc = bnxt_check_fw_reset_done(bp); 4320 if (rc) 4321 goto err; 4322 } 4323 4324 /* Clear Error flag so that device re-init should happen */ 4325 bp->flags &= ~BNXT_FLAG_FATAL_ERROR; 4326 PMD_DRV_LOG(INFO, "Port: %u Starting recovery...\n", 4327 bp->eth_dev->data->port_id); 4328 4329 rc = bnxt_check_fw_ready(bp); 4330 if (rc) 4331 goto err; 4332 4333 rc = bnxt_init_resources(bp, true); 4334 if (rc) { 4335 PMD_DRV_LOG(ERR, 4336 "Failed to initialize resources after reset\n"); 4337 goto err; 4338 } 4339 /* clear reset flag as the device is initialized now */ 4340 bp->flags &= ~BNXT_FLAG_FW_RESET; 4341 4342 rc = bnxt_dev_start_op(bp->eth_dev); 4343 if (rc) { 4344 PMD_DRV_LOG(ERR, "Failed to start port after reset\n"); 4345 goto err_start; 4346 } 4347 4348 rte_eth_fp_ops[bp->eth_dev->data->port_id].rx_pkt_burst = 4349 bp->eth_dev->rx_pkt_burst; 4350 rte_eth_fp_ops[bp->eth_dev->data->port_id].tx_pkt_burst = 4351 bp->eth_dev->tx_pkt_burst; 4352 rte_mb(); 4353 4354 rc = bnxt_restore_filters(bp); 4355 if (rc) 4356 goto err_start; 4357 4358 PMD_DRV_LOG(INFO, "Port: %u Recovered from FW reset\n", 4359 bp->eth_dev->data->port_id); 4360 pthread_mutex_unlock(&bp->err_recovery_lock); 4361 4362 return; 4363 err_start: 4364 bnxt_dev_stop(bp->eth_dev); 4365 err: 4366 bp->flags |= BNXT_FLAG_FATAL_ERROR; 4367 bnxt_uninit_resources(bp, false); 4368 if (bp->eth_dev->data->dev_conf.intr_conf.rmv) 4369 rte_eth_dev_callback_process(bp->eth_dev, 4370 RTE_ETH_EVENT_INTR_RMV, 4371 NULL); 4372 pthread_mutex_unlock(&bp->err_recovery_lock); 4373 PMD_DRV_LOG(ERR, "Failed to recover from FW reset\n"); 4374 } 4375 4376 void bnxt_dev_reset_and_resume(void *arg) 4377 { 4378 struct bnxt *bp = arg; 4379 uint32_t us = US_PER_MS * bp->fw_reset_min_msecs; 4380 uint16_t val = 0; 4381 int rc; 4382 4383 bnxt_dev_cleanup(bp); 4384 PMD_DRV_LOG(INFO, "Port: %u Finished bnxt_dev_cleanup\n", 4385 bp->eth_dev->data->port_id); 4386 4387 bnxt_wait_for_device_shutdown(bp); 4388 4389 /* During some fatal firmware error conditions, the PCI config space 4390 * register 0x2e which normally contains the subsystem ID will become 4391 * 0xffff. This register will revert back to the normal value after 4392 * the chip has completed core reset. If we detect this condition, 4393 * we can poll this config register immediately for the value to revert. 
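 * In that case the minimum-wait period below is skipped and recovery is
 * scheduled almost immediately; bnxt_dev_recover() then relies on
 * bnxt_check_fw_reset_done() to poll until the register reverts.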
4394 */ 4395 if (bp->flags & BNXT_FLAG_FATAL_ERROR) { 4396 rc = rte_pci_read_config(bp->pdev, &val, sizeof(val), PCI_SUBSYSTEM_ID_OFFSET); 4397 if (rc < 0) { 4398 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", PCI_SUBSYSTEM_ID_OFFSET); 4399 return; 4400 } 4401 if (val == 0xffff) { 4402 bp->fw_reset_min_msecs = 0; 4403 us = 1; 4404 } 4405 } 4406 4407 rc = rte_eal_alarm_set(us, bnxt_dev_recover, (void *)bp); 4408 if (rc) 4409 PMD_DRV_LOG(ERR, "Error setting recovery alarm"); 4410 } 4411 4412 uint32_t bnxt_read_fw_status_reg(struct bnxt *bp, uint32_t index) 4413 { 4414 struct bnxt_error_recovery_info *info = bp->recovery_info; 4415 uint32_t reg = info->status_regs[index]; 4416 uint32_t type, offset, val = 0; 4417 int ret = 0; 4418 4419 type = BNXT_FW_STATUS_REG_TYPE(reg); 4420 offset = BNXT_FW_STATUS_REG_OFF(reg); 4421 4422 switch (type) { 4423 case BNXT_FW_STATUS_REG_TYPE_CFG: 4424 ret = rte_pci_read_config(bp->pdev, &val, sizeof(val), offset); 4425 if (ret < 0) 4426 PMD_DRV_LOG(ERR, "Failed to read PCI offset %#x", 4427 offset); 4428 break; 4429 case BNXT_FW_STATUS_REG_TYPE_GRC: 4430 offset = info->mapped_status_regs[index]; 4431 /* FALLTHROUGH */ 4432 case BNXT_FW_STATUS_REG_TYPE_BAR0: 4433 val = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 4434 offset)); 4435 break; 4436 } 4437 4438 return val; 4439 } 4440 4441 static int bnxt_fw_reset_all(struct bnxt *bp) 4442 { 4443 struct bnxt_error_recovery_info *info = bp->recovery_info; 4444 uint32_t i; 4445 int rc = 0; 4446 4447 if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) { 4448 /* Reset through primary function driver */ 4449 for (i = 0; i < info->reg_array_cnt; i++) 4450 bnxt_write_fw_reset_reg(bp, i); 4451 /* Wait for time specified by FW after triggering reset */ 4452 rte_delay_ms(info->primary_func_wait_period_after_reset); 4453 } else if (info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) { 4454 /* Reset with the help of Kong processor */ 4455 rc = bnxt_hwrm_fw_reset(bp); 4456 if (rc) 4457 PMD_DRV_LOG(ERR, "Failed to reset FW\n"); 4458 } 4459 4460 return rc; 4461 } 4462 4463 static void bnxt_fw_reset_cb(void *arg) 4464 { 4465 struct bnxt *bp = arg; 4466 struct bnxt_error_recovery_info *info = bp->recovery_info; 4467 int rc = 0; 4468 4469 /* Only Primary function can do FW reset */ 4470 if (bnxt_is_primary_func(bp) && 4471 bnxt_is_recovery_enabled(bp)) { 4472 rc = bnxt_fw_reset_all(bp); 4473 if (rc) { 4474 PMD_DRV_LOG(ERR, "Adapter recovery failed\n"); 4475 return; 4476 } 4477 } 4478 4479 /* if recovery method is ERROR_RECOVERY_CO_CPU, KONG will send 4480 * EXCEPTION_FATAL_ASYNC event to all the functions 4481 * (including MASTER FUNC). After receiving this Async, all the active 4482 * drivers should treat this case as FW initiated recovery 4483 */ 4484 if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) { 4485 bp->fw_reset_min_msecs = BNXT_MIN_FW_READY_TIMEOUT; 4486 bp->fw_reset_max_msecs = BNXT_MAX_FW_RESET_TIMEOUT; 4487 4488 /* To recover from error */ 4489 rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume, 4490 (void *)bp); 4491 } 4492 } 4493 4494 /* Driver should poll FW heartbeat, reset_counter with the frequency 4495 * advertised by FW in HWRM_ERROR_RECOVERY_QCFG. 4496 * When the driver detects heartbeat stop or change in reset_counter, 4497 * it has to trigger a reset to recover from the error condition. 4498 * A “primary function” is the function who will have the privilege to 4499 * initiate the chimp reset. The primary function will be elected by the 4500 * firmware and will be notified through async message. 
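 * bnxt_check_fw_health() below implements this polling: it re-arms itself
 * via rte_eal_alarm_set() every driver_polling_freq milliseconds and jumps
 * to the reset path when the heartbeat stops advancing or the reset
 * counter changes.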
4501 */ 4502 static void bnxt_check_fw_health(void *arg) 4503 { 4504 struct bnxt *bp = arg; 4505 struct bnxt_error_recovery_info *info = bp->recovery_info; 4506 uint32_t val = 0, wait_msec; 4507 4508 if (!info || !bnxt_is_recovery_enabled(bp) || 4509 is_bnxt_in_error(bp)) 4510 return; 4511 4512 val = bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG); 4513 if (val == info->last_heart_beat) 4514 goto reset; 4515 4516 info->last_heart_beat = val; 4517 4518 val = bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG); 4519 if (val != info->last_reset_counter) 4520 goto reset; 4521 4522 info->last_reset_counter = val; 4523 4524 rte_eal_alarm_set(US_PER_MS * info->driver_polling_freq, 4525 bnxt_check_fw_health, (void *)bp); 4526 4527 return; 4528 reset: 4529 /* Stop DMA to/from device */ 4530 bp->flags |= BNXT_FLAG_FATAL_ERROR; 4531 bp->flags |= BNXT_FLAG_FW_RESET; 4532 4533 bnxt_stop_rxtx(bp); 4534 4535 PMD_DRV_LOG(ERR, "Detected FW dead condition\n"); 4536 4537 if (bnxt_is_primary_func(bp)) 4538 wait_msec = info->primary_func_wait_period; 4539 else 4540 wait_msec = info->normal_func_wait_period; 4541 4542 rte_eal_alarm_set(US_PER_MS * wait_msec, 4543 bnxt_fw_reset_cb, (void *)bp); 4544 } 4545 4546 void bnxt_schedule_fw_health_check(struct bnxt *bp) 4547 { 4548 uint32_t polling_freq; 4549 4550 pthread_mutex_lock(&bp->health_check_lock); 4551 4552 if (!bnxt_is_recovery_enabled(bp)) 4553 goto done; 4554 4555 if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED) 4556 goto done; 4557 4558 polling_freq = bp->recovery_info->driver_polling_freq; 4559 4560 rte_eal_alarm_set(US_PER_MS * polling_freq, 4561 bnxt_check_fw_health, (void *)bp); 4562 bp->flags |= BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED; 4563 4564 done: 4565 pthread_mutex_unlock(&bp->health_check_lock); 4566 } 4567 4568 static void bnxt_cancel_fw_health_check(struct bnxt *bp) 4569 { 4570 rte_eal_alarm_cancel(bnxt_check_fw_health, (void *)bp); 4571 bp->flags &= ~BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED; 4572 } 4573 4574 static bool bnxt_vf_pciid(uint16_t device_id) 4575 { 4576 switch (device_id) { 4577 case BROADCOM_DEV_ID_57304_VF: 4578 case BROADCOM_DEV_ID_57406_VF: 4579 case BROADCOM_DEV_ID_5731X_VF: 4580 case BROADCOM_DEV_ID_5741X_VF: 4581 case BROADCOM_DEV_ID_57414_VF: 4582 case BROADCOM_DEV_ID_STRATUS_NIC_VF1: 4583 case BROADCOM_DEV_ID_STRATUS_NIC_VF2: 4584 case BROADCOM_DEV_ID_58802_VF: 4585 case BROADCOM_DEV_ID_57500_VF1: 4586 case BROADCOM_DEV_ID_57500_VF2: 4587 case BROADCOM_DEV_ID_58818_VF: 4588 /* FALLTHROUGH */ 4589 return true; 4590 default: 4591 return false; 4592 } 4593 } 4594 4595 /* Phase 5 device */ 4596 static bool bnxt_p5_device(uint16_t device_id) 4597 { 4598 switch (device_id) { 4599 case BROADCOM_DEV_ID_57508: 4600 case BROADCOM_DEV_ID_57504: 4601 case BROADCOM_DEV_ID_57502: 4602 case BROADCOM_DEV_ID_57508_MF1: 4603 case BROADCOM_DEV_ID_57504_MF1: 4604 case BROADCOM_DEV_ID_57502_MF1: 4605 case BROADCOM_DEV_ID_57508_MF2: 4606 case BROADCOM_DEV_ID_57504_MF2: 4607 case BROADCOM_DEV_ID_57502_MF2: 4608 case BROADCOM_DEV_ID_57500_VF1: 4609 case BROADCOM_DEV_ID_57500_VF2: 4610 case BROADCOM_DEV_ID_58812: 4611 case BROADCOM_DEV_ID_58814: 4612 case BROADCOM_DEV_ID_58818: 4613 case BROADCOM_DEV_ID_58818_VF: 4614 /* FALLTHROUGH */ 4615 return true; 4616 default: 4617 return false; 4618 } 4619 } 4620 4621 bool bnxt_stratus_device(struct bnxt *bp) 4622 { 4623 uint16_t device_id = bp->pdev->id.device_id; 4624 4625 switch (device_id) { 4626 case BROADCOM_DEV_ID_STRATUS_NIC: 4627 case BROADCOM_DEV_ID_STRATUS_NIC_VF1: 4628 case 
BROADCOM_DEV_ID_STRATUS_NIC_VF2: 4629 /* FALLTHROUGH */ 4630 return true; 4631 default: 4632 return false; 4633 } 4634 } 4635 4636 static int bnxt_map_pci_bars(struct rte_eth_dev *eth_dev) 4637 { 4638 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 4639 struct bnxt *bp = eth_dev->data->dev_private; 4640 4641 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 4642 bp->bar0 = (void *)pci_dev->mem_resource[0].addr; 4643 bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr; 4644 if (!bp->bar0 || !bp->doorbell_base) { 4645 PMD_DRV_LOG(ERR, "Unable to access Hardware\n"); 4646 return -ENODEV; 4647 } 4648 4649 bp->eth_dev = eth_dev; 4650 bp->pdev = pci_dev; 4651 4652 return 0; 4653 } 4654 4655 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, 4656 struct bnxt_ctx_pg_info *ctx_pg, 4657 uint32_t mem_size, 4658 const char *suffix, 4659 uint16_t idx) 4660 { 4661 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 4662 const struct rte_memzone *mz = NULL; 4663 char mz_name[RTE_MEMZONE_NAMESIZE]; 4664 rte_iova_t mz_phys_addr; 4665 uint64_t valid_bits = 0; 4666 uint32_t sz; 4667 int i; 4668 4669 if (!mem_size) 4670 return 0; 4671 4672 rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) / 4673 BNXT_PAGE_SIZE; 4674 rmem->page_size = BNXT_PAGE_SIZE; 4675 rmem->pg_arr = ctx_pg->ctx_pg_arr; 4676 rmem->dma_arr = ctx_pg->ctx_dma_arr; 4677 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; 4678 4679 valid_bits = PTU_PTE_VALID; 4680 4681 if (rmem->nr_pages > 1) { 4682 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 4683 "bnxt_ctx_pg_tbl%s_%x_%d", 4684 suffix, idx, bp->eth_dev->data->port_id); 4685 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4686 mz = rte_memzone_lookup(mz_name); 4687 if (!mz) { 4688 mz = rte_memzone_reserve_aligned(mz_name, 4689 rmem->nr_pages * 8, 4690 bp->eth_dev->device->numa_node, 4691 RTE_MEMZONE_2MB | 4692 RTE_MEMZONE_SIZE_HINT_ONLY | 4693 RTE_MEMZONE_IOVA_CONTIG, 4694 BNXT_PAGE_SIZE); 4695 if (mz == NULL) 4696 return -ENOMEM; 4697 } 4698 4699 memset(mz->addr, 0, mz->len); 4700 mz_phys_addr = mz->iova; 4701 4702 rmem->pg_tbl = mz->addr; 4703 rmem->pg_tbl_map = mz_phys_addr; 4704 rmem->pg_tbl_mz = mz; 4705 } 4706 4707 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d", 4708 suffix, idx, bp->eth_dev->data->port_id); 4709 mz = rte_memzone_lookup(mz_name); 4710 if (!mz) { 4711 mz = rte_memzone_reserve_aligned(mz_name, 4712 mem_size, 4713 bp->eth_dev->device->numa_node, 4714 RTE_MEMZONE_1GB | 4715 RTE_MEMZONE_SIZE_HINT_ONLY | 4716 RTE_MEMZONE_IOVA_CONTIG, 4717 BNXT_PAGE_SIZE); 4718 if (mz == NULL) 4719 return -ENOMEM; 4720 } 4721 4722 memset(mz->addr, 0, mz->len); 4723 mz_phys_addr = mz->iova; 4724 4725 for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) { 4726 rmem->pg_arr[i] = ((char *)mz->addr) + sz; 4727 rmem->dma_arr[i] = mz_phys_addr + sz; 4728 4729 if (rmem->nr_pages > 1) { 4730 if (i == rmem->nr_pages - 2 && 4731 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 4732 valid_bits |= PTU_PTE_NEXT_TO_LAST; 4733 else if (i == rmem->nr_pages - 1 && 4734 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 4735 valid_bits |= PTU_PTE_LAST; 4736 4737 rmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] | 4738 valid_bits); 4739 } 4740 } 4741 4742 rmem->mz = mz; 4743 if (rmem->vmem_size) 4744 rmem->vmem = (void **)mz->addr; 4745 rmem->dma_arr[0] = mz_phys_addr; 4746 return 0; 4747 } 4748 4749 static void bnxt_free_ctx_mem(struct bnxt *bp) 4750 { 4751 int i; 4752 4753 if (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED)) 4754 return; 4755 4756 bp->ctx->flags &= 
~BNXT_CTX_FLAG_INITED; 4757 rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz); 4758 rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz); 4759 rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz); 4760 rte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz); 4761 rte_memzone_free(bp->ctx->stat_mem.ring_mem.mz); 4762 rte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz); 4763 rte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz); 4764 rte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz); 4765 rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz); 4766 rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz); 4767 4768 for (i = 0; i < bp->ctx->tqm_fp_rings_count + 1; i++) { 4769 if (bp->ctx->tqm_mem[i]) 4770 rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz); 4771 } 4772 4773 rte_free(bp->ctx); 4774 bp->ctx = NULL; 4775 } 4776 4777 #define bnxt_roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) 4778 4779 #define min_t(type, x, y) ({ \ 4780 type __min1 = (x); \ 4781 type __min2 = (y); \ 4782 __min1 < __min2 ? __min1 : __min2; }) 4783 4784 #define max_t(type, x, y) ({ \ 4785 type __max1 = (x); \ 4786 type __max2 = (y); \ 4787 __max1 > __max2 ? __max1 : __max2; }) 4788 4789 #define clamp_t(type, _x, min, max) min_t(type, max_t(type, _x, min), max) 4790 4791 int bnxt_alloc_ctx_mem(struct bnxt *bp) 4792 { 4793 struct bnxt_ctx_pg_info *ctx_pg; 4794 struct bnxt_ctx_mem_info *ctx; 4795 uint32_t mem_size, ena, entries; 4796 uint32_t entries_sp, min; 4797 int i, rc; 4798 4799 rc = bnxt_hwrm_func_backing_store_qcaps(bp); 4800 if (rc) { 4801 PMD_DRV_LOG(ERR, "Query context mem capability failed\n"); 4802 return rc; 4803 } 4804 ctx = bp->ctx; 4805 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) 4806 return 0; 4807 4808 ctx_pg = &ctx->qp_mem; 4809 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries; 4810 if (ctx->qp_entry_size) { 4811 mem_size = ctx->qp_entry_size * ctx_pg->entries; 4812 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0); 4813 if (rc) 4814 return rc; 4815 } 4816 4817 ctx_pg = &ctx->srq_mem; 4818 ctx_pg->entries = ctx->srq_max_l2_entries; 4819 if (ctx->srq_entry_size) { 4820 mem_size = ctx->srq_entry_size * ctx_pg->entries; 4821 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0); 4822 if (rc) 4823 return rc; 4824 } 4825 4826 ctx_pg = &ctx->cq_mem; 4827 ctx_pg->entries = ctx->cq_max_l2_entries; 4828 if (ctx->cq_entry_size) { 4829 mem_size = ctx->cq_entry_size * ctx_pg->entries; 4830 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0); 4831 if (rc) 4832 return rc; 4833 } 4834 4835 ctx_pg = &ctx->vnic_mem; 4836 ctx_pg->entries = ctx->vnic_max_vnic_entries + 4837 ctx->vnic_max_ring_table_entries; 4838 if (ctx->vnic_entry_size) { 4839 mem_size = ctx->vnic_entry_size * ctx_pg->entries; 4840 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0); 4841 if (rc) 4842 return rc; 4843 } 4844 4845 ctx_pg = &ctx->stat_mem; 4846 ctx_pg->entries = ctx->stat_max_entries; 4847 if (ctx->stat_entry_size) { 4848 mem_size = ctx->stat_entry_size * ctx_pg->entries; 4849 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0); 4850 if (rc) 4851 return rc; 4852 } 4853 4854 min = ctx->tqm_min_entries_per_ring; 4855 4856 entries_sp = ctx->qp_max_l2_entries + 4857 ctx->vnic_max_vnic_entries + 4858 2 * ctx->qp_min_qp1_entries + min; 4859 entries_sp = bnxt_roundup(entries_sp, ctx->tqm_entries_multiple); 4860 4861 entries = ctx->qp_max_l2_entries + ctx->qp_min_qp1_entries; 4862 entries = bnxt_roundup(entries, ctx->tqm_entries_multiple); 4863 entries = clamp_t(uint32_t, entries, 
min, 4864 ctx->tqm_max_entries_per_ring); 4865 for (i = 0, ena = 0; i < ctx->tqm_fp_rings_count + 1; i++) { 4866 /* i=0 is for TQM_SP. i=1 to i=8 applies to RING0 to RING7. 4867 * i > 8 is other ext rings. 4868 */ 4869 ctx_pg = ctx->tqm_mem[i]; 4870 ctx_pg->entries = i ? entries : entries_sp; 4871 if (ctx->tqm_entry_size) { 4872 mem_size = ctx->tqm_entry_size * ctx_pg->entries; 4873 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, 4874 "tqm_mem", i); 4875 if (rc) 4876 return rc; 4877 } 4878 if (i < BNXT_MAX_TQM_LEGACY_RINGS) 4879 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i; 4880 else 4881 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8; 4882 } 4883 4884 ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES; 4885 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); 4886 if (rc) 4887 PMD_DRV_LOG(ERR, 4888 "Failed to configure context mem: rc = %d\n", rc); 4889 else 4890 ctx->flags |= BNXT_CTX_FLAG_INITED; 4891 4892 return rc; 4893 } 4894 4895 static int bnxt_alloc_stats_mem(struct bnxt *bp) 4896 { 4897 struct rte_pci_device *pci_dev = bp->pdev; 4898 char mz_name[RTE_MEMZONE_NAMESIZE]; 4899 const struct rte_memzone *mz = NULL; 4900 uint32_t total_alloc_len; 4901 rte_iova_t mz_phys_addr; 4902 4903 if (pci_dev->id.device_id == BROADCOM_DEV_ID_NS2) 4904 return 0; 4905 4906 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 4907 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 4908 pci_dev->addr.bus, pci_dev->addr.devid, 4909 pci_dev->addr.function, "rx_port_stats"); 4910 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4911 mz = rte_memzone_lookup(mz_name); 4912 total_alloc_len = 4913 RTE_CACHE_LINE_ROUNDUP(sizeof(struct rx_port_stats) + 4914 sizeof(struct rx_port_stats_ext) + 512); 4915 if (!mz) { 4916 mz = rte_memzone_reserve(mz_name, total_alloc_len, 4917 SOCKET_ID_ANY, 4918 RTE_MEMZONE_2MB | 4919 RTE_MEMZONE_SIZE_HINT_ONLY | 4920 RTE_MEMZONE_IOVA_CONTIG); 4921 if (mz == NULL) 4922 return -ENOMEM; 4923 } 4924 memset(mz->addr, 0, mz->len); 4925 mz_phys_addr = mz->iova; 4926 4927 bp->rx_mem_zone = (const void *)mz; 4928 bp->hw_rx_port_stats = mz->addr; 4929 bp->hw_rx_port_stats_map = mz_phys_addr; 4930 4931 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 4932 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 4933 pci_dev->addr.bus, pci_dev->addr.devid, 4934 pci_dev->addr.function, "tx_port_stats"); 4935 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4936 mz = rte_memzone_lookup(mz_name); 4937 total_alloc_len = 4938 RTE_CACHE_LINE_ROUNDUP(sizeof(struct tx_port_stats) + 4939 sizeof(struct tx_port_stats_ext) + 512); 4940 if (!mz) { 4941 mz = rte_memzone_reserve(mz_name, 4942 total_alloc_len, 4943 SOCKET_ID_ANY, 4944 RTE_MEMZONE_2MB | 4945 RTE_MEMZONE_SIZE_HINT_ONLY | 4946 RTE_MEMZONE_IOVA_CONTIG); 4947 if (mz == NULL) 4948 return -ENOMEM; 4949 } 4950 memset(mz->addr, 0, mz->len); 4951 mz_phys_addr = mz->iova; 4952 4953 bp->tx_mem_zone = (const void *)mz; 4954 bp->hw_tx_port_stats = mz->addr; 4955 bp->hw_tx_port_stats_map = mz_phys_addr; 4956 bp->flags |= BNXT_FLAG_PORT_STATS; 4957 4958 /* Display extended statistics if FW supports it */ 4959 if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 || 4960 bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0 || 4961 !(bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED)) 4962 return 0; 4963 4964 bp->hw_rx_port_stats_ext = (void *) 4965 ((uint8_t *)bp->hw_rx_port_stats + 4966 sizeof(struct rx_port_stats)); 4967 bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map + 4968 sizeof(struct rx_port_stats); 4969 bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS; 4970 4971 if (bp->hwrm_spec_code < 
HWRM_SPEC_CODE_1_9_2 || 4972 bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED) { 4973 bp->hw_tx_port_stats_ext = (void *) 4974 ((uint8_t *)bp->hw_tx_port_stats + 4975 sizeof(struct tx_port_stats)); 4976 bp->hw_tx_port_stats_ext_map = 4977 bp->hw_tx_port_stats_map + 4978 sizeof(struct tx_port_stats); 4979 bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS; 4980 } 4981 4982 return 0; 4983 } 4984 4985 static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev) 4986 { 4987 struct bnxt *bp = eth_dev->data->dev_private; 4988 int rc = 0; 4989 4990 eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl", 4991 RTE_ETHER_ADDR_LEN * 4992 bp->max_l2_ctx, 4993 0); 4994 if (eth_dev->data->mac_addrs == NULL) { 4995 PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n"); 4996 return -ENOMEM; 4997 } 4998 4999 if (!BNXT_HAS_DFLT_MAC_SET(bp)) { 5000 if (BNXT_PF(bp)) 5001 return -EINVAL; 5002 5003 /* Generate a random MAC address, if none was assigned by PF */ 5004 PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n"); 5005 bnxt_eth_hw_addr_random(bp->mac_addr); 5006 PMD_DRV_LOG(INFO, 5007 "Assign random MAC:" RTE_ETHER_ADDR_PRT_FMT "\n", 5008 bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2], 5009 bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]); 5010 5011 rc = bnxt_hwrm_set_mac(bp); 5012 if (rc) 5013 return rc; 5014 } 5015 5016 /* Copy the permanent MAC from the FUNC_QCAPS response */ 5017 memcpy(ð_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN); 5018 5019 return rc; 5020 } 5021 5022 static int bnxt_restore_dflt_mac(struct bnxt *bp) 5023 { 5024 int rc = 0; 5025 5026 /* MAC is already configured in FW */ 5027 if (BNXT_HAS_DFLT_MAC_SET(bp)) 5028 return 0; 5029 5030 /* Restore the old MAC configured */ 5031 rc = bnxt_hwrm_set_mac(bp); 5032 if (rc) 5033 PMD_DRV_LOG(ERR, "Failed to restore MAC address\n"); 5034 5035 return rc; 5036 } 5037 5038 static void bnxt_config_vf_req_fwd(struct bnxt *bp) 5039 { 5040 if (!BNXT_PF(bp)) 5041 return; 5042 5043 memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd)); 5044 5045 if (!(bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN)) 5046 BNXT_HWRM_CMD_TO_FORWARD(HWRM_PORT_PHY_QCFG); 5047 BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_CFG); 5048 BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_VF_CFG); 5049 BNXT_HWRM_CMD_TO_FORWARD(HWRM_CFA_L2_FILTER_ALLOC); 5050 BNXT_HWRM_CMD_TO_FORWARD(HWRM_OEM_CMD); 5051 } 5052 5053 static void bnxt_alloc_error_recovery_info(struct bnxt *bp) 5054 { 5055 struct bnxt_error_recovery_info *info = bp->recovery_info; 5056 5057 if (info) { 5058 if (!(bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS)) 5059 memset(info, 0, sizeof(*info)); 5060 return; 5061 } 5062 5063 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 5064 return; 5065 5066 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg", 5067 sizeof(*info), 0); 5068 if (!info) 5069 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 5070 5071 bp->recovery_info = info; 5072 } 5073 5074 static void bnxt_check_fw_status(struct bnxt *bp) 5075 { 5076 uint32_t fw_status; 5077 5078 if (!(bp->recovery_info && 5079 (bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS))) 5080 return; 5081 5082 fw_status = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG); 5083 if (fw_status != BNXT_FW_STATUS_HEALTHY) 5084 PMD_DRV_LOG(ERR, "Firmware not responding, status: %#x\n", 5085 fw_status); 5086 } 5087 5088 static int bnxt_map_hcomm_fw_status_reg(struct bnxt *bp) 5089 { 5090 struct bnxt_error_recovery_info *info = bp->recovery_info; 5091 uint32_t status_loc; 5092 uint32_t sig_ver; 5093 5094 rte_write32(HCOMM_STATUS_STRUCT_LOC, (uint8_t *)bp->bar0 + 5095 
BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 5096 sig_ver = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 5097 BNXT_GRCP_WINDOW_2_BASE + 5098 offsetof(struct hcomm_status, 5099 sig_ver))); 5100 /* If the signature is absent, then FW does not support this feature */ 5101 if ((sig_ver & HCOMM_STATUS_SIGNATURE_MASK) != 5102 HCOMM_STATUS_SIGNATURE_VAL) 5103 return 0; 5104 5105 if (!info) { 5106 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg", 5107 sizeof(*info), 0); 5108 if (!info) 5109 return -ENOMEM; 5110 bp->recovery_info = info; 5111 } else { 5112 memset(info, 0, sizeof(*info)); 5113 } 5114 5115 status_loc = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 5116 BNXT_GRCP_WINDOW_2_BASE + 5117 offsetof(struct hcomm_status, 5118 fw_status_loc))); 5119 5120 /* Only pre-map the FW health status GRC register */ 5121 if (BNXT_FW_STATUS_REG_TYPE(status_loc) != BNXT_FW_STATUS_REG_TYPE_GRC) 5122 return 0; 5123 5124 info->status_regs[BNXT_FW_STATUS_REG] = status_loc; 5125 info->mapped_status_regs[BNXT_FW_STATUS_REG] = 5126 BNXT_GRCP_WINDOW_2_BASE + (status_loc & BNXT_GRCP_OFFSET_MASK); 5127 5128 rte_write32((status_loc & BNXT_GRCP_BASE_MASK), (uint8_t *)bp->bar0 + 5129 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 5130 5131 bp->fw_cap |= BNXT_FW_CAP_HCOMM_FW_STATUS; 5132 5133 return 0; 5134 } 5135 5136 /* This function gets the FW version along with the 5137 * capabilities(MAX and current) of the function, vnic, 5138 * error recovery, phy and other chip related info 5139 */ 5140 static int bnxt_get_config(struct bnxt *bp) 5141 { 5142 uint16_t mtu; 5143 int rc = 0; 5144 5145 bp->fw_cap = 0; 5146 5147 rc = bnxt_map_hcomm_fw_status_reg(bp); 5148 if (rc) 5149 return rc; 5150 5151 rc = bnxt_hwrm_ver_get(bp, DFLT_HWRM_CMD_TIMEOUT); 5152 if (rc) { 5153 bnxt_check_fw_status(bp); 5154 return rc; 5155 } 5156 5157 rc = bnxt_hwrm_func_reset(bp); 5158 if (rc) 5159 return -EIO; 5160 5161 rc = bnxt_hwrm_vnic_qcaps(bp); 5162 if (rc) 5163 return rc; 5164 5165 rc = bnxt_hwrm_queue_qportcfg(bp); 5166 if (rc) 5167 return rc; 5168 5169 /* Get the MAX capabilities for this function. 5170 * This function also allocates context memory for TQM rings and 5171 * informs the firmware about this allocated backing store memory. 
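	 * The backing store itself is carved out of memzones by
	 * bnxt_alloc_ctx_mem() earlier in this file and reported to firmware
	 * through bnxt_hwrm_func_backing_store_cfg().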
5172 */ 5173 rc = bnxt_hwrm_func_qcaps(bp); 5174 if (rc) 5175 return rc; 5176 5177 rc = bnxt_hwrm_func_qcfg(bp, &mtu); 5178 if (rc) 5179 return rc; 5180 5181 rc = bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(bp); 5182 if (rc) 5183 return rc; 5184 5185 bnxt_hwrm_port_mac_qcfg(bp); 5186 5187 bnxt_hwrm_parent_pf_qcfg(bp); 5188 5189 bnxt_hwrm_port_phy_qcaps(bp); 5190 5191 bnxt_alloc_error_recovery_info(bp); 5192 /* Get the adapter error recovery support info */ 5193 rc = bnxt_hwrm_error_recovery_qcfg(bp); 5194 if (rc) 5195 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 5196 5197 bnxt_hwrm_port_led_qcaps(bp); 5198 5199 return 0; 5200 } 5201 5202 static int 5203 bnxt_init_locks(struct bnxt *bp) 5204 { 5205 int err; 5206 5207 err = pthread_mutex_init(&bp->flow_lock, NULL); 5208 if (err) { 5209 PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n"); 5210 return err; 5211 } 5212 5213 err = pthread_mutex_init(&bp->def_cp_lock, NULL); 5214 if (err) { 5215 PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n"); 5216 return err; 5217 } 5218 5219 err = pthread_mutex_init(&bp->health_check_lock, NULL); 5220 if (err) { 5221 PMD_DRV_LOG(ERR, "Unable to initialize health_check_lock\n"); 5222 return err; 5223 } 5224 5225 err = pthread_mutex_init(&bp->err_recovery_lock, NULL); 5226 if (err) 5227 PMD_DRV_LOG(ERR, "Unable to initialize err_recovery_lock\n"); 5228 5229 return err; 5230 } 5231 5232 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev) 5233 { 5234 int rc = 0; 5235 5236 rc = bnxt_get_config(bp); 5237 if (rc) 5238 return rc; 5239 5240 if (!reconfig_dev) { 5241 rc = bnxt_setup_mac_addr(bp->eth_dev); 5242 if (rc) 5243 return rc; 5244 } else { 5245 rc = bnxt_restore_dflt_mac(bp); 5246 if (rc) 5247 return rc; 5248 } 5249 5250 bnxt_config_vf_req_fwd(bp); 5251 5252 rc = bnxt_hwrm_func_driver_register(bp); 5253 if (rc) { 5254 PMD_DRV_LOG(ERR, "Failed to register driver"); 5255 return -EBUSY; 5256 } 5257 5258 if (BNXT_PF(bp)) { 5259 if (bp->pdev->max_vfs) { 5260 rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs); 5261 if (rc) { 5262 PMD_DRV_LOG(ERR, "Failed to allocate VFs\n"); 5263 return rc; 5264 } 5265 } else { 5266 rc = bnxt_hwrm_allocate_pf_only(bp); 5267 if (rc) { 5268 PMD_DRV_LOG(ERR, 5269 "Failed to allocate PF resources"); 5270 return rc; 5271 } 5272 } 5273 } 5274 5275 rc = bnxt_alloc_mem(bp, reconfig_dev); 5276 if (rc) 5277 return rc; 5278 5279 rc = bnxt_setup_int(bp); 5280 if (rc) 5281 return rc; 5282 5283 rc = bnxt_request_int(bp); 5284 if (rc) 5285 return rc; 5286 5287 rc = bnxt_init_ctx_mem(bp); 5288 if (rc) { 5289 PMD_DRV_LOG(ERR, "Failed to init adv_flow_counters\n"); 5290 return rc; 5291 } 5292 5293 return 0; 5294 } 5295 5296 static int 5297 bnxt_parse_devarg_flow_xstat(__rte_unused const char *key, 5298 const char *value, void *opaque_arg) 5299 { 5300 struct bnxt *bp = opaque_arg; 5301 unsigned long flow_xstat; 5302 char *end = NULL; 5303 5304 if (!value || !opaque_arg) { 5305 PMD_DRV_LOG(ERR, 5306 "Invalid parameter passed to flow_xstat devarg.\n"); 5307 return -EINVAL; 5308 } 5309 5310 flow_xstat = strtoul(value, &end, 10); 5311 if (end == NULL || *end != '\0' || 5312 (flow_xstat == ULONG_MAX && errno == ERANGE)) { 5313 PMD_DRV_LOG(ERR, 5314 "Invalid parameter passed to flow_xstat devarg.\n"); 5315 return -EINVAL; 5316 } 5317 5318 if (BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)) { 5319 PMD_DRV_LOG(ERR, 5320 "Invalid value passed to flow_xstat devarg.\n"); 5321 return -EINVAL; 5322 } 5323 5324 bp->flags |= BNXT_FLAG_FLOW_XSTATS_EN; 5325 if (BNXT_FLOW_XSTATS_EN(bp)) 5326 PMD_DRV_LOG(INFO, 
"flow_xstat feature enabled.\n"); 5327 5328 return 0; 5329 } 5330 5331 static int 5332 bnxt_parse_devarg_max_num_kflows(__rte_unused const char *key, 5333 const char *value, void *opaque_arg) 5334 { 5335 struct bnxt *bp = opaque_arg; 5336 unsigned long max_num_kflows; 5337 char *end = NULL; 5338 5339 if (!value || !opaque_arg) { 5340 PMD_DRV_LOG(ERR, 5341 "Invalid parameter passed to max_num_kflows devarg.\n"); 5342 return -EINVAL; 5343 } 5344 5345 max_num_kflows = strtoul(value, &end, 10); 5346 if (end == NULL || *end != '\0' || 5347 (max_num_kflows == ULONG_MAX && errno == ERANGE)) { 5348 PMD_DRV_LOG(ERR, 5349 "Invalid parameter passed to max_num_kflows devarg.\n"); 5350 return -EINVAL; 5351 } 5352 5353 if (bnxt_devarg_max_num_kflow_invalid(max_num_kflows)) { 5354 PMD_DRV_LOG(ERR, 5355 "Invalid value passed to max_num_kflows devarg.\n"); 5356 return -EINVAL; 5357 } 5358 5359 bp->max_num_kflows = max_num_kflows; 5360 if (bp->max_num_kflows) 5361 PMD_DRV_LOG(INFO, "max_num_kflows set as %ldK.\n", 5362 max_num_kflows); 5363 5364 return 0; 5365 } 5366 5367 static int 5368 bnxt_parse_devarg_app_id(__rte_unused const char *key, 5369 const char *value, void *opaque_arg) 5370 { 5371 struct bnxt *bp = opaque_arg; 5372 unsigned long app_id; 5373 char *end = NULL; 5374 5375 if (!value || !opaque_arg) { 5376 PMD_DRV_LOG(ERR, 5377 "Invalid parameter passed to app-id " 5378 "devargs.\n"); 5379 return -EINVAL; 5380 } 5381 5382 app_id = strtoul(value, &end, 10); 5383 if (end == NULL || *end != '\0' || 5384 (app_id == ULONG_MAX && errno == ERANGE)) { 5385 PMD_DRV_LOG(ERR, 5386 "Invalid parameter passed to app_id " 5387 "devargs.\n"); 5388 return -EINVAL; 5389 } 5390 5391 if (BNXT_DEVARG_APP_ID_INVALID(app_id)) { 5392 PMD_DRV_LOG(ERR, "Invalid app-id(%d) devargs.\n", 5393 (uint16_t)app_id); 5394 return -EINVAL; 5395 } 5396 5397 bp->app_id = app_id; 5398 PMD_DRV_LOG(INFO, "app-id=%d feature enabled.\n", (uint16_t)app_id); 5399 5400 return 0; 5401 } 5402 5403 static int 5404 bnxt_parse_devarg_rep_is_pf(__rte_unused const char *key, 5405 const char *value, void *opaque_arg) 5406 { 5407 struct bnxt_representor *vfr_bp = opaque_arg; 5408 unsigned long rep_is_pf; 5409 char *end = NULL; 5410 5411 if (!value || !opaque_arg) { 5412 PMD_DRV_LOG(ERR, 5413 "Invalid parameter passed to rep_is_pf devargs.\n"); 5414 return -EINVAL; 5415 } 5416 5417 rep_is_pf = strtoul(value, &end, 10); 5418 if (end == NULL || *end != '\0' || 5419 (rep_is_pf == ULONG_MAX && errno == ERANGE)) { 5420 PMD_DRV_LOG(ERR, 5421 "Invalid parameter passed to rep_is_pf devargs.\n"); 5422 return -EINVAL; 5423 } 5424 5425 if (BNXT_DEVARG_REP_IS_PF_INVALID(rep_is_pf)) { 5426 PMD_DRV_LOG(ERR, 5427 "Invalid value passed to rep_is_pf devargs.\n"); 5428 return -EINVAL; 5429 } 5430 5431 vfr_bp->flags |= rep_is_pf; 5432 if (BNXT_REP_PF(vfr_bp)) 5433 PMD_DRV_LOG(INFO, "PF representor\n"); 5434 else 5435 PMD_DRV_LOG(INFO, "VF representor\n"); 5436 5437 return 0; 5438 } 5439 5440 static int 5441 bnxt_parse_devarg_rep_based_pf(__rte_unused const char *key, 5442 const char *value, void *opaque_arg) 5443 { 5444 struct bnxt_representor *vfr_bp = opaque_arg; 5445 unsigned long rep_based_pf; 5446 char *end = NULL; 5447 5448 if (!value || !opaque_arg) { 5449 PMD_DRV_LOG(ERR, 5450 "Invalid parameter passed to rep_based_pf " 5451 "devargs.\n"); 5452 return -EINVAL; 5453 } 5454 5455 rep_based_pf = strtoul(value, &end, 10); 5456 if (end == NULL || *end != '\0' || 5457 (rep_based_pf == ULONG_MAX && errno == ERANGE)) { 5458 PMD_DRV_LOG(ERR, 5459 "Invalid parameter passed 
to rep_based_pf " 5460 "devargs.\n"); 5461 return -EINVAL; 5462 } 5463 5464 if (BNXT_DEVARG_REP_BASED_PF_INVALID(rep_based_pf)) { 5465 PMD_DRV_LOG(ERR, 5466 "Invalid value passed to rep_based_pf devargs.\n"); 5467 return -EINVAL; 5468 } 5469 5470 vfr_bp->rep_based_pf = rep_based_pf; 5471 vfr_bp->flags |= BNXT_REP_BASED_PF_VALID; 5472 5473 PMD_DRV_LOG(INFO, "rep-based-pf = %d\n", vfr_bp->rep_based_pf); 5474 5475 return 0; 5476 } 5477 5478 static int 5479 bnxt_parse_devarg_rep_q_r2f(__rte_unused const char *key, 5480 const char *value, void *opaque_arg) 5481 { 5482 struct bnxt_representor *vfr_bp = opaque_arg; 5483 unsigned long rep_q_r2f; 5484 char *end = NULL; 5485 5486 if (!value || !opaque_arg) { 5487 PMD_DRV_LOG(ERR, 5488 "Invalid parameter passed to rep_q_r2f " 5489 "devargs.\n"); 5490 return -EINVAL; 5491 } 5492 5493 rep_q_r2f = strtoul(value, &end, 10); 5494 if (end == NULL || *end != '\0' || 5495 (rep_q_r2f == ULONG_MAX && errno == ERANGE)) { 5496 PMD_DRV_LOG(ERR, 5497 "Invalid parameter passed to rep_q_r2f " 5498 "devargs.\n"); 5499 return -EINVAL; 5500 } 5501 5502 if (BNXT_DEVARG_REP_Q_R2F_INVALID(rep_q_r2f)) { 5503 PMD_DRV_LOG(ERR, 5504 "Invalid value passed to rep_q_r2f devargs.\n"); 5505 return -EINVAL; 5506 } 5507 5508 vfr_bp->rep_q_r2f = rep_q_r2f; 5509 vfr_bp->flags |= BNXT_REP_Q_R2F_VALID; 5510 PMD_DRV_LOG(INFO, "rep-q-r2f = %d\n", vfr_bp->rep_q_r2f); 5511 5512 return 0; 5513 } 5514 5515 static int 5516 bnxt_parse_devarg_rep_q_f2r(__rte_unused const char *key, 5517 const char *value, void *opaque_arg) 5518 { 5519 struct bnxt_representor *vfr_bp = opaque_arg; 5520 unsigned long rep_q_f2r; 5521 char *end = NULL; 5522 5523 if (!value || !opaque_arg) { 5524 PMD_DRV_LOG(ERR, 5525 "Invalid parameter passed to rep_q_f2r " 5526 "devargs.\n"); 5527 return -EINVAL; 5528 } 5529 5530 rep_q_f2r = strtoul(value, &end, 10); 5531 if (end == NULL || *end != '\0' || 5532 (rep_q_f2r == ULONG_MAX && errno == ERANGE)) { 5533 PMD_DRV_LOG(ERR, 5534 "Invalid parameter passed to rep_q_f2r " 5535 "devargs.\n"); 5536 return -EINVAL; 5537 } 5538 5539 if (BNXT_DEVARG_REP_Q_F2R_INVALID(rep_q_f2r)) { 5540 PMD_DRV_LOG(ERR, 5541 "Invalid value passed to rep_q_f2r devargs.\n"); 5542 return -EINVAL; 5543 } 5544 5545 vfr_bp->rep_q_f2r = rep_q_f2r; 5546 vfr_bp->flags |= BNXT_REP_Q_F2R_VALID; 5547 PMD_DRV_LOG(INFO, "rep-q-f2r = %d\n", vfr_bp->rep_q_f2r); 5548 5549 return 0; 5550 } 5551 5552 static int 5553 bnxt_parse_devarg_rep_fc_r2f(__rte_unused const char *key, 5554 const char *value, void *opaque_arg) 5555 { 5556 struct bnxt_representor *vfr_bp = opaque_arg; 5557 unsigned long rep_fc_r2f; 5558 char *end = NULL; 5559 5560 if (!value || !opaque_arg) { 5561 PMD_DRV_LOG(ERR, 5562 "Invalid parameter passed to rep_fc_r2f " 5563 "devargs.\n"); 5564 return -EINVAL; 5565 } 5566 5567 rep_fc_r2f = strtoul(value, &end, 10); 5568 if (end == NULL || *end != '\0' || 5569 (rep_fc_r2f == ULONG_MAX && errno == ERANGE)) { 5570 PMD_DRV_LOG(ERR, 5571 "Invalid parameter passed to rep_fc_r2f " 5572 "devargs.\n"); 5573 return -EINVAL; 5574 } 5575 5576 if (BNXT_DEVARG_REP_FC_R2F_INVALID(rep_fc_r2f)) { 5577 PMD_DRV_LOG(ERR, 5578 "Invalid value passed to rep_fc_r2f devargs.\n"); 5579 return -EINVAL; 5580 } 5581 5582 vfr_bp->flags |= BNXT_REP_FC_R2F_VALID; 5583 vfr_bp->rep_fc_r2f = rep_fc_r2f; 5584 PMD_DRV_LOG(INFO, "rep-fc-r2f = %lu\n", rep_fc_r2f); 5585 5586 return 0; 5587 } 5588 5589 static int 5590 bnxt_parse_devarg_rep_fc_f2r(__rte_unused const char *key, 5591 const char *value, void *opaque_arg) 5592 { 5593 struct 
bnxt_representor *vfr_bp = opaque_arg; 5594 unsigned long rep_fc_f2r; 5595 char *end = NULL; 5596 5597 if (!value || !opaque_arg) { 5598 PMD_DRV_LOG(ERR, 5599 "Invalid parameter passed to rep_fc_f2r " 5600 "devargs.\n"); 5601 return -EINVAL; 5602 } 5603 5604 rep_fc_f2r = strtoul(value, &end, 10); 5605 if (end == NULL || *end != '\0' || 5606 (rep_fc_f2r == ULONG_MAX && errno == ERANGE)) { 5607 PMD_DRV_LOG(ERR, 5608 "Invalid parameter passed to rep_fc_f2r " 5609 "devargs.\n"); 5610 return -EINVAL; 5611 } 5612 5613 if (BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r)) { 5614 PMD_DRV_LOG(ERR, 5615 "Invalid value passed to rep_fc_f2r devargs.\n"); 5616 return -EINVAL; 5617 } 5618 5619 vfr_bp->flags |= BNXT_REP_FC_F2R_VALID; 5620 vfr_bp->rep_fc_f2r = rep_fc_f2r; 5621 PMD_DRV_LOG(INFO, "rep-fc-f2r = %lu\n", rep_fc_f2r); 5622 5623 return 0; 5624 } 5625 5626 static int 5627 bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs) 5628 { 5629 struct rte_kvargs *kvlist; 5630 int ret; 5631 5632 if (devargs == NULL) 5633 return 0; 5634 5635 kvlist = rte_kvargs_parse(devargs->args, bnxt_dev_args); 5636 if (kvlist == NULL) 5637 return -EINVAL; 5638 5639 /* 5640 * Handler for "flow_xstat" devarg. 5641 * Invoked as for ex: "-a 0000:00:0d.0,flow_xstat=1" 5642 */ 5643 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_FLOW_XSTAT, 5644 bnxt_parse_devarg_flow_xstat, bp); 5645 if (ret) 5646 goto err; 5647 5648 /* 5649 * Handler for "max_num_kflows" devarg. 5650 * Invoked as for ex: "-a 000:00:0d.0,max_num_kflows=32" 5651 */ 5652 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_MAX_NUM_KFLOWS, 5653 bnxt_parse_devarg_max_num_kflows, bp); 5654 if (ret) 5655 goto err; 5656 5657 err: 5658 /* 5659 * Handler for "app-id" devarg. 5660 * Invoked as for ex: "-a 000:00:0d.0,app-id=1" 5661 */ 5662 rte_kvargs_process(kvlist, BNXT_DEVARG_APP_ID, 5663 bnxt_parse_devarg_app_id, bp); 5664 5665 rte_kvargs_free(kvlist); 5666 return ret; 5667 } 5668 5669 static int bnxt_alloc_switch_domain(struct bnxt *bp) 5670 { 5671 int rc = 0; 5672 5673 if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) { 5674 rc = rte_eth_switch_domain_alloc(&bp->switch_domain_id); 5675 if (rc) 5676 PMD_DRV_LOG(ERR, 5677 "Failed to alloc switch domain: %d\n", rc); 5678 else 5679 PMD_DRV_LOG(INFO, 5680 "Switch domain allocated %d\n", 5681 bp->switch_domain_id); 5682 } 5683 5684 return rc; 5685 } 5686 5687 /* Allocate and initialize various fields in bnxt struct that 5688 * need to be allocated/destroyed only once in the lifetime of the driver 5689 */ 5690 static int bnxt_drv_init(struct rte_eth_dev *eth_dev) 5691 { 5692 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 5693 struct bnxt *bp = eth_dev->data->dev_private; 5694 int rc = 0; 5695 5696 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 5697 5698 if (bnxt_vf_pciid(pci_dev->id.device_id)) 5699 bp->flags |= BNXT_FLAG_VF; 5700 5701 if (bnxt_p5_device(pci_dev->id.device_id)) 5702 bp->flags |= BNXT_FLAG_CHIP_P5; 5703 5704 if (pci_dev->id.device_id == BROADCOM_DEV_ID_58802 || 5705 pci_dev->id.device_id == BROADCOM_DEV_ID_58804 || 5706 pci_dev->id.device_id == BROADCOM_DEV_ID_58808 || 5707 pci_dev->id.device_id == BROADCOM_DEV_ID_58802_VF) 5708 bp->flags |= BNXT_FLAG_STINGRAY; 5709 5710 if (BNXT_TRUFLOW_EN(bp)) { 5711 /* extra mbuf field is required to store CFA code from mark */ 5712 static const struct rte_mbuf_dynfield bnxt_cfa_code_dynfield_desc = { 5713 .name = RTE_PMD_BNXT_CFA_CODE_DYNFIELD_NAME, 5714 .size = sizeof(bnxt_cfa_code_dynfield_t), 5715 .align = __alignof__(bnxt_cfa_code_dynfield_t), 5716 }; 5717 
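		/* rte_mbuf_dynfield_register() returns the byte offset of the
		 * new field within struct rte_mbuf, or -1 (with rte_errno set)
		 * on failure.
		 */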
bnxt_cfa_code_dynfield_offset = 5718 rte_mbuf_dynfield_register(&bnxt_cfa_code_dynfield_desc); 5719 if (bnxt_cfa_code_dynfield_offset < 0) { 5720 PMD_DRV_LOG(ERR, 5721 "Failed to register mbuf field for TruFlow mark\n"); 5722 return -rte_errno; 5723 } 5724 } 5725 5726 rc = bnxt_map_pci_bars(eth_dev); 5727 if (rc) { 5728 PMD_DRV_LOG(ERR, 5729 "Failed to initialize board rc: %x\n", rc); 5730 return rc; 5731 } 5732 5733 rc = bnxt_alloc_pf_info(bp); 5734 if (rc) 5735 return rc; 5736 5737 rc = bnxt_alloc_link_info(bp); 5738 if (rc) 5739 return rc; 5740 5741 rc = bnxt_alloc_parent_info(bp); 5742 if (rc) 5743 return rc; 5744 5745 rc = bnxt_alloc_hwrm_resources(bp); 5746 if (rc) { 5747 PMD_DRV_LOG(ERR, 5748 "Failed to allocate response buffer rc: %x\n", rc); 5749 return rc; 5750 } 5751 rc = bnxt_alloc_leds_info(bp); 5752 if (rc) 5753 return rc; 5754 5755 rc = bnxt_alloc_cos_queues(bp); 5756 if (rc) 5757 return rc; 5758 5759 rc = bnxt_init_locks(bp); 5760 if (rc) 5761 return rc; 5762 5763 rc = bnxt_alloc_switch_domain(bp); 5764 if (rc) 5765 return rc; 5766 5767 return rc; 5768 } 5769 5770 static int 5771 bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused) 5772 { 5773 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 5774 static int version_printed; 5775 struct bnxt *bp; 5776 int rc; 5777 5778 if (version_printed++ == 0) 5779 PMD_DRV_LOG(INFO, "%s\n", bnxt_version); 5780 5781 eth_dev->dev_ops = &bnxt_dev_ops; 5782 eth_dev->rx_queue_count = bnxt_rx_queue_count_op; 5783 eth_dev->rx_descriptor_status = bnxt_rx_descriptor_status_op; 5784 eth_dev->tx_descriptor_status = bnxt_tx_descriptor_status_op; 5785 eth_dev->rx_pkt_burst = &bnxt_recv_pkts; 5786 eth_dev->tx_pkt_burst = &bnxt_xmit_pkts; 5787 5788 /* 5789 * For secondary processes, we don't initialise any further 5790 * as primary has already done this work. 5791 */ 5792 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 5793 return 0; 5794 5795 rte_eth_copy_pci_info(eth_dev, pci_dev); 5796 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 5797 5798 bp = eth_dev->data->dev_private; 5799 5800 /* Parse dev arguments passed on when starting the DPDK application. 
	 */
*/ 5801 rc = bnxt_parse_dev_args(bp, pci_dev->device.devargs); 5802 if (rc) 5803 goto error_free; 5804 5805 rc = bnxt_drv_init(eth_dev); 5806 if (rc) 5807 goto error_free; 5808 5809 rc = bnxt_init_resources(bp, false); 5810 if (rc) 5811 goto error_free; 5812 5813 rc = bnxt_alloc_stats_mem(bp); 5814 if (rc) 5815 goto error_free; 5816 5817 PMD_DRV_LOG(INFO, 5818 "Found %s device at mem %" PRIX64 ", node addr %pM\n", 5819 DRV_MODULE_NAME, 5820 pci_dev->mem_resource[0].phys_addr, 5821 pci_dev->mem_resource[0].addr); 5822 5823 return 0; 5824 5825 error_free: 5826 bnxt_dev_uninit(eth_dev); 5827 return rc; 5828 } 5829 5830 5831 static void bnxt_free_ctx_mem_buf(struct bnxt_ctx_mem_buf_info *ctx) 5832 { 5833 if (!ctx) 5834 return; 5835 5836 if (ctx->va) 5837 rte_free(ctx->va); 5838 5839 ctx->va = NULL; 5840 ctx->dma = RTE_BAD_IOVA; 5841 ctx->ctx_id = BNXT_CTX_VAL_INVAL; 5842 } 5843 5844 static void bnxt_unregister_fc_ctx_mem(struct bnxt *bp) 5845 { 5846 bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX, 5847 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 5848 bp->flow_stat->rx_fc_out_tbl.ctx_id, 5849 bp->flow_stat->max_fc, 5850 false); 5851 5852 bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX, 5853 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 5854 bp->flow_stat->tx_fc_out_tbl.ctx_id, 5855 bp->flow_stat->max_fc, 5856 false); 5857 5858 if (bp->flow_stat->rx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 5859 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_in_tbl.ctx_id); 5860 bp->flow_stat->rx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 5861 5862 if (bp->flow_stat->rx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 5863 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_out_tbl.ctx_id); 5864 bp->flow_stat->rx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 5865 5866 if (bp->flow_stat->tx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 5867 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_in_tbl.ctx_id); 5868 bp->flow_stat->tx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 5869 5870 if (bp->flow_stat->tx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 5871 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_out_tbl.ctx_id); 5872 bp->flow_stat->tx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 5873 } 5874 5875 static void bnxt_uninit_fc_ctx_mem(struct bnxt *bp) 5876 { 5877 bnxt_unregister_fc_ctx_mem(bp); 5878 5879 bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_in_tbl); 5880 bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_out_tbl); 5881 bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_in_tbl); 5882 bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_out_tbl); 5883 } 5884 5885 static void bnxt_uninit_ctx_mem(struct bnxt *bp) 5886 { 5887 if (BNXT_FLOW_XSTATS_EN(bp)) 5888 bnxt_uninit_fc_ctx_mem(bp); 5889 } 5890 5891 static void 5892 bnxt_free_error_recovery_info(struct bnxt *bp) 5893 { 5894 rte_free(bp->recovery_info); 5895 bp->recovery_info = NULL; 5896 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 5897 } 5898 5899 static int 5900 bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev) 5901 { 5902 int rc; 5903 5904 bnxt_free_int(bp); 5905 bnxt_free_mem(bp, reconfig_dev); 5906 5907 bnxt_hwrm_func_buf_unrgtr(bp); 5908 if (bp->pf != NULL) { 5909 rte_free(bp->pf->vf_req_buf); 5910 bp->pf->vf_req_buf = NULL; 5911 } 5912 5913 rc = bnxt_hwrm_func_driver_unregister(bp); 5914 bp->flags &= ~BNXT_FLAG_REGISTERED; 5915 bnxt_free_ctx_mem(bp); 5916 if (!reconfig_dev) { 5917 bnxt_free_hwrm_resources(bp); 5918 bnxt_free_error_recovery_info(bp); 5919 } 5920 5921 bnxt_uninit_ctx_mem(bp); 5922 5923 bnxt_free_flow_stats_info(bp); 5924 if (bp->rep_info != NULL) 5925 bnxt_free_switch_domain(bp); 5926 bnxt_free_rep_info(bp); 5927 rte_free(bp->ptp_cfg); 
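	/* Mark as freed so later cleanup paths do not free it again. */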
5928 bp->ptp_cfg = NULL; 5929 return rc; 5930 } 5931 5932 static int 5933 bnxt_dev_uninit(struct rte_eth_dev *eth_dev) 5934 { 5935 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 5936 return -EPERM; 5937 5938 PMD_DRV_LOG(DEBUG, "Calling Device uninit\n"); 5939 5940 if (eth_dev->state != RTE_ETH_DEV_UNUSED) 5941 bnxt_dev_close_op(eth_dev); 5942 5943 return 0; 5944 } 5945 5946 static int bnxt_pci_remove_dev_with_reps(struct rte_eth_dev *eth_dev) 5947 { 5948 struct bnxt *bp = eth_dev->data->dev_private; 5949 struct rte_eth_dev *vf_rep_eth_dev; 5950 int ret = 0, i; 5951 5952 if (!bp) 5953 return -EINVAL; 5954 5955 for (i = 0; i < bp->num_reps; i++) { 5956 vf_rep_eth_dev = bp->rep_info[i].vfr_eth_dev; 5957 if (!vf_rep_eth_dev) 5958 continue; 5959 PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci remove\n", 5960 vf_rep_eth_dev->data->port_id); 5961 rte_eth_dev_destroy(vf_rep_eth_dev, bnxt_representor_uninit); 5962 } 5963 PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n", 5964 eth_dev->data->port_id); 5965 ret = rte_eth_dev_destroy(eth_dev, bnxt_dev_uninit); 5966 5967 return ret; 5968 } 5969 5970 static void bnxt_free_rep_info(struct bnxt *bp) 5971 { 5972 rte_free(bp->rep_info); 5973 bp->rep_info = NULL; 5974 rte_free(bp->cfa_code_map); 5975 bp->cfa_code_map = NULL; 5976 } 5977 5978 static int bnxt_init_rep_info(struct bnxt *bp) 5979 { 5980 int i = 0, rc; 5981 5982 if (bp->rep_info) 5983 return 0; 5984 5985 bp->rep_info = rte_zmalloc("bnxt_rep_info", 5986 sizeof(bp->rep_info[0]) * BNXT_MAX_VF_REPS(bp), 5987 0); 5988 if (!bp->rep_info) { 5989 PMD_DRV_LOG(ERR, "Failed to alloc memory for rep info\n"); 5990 return -ENOMEM; 5991 } 5992 bp->cfa_code_map = rte_zmalloc("bnxt_cfa_code_map", 5993 sizeof(*bp->cfa_code_map) * 5994 BNXT_MAX_CFA_CODE, 0); 5995 if (!bp->cfa_code_map) { 5996 PMD_DRV_LOG(ERR, "Failed to alloc memory for cfa_code_map\n"); 5997 bnxt_free_rep_info(bp); 5998 return -ENOMEM; 5999 } 6000 6001 for (i = 0; i < BNXT_MAX_CFA_CODE; i++) 6002 bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID; 6003 6004 rc = pthread_mutex_init(&bp->rep_info->vfr_lock, NULL); 6005 if (rc) { 6006 PMD_DRV_LOG(ERR, "Unable to initialize vfr_lock\n"); 6007 bnxt_free_rep_info(bp); 6008 return rc; 6009 } 6010 6011 rc = pthread_mutex_init(&bp->rep_info->vfr_start_lock, NULL); 6012 if (rc) { 6013 PMD_DRV_LOG(ERR, "Unable to initialize vfr_start_lock\n"); 6014 bnxt_free_rep_info(bp); 6015 return rc; 6016 } 6017 6018 return rc; 6019 } 6020 6021 static int bnxt_rep_port_probe(struct rte_pci_device *pci_dev, 6022 struct rte_eth_devargs *eth_da, 6023 struct rte_eth_dev *backing_eth_dev, 6024 const char *dev_args) 6025 { 6026 struct rte_eth_dev *vf_rep_eth_dev; 6027 char name[RTE_ETH_NAME_MAX_LEN]; 6028 struct bnxt *backing_bp = backing_eth_dev->data->dev_private; 6029 uint16_t max_vf_reps = BNXT_MAX_VF_REPS(backing_bp); 6030 6031 uint16_t num_rep; 6032 int i, ret = 0; 6033 struct rte_kvargs *kvlist = NULL; 6034 6035 if (eth_da->type == RTE_ETH_REPRESENTOR_NONE) 6036 return 0; 6037 if (eth_da->type != RTE_ETH_REPRESENTOR_VF) { 6038 PMD_DRV_LOG(ERR, "unsupported representor type %d\n", 6039 eth_da->type); 6040 return -ENOTSUP; 6041 } 6042 num_rep = eth_da->nb_representor_ports; 6043 if (num_rep > max_vf_reps) { 6044 PMD_DRV_LOG(ERR, "nb_representor_ports = %d > %d MAX VF REPS\n", 6045 num_rep, max_vf_reps); 6046 return -EINVAL; 6047 } 6048 6049 if (num_rep >= RTE_MAX_ETHPORTS) { 6050 PMD_DRV_LOG(ERR, 6051 "nb_representor_ports = %d > %d MAX ETHPORTS\n", 6052 num_rep, RTE_MAX_ETHPORTS); 6053 return -EINVAL; 6054 } 6055 6056 if 
(!(BNXT_PF(backing_bp) || BNXT_VF_IS_TRUSTED(backing_bp))) { 6057 PMD_DRV_LOG(ERR, 6058 "Not a PF or trusted VF. No Representor support\n"); 6059 /* Returning an error is not an option. 6060 * Applications are not handling this correctly 6061 */ 6062 return 0; 6063 } 6064 6065 if (bnxt_init_rep_info(backing_bp)) 6066 return 0; 6067 6068 for (i = 0; i < num_rep; i++) { 6069 struct bnxt_representor representor = { 6070 .vf_id = eth_da->representor_ports[i], 6071 .switch_domain_id = backing_bp->switch_domain_id, 6072 .parent_dev = backing_eth_dev 6073 }; 6074 6075 if (representor.vf_id >= max_vf_reps) { 6076 PMD_DRV_LOG(ERR, "VF-Rep id %d >= %d MAX VF ID\n", 6077 representor.vf_id, max_vf_reps); 6078 continue; 6079 } 6080 6081 /* representor port net_bdf_port */ 6082 snprintf(name, sizeof(name), "net_%s_representor_%d", 6083 pci_dev->device.name, eth_da->representor_ports[i]); 6084 6085 kvlist = rte_kvargs_parse(dev_args, bnxt_dev_args); 6086 if (kvlist) { 6087 /* 6088 * Handler for "rep_is_pf" devarg. 6089 * Invoked as for ex: "-a 000:00:0d.0, 6090 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>" 6091 */ 6092 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_IS_PF, 6093 bnxt_parse_devarg_rep_is_pf, 6094 (void *)&representor); 6095 if (ret) { 6096 ret = -EINVAL; 6097 goto err; 6098 } 6099 /* 6100 * Handler for "rep_based_pf" devarg. 6101 * Invoked as for ex: "-a 000:00:0d.0, 6102 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>" 6103 */ 6104 ret = rte_kvargs_process(kvlist, 6105 BNXT_DEVARG_REP_BASED_PF, 6106 bnxt_parse_devarg_rep_based_pf, 6107 (void *)&representor); 6108 if (ret) { 6109 ret = -EINVAL; 6110 goto err; 6111 } 6112 /* 6113 * Handler for "rep_based_pf" devarg. 6114 * Invoked as for ex: "-a 000:00:0d.0, 6115 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>" 6116 */ 6117 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_R2F, 6118 bnxt_parse_devarg_rep_q_r2f, 6119 (void *)&representor); 6120 if (ret) { 6121 ret = -EINVAL; 6122 goto err; 6123 } 6124 /* 6125 * Handler for "rep_based_pf" devarg. 6126 * Invoked as for ex: "-a 000:00:0d.0, 6127 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>" 6128 */ 6129 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_F2R, 6130 bnxt_parse_devarg_rep_q_f2r, 6131 (void *)&representor); 6132 if (ret) { 6133 ret = -EINVAL; 6134 goto err; 6135 } 6136 /* 6137 * Handler for "rep_based_pf" devarg. 6138 * Invoked as for ex: "-a 000:00:0d.0, 6139 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>" 6140 */ 6141 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_R2F, 6142 bnxt_parse_devarg_rep_fc_r2f, 6143 (void *)&representor); 6144 if (ret) { 6145 ret = -EINVAL; 6146 goto err; 6147 } 6148 /* 6149 * Handler for "rep_based_pf" devarg. 
		ret = rte_eth_dev_create(&pci_dev->device, name,
					 sizeof(struct bnxt_representor),
					 NULL, NULL,
					 bnxt_representor_init,
					 &representor);
		if (ret) {
			PMD_DRV_LOG(ERR, "failed to create bnxt vf "
				    "representor %s.", name);
			goto err;
		}

		vf_rep_eth_dev = rte_eth_dev_allocated(name);
		if (!vf_rep_eth_dev) {
			PMD_DRV_LOG(ERR, "Failed to find the eth_dev"
				    " for VF-Rep: %s.", name);
			ret = -ENODEV;
			goto err;
		}

		PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci probe\n",
			    backing_eth_dev->data->port_id);
		backing_bp->rep_info[representor.vf_id].vfr_eth_dev =
			vf_rep_eth_dev;
		backing_bp->num_reps++;

		/* Free the kvlist parsed for this representor before the
		 * next iteration re-parses dev_args; otherwise each pass
		 * through the loop would leak the previous allocation.
		 */
		rte_kvargs_free(kvlist);
		kvlist = NULL;
	}

	return 0;

err:
	/* If num_rep > 1, then rollback already created
	 * ports, since we'll be failing the probe anyway
	 */
	if (num_rep > 1)
		bnxt_pci_remove_dev_with_reps(backing_eth_dev);
	rte_errno = -ret;
	rte_kvargs_free(kvlist);

	return ret;
}

static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			  struct rte_pci_device *pci_dev)
{
	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
	struct rte_eth_dev *backing_eth_dev;
	uint16_t num_rep;
	int ret = 0;

	if (pci_dev->device.devargs) {
		ret = rte_eth_devargs_parse(pci_dev->device.devargs->args,
					    &eth_da);
		if (ret)
			return ret;
	}

	num_rep = eth_da.nb_representor_ports;
	PMD_DRV_LOG(DEBUG, "nb_representor_ports = %d\n",
		    num_rep);

	/* We could come here after the first level of probe was already
	 * invoked as part of an application bringup (OVS-DPDK vswitchd), so
	 * first check for an already allocated eth_dev for the backing
	 * device (PF/Trusted VF).
	 */
	backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (backing_eth_dev == NULL) {
		ret = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
					 sizeof(struct bnxt),
					 eth_dev_pci_specific_init, pci_dev,
					 bnxt_dev_init, NULL);

		if (ret || !num_rep)
			return ret;

		backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	}
	PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci probe\n",
		    backing_eth_dev->data->port_id);

	if (!num_rep)
		return ret;

	/* probe representor ports now */
	ret = bnxt_rep_port_probe(pci_dev, &eth_da, backing_eth_dev,
				  pci_dev->device.devargs->args);

	return ret;
}
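/*
 * Illustrative sketch (not part of the upstream sources): because
 * bnxt_rte_pmd sets RTE_PCI_DRV_PROBE_AGAIN below, an application such as
 * OVS-DPDK can attach the PF first and later re-probe the same PCI device
 * with representor devargs to hot-add VF representor ports, for example:
 *
 *   rte_eal_hotplug_add("pci", "0000:0d:00.0", "representor=[0-3]");
 *
 * The PCI address and representor range are placeholders; on the second
 * probe, bnxt_pci_probe() above finds the already-allocated backing eth_dev
 * and only creates the representor ports.
 */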
static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;

	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!eth_dev)
		return 0; /* Invoked typically only by OVS-DPDK; by the
			   * time it comes here the eth_dev has already
			   * been deleted by rte_eth_dev_close(), so
			   * returning a non-negative value at least
			   * allows cleanup to complete properly
			   */

	PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n", eth_dev->data->port_id);
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
			return rte_eth_dev_destroy(eth_dev,
						   bnxt_representor_uninit);
		else
			return rte_eth_dev_destroy(eth_dev, bnxt_dev_uninit);
	} else {
		return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
	}
}

static struct rte_pci_driver bnxt_rte_pmd = {
	.id_table = bnxt_pci_id_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
			RTE_PCI_DRV_INTR_RMV |
			RTE_PCI_DRV_PROBE_AGAIN, /* Needed in case of VF-REPs
						  * and OVS-DPDK
						  */
	.probe = bnxt_pci_probe,
	.remove = bnxt_pci_remove,
};

static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
	if (strcmp(dev->device->driver->name, drv->driver.name))
		return false;

	return true;
}

bool is_bnxt_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &bnxt_rte_pmd);
}

RTE_LOG_REGISTER_SUFFIX(bnxt_logtype_driver, driver, NOTICE);
RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
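/*
 * Illustrative sketch (not part of the upstream sources): is_bnxt_supported()
 * lets driver code confirm that an rte_eth_dev is bound to this PMD before
 * touching bnxt-private data. An application that only holds a port_id can
 * make a similar check before using bnxt-specific APIs; "port_id" here is a
 * placeholder variable:
 *
 *   struct rte_eth_dev_info dev_info;
 *
 *   if (rte_eth_dev_info_get(port_id, &dev_info) == 0 &&
 *       strcmp(dev_info.driver_name, "net_bnxt") == 0)
 *           ... port_id is driven by the bnxt PMD ...
 */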