1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2014-2023 Broadcom 3 * All rights reserved. 4 */ 5 6 #include <inttypes.h> 7 #include <stdalign.h> 8 #include <stdbool.h> 9 10 #include <dev_driver.h> 11 #include <ethdev_driver.h> 12 #include <ethdev_pci.h> 13 #include <rte_malloc.h> 14 #include <rte_cycles.h> 15 #include <rte_alarm.h> 16 #include <rte_kvargs.h> 17 #include <rte_vect.h> 18 19 #include "bnxt.h" 20 #include "bnxt_filter.h" 21 #include "bnxt_hwrm.h" 22 #include "bnxt_irq.h" 23 #include "bnxt_reps.h" 24 #include "bnxt_ring.h" 25 #include "bnxt_rxq.h" 26 #include "bnxt_rxr.h" 27 #include "bnxt_stats.h" 28 #include "bnxt_txq.h" 29 #include "bnxt_txr.h" 30 #include "bnxt_vnic.h" 31 #include "hsi_struct_def_dpdk.h" 32 #include "bnxt_nvm_defs.h" 33 #include "bnxt_tf_common.h" 34 #include "ulp_flow_db.h" 35 #include "rte_pmd_bnxt.h" 36 37 #define DRV_MODULE_NAME "bnxt" 38 static const char bnxt_version[] = 39 "Broadcom NetXtreme driver " DRV_MODULE_NAME; 40 41 /* 42 * The set of PCI devices this driver supports 43 */ 44 static const struct rte_pci_id bnxt_pci_id_map[] = { 45 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 46 BROADCOM_DEV_ID_STRATUS_NIC_VF1) }, 47 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 48 BROADCOM_DEV_ID_STRATUS_NIC_VF2) }, 49 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) }, 50 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) }, 51 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) }, 52 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) }, 53 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) }, 54 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) }, 55 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) }, 56 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) }, 57 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) }, 58 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) }, 59 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) }, 60 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) }, 61 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) }, 62 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) }, 63 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) }, 64 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) }, 65 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) }, 66 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) }, 67 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) }, 68 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) }, 69 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) }, 70 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) }, 71 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) }, 72 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) }, 73 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508) }, 74 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504) }, 75 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502) }, 76 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF1) }, 77 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF2) }, 78 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF1) }, 79 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF1) }, 80 { 
RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF1) }, 81 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF2) }, 82 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF2) }, 83 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF2) }, 84 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58812) }, 85 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58814) }, 86 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58818) }, 87 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58818_VF) }, 88 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57608) }, 89 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57604) }, 90 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57602) }, 91 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57601) }, 92 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5760X_VF) }, 93 { .vendor_id = 0, /* sentinel */ }, 94 }; 95 96 #define BNXT_DEVARG_FLOW_XSTAT "flow-xstat" 97 #define BNXT_DEVARG_MAX_NUM_KFLOWS "max-num-kflows" 98 #define BNXT_DEVARG_REPRESENTOR "representor" 99 #define BNXT_DEVARG_REP_BASED_PF "rep-based-pf" 100 #define BNXT_DEVARG_REP_IS_PF "rep-is-pf" 101 #define BNXT_DEVARG_REP_Q_R2F "rep-q-r2f" 102 #define BNXT_DEVARG_REP_Q_F2R "rep-q-f2r" 103 #define BNXT_DEVARG_REP_FC_R2F "rep-fc-r2f" 104 #define BNXT_DEVARG_REP_FC_F2R "rep-fc-f2r" 105 #define BNXT_DEVARG_APP_ID "app-id" 106 #define BNXT_DEVARG_IEEE_1588 "ieee-1588" 107 #define BNXT_DEVARG_CQE_MODE "cqe-mode" 108 109 static const char *const bnxt_dev_args[] = { 110 BNXT_DEVARG_REPRESENTOR, 111 BNXT_DEVARG_FLOW_XSTAT, 112 BNXT_DEVARG_MAX_NUM_KFLOWS, 113 BNXT_DEVARG_REP_BASED_PF, 114 BNXT_DEVARG_REP_IS_PF, 115 BNXT_DEVARG_REP_Q_R2F, 116 BNXT_DEVARG_REP_Q_F2R, 117 BNXT_DEVARG_REP_FC_R2F, 118 BNXT_DEVARG_REP_FC_F2R, 119 BNXT_DEVARG_APP_ID, 120 BNXT_DEVARG_IEEE_1588, 121 BNXT_DEVARG_CQE_MODE, 122 NULL 123 }; 124 125 /* 126 * cqe-mode = an non-negative 8-bit number 127 */ 128 #define BNXT_DEVARG_CQE_MODE_INVALID(val) ((val) > 1) 129 130 /* 131 * app-id = an non-negative 8-bit number 132 */ 133 #define BNXT_DEVARG_APP_ID_INVALID(val) ((val) > 255) 134 135 /* 136 * ieee-1588 = an non-negative 8-bit number 137 */ 138 #define BNXT_DEVARG_IEEE_1588_INVALID(val) ((val) > 255) 139 140 /* 141 * flow_xstat == false to disable the feature 142 * flow_xstat == true to enable the feature 143 */ 144 #define BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat) ((flow_xstat) > 1) 145 146 /* 147 * rep_is_pf == false to indicate VF representor 148 * rep_is_pf == true to indicate PF representor 149 */ 150 #define BNXT_DEVARG_REP_IS_PF_INVALID(rep_is_pf) ((rep_is_pf) > 1) 151 152 /* 153 * rep_based_pf == Physical index of the PF 154 */ 155 #define BNXT_DEVARG_REP_BASED_PF_INVALID(rep_based_pf) ((rep_based_pf) > 15) 156 /* 157 * rep_q_r2f == Logical COS Queue index for the rep to endpoint direction 158 */ 159 #define BNXT_DEVARG_REP_Q_R2F_INVALID(rep_q_r2f) ((rep_q_r2f) > 3) 160 161 /* 162 * rep_q_f2r == Logical COS Queue index for the endpoint to rep direction 163 */ 164 #define BNXT_DEVARG_REP_Q_F2R_INVALID(rep_q_f2r) ((rep_q_f2r) > 3) 165 166 /* 167 * rep_fc_r2f == Flow control for the representor to endpoint direction 168 */ 169 #define BNXT_DEVARG_REP_FC_R2F_INVALID(rep_fc_r2f) ((rep_fc_r2f) > 1) 170 171 /* 172 * rep_fc_f2r == Flow control for the endpoint to representor direction 173 */ 174 #define BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r) ((rep_fc_f2r) > 1) 175 176 int bnxt_cfa_code_dynfield_offset = -1; 177 178 
/* 179 * max_num_kflows must be >= 32 180 * and must be a power-of-2 supported value 181 * return: 1 -> invalid 182 * 0 -> valid 183 */ 184 static int bnxt_devarg_max_num_kflow_invalid(uint16_t max_num_kflows) 185 { 186 if (max_num_kflows < 32 || !rte_is_power_of_2(max_num_kflows)) 187 return 1; 188 return 0; 189 } 190 191 static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask); 192 static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev); 193 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev); 194 static int bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev); 195 static void bnxt_cancel_fw_health_check(struct bnxt *bp); 196 static int bnxt_restore_vlan_filters(struct bnxt *bp); 197 static void bnxt_dev_recover(void *arg); 198 static void bnxt_free_error_recovery_info(struct bnxt *bp); 199 static void bnxt_free_rep_info(struct bnxt *bp); 200 static int bnxt_check_fw_ready(struct bnxt *bp); 201 static bool bnxt_enable_ulp(struct bnxt *bp); 202 203 int is_bnxt_in_error(struct bnxt *bp) 204 { 205 if (bp->flags & BNXT_FLAG_FATAL_ERROR) 206 return -EIO; 207 if (bp->flags & BNXT_FLAG_FW_RESET) 208 return -EBUSY; 209 210 return 0; 211 } 212 213 /***********************/ 214 215 /* 216 * High level utility functions 217 */ 218 219 uint16_t bnxt_rss_ctxts(const struct bnxt *bp) 220 { 221 unsigned int num_rss_rings = RTE_MIN(bp->rx_nr_rings, 222 BNXT_RSS_TBL_SIZE_P5); 223 224 if (!BNXT_CHIP_P5_P7(bp)) 225 return 1; 226 227 return RTE_ALIGN_MUL_CEIL(num_rss_rings, 228 BNXT_RSS_ENTRIES_PER_CTX_P5) / 229 BNXT_RSS_ENTRIES_PER_CTX_P5; 230 } 231 232 uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp) 233 { 234 if (!BNXT_CHIP_P5_P7(bp)) 235 return HW_HASH_INDEX_SIZE; 236 237 return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_P5; 238 } 239 240 static void bnxt_free_parent_info(struct bnxt *bp) 241 { 242 rte_free(bp->parent); 243 bp->parent = NULL; 244 } 245 246 static void bnxt_free_pf_info(struct bnxt *bp) 247 { 248 rte_free(bp->pf); 249 bp->pf = NULL; 250 } 251 252 static void bnxt_free_link_info(struct bnxt *bp) 253 { 254 rte_free(bp->link_info); 255 bp->link_info = NULL; 256 } 257 258 static void bnxt_free_leds_info(struct bnxt *bp) 259 { 260 if (BNXT_VF(bp)) 261 return; 262 263 rte_free(bp->leds); 264 bp->leds = NULL; 265 } 266 267 static void bnxt_free_flow_stats_info(struct bnxt *bp) 268 { 269 rte_free(bp->flow_stat); 270 bp->flow_stat = NULL; 271 } 272 273 static void bnxt_free_cos_queues(struct bnxt *bp) 274 { 275 rte_free(bp->rx_cos_queue); 276 bp->rx_cos_queue = NULL; 277 rte_free(bp->tx_cos_queue); 278 bp->tx_cos_queue = NULL; 279 } 280 281 static void bnxt_free_mem(struct bnxt *bp, bool reconfig) 282 { 283 bnxt_free_filter_mem(bp); 284 bnxt_free_vnic_attributes(bp); 285 bnxt_free_vnic_mem(bp); 286 287 /* tx/rx rings are configured as part of *_queue_setup callbacks. 288 * If the number of rings change across fw update, 289 * we don't have much choice except to warn the user. 
290 */ 291 if (!reconfig) { 292 bnxt_free_stats(bp); 293 bnxt_free_tx_rings(bp); 294 bnxt_free_rx_rings(bp); 295 } 296 bnxt_free_async_cp_ring(bp); 297 bnxt_free_rxtx_nq_ring(bp); 298 299 rte_free(bp->grp_info); 300 bp->grp_info = NULL; 301 } 302 303 static int bnxt_alloc_parent_info(struct bnxt *bp) 304 { 305 bp->parent = rte_zmalloc("bnxt_parent_info", 306 sizeof(struct bnxt_parent_info), 0); 307 if (bp->parent == NULL) 308 return -ENOMEM; 309 310 return 0; 311 } 312 313 static int bnxt_alloc_pf_info(struct bnxt *bp) 314 { 315 bp->pf = rte_zmalloc("bnxt_pf_info", sizeof(struct bnxt_pf_info), 0); 316 if (bp->pf == NULL) 317 return -ENOMEM; 318 319 return 0; 320 } 321 322 static int bnxt_alloc_link_info(struct bnxt *bp) 323 { 324 bp->link_info = 325 rte_zmalloc("bnxt_link_info", sizeof(struct bnxt_link_info), 0); 326 if (bp->link_info == NULL) 327 return -ENOMEM; 328 329 return 0; 330 } 331 332 static int bnxt_alloc_leds_info(struct bnxt *bp) 333 { 334 if (BNXT_VF(bp)) 335 return 0; 336 337 bp->leds = rte_zmalloc("bnxt_leds", 338 BNXT_MAX_LED * sizeof(struct bnxt_led_info), 339 0); 340 if (bp->leds == NULL) 341 return -ENOMEM; 342 343 return 0; 344 } 345 346 static int bnxt_alloc_cos_queues(struct bnxt *bp) 347 { 348 bp->rx_cos_queue = 349 rte_zmalloc("bnxt_rx_cosq", 350 BNXT_COS_QUEUE_COUNT * 351 sizeof(struct bnxt_cos_queue_info), 352 0); 353 if (bp->rx_cos_queue == NULL) 354 return -ENOMEM; 355 356 bp->tx_cos_queue = 357 rte_zmalloc("bnxt_tx_cosq", 358 BNXT_COS_QUEUE_COUNT * 359 sizeof(struct bnxt_cos_queue_info), 360 0); 361 if (bp->tx_cos_queue == NULL) 362 return -ENOMEM; 363 364 return 0; 365 } 366 367 static int bnxt_alloc_flow_stats_info(struct bnxt *bp) 368 { 369 bp->flow_stat = rte_zmalloc("bnxt_flow_xstat", 370 sizeof(struct bnxt_flow_stat_info), 0); 371 if (bp->flow_stat == NULL) 372 return -ENOMEM; 373 374 return 0; 375 } 376 377 static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig) 378 { 379 int rc; 380 381 rc = bnxt_alloc_ring_grps(bp); 382 if (rc) 383 goto alloc_mem_err; 384 385 rc = bnxt_alloc_async_ring_struct(bp); 386 if (rc) 387 goto alloc_mem_err; 388 389 rc = bnxt_alloc_vnic_mem(bp); 390 if (rc) 391 goto alloc_mem_err; 392 393 rc = bnxt_alloc_vnic_attributes(bp, reconfig); 394 if (rc) 395 goto alloc_mem_err; 396 397 rc = bnxt_alloc_filter_mem(bp); 398 if (rc) 399 goto alloc_mem_err; 400 401 rc = bnxt_alloc_async_cp_ring(bp); 402 if (rc) 403 goto alloc_mem_err; 404 405 rc = bnxt_alloc_rxtx_nq_ring(bp); 406 if (rc) 407 goto alloc_mem_err; 408 409 if (BNXT_FLOW_XSTATS_EN(bp)) { 410 rc = bnxt_alloc_flow_stats_info(bp); 411 if (rc) 412 goto alloc_mem_err; 413 } 414 415 return 0; 416 417 alloc_mem_err: 418 bnxt_free_mem(bp, reconfig); 419 return rc; 420 } 421 422 static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id) 423 { 424 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 425 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 426 uint64_t rx_offloads = dev_conf->rxmode.offloads; 427 struct bnxt_rx_queue *rxq; 428 unsigned int j; 429 int rc; 430 431 rc = bnxt_vnic_grp_alloc(bp, vnic); 432 if (rc) 433 goto err_out; 434 435 PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n", 436 vnic_id, vnic, vnic->fw_grp_ids); 437 438 /* populate the fw group table */ 439 bnxt_vnic_ring_grp_populate(bp, vnic); 440 bnxt_vnic_rules_init(vnic); 441 442 rc = bnxt_hwrm_vnic_alloc(bp, vnic); 443 if (rc) 444 goto err_out; 445 446 /* Alloc RSS context only if RSS mode is enabled */ 447 if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) { 448 int j, nr_ctxs 
= bnxt_rss_ctxts(bp); 449 450 /* RSS table size in P5 is 512. 451 * Cap max Rx rings to same value 452 */ 453 if (bp->rx_nr_rings > BNXT_RSS_TBL_SIZE_P5) { 454 PMD_DRV_LOG(ERR, "RxQ cnt %d > reta_size %d\n", 455 bp->rx_nr_rings, BNXT_RSS_TBL_SIZE_P5); 456 goto err_out; 457 } 458 459 rc = 0; 460 for (j = 0; j < nr_ctxs; j++) { 461 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j); 462 if (rc) 463 break; 464 } 465 if (rc) { 466 PMD_DRV_LOG(ERR, 467 "HWRM vnic %d ctx %d alloc failure rc: %x\n", 468 vnic_id, j, rc); 469 goto err_out; 470 } 471 vnic->num_lb_ctxts = nr_ctxs; 472 } 473 474 /* 475 * Firmware sets pf pair in default vnic cfg. If the VLAN strip 476 * setting is not available at this time, it will not be 477 * configured correctly in the CFA. 478 */ 479 if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 480 vnic->vlan_strip = true; 481 else 482 vnic->vlan_strip = false; 483 484 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 485 if (rc) 486 goto err_out; 487 488 rc = bnxt_set_hwrm_vnic_filters(bp, vnic); 489 if (rc) 490 goto err_out; 491 492 for (j = 0; j < bp->rx_num_qs_per_vnic; j++) { 493 rxq = bp->eth_dev->data->rx_queues[j]; 494 495 PMD_DRV_LOG(DEBUG, 496 "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n", 497 j, rxq->vnic, rxq->vnic->fw_grp_ids); 498 499 if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start) 500 vnic->fw_grp_ids[j] = INVALID_HW_RING_ID; 501 } 502 503 PMD_DRV_LOG(DEBUG, "vnic->rx_queue_cnt = %d\n", vnic->rx_queue_cnt); 504 505 rc = bnxt_vnic_rss_configure(bp, vnic); 506 if (rc) 507 goto err_out; 508 509 bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); 510 511 rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 512 (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) ? 513 true : false); 514 if (rc) 515 goto err_out; 516 517 return 0; 518 err_out: 519 PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n", 520 vnic_id, rc); 521 return rc; 522 } 523 524 static int bnxt_register_fc_ctx_mem(struct bnxt *bp) 525 { 526 int rc = 0; 527 528 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_in_tbl.dma, 529 &bp->flow_stat->rx_fc_in_tbl.ctx_id); 530 if (rc) 531 return rc; 532 533 PMD_DRV_LOG(DEBUG, 534 "rx_fc_in_tbl.va = %p rx_fc_in_tbl.dma = %p" 535 " rx_fc_in_tbl.ctx_id = %d\n", 536 bp->flow_stat->rx_fc_in_tbl.va, 537 (void *)((uintptr_t)bp->flow_stat->rx_fc_in_tbl.dma), 538 bp->flow_stat->rx_fc_in_tbl.ctx_id); 539 540 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_out_tbl.dma, 541 &bp->flow_stat->rx_fc_out_tbl.ctx_id); 542 if (rc) 543 return rc; 544 545 PMD_DRV_LOG(DEBUG, 546 "rx_fc_out_tbl.va = %p rx_fc_out_tbl.dma = %p" 547 " rx_fc_out_tbl.ctx_id = %d\n", 548 bp->flow_stat->rx_fc_out_tbl.va, 549 (void *)((uintptr_t)bp->flow_stat->rx_fc_out_tbl.dma), 550 bp->flow_stat->rx_fc_out_tbl.ctx_id); 551 552 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_in_tbl.dma, 553 &bp->flow_stat->tx_fc_in_tbl.ctx_id); 554 if (rc) 555 return rc; 556 557 PMD_DRV_LOG(DEBUG, 558 "tx_fc_in_tbl.va = %p tx_fc_in_tbl.dma = %p" 559 " tx_fc_in_tbl.ctx_id = %d\n", 560 bp->flow_stat->tx_fc_in_tbl.va, 561 (void *)((uintptr_t)bp->flow_stat->tx_fc_in_tbl.dma), 562 bp->flow_stat->tx_fc_in_tbl.ctx_id); 563 564 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_out_tbl.dma, 565 &bp->flow_stat->tx_fc_out_tbl.ctx_id); 566 if (rc) 567 return rc; 568 569 PMD_DRV_LOG(DEBUG, 570 "tx_fc_out_tbl.va = %p tx_fc_out_tbl.dma = %p" 571 " tx_fc_out_tbl.ctx_id = %d\n", 572 bp->flow_stat->tx_fc_out_tbl.va, 573 (void *)((uintptr_t)bp->flow_stat->tx_fc_out_tbl.dma), 574 bp->flow_stat->tx_fc_out_tbl.ctx_id); 575 576 memset(bp->flow_stat->rx_fc_out_tbl.va, 577 0, 578 
bp->flow_stat->rx_fc_out_tbl.size); 579 rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX, 580 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 581 bp->flow_stat->rx_fc_out_tbl.ctx_id, 582 bp->flow_stat->max_fc, 583 true); 584 if (rc) 585 return rc; 586 587 memset(bp->flow_stat->tx_fc_out_tbl.va, 588 0, 589 bp->flow_stat->tx_fc_out_tbl.size); 590 rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX, 591 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 592 bp->flow_stat->tx_fc_out_tbl.ctx_id, 593 bp->flow_stat->max_fc, 594 true); 595 596 return rc; 597 } 598 599 static int bnxt_alloc_ctx_mem_buf(struct bnxt *bp, char *type, size_t size, 600 struct bnxt_ctx_mem_buf_info *ctx) 601 { 602 if (!ctx) 603 return -EINVAL; 604 605 ctx->va = rte_zmalloc_socket(type, size, 0, 606 bp->eth_dev->device->numa_node); 607 if (ctx->va == NULL) 608 return -ENOMEM; 609 rte_mem_lock_page(ctx->va); 610 ctx->size = size; 611 ctx->dma = rte_mem_virt2iova(ctx->va); 612 if (ctx->dma == RTE_BAD_IOVA) 613 return -ENOMEM; 614 615 return 0; 616 } 617 618 static int bnxt_init_fc_ctx_mem(struct bnxt *bp) 619 { 620 struct rte_pci_device *pdev = bp->pdev; 621 char type[RTE_MEMZONE_NAMESIZE]; 622 uint16_t max_fc; 623 int rc = 0; 624 625 max_fc = bp->flow_stat->max_fc; 626 627 sprintf(type, "bnxt_rx_fc_in_" PCI_PRI_FMT, pdev->addr.domain, 628 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 629 /* 4 bytes for each counter-id */ 630 rc = bnxt_alloc_ctx_mem_buf(bp, type, 631 max_fc * 4, 632 &bp->flow_stat->rx_fc_in_tbl); 633 if (rc) 634 return rc; 635 636 sprintf(type, "bnxt_rx_fc_out_" PCI_PRI_FMT, pdev->addr.domain, 637 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 638 /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */ 639 rc = bnxt_alloc_ctx_mem_buf(bp, type, 640 max_fc * 16, 641 &bp->flow_stat->rx_fc_out_tbl); 642 if (rc) 643 return rc; 644 645 sprintf(type, "bnxt_tx_fc_in_" PCI_PRI_FMT, pdev->addr.domain, 646 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 647 /* 4 bytes for each counter-id */ 648 rc = bnxt_alloc_ctx_mem_buf(bp, type, 649 max_fc * 4, 650 &bp->flow_stat->tx_fc_in_tbl); 651 if (rc) 652 return rc; 653 654 sprintf(type, "bnxt_tx_fc_out_" PCI_PRI_FMT, pdev->addr.domain, 655 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 656 /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */ 657 rc = bnxt_alloc_ctx_mem_buf(bp, type, 658 max_fc * 16, 659 &bp->flow_stat->tx_fc_out_tbl); 660 if (rc) 661 return rc; 662 663 rc = bnxt_register_fc_ctx_mem(bp); 664 665 return rc; 666 } 667 668 static int bnxt_init_ctx_mem(struct bnxt *bp) 669 { 670 int rc = 0; 671 672 if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS) || 673 !(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) || 674 !BNXT_FLOW_XSTATS_EN(bp)) 675 return 0; 676 677 rc = bnxt_hwrm_cfa_counter_qcaps(bp, &bp->flow_stat->max_fc); 678 if (rc) 679 return rc; 680 681 rc = bnxt_init_fc_ctx_mem(bp); 682 683 return rc; 684 } 685 686 static inline bool bnxt_force_link_config(struct bnxt *bp) 687 { 688 uint16_t subsystem_device_id = bp->pdev->id.subsystem_device_id; 689 690 switch (subsystem_device_id) { 691 case BROADCOM_DEV_957508_N2100: 692 case BROADCOM_DEV_957414_N225: 693 return true; 694 default: 695 return false; 696 } 697 } 698 699 static int bnxt_update_phy_setting(struct bnxt *bp) 700 { 701 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 702 struct rte_eth_link *link = &bp->eth_dev->data->dev_link; 703 struct rte_eth_link new; 704 uint32_t curr_speed_bit; 705 int rc; 706 707 rc = bnxt_get_hwrm_link_config(bp, &new); 708 if (rc) { 
709 PMD_DRV_LOG(ERR, "Failed to get link settings\n"); 710 return rc; 711 } 712 713 /* convert to speedbit flag */ 714 curr_speed_bit = rte_eth_speed_bitflag((uint32_t)link->link_speed, 1); 715 716 /* 717 * Device is not obliged link down in certain scenarios, even 718 * when forced. When FW does not allow any user other than BMC 719 * to shutdown the port, bnxt_get_hwrm_link_config() call always 720 * returns link up. Force phy update always in that case. 721 */ 722 if (!new.link_status || bnxt_force_link_config(bp) || 723 (BNXT_LINK_SPEEDS_V2(bp) && dev_conf->link_speeds != curr_speed_bit)) { 724 rc = bnxt_set_hwrm_link_config(bp, true); 725 if (rc) { 726 PMD_DRV_LOG(ERR, "Failed to update PHY settings\n"); 727 return rc; 728 } 729 } 730 731 return rc; 732 } 733 734 static void bnxt_free_prev_ring_stats(struct bnxt *bp) 735 { 736 /* tpa v2 devices use ext variant local struct */ 737 if (BNXT_TPA_V2_P7(bp)) { 738 rte_free(bp->prev_rx_ring_stats_ext); 739 rte_free(bp->prev_tx_ring_stats_ext); 740 bp->prev_rx_ring_stats_ext = NULL; 741 bp->prev_tx_ring_stats_ext = NULL; 742 return; 743 } 744 rte_free(bp->prev_rx_ring_stats); 745 rte_free(bp->prev_tx_ring_stats); 746 bp->prev_rx_ring_stats = NULL; 747 bp->prev_tx_ring_stats = NULL; 748 } 749 750 static int bnxt_alloc_prev_ring_ext_stats(struct bnxt *bp) 751 { 752 bp->prev_rx_ring_stats_ext = rte_zmalloc("bnxt_prev_rx_ring_stats_ext", 753 sizeof(struct bnxt_ring_stats_ext) * 754 bp->rx_cp_nr_rings, 755 0); 756 if (bp->prev_rx_ring_stats_ext == NULL) 757 return -ENOMEM; 758 759 bp->prev_tx_ring_stats_ext = rte_zmalloc("bnxt_prev_tx_ring_stats_ext", 760 sizeof(struct bnxt_ring_stats_ext) * 761 bp->tx_cp_nr_rings, 762 0); 763 764 if (bp->tx_cp_nr_rings > 0 && bp->prev_tx_ring_stats_ext == NULL) 765 goto error; 766 767 return 0; 768 769 error: 770 bnxt_free_prev_ring_stats(bp); 771 return -ENOMEM; 772 } 773 774 static int bnxt_alloc_prev_ring_stats(struct bnxt *bp) 775 { 776 if (BNXT_TPA_V2_P7(bp)) 777 return bnxt_alloc_prev_ring_ext_stats(bp); 778 779 bp->prev_rx_ring_stats = rte_zmalloc("bnxt_prev_rx_ring_stats", 780 sizeof(struct bnxt_ring_stats) * 781 bp->rx_cp_nr_rings, 782 0); 783 if (bp->prev_rx_ring_stats == NULL) 784 return -ENOMEM; 785 786 bp->prev_tx_ring_stats = rte_zmalloc("bnxt_prev_tx_ring_stats", 787 sizeof(struct bnxt_ring_stats) * 788 bp->tx_cp_nr_rings, 789 0); 790 if (bp->tx_cp_nr_rings > 0 && bp->prev_tx_ring_stats == NULL) 791 goto error; 792 793 return 0; 794 795 error: 796 bnxt_free_prev_ring_stats(bp); 797 return -ENOMEM; 798 } 799 800 static int bnxt_start_nic(struct bnxt *bp) 801 { 802 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev); 803 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 804 uint32_t intr_vector = 0; 805 uint32_t queue_id, base = BNXT_MISC_VEC_ID; 806 uint32_t vec = BNXT_MISC_VEC_ID; 807 unsigned int i, j; 808 int rc; 809 810 if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) 811 bp->flags |= BNXT_FLAG_JUMBO; 812 else 813 bp->flags &= ~BNXT_FLAG_JUMBO; 814 815 /* P5 does not support ring groups. 816 * But we will use the array to save RSS context IDs. 
817 */ 818 if (BNXT_CHIP_P5_P7(bp)) 819 bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_P5; 820 821 rc = bnxt_vnic_queue_db_init(bp); 822 if (rc) { 823 PMD_DRV_LOG(ERR, "could not allocate vnic db\n"); 824 goto err_out; 825 } 826 827 rc = bnxt_alloc_hwrm_rings(bp); 828 if (rc) { 829 PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc); 830 goto err_out; 831 } 832 833 rc = bnxt_alloc_all_hwrm_ring_grps(bp); 834 if (rc) { 835 PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc); 836 goto err_out; 837 } 838 839 if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY)) 840 goto skip_cosq_cfg; 841 842 for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) { 843 if (bp->rx_cos_queue[i].id != 0xff) { 844 struct bnxt_vnic_info *vnic = &bp->vnic_info[j++]; 845 846 if (!vnic) { 847 PMD_DRV_LOG(ERR, 848 "Num pools more than FW profile\n"); 849 rc = -EINVAL; 850 goto err_out; 851 } 852 vnic->cos_queue_id = bp->rx_cos_queue[i].id; 853 bp->rx_cosq_cnt++; 854 } 855 } 856 857 skip_cosq_cfg: 858 rc = bnxt_mq_rx_configure(bp); 859 if (rc) { 860 PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc); 861 goto err_out; 862 } 863 864 for (j = 0; j < bp->rx_nr_rings; j++) { 865 struct bnxt_rx_queue *rxq = bp->rx_queues[j]; 866 867 if (!rxq->rx_deferred_start) { 868 bp->eth_dev->data->rx_queue_state[j] = 869 RTE_ETH_QUEUE_STATE_STARTED; 870 rxq->rx_started = true; 871 } 872 } 873 874 /* setup the default vnic details*/ 875 bnxt_vnic_queue_db_update_dlft_vnic(bp); 876 877 /* VNIC configuration */ 878 for (i = 0; i < bp->nr_vnics; i++) { 879 rc = bnxt_setup_one_vnic(bp, i); 880 if (rc) 881 goto err_out; 882 } 883 884 for (j = 0; j < bp->tx_nr_rings; j++) { 885 struct bnxt_tx_queue *txq = bp->tx_queues[j]; 886 887 if (!txq->tx_deferred_start) { 888 bp->eth_dev->data->tx_queue_state[j] = 889 RTE_ETH_QUEUE_STATE_STARTED; 890 txq->tx_started = true; 891 } 892 } 893 894 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL); 895 if (rc) { 896 PMD_DRV_LOG(ERR, 897 "HWRM cfa l2 rx mask failure rc: %x\n", rc); 898 goto err_out; 899 } 900 901 /* check and configure queue intr-vector mapping */ 902 if ((rte_intr_cap_multiple(intr_handle) || 903 !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) && 904 bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) { 905 intr_vector = bp->eth_dev->data->nb_rx_queues; 906 PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector); 907 if (intr_vector > bp->rx_cp_nr_rings) { 908 PMD_DRV_LOG(ERR, "At most %d intr queues supported", 909 bp->rx_cp_nr_rings); 910 return -ENOTSUP; 911 } 912 rc = rte_intr_efd_enable(intr_handle, intr_vector); 913 if (rc) 914 return rc; 915 } 916 917 if (rte_intr_dp_is_en(intr_handle)) { 918 if (rte_intr_vec_list_alloc(intr_handle, "intr_vec", 919 bp->eth_dev->data->nb_rx_queues)) { 920 PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues" 921 " intr_vec", bp->eth_dev->data->nb_rx_queues); 922 rc = -ENOMEM; 923 goto err_out; 924 } 925 PMD_DRV_LOG(DEBUG, "intr_handle->nb_efd = %d " 926 "intr_handle->max_intr = %d\n", 927 rte_intr_nb_efd_get(intr_handle), 928 rte_intr_max_intr_get(intr_handle)); 929 for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues; 930 queue_id++) { 931 rte_intr_vec_list_index_set(intr_handle, 932 queue_id, vec + BNXT_RX_VEC_START); 933 if (vec < base + rte_intr_nb_efd_get(intr_handle) 934 - 1) 935 vec++; 936 } 937 } 938 939 /* enable uio/vfio intr/eventfd mapping */ 940 rc = rte_intr_enable(intr_handle); 941 #ifndef RTE_EXEC_ENV_FREEBSD 942 /* In FreeBSD OS, nic_uio driver does not support interrupts */ 943 if (rc) 944 goto err_out; 945 #endif 946 
947 rc = bnxt_update_phy_setting(bp); 948 if (rc) 949 goto err_out; 950 951 bp->mark_table = rte_zmalloc("bnxt_mark_table", BNXT_MARK_TABLE_SZ, 0); 952 if (!bp->mark_table) 953 PMD_DRV_LOG(ERR, "Allocation of mark table failed\n"); 954 955 return 0; 956 957 err_out: 958 /* Some of the error status returned by FW may not be from errno.h */ 959 if (rc > 0) 960 rc = -EIO; 961 962 return rc; 963 } 964 965 static int bnxt_shutdown_nic(struct bnxt *bp) 966 { 967 bnxt_free_all_hwrm_resources(bp); 968 bnxt_free_all_filters(bp); 969 bnxt_free_all_vnics(bp); 970 bnxt_vnic_queue_db_deinit(bp); 971 return 0; 972 } 973 974 /* 975 * Device configuration and status function 976 */ 977 978 static uint32_t bnxt_get_speed_capabilities_v2(struct bnxt *bp) 979 { 980 uint32_t link_speed = 0; 981 uint32_t speed_capa = 0; 982 983 if (bp->link_info == NULL) 984 return 0; 985 986 link_speed = bp->link_info->support_speeds2; 987 988 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_1GB) 989 speed_capa |= RTE_ETH_LINK_SPEED_1G; 990 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_10GB) 991 speed_capa |= RTE_ETH_LINK_SPEED_10G; 992 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_25GB) 993 speed_capa |= RTE_ETH_LINK_SPEED_25G; 994 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_40GB) 995 speed_capa |= RTE_ETH_LINK_SPEED_40G; 996 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_50GB) 997 speed_capa |= RTE_ETH_LINK_SPEED_50G; 998 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_100GB) 999 speed_capa |= RTE_ETH_LINK_SPEED_100G; 1000 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_50GB_PAM4_56) 1001 speed_capa |= RTE_ETH_LINK_SPEED_50G; 1002 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_100GB_PAM4_56) 1003 speed_capa |= RTE_ETH_LINK_SPEED_100G; 1004 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_200GB_PAM4_56) 1005 speed_capa |= RTE_ETH_LINK_SPEED_200G; 1006 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_400GB_PAM4_56) 1007 speed_capa |= RTE_ETH_LINK_SPEED_400G; 1008 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_100GB_PAM4_112) 1009 speed_capa |= RTE_ETH_LINK_SPEED_100G; 1010 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_200GB_PAM4_112) 1011 speed_capa |= RTE_ETH_LINK_SPEED_200G; 1012 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_400GB_PAM4_112) 1013 speed_capa |= RTE_ETH_LINK_SPEED_400G; 1014 1015 if (bp->link_info->auto_mode == 1016 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE) 1017 speed_capa |= RTE_ETH_LINK_SPEED_FIXED; 1018 1019 return speed_capa; 1020 } 1021 1022 uint32_t bnxt_get_speed_capabilities(struct bnxt *bp) 1023 { 1024 uint32_t pam4_link_speed = 0; 1025 uint32_t link_speed = 0; 1026 uint32_t speed_capa = 0; 1027 1028 if (bp->link_info == NULL) 1029 return 0; 1030 1031 /* P7 uses speeds_v2 */ 1032 if (BNXT_LINK_SPEEDS_V2(bp)) 1033 return bnxt_get_speed_capabilities_v2(bp); 1034 1035 link_speed = bp->link_info->support_speeds; 1036 1037 /* If PAM4 is configured, use PAM4 supported speed */ 1038 if (bp->link_info->support_pam4_speeds > 0) 1039 pam4_link_speed = bp->link_info->support_pam4_speeds; 1040 1041 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB) 1042 speed_capa |= RTE_ETH_LINK_SPEED_100M; 1043 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD) 1044 speed_capa |= RTE_ETH_LINK_SPEED_100M_HD; 1045 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB) 1046 speed_capa |= RTE_ETH_LINK_SPEED_1G; 1047 if (link_speed & 
HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB) 1048 speed_capa |= RTE_ETH_LINK_SPEED_2_5G; 1049 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB) 1050 speed_capa |= RTE_ETH_LINK_SPEED_10G; 1051 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB) 1052 speed_capa |= RTE_ETH_LINK_SPEED_20G; 1053 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB) 1054 speed_capa |= RTE_ETH_LINK_SPEED_25G; 1055 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB) 1056 speed_capa |= RTE_ETH_LINK_SPEED_40G; 1057 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB) 1058 speed_capa |= RTE_ETH_LINK_SPEED_50G; 1059 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB) 1060 speed_capa |= RTE_ETH_LINK_SPEED_100G; 1061 if (pam4_link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G) 1062 speed_capa |= RTE_ETH_LINK_SPEED_50G; 1063 if (pam4_link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G) 1064 speed_capa |= RTE_ETH_LINK_SPEED_100G; 1065 if (pam4_link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G) 1066 speed_capa |= RTE_ETH_LINK_SPEED_200G; 1067 1068 if (bp->link_info->auto_mode == 1069 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE) 1070 speed_capa |= RTE_ETH_LINK_SPEED_FIXED; 1071 1072 return speed_capa; 1073 } 1074 1075 uint64_t bnxt_eth_rss_support(struct bnxt *bp) 1076 { 1077 uint64_t support; 1078 1079 support = RTE_ETH_RSS_IPV4 | 1080 RTE_ETH_RSS_NONFRAG_IPV4_TCP | 1081 RTE_ETH_RSS_NONFRAG_IPV4_UDP | 1082 RTE_ETH_RSS_IPV6 | 1083 RTE_ETH_RSS_NONFRAG_IPV6_TCP | 1084 RTE_ETH_RSS_NONFRAG_IPV6_UDP | 1085 RTE_ETH_RSS_LEVEL_MASK; 1086 1087 if (bp->vnic_cap_flags & BNXT_VNIC_CAP_CHKSM_MODE) 1088 support |= RTE_ETH_RSS_IPV4_CHKSUM | 1089 RTE_ETH_RSS_L4_CHKSUM; 1090 if (bp->vnic_cap_flags & BNXT_VNIC_CAP_IPV6_FLOW_LABEL_MODE) 1091 support |= RTE_ETH_RSS_IPV6_FLOW_LABEL; 1092 if (bp->vnic_cap_flags & BNXT_VNIC_CAP_AH_SPI_CAP) 1093 support |= RTE_ETH_RSS_AH; 1094 if (bp->vnic_cap_flags & BNXT_VNIC_CAP_ESP_SPI_CAP) 1095 support |= RTE_ETH_RSS_ESP; 1096 1097 return support; 1098 } 1099 1100 static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, 1101 struct rte_eth_dev_info *dev_info) 1102 { 1103 struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device); 1104 struct bnxt *bp = eth_dev->data->dev_private; 1105 uint16_t max_vnics, i, j, vpool, vrxq; 1106 unsigned int max_rx_rings; 1107 int rc; 1108 1109 rc = is_bnxt_in_error(bp); 1110 if (rc) 1111 return rc; 1112 1113 /* MAC Specifics */ 1114 dev_info->max_mac_addrs = RTE_MIN(bp->max_l2_ctx, RTE_ETH_NUM_RECEIVE_MAC_ADDR); 1115 dev_info->max_hash_mac_addrs = 0; 1116 1117 /* PF/VF specifics */ 1118 if (BNXT_PF(bp)) 1119 dev_info->max_vfs = pdev->max_vfs; 1120 1121 max_rx_rings = bnxt_max_rings(bp); 1122 /* For the sake of symmetry, max_rx_queues = max_tx_queues */ 1123 dev_info->max_rx_queues = max_rx_rings; 1124 dev_info->max_tx_queues = max_rx_rings; 1125 dev_info->reta_size = bnxt_rss_hash_tbl_size(bp); 1126 dev_info->hash_key_size = HW_HASH_KEY_SIZE; 1127 max_vnics = bp->max_vnics; 1128 1129 /* MTU specifics */ 1130 dev_info->min_mtu = RTE_ETHER_MIN_MTU; 1131 dev_info->max_mtu = BNXT_MAX_MTU; 1132 1133 /* Fast path specifics */ 1134 dev_info->min_rx_bufsize = 1; 1135 dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN; 1136 1137 dev_info->rx_offload_capa = bnxt_get_rx_port_offloads(bp); 1138 dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; 1139 dev_info->tx_offload_capa = bnxt_get_tx_port_offloads(bp) | 1140 dev_info->tx_queue_offload_capa; 1141 
dev_info->flow_type_rss_offloads = bnxt_eth_rss_support(bp); 1142 dev_info->rss_algo_capa = RTE_ETH_HASH_ALGO_CAPA_MASK(DEFAULT) | 1143 RTE_ETH_HASH_ALGO_CAPA_MASK(TOEPLITZ); 1144 1145 if (BNXT_CHIP_P7(bp)) 1146 dev_info->rss_algo_capa |= RTE_ETH_HASH_ALGO_CAPA_MASK(SIMPLE_XOR); 1147 1148 dev_info->speed_capa = bnxt_get_speed_capabilities(bp); 1149 dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | 1150 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; 1151 dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP; 1152 1153 dev_info->default_rxconf = (struct rte_eth_rxconf) { 1154 .rx_thresh = { 1155 .pthresh = 8, 1156 .hthresh = 8, 1157 .wthresh = 0, 1158 }, 1159 .rx_free_thresh = 32, 1160 .rx_drop_en = BNXT_DEFAULT_RX_DROP_EN, 1161 }; 1162 1163 dev_info->default_txconf = (struct rte_eth_txconf) { 1164 .tx_thresh = { 1165 .pthresh = 32, 1166 .hthresh = 0, 1167 .wthresh = 0, 1168 }, 1169 .tx_free_thresh = 32, 1170 .tx_rs_thresh = 32, 1171 }; 1172 1173 dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC; 1174 dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC; 1175 dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC; 1176 dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC; 1177 1178 if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) { 1179 dev_info->switch_info.name = eth_dev->device->name; 1180 dev_info->switch_info.domain_id = bp->switch_domain_id; 1181 dev_info->switch_info.port_id = 1182 BNXT_PF(bp) ? BNXT_SWITCH_PORT_ID_PF : 1183 BNXT_SWITCH_PORT_ID_TRUSTED_VF; 1184 } 1185 1186 /* 1187 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim 1188 * need further investigation. 1189 */ 1190 1191 /* VMDq resources */ 1192 vpool = 64; /* RTE_ETH_64_POOLS */ 1193 vrxq = 128; /* RTE_ETH_VMDQ_DCB_NUM_QUEUES */ 1194 for (i = 0; i < 4; vpool >>= 1, i++) { 1195 if (max_vnics > vpool) { 1196 for (j = 0; j < 5; vrxq >>= 1, j++) { 1197 if (dev_info->max_rx_queues > vrxq) { 1198 if (vpool > vrxq) 1199 vpool = vrxq; 1200 goto found; 1201 } 1202 } 1203 /* Not enough resources to support VMDq */ 1204 break; 1205 } 1206 } 1207 /* Not enough resources to support VMDq */ 1208 vpool = 0; 1209 vrxq = 0; 1210 found: 1211 dev_info->max_vmdq_pools = vpool; 1212 dev_info->vmdq_queue_num = vrxq; 1213 1214 dev_info->vmdq_pool_base = 0; 1215 dev_info->vmdq_queue_base = 0; 1216 1217 dev_info->err_handle_mode = RTE_ETH_ERROR_HANDLE_MODE_PROACTIVE; 1218 1219 return 0; 1220 } 1221 1222 /* Configure the device based on the configuration provided */ 1223 static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev) 1224 { 1225 struct bnxt *bp = eth_dev->data->dev_private; 1226 uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; 1227 struct rte_eth_rss_conf *rss_conf = ð_dev->data->dev_conf.rx_adv_conf.rss_conf; 1228 int rc; 1229 1230 bp->rx_queues = (void *)eth_dev->data->rx_queues; 1231 bp->tx_queues = (void *)eth_dev->data->tx_queues; 1232 bp->tx_nr_rings = eth_dev->data->nb_tx_queues; 1233 bp->rx_nr_rings = eth_dev->data->nb_rx_queues; 1234 1235 rc = is_bnxt_in_error(bp); 1236 if (rc) 1237 return rc; 1238 1239 if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) { 1240 rc = bnxt_hwrm_check_vf_rings(bp); 1241 if (rc) { 1242 PMD_DRV_LOG(ERR, "HWRM insufficient resources\n"); 1243 return -ENOSPC; 1244 } 1245 1246 /* If a resource has already been allocated - in this case 1247 * it is the async completion ring, free it. Reallocate it after 1248 * resource reservation. This will ensure the resource counts 1249 * are calculated correctly. 
1250 */ 1251 1252 pthread_mutex_lock(&bp->def_cp_lock); 1253 1254 if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) { 1255 bnxt_disable_int(bp); 1256 bnxt_free_cp_ring(bp, bp->async_cp_ring); 1257 } 1258 1259 rc = bnxt_hwrm_func_reserve_vf_resc(bp, false); 1260 if (rc) { 1261 PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc); 1262 pthread_mutex_unlock(&bp->def_cp_lock); 1263 return -ENOSPC; 1264 } 1265 1266 if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) { 1267 rc = bnxt_alloc_async_cp_ring(bp); 1268 if (rc) { 1269 pthread_mutex_unlock(&bp->def_cp_lock); 1270 return rc; 1271 } 1272 bnxt_enable_int(bp); 1273 } 1274 1275 pthread_mutex_unlock(&bp->def_cp_lock); 1276 } 1277 1278 /* Inherit new configurations */ 1279 if (eth_dev->data->nb_rx_queues > bp->max_rx_rings || 1280 eth_dev->data->nb_tx_queues > bp->max_tx_rings || 1281 eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues 1282 + BNXT_NUM_ASYNC_CPR(bp) > bp->max_cp_rings || 1283 eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues > 1284 bp->max_stat_ctx) 1285 goto resource_error; 1286 1287 if (BNXT_HAS_RING_GRPS(bp) && 1288 (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps) 1289 goto resource_error; 1290 1291 if (!(eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) && 1292 bp->max_vnics < eth_dev->data->nb_rx_queues) 1293 goto resource_error; 1294 1295 bp->rx_cp_nr_rings = bp->rx_nr_rings; 1296 bp->tx_cp_nr_rings = bp->tx_nr_rings; 1297 1298 if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) 1299 rx_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; 1300 eth_dev->data->dev_conf.rxmode.offloads = rx_offloads; 1301 1302 /* application provides the hash key to program */ 1303 if (rss_conf->rss_key != NULL) { 1304 if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE) 1305 PMD_DRV_LOG(WARNING, "port %u RSS key len must be %d bytes long", 1306 eth_dev->data->port_id, HW_HASH_KEY_SIZE); 1307 else 1308 memcpy(bp->rss_conf.rss_key, rss_conf->rss_key, HW_HASH_KEY_SIZE); 1309 } 1310 bp->rss_conf.rss_key_len = HW_HASH_KEY_SIZE; 1311 bp->rss_conf.rss_hf = rss_conf->rss_hf; 1312 1313 bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu); 1314 1315 return 0; 1316 1317 resource_error: 1318 PMD_DRV_LOG(ERR, 1319 "Insufficient resources to support requested config\n"); 1320 PMD_DRV_LOG(ERR, 1321 "Num Queues Requested: Tx %d, Rx %d\n", 1322 eth_dev->data->nb_tx_queues, 1323 eth_dev->data->nb_rx_queues); 1324 PMD_DRV_LOG(ERR, 1325 "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n", 1326 bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings, 1327 bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics); 1328 return -ENOSPC; 1329 } 1330 1331 void bnxt_print_link_info(struct rte_eth_dev *eth_dev) 1332 { 1333 struct rte_eth_link *link = ð_dev->data->dev_link; 1334 1335 if (link->link_status) 1336 PMD_DRV_LOG(DEBUG, "Port %d Link Up - speed %u Mbps - %s\n", 1337 eth_dev->data->port_id, 1338 (uint32_t)link->link_speed, 1339 (link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 1340 ("full-duplex") : ("half-duplex\n")); 1341 else 1342 PMD_DRV_LOG(INFO, "Port %d Link Down\n", 1343 eth_dev->data->port_id); 1344 } 1345 1346 /* 1347 * Determine whether the current configuration requires support for scattered 1348 * receive; return 1 if scattered receive is required and 0 if not. 
1349 */ 1350 static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev) 1351 { 1352 uint32_t overhead = BNXT_MAX_PKT_LEN - BNXT_MAX_MTU; 1353 uint16_t buf_size; 1354 int i; 1355 1356 if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) 1357 return 1; 1358 1359 if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) 1360 return 1; 1361 1362 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { 1363 struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i]; 1364 1365 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) - 1366 RTE_PKTMBUF_HEADROOM); 1367 if (eth_dev->data->mtu + overhead > buf_size) 1368 return 1; 1369 } 1370 return 0; 1371 } 1372 1373 static eth_rx_burst_t 1374 bnxt_receive_function(struct rte_eth_dev *eth_dev) 1375 { 1376 struct bnxt *bp = eth_dev->data->dev_private; 1377 1378 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) 1379 /* Vector mode receive cannot be enabled if scattered rx is in use. */ 1380 if (eth_dev->data->scattered_rx) 1381 goto use_scalar_rx; 1382 1383 /* 1384 * Vector mode receive cannot be enabled if Truflow is enabled or if 1385 * asynchronous completions and receive completions can be placed in 1386 * the same completion ring. 1387 */ 1388 if ((BNXT_TRUFLOW_EN(bp) && !BNXT_CHIP_P7(bp)) || 1389 !BNXT_NUM_ASYNC_CPR(bp)) 1390 goto use_scalar_rx; 1391 1392 /* 1393 * Vector mode receive cannot be enabled if any receive offloads outside 1394 * a limited subset have been enabled. 1395 */ 1396 if (eth_dev->data->dev_conf.rxmode.offloads & 1397 ~(RTE_ETH_RX_OFFLOAD_VLAN_STRIP | 1398 RTE_ETH_RX_OFFLOAD_KEEP_CRC | 1399 RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | 1400 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | 1401 RTE_ETH_RX_OFFLOAD_TCP_CKSUM | 1402 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | 1403 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | 1404 RTE_ETH_RX_OFFLOAD_RSS_HASH | 1405 RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) 1406 goto use_scalar_rx; 1407 1408 if (bp->ieee_1588) 1409 goto use_scalar_rx; 1410 1411 #if defined(RTE_ARCH_X86) 1412 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 && 1413 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1) { 1414 PMD_DRV_LOG(INFO, 1415 "Using AVX2 vector mode receive for port %d\n", 1416 eth_dev->data->port_id); 1417 bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE; 1418 if (bnxt_compressed_rx_cqe_mode_enabled(bp)) 1419 return bnxt_crx_pkts_vec_avx2; 1420 return bnxt_recv_pkts_vec_avx2; 1421 } 1422 #endif 1423 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) { 1424 PMD_DRV_LOG(INFO, 1425 "Using SSE vector mode receive for port %d\n", 1426 eth_dev->data->port_id); 1427 bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE; 1428 if (bnxt_compressed_rx_cqe_mode_enabled(bp)) { 1429 #if defined(RTE_ARCH_ARM64) 1430 goto use_scalar_rx; 1431 #else 1432 return bnxt_crx_pkts_vec; 1433 #endif 1434 } 1435 return bnxt_recv_pkts_vec; 1436 } 1437 1438 use_scalar_rx: 1439 PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n", 1440 eth_dev->data->port_id); 1441 PMD_DRV_LOG(INFO, 1442 "Port %d scatter: %d rx offload: %" PRIX64 "\n", 1443 eth_dev->data->port_id, 1444 eth_dev->data->scattered_rx, 1445 eth_dev->data->dev_conf.rxmode.offloads); 1446 #endif 1447 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 1448 return bnxt_recv_pkts; 1449 } 1450 1451 static eth_tx_burst_t 1452 bnxt_transmit_function(__rte_unused struct rte_eth_dev *eth_dev) 1453 { 1454 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) 1455 uint64_t offloads = eth_dev->data->dev_conf.txmode.offloads; 1456 struct bnxt *bp = eth_dev->data->dev_private; 1457 1458 /* 1459 
* Vector mode transmit can be enabled only if not using scatter rx 1460 * or tx offloads. 1461 */ 1462 if (eth_dev->data->scattered_rx || 1463 (offloads & ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) || 1464 (BNXT_TRUFLOW_EN(bp) && !BNXT_CHIP_P7(bp)) || 1465 bp->ieee_1588) 1466 goto use_scalar_tx; 1467 1468 #if defined(RTE_ARCH_X86) 1469 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 && 1470 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1) { 1471 PMD_DRV_LOG(INFO, 1472 "Using AVX2 vector mode transmit for port %d\n", 1473 eth_dev->data->port_id); 1474 return bnxt_xmit_pkts_vec_avx2; 1475 } 1476 #endif 1477 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) { 1478 PMD_DRV_LOG(INFO, 1479 "Using SSE vector mode transmit for port %d\n", 1480 eth_dev->data->port_id); 1481 return bnxt_xmit_pkts_vec; 1482 } 1483 1484 use_scalar_tx: 1485 PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n", 1486 eth_dev->data->port_id); 1487 PMD_DRV_LOG(INFO, 1488 "Port %d scatter: %d tx offload: %" PRIX64 "\n", 1489 eth_dev->data->port_id, 1490 eth_dev->data->scattered_rx, 1491 offloads); 1492 #endif 1493 return bnxt_xmit_pkts; 1494 } 1495 1496 static int bnxt_handle_if_change_status(struct bnxt *bp) 1497 { 1498 int rc; 1499 1500 /* Since fw has undergone a reset and lost all contexts, 1501 * set fatal flag to not issue hwrm during cleanup 1502 */ 1503 bp->flags |= BNXT_FLAG_FATAL_ERROR; 1504 bnxt_uninit_resources(bp, true); 1505 1506 /* clear fatal flag so that re-init happens */ 1507 bp->flags &= ~BNXT_FLAG_FATAL_ERROR; 1508 1509 rc = bnxt_check_fw_ready(bp); 1510 if (rc) 1511 return rc; 1512 1513 rc = bnxt_init_resources(bp, true); 1514 1515 bp->flags &= ~BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE; 1516 1517 return rc; 1518 } 1519 1520 static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev) 1521 { 1522 struct bnxt *bp = eth_dev->data->dev_private; 1523 int rc = 0; 1524 1525 if (!BNXT_SINGLE_PF(bp)) 1526 return -ENOTSUP; 1527 1528 if (!bp->link_info->link_up) 1529 rc = bnxt_set_hwrm_link_config(bp, true); 1530 if (!rc) 1531 eth_dev->data->dev_link.link_status = 1; 1532 1533 bnxt_print_link_info(eth_dev); 1534 return rc; 1535 } 1536 1537 static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev) 1538 { 1539 struct bnxt *bp = eth_dev->data->dev_private; 1540 1541 if (!BNXT_SINGLE_PF(bp)) 1542 return -ENOTSUP; 1543 1544 eth_dev->data->dev_link.link_status = 0; 1545 bnxt_set_hwrm_link_config(bp, false); 1546 bp->link_info->link_up = 0; 1547 1548 return 0; 1549 } 1550 1551 static void bnxt_free_switch_domain(struct bnxt *bp) 1552 { 1553 int rc = 0; 1554 1555 if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) 1556 return; 1557 1558 rc = rte_eth_switch_domain_free(bp->switch_domain_id); 1559 if (rc) 1560 PMD_DRV_LOG(ERR, "free switch domain:%d fail: %d\n", 1561 bp->switch_domain_id, rc); 1562 } 1563 1564 static void bnxt_ptp_get_current_time(void *arg) 1565 { 1566 struct bnxt *bp = arg; 1567 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 1568 int rc; 1569 1570 rc = is_bnxt_in_error(bp); 1571 if (rc) 1572 return; 1573 1574 if (!ptp) 1575 return; 1576 1577 rte_spinlock_lock(&ptp->ptp_lock); 1578 ptp->old_time = ptp->current_time; 1579 bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, 1580 &ptp->current_time); 1581 rte_spinlock_unlock(&ptp->ptp_lock); 1582 rc = rte_eal_alarm_set(US_PER_S, bnxt_ptp_get_current_time, (void *)bp); 1583 if (rc != 0) { 1584 PMD_DRV_LOG(ERR, "Failed to re-schedule PTP alarm\n"); 1585 bp->flags2 &= ~BNXT_FLAGS2_PTP_ALARM_SCHEDULED; 1586 } 1587 } 1588 1589 static int 
bnxt_schedule_ptp_alarm(struct bnxt *bp) 1590 { 1591 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 1592 int rc; 1593 1594 if (bp->flags2 & BNXT_FLAGS2_PTP_ALARM_SCHEDULED) 1595 return 0; 1596 1597 rte_spinlock_lock(&ptp->ptp_lock); 1598 bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, 1599 &ptp->current_time); 1600 ptp->old_time = ptp->current_time; 1601 rte_spinlock_unlock(&ptp->ptp_lock); 1602 1603 1604 rc = rte_eal_alarm_set(US_PER_S, bnxt_ptp_get_current_time, (void *)bp); 1605 return rc; 1606 } 1607 1608 static void bnxt_cancel_ptp_alarm(struct bnxt *bp) 1609 { 1610 if (bp->flags2 & BNXT_FLAGS2_PTP_ALARM_SCHEDULED) { 1611 rte_eal_alarm_cancel(bnxt_ptp_get_current_time, (void *)bp); 1612 bp->flags2 &= ~BNXT_FLAGS2_PTP_ALARM_SCHEDULED; 1613 } 1614 } 1615 1616 static void bnxt_ptp_stop(struct bnxt *bp) 1617 { 1618 bnxt_cancel_ptp_alarm(bp); 1619 bp->flags2 &= ~BNXT_FLAGS2_PTP_TIMESYNC_ENABLED; 1620 } 1621 1622 static int bnxt_ptp_start(struct bnxt *bp) 1623 { 1624 int rc; 1625 1626 rc = bnxt_schedule_ptp_alarm(bp); 1627 if (rc != 0) { 1628 PMD_DRV_LOG(ERR, "Failed to schedule PTP alarm\n"); 1629 } else { 1630 bp->flags2 |= BNXT_FLAGS2_PTP_TIMESYNC_ENABLED; 1631 bp->flags2 |= BNXT_FLAGS2_PTP_ALARM_SCHEDULED; 1632 } 1633 1634 return rc; 1635 } 1636 1637 static int bnxt_dev_stop(struct rte_eth_dev *eth_dev) 1638 { 1639 struct bnxt *bp = eth_dev->data->dev_private; 1640 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1641 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 1642 struct rte_eth_link link; 1643 uint16_t i; 1644 int ret; 1645 1646 eth_dev->data->dev_started = 0; 1647 1648 /* Prevent crashes when queues are still in use */ 1649 bnxt_stop_rxtx(eth_dev); 1650 1651 bnxt_disable_int(bp); 1652 1653 /* disable uio/vfio intr/eventfd mapping */ 1654 rte_intr_disable(intr_handle); 1655 1656 /* Stop the child representors for this device */ 1657 ret = bnxt_rep_stop_all(bp); 1658 if (ret != 0) 1659 return ret; 1660 1661 /* delete the bnxt ULP port details */ 1662 if (bnxt_enable_ulp(bp)) 1663 bnxt_ulp_port_deinit(bp); 1664 1665 bnxt_cancel_fw_health_check(bp); 1666 1667 if (BNXT_P5_PTP_TIMESYNC_ENABLED(bp)) 1668 bnxt_cancel_ptp_alarm(bp); 1669 1670 /* Do not bring link down during reset recovery */ 1671 if (!is_bnxt_in_error(bp)) { 1672 bnxt_dev_set_link_down_op(eth_dev); 1673 /* Wait for link to be reset */ 1674 if (BNXT_SINGLE_PF(bp)) 1675 rte_delay_ms(500); 1676 /* clear the recorded link status */ 1677 memset(&link, 0, sizeof(link)); 1678 rte_eth_linkstatus_set(eth_dev, &link); 1679 } 1680 1681 /* Clean queue intr-vector mapping */ 1682 rte_intr_efd_disable(intr_handle); 1683 rte_intr_vec_list_free(intr_handle); 1684 1685 bnxt_hwrm_port_clr_stats(bp); 1686 bnxt_free_tx_mbufs(bp); 1687 bnxt_free_rx_mbufs(bp); 1688 /* Process any remaining notifications in default completion queue */ 1689 bnxt_int_handler(eth_dev); 1690 bnxt_shutdown_nic(bp); 1691 bnxt_hwrm_if_change(bp, false); 1692 1693 bnxt_free_prev_ring_stats(bp); 1694 rte_free(bp->mark_table); 1695 bp->mark_table = NULL; 1696 1697 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 1698 bp->rx_cosq_cnt = 0; 1699 /* All filters are deleted on a port stop. 
*/ 1700 if (BNXT_FLOW_XSTATS_EN(bp)) 1701 bp->flow_stat->flow_count = 0; 1702 1703 eth_dev->data->scattered_rx = 0; 1704 1705 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) 1706 eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; 1707 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) 1708 eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; 1709 1710 return 0; 1711 } 1712 1713 /* Unload the driver, release resources */ 1714 int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev) 1715 { 1716 struct bnxt *bp = eth_dev->data->dev_private; 1717 1718 pthread_mutex_lock(&bp->err_recovery_lock); 1719 if (bp->flags & BNXT_FLAG_FW_RESET) { 1720 PMD_DRV_LOG(ERR, 1721 "Adapter recovering from error..Please retry\n"); 1722 pthread_mutex_unlock(&bp->err_recovery_lock); 1723 return -EAGAIN; 1724 } 1725 pthread_mutex_unlock(&bp->err_recovery_lock); 1726 1727 return bnxt_dev_stop(eth_dev); 1728 } 1729 1730 int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) 1731 { 1732 struct bnxt *bp = eth_dev->data->dev_private; 1733 uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; 1734 int vlan_mask = 0; 1735 int rc, retry_cnt = BNXT_IF_CHANGE_RETRY_COUNT; 1736 1737 if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) 1738 PMD_DRV_LOG(ERR, 1739 "RxQ cnt %d > RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n", 1740 bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS); 1741 1742 do { 1743 rc = bnxt_hwrm_if_change(bp, true); 1744 if (rc == 0 || rc != -EAGAIN) 1745 break; 1746 1747 rte_delay_ms(BNXT_IF_CHANGE_RETRY_INTERVAL); 1748 } while (retry_cnt--); 1749 1750 if (rc) 1751 return rc; 1752 1753 if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) { 1754 rc = bnxt_handle_if_change_status(bp); 1755 if (rc) 1756 return rc; 1757 } 1758 1759 bnxt_enable_int(bp); 1760 1761 eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev); 1762 1763 rc = bnxt_start_nic(bp); 1764 if (rc) 1765 goto error; 1766 1767 rc = bnxt_alloc_prev_ring_stats(bp); 1768 if (rc) 1769 goto error; 1770 1771 eth_dev->data->dev_started = 1; 1772 1773 bnxt_link_update_op(eth_dev, 0); 1774 1775 if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) 1776 vlan_mask |= RTE_ETH_VLAN_FILTER_MASK; 1777 if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 1778 vlan_mask |= RTE_ETH_VLAN_STRIP_MASK; 1779 rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask); 1780 if (rc) 1781 goto error; 1782 1783 /* Initialize bnxt ULP port details */ 1784 if (bnxt_enable_ulp(bp)) { 1785 rc = bnxt_ulp_port_init(bp); 1786 if (rc) 1787 goto error; 1788 } 1789 1790 eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev); 1791 eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev); 1792 1793 bnxt_schedule_fw_health_check(bp); 1794 1795 if (BNXT_P5_PTP_TIMESYNC_ENABLED(bp)) 1796 bnxt_schedule_ptp_alarm(bp); 1797 1798 return 0; 1799 1800 error: 1801 bnxt_dev_stop(eth_dev); 1802 return rc; 1803 } 1804 1805 static void 1806 bnxt_uninit_locks(struct bnxt *bp) 1807 { 1808 pthread_mutex_destroy(&bp->flow_lock); 1809 pthread_mutex_destroy(&bp->def_cp_lock); 1810 pthread_mutex_destroy(&bp->health_check_lock); 1811 pthread_mutex_destroy(&bp->err_recovery_lock); 1812 if (bp->rep_info) 1813 pthread_mutex_destroy(&bp->rep_info->vfr_start_lock); 1814 } 1815 1816 static void bnxt_drv_uninit(struct bnxt *bp) 1817 { 1818 bnxt_free_leds_info(bp); 1819 bnxt_free_cos_queues(bp); 1820 bnxt_free_link_info(bp); 1821 bnxt_free_parent_info(bp); 1822 bnxt_uninit_locks(bp); 1823 bnxt_free_rep_info(bp); 1824 1825 rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone); 1826 bp->tx_mem_zone = NULL; 1827 
rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone); 1828 bp->rx_mem_zone = NULL; 1829 1830 bnxt_free_vf_info(bp); 1831 bnxt_free_pf_info(bp); 1832 1833 rte_free(bp->grp_info); 1834 bp->grp_info = NULL; 1835 } 1836 1837 static int bnxt_dev_close_op(struct rte_eth_dev *eth_dev) 1838 { 1839 struct bnxt *bp = eth_dev->data->dev_private; 1840 int ret = 0; 1841 1842 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1843 return 0; 1844 1845 pthread_mutex_lock(&bp->err_recovery_lock); 1846 if (bp->flags & BNXT_FLAG_FW_RESET) { 1847 PMD_DRV_LOG(ERR, 1848 "Adapter recovering from error...Please retry\n"); 1849 pthread_mutex_unlock(&bp->err_recovery_lock); 1850 return -EAGAIN; 1851 } 1852 pthread_mutex_unlock(&bp->err_recovery_lock); 1853 1854 /* cancel the recovery handler before remove dev */ 1855 rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp); 1856 rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp); 1857 bnxt_cancel_fc_thread(bp); 1858 rte_eal_alarm_cancel(bnxt_handle_vf_cfg_change, (void *)bp); 1859 1860 if (eth_dev->data->dev_started) 1861 ret = bnxt_dev_stop(eth_dev); 1862 1863 bnxt_uninit_resources(bp, false); 1864 1865 bnxt_drv_uninit(bp); 1866 1867 return ret; 1868 } 1869 1870 static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev, 1871 uint32_t index) 1872 { 1873 struct bnxt *bp = eth_dev->data->dev_private; 1874 uint64_t pool_mask = eth_dev->data->mac_pool_sel[index]; 1875 struct bnxt_vnic_info *vnic; 1876 struct bnxt_filter_info *filter, *temp_filter; 1877 uint32_t i; 1878 1879 if (is_bnxt_in_error(bp)) 1880 return; 1881 1882 /* 1883 * Loop through all VNICs from the specified filter flow pools to 1884 * remove the corresponding MAC addr filter 1885 */ 1886 for (i = 0; i < bp->nr_vnics; i++) { 1887 if (!(pool_mask & (1ULL << i))) 1888 continue; 1889 1890 vnic = &bp->vnic_info[i]; 1891 filter = STAILQ_FIRST(&vnic->filter); 1892 while (filter) { 1893 temp_filter = STAILQ_NEXT(filter, next); 1894 if (filter->mac_index == index) { 1895 STAILQ_REMOVE(&vnic->filter, filter, 1896 bnxt_filter_info, next); 1897 bnxt_hwrm_clear_l2_filter(bp, filter); 1898 bnxt_free_filter(bp, filter); 1899 } 1900 filter = temp_filter; 1901 } 1902 } 1903 } 1904 1905 static int bnxt_add_mac_filter(struct bnxt *bp, struct bnxt_vnic_info *vnic, 1906 struct rte_ether_addr *mac_addr, uint32_t index, 1907 uint32_t pool) 1908 { 1909 struct bnxt_filter_info *filter; 1910 int rc = 0; 1911 1912 /* Attach requested MAC address to the new l2_filter */ 1913 STAILQ_FOREACH(filter, &vnic->filter, next) { 1914 if (filter->mac_index == index) { 1915 PMD_DRV_LOG(DEBUG, 1916 "MAC addr already existed for pool %d\n", 1917 pool); 1918 return 0; 1919 } 1920 } 1921 1922 filter = bnxt_alloc_filter(bp); 1923 if (!filter) { 1924 PMD_DRV_LOG(ERR, "L2 filter alloc failed\n"); 1925 return -ENODEV; 1926 } 1927 1928 /* bnxt_alloc_filter copies default MAC to filter->l2_addr. 
So, 1929 * if the MAC that's been programmed now is a different one, then, 1930 * copy that addr to filter->l2_addr 1931 */ 1932 if (mac_addr) 1933 memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN); 1934 filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; 1935 1936 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); 1937 if (!rc) { 1938 filter->mac_index = index; 1939 if (filter->mac_index == 0) 1940 STAILQ_INSERT_HEAD(&vnic->filter, filter, next); 1941 else 1942 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 1943 } else { 1944 bnxt_free_filter(bp, filter); 1945 } 1946 1947 return rc; 1948 } 1949 1950 static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev, 1951 struct rte_ether_addr *mac_addr, 1952 uint32_t index, uint32_t pool) 1953 { 1954 struct bnxt *bp = eth_dev->data->dev_private; 1955 struct bnxt_vnic_info *vnic = &bp->vnic_info[pool]; 1956 int rc = 0; 1957 1958 rc = is_bnxt_in_error(bp); 1959 if (rc) 1960 return rc; 1961 1962 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) { 1963 PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n"); 1964 return -ENOTSUP; 1965 } 1966 1967 if (!vnic) { 1968 PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool); 1969 return -EINVAL; 1970 } 1971 1972 /* Filter settings will get applied when port is started */ 1973 if (!eth_dev->data->dev_started) 1974 return 0; 1975 1976 rc = bnxt_add_mac_filter(bp, vnic, mac_addr, index, pool); 1977 1978 return rc; 1979 } 1980 1981 int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete) 1982 { 1983 int rc = 0; 1984 struct bnxt *bp = eth_dev->data->dev_private; 1985 struct rte_eth_link new; 1986 int cnt = wait_to_complete ? BNXT_MAX_LINK_WAIT_CNT : 1987 BNXT_MIN_LINK_WAIT_CNT; 1988 1989 rc = is_bnxt_in_error(bp); 1990 if (rc) 1991 return rc; 1992 1993 memset(&new, 0, sizeof(new)); 1994 1995 if (bp->link_info == NULL) 1996 goto out; 1997 1998 /* Only single function PF can bring the phy down. 1999 * In certain scenarios, device is not obliged link down even when forced. 2000 * When port is stopped, report link down in those cases. 
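 * (i.e. skip the firmware link query and leave the reported link down)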
2001 */ 2002 if (!eth_dev->data->dev_started && 2003 (!BNXT_SINGLE_PF(bp) || bnxt_force_link_config(bp))) 2004 goto out; 2005 2006 do { 2007 /* Retrieve link info from hardware */ 2008 rc = bnxt_get_hwrm_link_config(bp, &new); 2009 if (rc) { 2010 new.link_speed = RTE_ETH_LINK_SPEED_100M; 2011 new.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 2012 PMD_DRV_LOG(ERR, 2013 "Failed to retrieve link rc = 0x%x!\n", rc); 2014 goto out; 2015 } 2016 2017 if (!wait_to_complete || new.link_status) 2018 break; 2019 2020 rte_delay_ms(BNXT_LINK_WAIT_INTERVAL); 2021 } while (cnt--); 2022 2023 out: 2024 /* Timed out or success */ 2025 if (new.link_status != eth_dev->data->dev_link.link_status || 2026 new.link_speed != eth_dev->data->dev_link.link_speed) { 2027 rte_eth_linkstatus_set(eth_dev, &new); 2028 bnxt_print_link_info(eth_dev); 2029 } 2030 2031 return rc; 2032 } 2033 2034 static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev) 2035 { 2036 struct bnxt *bp = eth_dev->data->dev_private; 2037 struct bnxt_vnic_info *vnic; 2038 uint32_t old_flags; 2039 int rc; 2040 2041 rc = is_bnxt_in_error(bp); 2042 if (rc) 2043 return rc; 2044 2045 /* Filter settings will get applied when port is started */ 2046 if (!eth_dev->data->dev_started) 2047 return 0; 2048 2049 if (bp->vnic_info == NULL) 2050 return 0; 2051 2052 vnic = bnxt_get_default_vnic(bp); 2053 2054 old_flags = vnic->flags; 2055 vnic->flags |= BNXT_VNIC_INFO_PROMISC; 2056 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 2057 if (rc != 0) 2058 vnic->flags = old_flags; 2059 2060 return rc; 2061 } 2062 2063 static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev) 2064 { 2065 struct bnxt *bp = eth_dev->data->dev_private; 2066 struct bnxt_vnic_info *vnic; 2067 uint32_t old_flags; 2068 int rc; 2069 2070 rc = is_bnxt_in_error(bp); 2071 if (rc) 2072 return rc; 2073 2074 /* Filter settings will get applied when port is started */ 2075 if (!eth_dev->data->dev_started) 2076 return 0; 2077 2078 if (bp->vnic_info == NULL) 2079 return 0; 2080 2081 vnic = bnxt_get_default_vnic(bp); 2082 2083 old_flags = vnic->flags; 2084 vnic->flags &= ~BNXT_VNIC_INFO_PROMISC; 2085 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 2086 if (rc != 0) 2087 vnic->flags = old_flags; 2088 2089 return rc; 2090 } 2091 2092 static int bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev) 2093 { 2094 struct bnxt *bp = eth_dev->data->dev_private; 2095 struct bnxt_vnic_info *vnic; 2096 uint32_t old_flags; 2097 int rc; 2098 2099 rc = is_bnxt_in_error(bp); 2100 if (rc) 2101 return rc; 2102 2103 /* Filter settings will get applied when port is started */ 2104 if (!eth_dev->data->dev_started) 2105 return 0; 2106 2107 if (bp->vnic_info == NULL) 2108 return 0; 2109 2110 vnic = bnxt_get_default_vnic(bp); 2111 2112 old_flags = vnic->flags; 2113 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; 2114 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 2115 if (rc != 0) 2116 vnic->flags = old_flags; 2117 2118 return rc; 2119 } 2120 2121 static int bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev) 2122 { 2123 struct bnxt *bp = eth_dev->data->dev_private; 2124 struct bnxt_vnic_info *vnic; 2125 uint32_t old_flags; 2126 int rc; 2127 2128 rc = is_bnxt_in_error(bp); 2129 if (rc) 2130 return rc; 2131 2132 /* Filter settings will get applied when port is started */ 2133 if (!eth_dev->data->dev_started) 2134 return 0; 2135 2136 if (bp->vnic_info == NULL) 2137 return 0; 2138 2139 vnic = bnxt_get_default_vnic(bp); 2140 2141 old_flags = vnic->flags; 2142 vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; 
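/* Apply the updated Rx mask in firmware; restore the old flags on failure. */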
2143 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 2144 if (rc != 0) 2145 vnic->flags = old_flags; 2146 2147 return rc; 2148 } 2149 2150 /* Return bnxt_rx_queue pointer corresponding to a given rxq. */ 2151 static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid) 2152 { 2153 if (qid >= bp->rx_nr_rings) 2154 return NULL; 2155 2156 return bp->eth_dev->data->rx_queues[qid]; 2157 } 2158 2159 /* Return rxq corresponding to a given rss table ring/group ID. */ 2160 static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr) 2161 { 2162 struct bnxt_rx_queue *rxq; 2163 unsigned int i; 2164 2165 if (!BNXT_HAS_RING_GRPS(bp)) { 2166 for (i = 0; i < bp->rx_nr_rings; i++) { 2167 rxq = bp->eth_dev->data->rx_queues[i]; 2168 if (rxq->rx_ring->rx_ring_struct->fw_ring_id == fwr) 2169 return rxq->index; 2170 } 2171 } else { 2172 for (i = 0; i < bp->rx_nr_rings; i++) { 2173 if (bp->grp_info[i].fw_grp_id == fwr) 2174 return i; 2175 } 2176 } 2177 2178 return INVALID_HW_RING_ID; 2179 } 2180 2181 static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev, 2182 struct rte_eth_rss_reta_entry64 *reta_conf, 2183 uint16_t reta_size) 2184 { 2185 struct bnxt *bp = eth_dev->data->dev_private; 2186 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 2187 struct bnxt_vnic_info *vnic = bnxt_get_default_vnic(bp); 2188 uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp); 2189 uint16_t idx, sft; 2190 int i, rc; 2191 2192 rc = is_bnxt_in_error(bp); 2193 if (rc) 2194 return rc; 2195 2196 if (!vnic->rss_table) 2197 return -EINVAL; 2198 2199 if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) 2200 return -EINVAL; 2201 2202 if (reta_size != tbl_size) { 2203 PMD_DRV_LOG(ERR, "The configured hash table lookup size " 2204 "(%d) must equal the size supported by the hardware " 2205 "(%d)\n", reta_size, tbl_size); 2206 return -EINVAL; 2207 } 2208 2209 if (bnxt_vnic_reta_config_update(bp, vnic, reta_conf, reta_size)) { 2210 PMD_DRV_LOG(ERR, "Error in setting the reta config\n"); 2211 return -EINVAL; 2212 } 2213 for (i = 0; i < reta_size; i++) { 2214 struct bnxt_rx_queue *rxq; 2215 2216 idx = i / RTE_ETH_RETA_GROUP_SIZE; 2217 sft = i % RTE_ETH_RETA_GROUP_SIZE; 2218 2219 if (!(reta_conf[idx].mask & (1ULL << sft))) 2220 continue; 2221 2222 rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]); 2223 if (BNXT_CHIP_P5_P7(bp)) { 2224 vnic->rss_table[i * 2] = 2225 rxq->rx_ring->rx_ring_struct->fw_ring_id; 2226 vnic->rss_table[i * 2 + 1] = 2227 rxq->cp_ring->cp_ring_struct->fw_ring_id; 2228 } else { 2229 vnic->rss_table[i] = 2230 vnic->fw_grp_ids[reta_conf[idx].reta[sft]]; 2231 } 2232 } 2233 rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic); 2234 return rc; 2235 } 2236 2237 static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev, 2238 struct rte_eth_rss_reta_entry64 *reta_conf, 2239 uint16_t reta_size) 2240 { 2241 struct bnxt *bp = eth_dev->data->dev_private; 2242 struct bnxt_vnic_info *vnic = bnxt_get_default_vnic(bp); 2243 uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp); 2244 uint16_t idx, sft, i; 2245 int rc; 2246 2247 rc = is_bnxt_in_error(bp); 2248 if (rc) 2249 return rc; 2250 2251 if (!vnic) 2252 return -EINVAL; 2253 if (!vnic->rss_table) 2254 return -EINVAL; 2255 2256 if (reta_size != tbl_size) { 2257 PMD_DRV_LOG(ERR, "The configured hash table lookup size " 2258 "(%d) must equal the size supported by the hardware " 2259 "(%d)\n", reta_size, tbl_size); 2260 return -EINVAL; 2261 } 2262 2263 for (idx = 0, i = 0; i < reta_size; i++) { 2264 idx = i / RTE_ETH_RETA_GROUP_SIZE; 2265 sft = i % RTE_ETH_RETA_GROUP_SIZE; 
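/* Only entries selected in the caller's mask are reported. */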
2266 2267 if (reta_conf[idx].mask & (1ULL << sft)) { 2268 uint16_t qid; 2269 2270 if (BNXT_CHIP_P5_P7(bp)) 2271 qid = bnxt_rss_to_qid(bp, 2272 vnic->rss_table[i * 2]); 2273 else 2274 qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]); 2275 2276 if (qid == INVALID_HW_RING_ID) { 2277 PMD_DRV_LOG(ERR, "Inv. entry in rss table.\n"); 2278 return -EINVAL; 2279 } 2280 reta_conf[idx].reta[sft] = qid; 2281 } 2282 } 2283 2284 return 0; 2285 } 2286 2287 static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, 2288 struct rte_eth_rss_conf *rss_conf) 2289 { 2290 struct bnxt *bp = eth_dev->data->dev_private; 2291 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 2292 struct bnxt_vnic_info *vnic; 2293 int rc; 2294 2295 rc = is_bnxt_in_error(bp); 2296 if (rc) 2297 return rc; 2298 2299 /* 2300 * If RSS enablement were different than dev_configure, 2301 * then return -EINVAL 2302 */ 2303 if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) { 2304 if (!rss_conf->rss_hf) 2305 PMD_DRV_LOG(ERR, "Hash type NONE\n"); 2306 } else { 2307 if (rss_conf->rss_hf & bnxt_eth_rss_support(bp)) 2308 return -EINVAL; 2309 } 2310 2311 /* Update the default RSS VNIC(s) */ 2312 vnic = bnxt_get_default_vnic(bp); 2313 vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf); 2314 vnic->hash_mode = 2315 bnxt_rte_to_hwrm_hash_level(bp, rss_conf->rss_hf, 2316 RTE_ETH_RSS_LEVEL(rss_conf->rss_hf)); 2317 rc = bnxt_rte_eth_to_hwrm_ring_select_mode(bp, rss_conf->rss_hf, vnic); 2318 if (rc != 0) 2319 return rc; 2320 2321 /* Cache the hash function */ 2322 bp->rss_conf.rss_hf = rss_conf->rss_hf; 2323 2324 /* Cache the hash function */ 2325 bp->rss_conf.rss_hf = rss_conf->rss_hf; 2326 2327 /* 2328 * If hashkey is not specified, use the previously configured 2329 * hashkey 2330 */ 2331 if (!rss_conf->rss_key) 2332 goto rss_config; 2333 2334 if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE) { 2335 PMD_DRV_LOG(ERR, 2336 "Invalid hashkey length, should be %d bytes\n", 2337 HW_HASH_KEY_SIZE); 2338 return -EINVAL; 2339 } 2340 memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len); 2341 2342 /* Cache the hash key */ 2343 memcpy(bp->rss_conf.rss_key, rss_conf->rss_key, HW_HASH_KEY_SIZE); 2344 2345 rss_config: 2346 rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic); 2347 return rc; 2348 } 2349 2350 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev, 2351 struct rte_eth_rss_conf *rss_conf) 2352 { 2353 struct bnxt *bp = eth_dev->data->dev_private; 2354 struct bnxt_vnic_info *vnic = bnxt_get_default_vnic(bp); 2355 int len, rc; 2356 2357 rc = is_bnxt_in_error(bp); 2358 if (rc) 2359 return rc; 2360 2361 /* Return the RSS configuration of the default VNIC. */ 2362 if (vnic && vnic->rss_hash_key) { 2363 if (rss_conf->rss_key) { 2364 len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ? 
2365 rss_conf->rss_key_len : HW_HASH_KEY_SIZE; 2366 memcpy(rss_conf->rss_key, vnic->rss_hash_key, len); 2367 } 2368 bnxt_hwrm_rss_to_rte_hash_conf(vnic, &rss_conf->rss_hf); 2369 rss_conf->rss_hf |= 2370 bnxt_hwrm_to_rte_rss_level(bp, vnic->hash_mode); 2371 } else { 2372 rss_conf->rss_hf = 0; 2373 } 2374 return 0; 2375 } 2376 2377 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev, 2378 struct rte_eth_fc_conf *fc_conf) 2379 { 2380 struct bnxt *bp = dev->data->dev_private; 2381 struct rte_eth_link link_info; 2382 int rc; 2383 2384 rc = is_bnxt_in_error(bp); 2385 if (rc) 2386 return rc; 2387 2388 rc = bnxt_get_hwrm_link_config(bp, &link_info); 2389 if (rc) 2390 return rc; 2391 2392 memset(fc_conf, 0, sizeof(*fc_conf)); 2393 if (bp->link_info->auto_pause) 2394 fc_conf->autoneg = 1; 2395 switch (bp->link_info->pause) { 2396 case 0: 2397 fc_conf->mode = RTE_ETH_FC_NONE; 2398 break; 2399 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX: 2400 fc_conf->mode = RTE_ETH_FC_TX_PAUSE; 2401 break; 2402 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX: 2403 fc_conf->mode = RTE_ETH_FC_RX_PAUSE; 2404 break; 2405 case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX | 2406 HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX): 2407 fc_conf->mode = RTE_ETH_FC_FULL; 2408 break; 2409 } 2410 return 0; 2411 } 2412 2413 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev, 2414 struct rte_eth_fc_conf *fc_conf) 2415 { 2416 struct bnxt *bp = dev->data->dev_private; 2417 int rc; 2418 2419 rc = is_bnxt_in_error(bp); 2420 if (rc) 2421 return rc; 2422 2423 if (!BNXT_SINGLE_PF(bp)) { 2424 PMD_DRV_LOG(ERR, 2425 "Flow Control Settings cannot be modified on VF or on shared PF\n"); 2426 return -ENOTSUP; 2427 } 2428 2429 switch (fc_conf->mode) { 2430 case RTE_ETH_FC_NONE: 2431 bp->link_info->auto_pause = 0; 2432 bp->link_info->force_pause = 0; 2433 break; 2434 case RTE_ETH_FC_RX_PAUSE: 2435 if (fc_conf->autoneg) { 2436 bp->link_info->auto_pause = 2437 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 2438 bp->link_info->force_pause = 0; 2439 } else { 2440 bp->link_info->auto_pause = 0; 2441 bp->link_info->force_pause = 2442 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 2443 } 2444 break; 2445 case RTE_ETH_FC_TX_PAUSE: 2446 if (fc_conf->autoneg) { 2447 bp->link_info->auto_pause = 2448 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX; 2449 bp->link_info->force_pause = 0; 2450 } else { 2451 bp->link_info->auto_pause = 0; 2452 bp->link_info->force_pause = 2453 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX; 2454 } 2455 break; 2456 case RTE_ETH_FC_FULL: 2457 if (fc_conf->autoneg) { 2458 bp->link_info->auto_pause = 2459 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX | 2460 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 2461 bp->link_info->force_pause = 0; 2462 } else { 2463 bp->link_info->auto_pause = 0; 2464 bp->link_info->force_pause = 2465 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX | 2466 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 2467 } 2468 break; 2469 } 2470 return bnxt_set_hwrm_link_config(bp, true); 2471 } 2472 2473 /* Add UDP tunneling port */ 2474 int 2475 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev, 2476 struct rte_eth_udp_tunnel *udp_tunnel) 2477 { 2478 struct bnxt *bp = eth_dev->data->dev_private; 2479 uint16_t tunnel_type = 0; 2480 int rc = 0; 2481 2482 rc = is_bnxt_in_error(bp); 2483 if (rc) 2484 return rc; 2485 2486 switch (udp_tunnel->prot_type) { 2487 case RTE_ETH_TUNNEL_TYPE_VXLAN: 2488 if (bp->vxlan_port_cnt) { 2489 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 2490 udp_tunnel->udp_port); 2491 if (bp->vxlan_port != udp_tunnel->udp_port) { 2492 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 
2493 return -ENOSPC; 2494 } 2495 bp->vxlan_port_cnt++; 2496 return 0; 2497 } 2498 tunnel_type = 2499 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN; 2500 break; 2501 case RTE_ETH_TUNNEL_TYPE_GENEVE: 2502 if (bp->geneve_port_cnt) { 2503 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 2504 udp_tunnel->udp_port); 2505 if (bp->geneve_port != udp_tunnel->udp_port) { 2506 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 2507 return -ENOSPC; 2508 } 2509 bp->geneve_port_cnt++; 2510 return 0; 2511 } 2512 tunnel_type = 2513 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE; 2514 break; 2515 case RTE_ETH_TUNNEL_TYPE_ECPRI: 2516 if (bp->ecpri_port_cnt) { 2517 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 2518 udp_tunnel->udp_port); 2519 if (bp->ecpri_port != udp_tunnel->udp_port) { 2520 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 2521 return -ENOSPC; 2522 } 2523 bp->ecpri_port_cnt++; 2524 return 0; 2525 } 2526 tunnel_type = 2527 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ECPRI; 2528 break; 2529 default: 2530 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); 2531 return -ENOTSUP; 2532 } 2533 rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port, 2534 tunnel_type); 2535 2536 if (rc != 0) 2537 return rc; 2538 2539 if (tunnel_type == 2540 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN) 2541 bp->vxlan_port_cnt++; 2542 2543 if (tunnel_type == 2544 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE) 2545 bp->geneve_port_cnt++; 2546 2547 if (tunnel_type == 2548 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ECPRI) 2549 bp->ecpri_port_cnt++; 2550 2551 return rc; 2552 } 2553 2554 int 2555 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev, 2556 struct rte_eth_udp_tunnel *udp_tunnel) 2557 { 2558 struct bnxt *bp = eth_dev->data->dev_private; 2559 uint16_t tunnel_type = 0; 2560 uint16_t port = 0; 2561 int rc = 0; 2562 2563 rc = is_bnxt_in_error(bp); 2564 if (rc) 2565 return rc; 2566 2567 switch (udp_tunnel->prot_type) { 2568 case RTE_ETH_TUNNEL_TYPE_VXLAN: 2569 if (!bp->vxlan_port_cnt) { 2570 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 2571 return -EINVAL; 2572 } 2573 if (bp->vxlan_port != udp_tunnel->udp_port) { 2574 PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n", 2575 udp_tunnel->udp_port, bp->vxlan_port); 2576 return -EINVAL; 2577 } 2578 if (--bp->vxlan_port_cnt) 2579 return 0; 2580 2581 tunnel_type = 2582 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN; 2583 port = bp->vxlan_fw_dst_port_id; 2584 break; 2585 case RTE_ETH_TUNNEL_TYPE_GENEVE: 2586 if (!bp->geneve_port_cnt) { 2587 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 2588 return -EINVAL; 2589 } 2590 if (bp->geneve_port != udp_tunnel->udp_port) { 2591 PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n", 2592 udp_tunnel->udp_port, bp->geneve_port); 2593 return -EINVAL; 2594 } 2595 if (--bp->geneve_port_cnt) 2596 return 0; 2597 2598 tunnel_type = 2599 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE; 2600 port = bp->geneve_fw_dst_port_id; 2601 break; 2602 case RTE_ETH_TUNNEL_TYPE_ECPRI: 2603 if (!bp->ecpri_port_cnt) { 2604 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 2605 return -EINVAL; 2606 } 2607 if (bp->ecpri_port != udp_tunnel->udp_port) { 2608 PMD_DRV_LOG(ERR, "Req Port: %d. 
Configured port: %d\n", 2609 udp_tunnel->udp_port, bp->ecpri_port); 2610 return -EINVAL; 2611 } 2612 if (--bp->ecpri_port_cnt) 2613 return 0; 2614 2615 tunnel_type = 2616 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_ECPRI; 2617 port = bp->ecpri_fw_dst_port_id; 2618 break; 2619 default: 2620 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); 2621 return -ENOTSUP; 2622 } 2623 2624 rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type); 2625 return rc; 2626 } 2627 2628 static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 2629 { 2630 struct bnxt_filter_info *filter; 2631 struct bnxt_vnic_info *vnic; 2632 int rc = 0; 2633 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; 2634 2635 vnic = bnxt_get_default_vnic(bp); 2636 filter = STAILQ_FIRST(&vnic->filter); 2637 while (filter) { 2638 /* Search for this matching MAC+VLAN filter */ 2639 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) { 2640 /* Delete the filter */ 2641 rc = bnxt_hwrm_clear_l2_filter(bp, filter); 2642 if (rc) 2643 return rc; 2644 STAILQ_REMOVE(&vnic->filter, filter, 2645 bnxt_filter_info, next); 2646 bnxt_free_filter(bp, filter); 2647 PMD_DRV_LOG(INFO, 2648 "Deleted vlan filter for %d\n", 2649 vlan_id); 2650 return 0; 2651 } 2652 filter = STAILQ_NEXT(filter, next); 2653 } 2654 return -ENOENT; 2655 } 2656 2657 static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 2658 { 2659 struct bnxt_filter_info *filter; 2660 struct bnxt_vnic_info *vnic; 2661 int rc = 0; 2662 uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN | 2663 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK; 2664 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; 2665 2666 /* Implementation notes on the use of VNIC in this command: 2667 * 2668 * By default, these filters belong to default vnic for the function. 2669 * Once these filters are set up, only destination VNIC can be modified. 2670 * If the destination VNIC is not specified in this command, 2671 * then the HWRM shall only create an l2 context id. 2672 */ 2673 2674 vnic = bnxt_get_default_vnic(bp); 2675 filter = STAILQ_FIRST(&vnic->filter); 2676 /* Check if the VLAN has already been added */ 2677 while (filter) { 2678 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) 2679 return -EEXIST; 2680 2681 filter = STAILQ_NEXT(filter, next); 2682 } 2683 2684 /* No match found. Alloc a fresh filter and issue the L2_FILTER_ALLOC 2685 * command to create MAC+VLAN filter with the right flags, enables set. 2686 */ 2687 filter = bnxt_alloc_filter(bp); 2688 if (!filter) { 2689 PMD_DRV_LOG(ERR, 2690 "MAC/VLAN filter alloc failed\n"); 2691 return -ENOMEM; 2692 } 2693 /* MAC + VLAN ID filter */ 2694 /* If l2_ivlan == 0 and l2_ivlan_mask != 0, only 2695 * untagged packets are received 2696 * 2697 * If l2_ivlan != 0 and l2_ivlan_mask != 0, untagged 2698 * packets and only the programmed vlan's packets are received 2699 */ 2700 filter->l2_ivlan = vlan_id; 2701 filter->l2_ivlan_mask = 0x0FFF; 2702 filter->enables |= en; 2703 filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; 2704 2705 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); 2706 if (rc) { 2707 /* Free the newly allocated filter as we were 2708 * not able to create the filter in hardware. 
2709 */ 2710 bnxt_free_filter(bp, filter); 2711 return rc; 2712 } 2713 2714 filter->mac_index = 0; 2715 /* Add this new filter to the list */ 2716 if (vlan_id == 0) 2717 STAILQ_INSERT_HEAD(&vnic->filter, filter, next); 2718 else 2719 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 2720 2721 PMD_DRV_LOG(INFO, 2722 "Added Vlan filter for %d\n", vlan_id); 2723 return rc; 2724 } 2725 2726 static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev, 2727 uint16_t vlan_id, int on) 2728 { 2729 struct bnxt *bp = eth_dev->data->dev_private; 2730 int rc; 2731 2732 rc = is_bnxt_in_error(bp); 2733 if (rc) 2734 return rc; 2735 2736 if (!eth_dev->data->dev_started) { 2737 PMD_DRV_LOG(ERR, "port must be started before setting vlan\n"); 2738 return -EINVAL; 2739 } 2740 2741 /* These operations apply to ALL existing MAC/VLAN filters */ 2742 if (on) 2743 return bnxt_add_vlan_filter(bp, vlan_id); 2744 else 2745 return bnxt_del_vlan_filter(bp, vlan_id); 2746 } 2747 2748 static int bnxt_del_dflt_mac_filter(struct bnxt *bp, 2749 struct bnxt_vnic_info *vnic) 2750 { 2751 struct bnxt_filter_info *filter; 2752 int rc; 2753 2754 filter = STAILQ_FIRST(&vnic->filter); 2755 while (filter) { 2756 if (filter->mac_index == 0 && 2757 !memcmp(filter->l2_addr, bp->mac_addr, 2758 RTE_ETHER_ADDR_LEN)) { 2759 rc = bnxt_hwrm_clear_l2_filter(bp, filter); 2760 if (!rc) { 2761 STAILQ_REMOVE(&vnic->filter, filter, 2762 bnxt_filter_info, next); 2763 bnxt_free_filter(bp, filter); 2764 } 2765 return rc; 2766 } 2767 filter = STAILQ_NEXT(filter, next); 2768 } 2769 return 0; 2770 } 2771 2772 static int 2773 bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads) 2774 { 2775 struct bnxt_vnic_info *vnic; 2776 unsigned int i; 2777 int rc; 2778 2779 vnic = bnxt_get_default_vnic(bp); 2780 if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) { 2781 /* Remove any VLAN filters programmed */ 2782 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++) 2783 bnxt_del_vlan_filter(bp, i); 2784 2785 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0); 2786 if (rc) 2787 return rc; 2788 } else { 2789 /* Default filter will allow packets that match the 2790 * dest mac. 
So, it has to be deleted, otherwise, we 2791 * will endup receiving vlan packets for which the 2792 * filter is not programmed, when hw-vlan-filter 2793 * configuration is ON 2794 */ 2795 bnxt_del_dflt_mac_filter(bp, vnic); 2796 /* This filter will allow only untagged packets */ 2797 bnxt_add_vlan_filter(bp, 0); 2798 } 2799 PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n", 2800 !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)); 2801 2802 return 0; 2803 } 2804 2805 static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id) 2806 { 2807 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 2808 unsigned int i; 2809 int rc; 2810 2811 /* Destroy vnic filters and vnic */ 2812 if (bp->eth_dev->data->dev_conf.rxmode.offloads & 2813 RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { 2814 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++) 2815 bnxt_del_vlan_filter(bp, i); 2816 } 2817 bnxt_del_dflt_mac_filter(bp, vnic); 2818 2819 rc = bnxt_hwrm_vnic_ctx_free(bp, vnic); 2820 if (rc) 2821 return rc; 2822 2823 rc = bnxt_hwrm_vnic_free(bp, vnic); 2824 if (rc) 2825 return rc; 2826 2827 rte_free(vnic->fw_grp_ids); 2828 vnic->fw_grp_ids = NULL; 2829 2830 vnic->rx_queue_cnt = 0; 2831 2832 return 0; 2833 } 2834 2835 static int 2836 bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads) 2837 { 2838 struct bnxt_vnic_info *vnic = bnxt_get_default_vnic(bp); 2839 int rc; 2840 2841 /* Destroy, recreate and reconfigure the default vnic */ 2842 rc = bnxt_free_one_vnic(bp, bp->vnic_queue_db.dflt_vnic_id); 2843 if (rc) 2844 return rc; 2845 2846 /* setup the default vnic details*/ 2847 bnxt_vnic_queue_db_update_dlft_vnic(bp); 2848 2849 rc = bnxt_setup_one_vnic(bp, bp->vnic_queue_db.dflt_vnic_id); 2850 if (rc) 2851 return rc; 2852 2853 if (bp->eth_dev->data->dev_conf.rxmode.offloads & 2854 RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { 2855 rc = bnxt_add_vlan_filter(bp, 0); 2856 if (rc) 2857 return rc; 2858 rc = bnxt_restore_vlan_filters(bp); 2859 if (rc) 2860 return rc; 2861 } else { 2862 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0); 2863 if (rc) 2864 return rc; 2865 } 2866 2867 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 2868 if (rc) 2869 return rc; 2870 2871 PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n", 2872 !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)); 2873 2874 return rc; 2875 } 2876 2877 static int 2878 bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask) 2879 { 2880 uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; 2881 struct bnxt *bp = dev->data->dev_private; 2882 int rc; 2883 2884 rc = is_bnxt_in_error(bp); 2885 if (rc) 2886 return rc; 2887 2888 /* Filter settings will get applied when port is started */ 2889 if (!dev->data->dev_started) 2890 return 0; 2891 2892 if (mask & RTE_ETH_VLAN_FILTER_MASK) { 2893 /* Enable or disable VLAN filtering */ 2894 rc = bnxt_config_vlan_hw_filter(bp, rx_offloads); 2895 if (rc) 2896 return rc; 2897 } 2898 2899 if (mask & RTE_ETH_VLAN_STRIP_MASK) { 2900 /* Enable or disable VLAN stripping */ 2901 rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads); 2902 if (rc) 2903 return rc; 2904 } 2905 2906 if (mask & RTE_ETH_VLAN_EXTEND_MASK) { 2907 if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) 2908 PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n"); 2909 else 2910 PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n"); 2911 } 2912 2913 return 0; 2914 } 2915 2916 static int 2917 bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type, 2918 uint16_t tpid) 2919 { 2920 struct bnxt *bp = dev->data->dev_private; 2921 int qinq = dev->data->dev_conf.rxmode.offloads & 2922 
RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 2923 2924 if (vlan_type != RTE_ETH_VLAN_TYPE_INNER && 2925 vlan_type != RTE_ETH_VLAN_TYPE_OUTER) { 2926 PMD_DRV_LOG(ERR, 2927 "Unsupported vlan type."); 2928 return -EINVAL; 2929 } 2930 if (!qinq) { 2931 PMD_DRV_LOG(ERR, 2932 "QinQ not enabled. Needs to be ON as we can " 2933 "accelerate only outer vlan\n"); 2934 return -EINVAL; 2935 } 2936 2937 if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) { 2938 switch (tpid) { 2939 case RTE_ETHER_TYPE_QINQ: 2940 bp->outer_tpid_bd = 2941 TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8; 2942 break; 2943 case RTE_ETHER_TYPE_VLAN: 2944 bp->outer_tpid_bd = 2945 TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100; 2946 break; 2947 case RTE_ETHER_TYPE_QINQ1: 2948 bp->outer_tpid_bd = 2949 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100; 2950 break; 2951 case RTE_ETHER_TYPE_QINQ2: 2952 bp->outer_tpid_bd = 2953 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200; 2954 break; 2955 case RTE_ETHER_TYPE_QINQ3: 2956 bp->outer_tpid_bd = 2957 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300; 2958 break; 2959 default: 2960 PMD_DRV_LOG(ERR, "Invalid TPID: %x\n", tpid); 2961 return -EINVAL; 2962 } 2963 bp->outer_tpid_bd |= tpid; 2964 PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd); 2965 } else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) { 2966 PMD_DRV_LOG(ERR, 2967 "Can accelerate only outer vlan in QinQ\n"); 2968 return -EINVAL; 2969 } 2970 2971 return 0; 2972 } 2973 2974 static int 2975 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, 2976 struct rte_ether_addr *addr) 2977 { 2978 struct bnxt *bp = dev->data->dev_private; 2979 /* Default Filter is tied to VNIC 0 */ 2980 struct bnxt_vnic_info *vnic = bnxt_get_default_vnic(bp); 2981 int rc; 2982 2983 rc = is_bnxt_in_error(bp); 2984 if (rc) 2985 return rc; 2986 2987 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 2988 return -EPERM; 2989 2990 if (rte_is_zero_ether_addr(addr)) 2991 return -EINVAL; 2992 2993 /* Filter settings will get applied when port is started */ 2994 if (!dev->data->dev_started) 2995 return 0; 2996 2997 /* Check if the requested MAC is already added */ 2998 if (memcmp(addr, bp->mac_addr, RTE_ETHER_ADDR_LEN) == 0) 2999 return 0; 3000 3001 /* Destroy filter and re-create it */ 3002 bnxt_del_dflt_mac_filter(bp, vnic); 3003 3004 memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN); 3005 if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { 3006 /* This filter will allow only untagged packets */ 3007 rc = bnxt_add_vlan_filter(bp, 0); 3008 } else { 3009 rc = bnxt_add_mac_filter(bp, vnic, addr, 0, 0); 3010 } 3011 3012 PMD_DRV_LOG(DEBUG, "Set MAC addr\n"); 3013 return rc; 3014 } 3015 3016 static int 3017 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, 3018 struct rte_ether_addr *mc_addr_set, 3019 uint32_t nb_mc_addr) 3020 { 3021 struct bnxt *bp = eth_dev->data->dev_private; 3022 struct bnxt_vnic_info *vnic; 3023 uint32_t i = 0; 3024 int rc; 3025 3026 rc = is_bnxt_in_error(bp); 3027 if (rc) 3028 return rc; 3029 3030 vnic = bnxt_get_default_vnic(bp); 3031 3032 bp->nb_mc_addr = nb_mc_addr; 3033 3034 if (nb_mc_addr > BNXT_MAX_MC_ADDRS) { 3035 PMD_DRV_LOG(INFO, "Number of Mcast MACs added (%u) exceeded Max supported (%u)\n", 3036 nb_mc_addr, BNXT_MAX_MC_ADDRS); 3037 PMD_DRV_LOG(INFO, "Turning on Mcast promiscuous mode\n"); 3038 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; 3039 goto allmulti; 3040 } 3041 3042 /* TODO Check for Duplicate mcast addresses */ 3043 if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI) { 3044 PMD_DRV_LOG(INFO, "Turning off Mcast promiscuous mode\n"); 3045 vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; 
3046 } 3047 for (i = 0; i < nb_mc_addr; i++) 3048 rte_ether_addr_copy(&mc_addr_set[i], &bp->mcast_addr_list[i]); 3049 3050 if (bp->nb_mc_addr) 3051 vnic->flags |= BNXT_VNIC_INFO_MCAST; 3052 else 3053 vnic->flags &= ~BNXT_VNIC_INFO_MCAST; 3054 3055 allmulti: 3056 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 3057 if (rc == -ENOSPC && (vnic->flags & BNXT_VNIC_INFO_MCAST)) { 3058 /* If MCAST addition failed because FW ran out of 3059 * multicast filters, enable all multicast mode. 3060 */ 3061 vnic->flags &= ~BNXT_VNIC_INFO_MCAST; 3062 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; 3063 goto allmulti; 3064 } 3065 3066 return rc; 3067 } 3068 3069 static int 3070 bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) 3071 { 3072 struct bnxt *bp = dev->data->dev_private; 3073 uint8_t fw_major = (bp->fw_ver >> 24) & 0xff; 3074 uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff; 3075 uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff; 3076 uint8_t fw_rsvd = bp->fw_ver & 0xff; 3077 int ret; 3078 3079 ret = snprintf(fw_version, fw_size, "%d.%d.%d.%d", 3080 fw_major, fw_minor, fw_updt, fw_rsvd); 3081 if (ret < 0) 3082 return -EINVAL; 3083 3084 ret += 1; /* add the size of '\0' */ 3085 if (fw_size < (size_t)ret) 3086 return ret; 3087 else 3088 return 0; 3089 } 3090 3091 static void 3092 bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 3093 struct rte_eth_rxq_info *qinfo) 3094 { 3095 struct bnxt *bp = dev->data->dev_private; 3096 struct bnxt_rx_queue *rxq; 3097 3098 if (is_bnxt_in_error(bp)) 3099 return; 3100 3101 rxq = dev->data->rx_queues[queue_id]; 3102 3103 qinfo->mp = rxq->mb_pool; 3104 qinfo->scattered_rx = dev->data->scattered_rx; 3105 qinfo->nb_desc = rxq->nb_rx_desc; 3106 3107 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; 3108 qinfo->conf.rx_drop_en = rxq->drop_en; 3109 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start; 3110 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads; 3111 } 3112 3113 static void 3114 bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 3115 struct rte_eth_txq_info *qinfo) 3116 { 3117 struct bnxt *bp = dev->data->dev_private; 3118 struct bnxt_tx_queue *txq; 3119 3120 if (is_bnxt_in_error(bp)) 3121 return; 3122 3123 txq = dev->data->tx_queues[queue_id]; 3124 3125 qinfo->nb_desc = txq->nb_tx_desc; 3126 3127 qinfo->conf.tx_thresh.pthresh = txq->pthresh; 3128 qinfo->conf.tx_thresh.hthresh = txq->hthresh; 3129 qinfo->conf.tx_thresh.wthresh = txq->wthresh; 3130 3131 qinfo->conf.tx_free_thresh = txq->tx_free_thresh; 3132 qinfo->conf.tx_rs_thresh = 0; 3133 qinfo->conf.tx_deferred_start = txq->tx_deferred_start; 3134 qinfo->conf.offloads = txq->offloads; 3135 } 3136 3137 static const struct { 3138 eth_rx_burst_t pkt_burst; 3139 const char *info; 3140 } bnxt_rx_burst_info[] = { 3141 {bnxt_recv_pkts, "Scalar"}, 3142 #if defined(RTE_ARCH_X86) 3143 {bnxt_crx_pkts_vec, "Vector SSE"}, 3144 {bnxt_recv_pkts_vec, "Vector SSE"}, 3145 #endif 3146 #if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT) 3147 {bnxt_crx_pkts_vec_avx2, "Vector AVX2"}, 3148 {bnxt_recv_pkts_vec_avx2, "Vector AVX2"}, 3149 #endif 3150 #if defined(RTE_ARCH_ARM64) 3151 {bnxt_recv_pkts_vec, "Vector Neon"}, 3152 #endif 3153 }; 3154 3155 static int 3156 bnxt_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 3157 struct rte_eth_burst_mode *mode) 3158 { 3159 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; 3160 size_t i; 3161 3162 for (i = 0; i < RTE_DIM(bnxt_rx_burst_info); i++) { 3163 if (pkt_burst == bnxt_rx_burst_info[i].pkt_burst) { 3164 
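/* Matched the active Rx burst handler; report its descriptive name. */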
snprintf(mode->info, sizeof(mode->info), "%s", 3165 bnxt_rx_burst_info[i].info); 3166 return 0; 3167 } 3168 } 3169 3170 return -EINVAL; 3171 } 3172 3173 static const struct { 3174 eth_tx_burst_t pkt_burst; 3175 const char *info; 3176 } bnxt_tx_burst_info[] = { 3177 {bnxt_xmit_pkts, "Scalar"}, 3178 #if defined(RTE_ARCH_X86) 3179 {bnxt_xmit_pkts_vec, "Vector SSE"}, 3180 {bnxt_xmit_pkts_vec_avx2, "Vector AVX2"}, 3181 #endif 3182 #if defined(RTE_ARCH_ARM64) 3183 {bnxt_xmit_pkts_vec, "Vector Neon"}, 3184 #endif 3185 }; 3186 3187 static int 3188 bnxt_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 3189 struct rte_eth_burst_mode *mode) 3190 { 3191 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst; 3192 size_t i; 3193 3194 for (i = 0; i < RTE_DIM(bnxt_tx_burst_info); i++) { 3195 if (pkt_burst == bnxt_tx_burst_info[i].pkt_burst) { 3196 snprintf(mode->info, sizeof(mode->info), "%s", 3197 bnxt_tx_burst_info[i].info); 3198 return 0; 3199 } 3200 } 3201 3202 return -EINVAL; 3203 } 3204 3205 int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) 3206 { 3207 struct bnxt *bp = eth_dev->data->dev_private; 3208 uint32_t rc = 0; 3209 3210 rc = is_bnxt_in_error(bp); 3211 if (rc) 3212 return rc; 3213 3214 /* Return if port is active */ 3215 if (eth_dev->data->dev_started) { 3216 PMD_DRV_LOG(ERR, "Stop port before changing MTU\n"); 3217 return -EBUSY; 3218 } 3219 3220 /* Exit if receive queues are not configured yet */ 3221 if (!eth_dev->data->nb_rx_queues) 3222 return -ENOTSUP; 3223 3224 /* Is there a change in mtu setting? */ 3225 if (eth_dev->data->mtu == new_mtu) 3226 return rc; 3227 3228 if (new_mtu > RTE_ETHER_MTU) 3229 bp->flags |= BNXT_FLAG_JUMBO; 3230 else 3231 bp->flags &= ~BNXT_FLAG_JUMBO; 3232 3233 rc = bnxt_vnic_mru_config(bp, new_mtu); 3234 if (rc) { 3235 PMD_DRV_LOG(ERR, "failed to update mtu in vnic context\n"); 3236 return rc; 3237 } 3238 3239 if (bnxt_hwrm_config_host_mtu(bp)) 3240 PMD_DRV_LOG(WARNING, "Failed to configure host MTU\n"); 3241 3242 PMD_DRV_LOG(INFO, "New MTU is %d\n", new_mtu); 3243 3244 return rc; 3245 } 3246 3247 static int 3248 bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on) 3249 { 3250 struct bnxt *bp = dev->data->dev_private; 3251 uint16_t vlan = bp->vlan; 3252 int rc; 3253 3254 rc = is_bnxt_in_error(bp); 3255 if (rc) 3256 return rc; 3257 3258 if (!BNXT_SINGLE_PF(bp)) { 3259 PMD_DRV_LOG(ERR, "PVID cannot be modified on VF or on shared PF\n"); 3260 return -ENOTSUP; 3261 } 3262 bp->vlan = on ? 
pvid : 0; 3263 3264 rc = bnxt_hwrm_set_default_vlan(bp, 0, 0); 3265 if (rc) 3266 bp->vlan = vlan; 3267 return rc; 3268 } 3269 3270 static int 3271 bnxt_dev_led_on_op(struct rte_eth_dev *dev) 3272 { 3273 struct bnxt *bp = dev->data->dev_private; 3274 int rc; 3275 3276 rc = is_bnxt_in_error(bp); 3277 if (rc) 3278 return rc; 3279 3280 return bnxt_hwrm_port_led_cfg(bp, true); 3281 } 3282 3283 static int 3284 bnxt_dev_led_off_op(struct rte_eth_dev *dev) 3285 { 3286 struct bnxt *bp = dev->data->dev_private; 3287 int rc; 3288 3289 rc = is_bnxt_in_error(bp); 3290 if (rc) 3291 return rc; 3292 3293 return bnxt_hwrm_port_led_cfg(bp, false); 3294 } 3295 3296 static uint32_t 3297 bnxt_rx_queue_count_op(void *rx_queue) 3298 { 3299 struct bnxt *bp; 3300 struct bnxt_cp_ring_info *cpr; 3301 uint32_t desc = 0, raw_cons, cp_ring_size; 3302 struct bnxt_rx_queue *rxq; 3303 struct rx_pkt_cmpl *rxcmp; 3304 int rc; 3305 3306 rxq = rx_queue; 3307 bp = rxq->bp; 3308 3309 rc = is_bnxt_in_error(bp); 3310 if (rc) 3311 return rc; 3312 3313 cpr = rxq->cp_ring; 3314 raw_cons = cpr->cp_raw_cons; 3315 cp_ring_size = cpr->cp_ring_struct->ring_size; 3316 3317 while (1) { 3318 uint32_t agg_cnt, cons, cmpl_type; 3319 3320 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 3321 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 3322 3323 if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size)) 3324 break; 3325 3326 cmpl_type = CMP_TYPE(rxcmp); 3327 3328 switch (cmpl_type) { 3329 case CMPL_BASE_TYPE_RX_L2: 3330 case CMPL_BASE_TYPE_RX_L2_V2: 3331 agg_cnt = BNXT_RX_L2_AGG_BUFS(rxcmp); 3332 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3333 desc++; 3334 break; 3335 3336 case CMPL_BASE_TYPE_RX_TPA_END: 3337 if (BNXT_CHIP_P5_P7(rxq->bp)) { 3338 struct rx_tpa_v2_end_cmpl_hi *p5_tpa_end; 3339 3340 p5_tpa_end = (void *)rxcmp; 3341 agg_cnt = BNXT_TPA_END_AGG_BUFS_TH(p5_tpa_end); 3342 } else { 3343 struct rx_tpa_end_cmpl *tpa_end; 3344 3345 tpa_end = (void *)rxcmp; 3346 agg_cnt = BNXT_TPA_END_AGG_BUFS(tpa_end); 3347 } 3348 3349 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3350 desc++; 3351 break; 3352 3353 default: 3354 raw_cons += CMP_LEN(cmpl_type); 3355 } 3356 } 3357 3358 return desc; 3359 } 3360 3361 static int 3362 bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset) 3363 { 3364 struct bnxt_rx_queue *rxq = rx_queue; 3365 struct bnxt_cp_ring_info *cpr; 3366 struct bnxt_rx_ring_info *rxr; 3367 uint32_t desc, raw_cons, cp_ring_size; 3368 struct bnxt *bp = rxq->bp; 3369 struct rx_pkt_cmpl *rxcmp; 3370 int rc; 3371 3372 rc = is_bnxt_in_error(bp); 3373 if (rc) 3374 return rc; 3375 3376 if (offset >= rxq->nb_rx_desc) 3377 return -EINVAL; 3378 3379 rxr = rxq->rx_ring; 3380 cpr = rxq->cp_ring; 3381 cp_ring_size = cpr->cp_ring_struct->ring_size; 3382 3383 /* 3384 * For the vector receive case, the completion at the requested 3385 * offset can be indexed directly. 3386 */ 3387 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) 3388 if (bp->flags & BNXT_FLAG_RX_VECTOR_PKT_MODE) { 3389 struct rx_pkt_cmpl *rxcmp; 3390 uint32_t cons; 3391 3392 /* Check status of completion descriptor. */ 3393 raw_cons = cpr->cp_raw_cons + 3394 offset * CMP_LEN(CMPL_BASE_TYPE_RX_L2); 3395 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 3396 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 3397 3398 if (bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size)) 3399 return RTE_ETH_RX_DESC_DONE; 3400 3401 /* Check whether rx desc has an mbuf attached. 
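 * Descriptors in the rearm window [rxrearm_start, rxrearm_start + rxrearm_nb)
 * are still awaiting buffer replenishment.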
*/ 3402 cons = RING_CMP(rxr->rx_ring_struct, raw_cons / 2); 3403 if (cons >= rxq->rxrearm_start && 3404 cons < rxq->rxrearm_start + rxq->rxrearm_nb) { 3405 return RTE_ETH_RX_DESC_UNAVAIL; 3406 } 3407 3408 return RTE_ETH_RX_DESC_AVAIL; 3409 } 3410 #endif 3411 3412 /* 3413 * For the non-vector receive case, scan the completion ring to 3414 * locate the completion descriptor for the requested offset. 3415 */ 3416 raw_cons = cpr->cp_raw_cons; 3417 desc = 0; 3418 while (1) { 3419 uint32_t agg_cnt, cons, cmpl_type; 3420 3421 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 3422 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 3423 3424 if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size)) 3425 break; 3426 3427 cmpl_type = CMP_TYPE(rxcmp); 3428 3429 switch (cmpl_type) { 3430 case CMPL_BASE_TYPE_RX_L2: 3431 case CMPL_BASE_TYPE_RX_L2_V2: 3432 if (desc == offset) { 3433 cons = rxcmp->opaque; 3434 if (rxr->rx_buf_ring[cons]) 3435 return RTE_ETH_RX_DESC_DONE; 3436 else 3437 return RTE_ETH_RX_DESC_UNAVAIL; 3438 } 3439 agg_cnt = BNXT_RX_L2_AGG_BUFS(rxcmp); 3440 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3441 desc++; 3442 break; 3443 3444 case CMPL_BASE_TYPE_RX_TPA_END: 3445 if (desc == offset) 3446 return RTE_ETH_RX_DESC_DONE; 3447 3448 if (BNXT_CHIP_P5_P7(rxq->bp)) { 3449 struct rx_tpa_v2_end_cmpl_hi *p5_tpa_end; 3450 3451 p5_tpa_end = (void *)rxcmp; 3452 agg_cnt = BNXT_TPA_END_AGG_BUFS_TH(p5_tpa_end); 3453 } else { 3454 struct rx_tpa_end_cmpl *tpa_end; 3455 3456 tpa_end = (void *)rxcmp; 3457 agg_cnt = BNXT_TPA_END_AGG_BUFS(tpa_end); 3458 } 3459 3460 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3461 desc++; 3462 break; 3463 3464 default: 3465 raw_cons += CMP_LEN(cmpl_type); 3466 } 3467 } 3468 3469 return RTE_ETH_RX_DESC_AVAIL; 3470 } 3471 3472 static int 3473 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset) 3474 { 3475 struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue; 3476 struct bnxt_cp_ring_info *cpr = txq->cp_ring; 3477 uint32_t ring_mask, raw_cons, nb_tx_pkts = 0; 3478 struct cmpl_base *cp_desc_ring; 3479 int rc; 3480 3481 rc = is_bnxt_in_error(txq->bp); 3482 if (rc) 3483 return rc; 3484 3485 if (offset >= txq->nb_tx_desc) 3486 return -EINVAL; 3487 3488 /* Return "desc done" if descriptor is available for use. */ 3489 if (bnxt_tx_bds_in_hw(txq) <= offset) 3490 return RTE_ETH_TX_DESC_DONE; 3491 3492 raw_cons = cpr->cp_raw_cons; 3493 cp_desc_ring = cpr->cp_desc_ring; 3494 ring_mask = cpr->cp_ring_struct->ring_mask; 3495 3496 /* Check to see if hw has posted a completion for the descriptor. */ 3497 while (1) { 3498 struct tx_cmpl *txcmp; 3499 uint32_t cons; 3500 3501 cons = RING_CMPL(ring_mask, raw_cons); 3502 txcmp = (struct tx_cmpl *)&cp_desc_ring[cons]; 3503 3504 if (!bnxt_cpr_cmp_valid(txcmp, raw_cons, ring_mask + 1)) 3505 break; 3506 3507 if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2) 3508 nb_tx_pkts += rte_le_to_cpu_32(txcmp->opaque); 3509 3510 if (nb_tx_pkts > offset) 3511 return RTE_ETH_TX_DESC_DONE; 3512 3513 raw_cons = NEXT_RAW_CMP(raw_cons); 3514 } 3515 3516 /* Descriptor is pending transmit, not yet completed by hardware. 
*/ 3517 return RTE_ETH_TX_DESC_FULL; 3518 } 3519 3520 int 3521 bnxt_flow_ops_get_op(struct rte_eth_dev *dev, 3522 const struct rte_flow_ops **ops) 3523 { 3524 struct bnxt *bp = dev->data->dev_private; 3525 int ret = 0; 3526 3527 if (!bp) 3528 return -EIO; 3529 3530 if (rte_eth_dev_is_repr(dev)) { 3531 struct bnxt_representor *vfr = dev->data->dev_private; 3532 bp = vfr->parent_dev->data->dev_private; 3533 /* parent is deleted while children are still valid */ 3534 if (!bp) { 3535 PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR Error\n", 3536 dev->data->port_id); 3537 return -EIO; 3538 } 3539 } 3540 3541 ret = is_bnxt_in_error(bp); 3542 if (ret) 3543 return ret; 3544 3545 /* PMD supports thread-safe flow operations. rte_flow API 3546 * functions can avoid mutex for multi-thread safety. 3547 */ 3548 dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE; 3549 3550 if (bnxt_enable_ulp(bp)) 3551 *ops = &bnxt_ulp_rte_flow_ops; 3552 else 3553 *ops = &bnxt_flow_ops; 3554 3555 return ret; 3556 } 3557 3558 static const uint32_t * 3559 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev, 3560 size_t *no_of_elements) 3561 { 3562 static const uint32_t ptypes[] = { 3563 RTE_PTYPE_L2_ETHER_VLAN, 3564 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 3565 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 3566 RTE_PTYPE_L4_ICMP, 3567 RTE_PTYPE_L4_TCP, 3568 RTE_PTYPE_L4_UDP, 3569 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, 3570 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, 3571 RTE_PTYPE_INNER_L4_ICMP, 3572 RTE_PTYPE_INNER_L4_TCP, 3573 RTE_PTYPE_INNER_L4_UDP, 3574 }; 3575 3576 if (!dev->rx_pkt_burst) 3577 return NULL; 3578 3579 *no_of_elements = RTE_DIM(ptypes); 3580 return ptypes; 3581 } 3582 3583 static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count, 3584 int reg_win) 3585 { 3586 uint32_t reg_base = *reg_arr & 0xfffff000; 3587 uint32_t win_off; 3588 int i; 3589 3590 for (i = 0; i < count; i++) { 3591 if ((reg_arr[i] & 0xfffff000) != reg_base) 3592 return -ERANGE; 3593 } 3594 win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4; 3595 rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off); 3596 return 0; 3597 } 3598 3599 static int bnxt_map_ptp_regs(struct bnxt *bp) 3600 { 3601 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3602 uint32_t *reg_arr; 3603 int rc, i; 3604 3605 reg_arr = ptp->rx_regs; 3606 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5); 3607 if (rc) 3608 return rc; 3609 3610 reg_arr = ptp->tx_regs; 3611 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6); 3612 if (rc) 3613 return rc; 3614 3615 for (i = 0; i < BNXT_PTP_RX_REGS; i++) 3616 ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff); 3617 3618 for (i = 0; i < BNXT_PTP_TX_REGS; i++) 3619 ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff); 3620 3621 return 0; 3622 } 3623 3624 static void bnxt_unmap_ptp_regs(struct bnxt *bp) 3625 { 3626 rte_write32(0, (uint8_t *)bp->bar0 + 3627 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16); 3628 rte_write32(0, (uint8_t *)bp->bar0 + 3629 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20); 3630 } 3631 3632 static uint64_t bnxt_cc_read(struct bnxt *bp) 3633 { 3634 uint64_t ns; 3635 3636 ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3637 BNXT_GRCPF_REG_SYNC_TIME)); 3638 ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3639 BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32; 3640 return ns; 3641 } 3642 3643 static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts) 3644 { 3645 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3646 uint32_t fifo; 3647 3648 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3649 
ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 3650 if (fifo & BNXT_PTP_TX_FIFO_EMPTY) 3651 return -EAGAIN; 3652 3653 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3654 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 3655 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3656 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L])); 3657 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3658 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32; 3659 rte_read32((uint8_t *)bp->bar0 + ptp->tx_mapped_regs[BNXT_PTP_TX_SEQ]); 3660 3661 return 0; 3662 } 3663 3664 static int bnxt_clr_rx_ts(struct bnxt *bp, uint64_t *last_ts) 3665 { 3666 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3667 struct bnxt_pf_info *pf = bp->pf; 3668 uint16_t port_id; 3669 int i = 0; 3670 uint32_t fifo; 3671 3672 if (!ptp || (bp->flags & BNXT_FLAG_CHIP_P5)) 3673 return -EINVAL; 3674 3675 port_id = pf->port_id; 3676 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3677 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3678 while ((fifo & BNXT_PTP_RX_FIFO_PENDING) && (i < BNXT_PTP_RX_PND_CNT)) { 3679 rte_write32(1 << port_id, (uint8_t *)bp->bar0 + 3680 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]); 3681 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3682 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3683 *last_ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3684 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L])); 3685 *last_ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3686 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32; 3687 i++; 3688 } 3689 3690 if (i >= BNXT_PTP_RX_PND_CNT) 3691 return -EBUSY; 3692 3693 return 0; 3694 } 3695 3696 static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts) 3697 { 3698 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3699 struct bnxt_pf_info *pf = bp->pf; 3700 uint16_t port_id; 3701 uint32_t fifo; 3702 3703 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3704 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3705 if (!(fifo & BNXT_PTP_RX_FIFO_PENDING)) 3706 return -EAGAIN; 3707 3708 port_id = pf->port_id; 3709 rte_write32(1 << port_id, (uint8_t *)bp->bar0 + 3710 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]); 3711 3712 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3713 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3714 if (fifo & BNXT_PTP_RX_FIFO_PENDING) 3715 return bnxt_clr_rx_ts(bp, ts); 3716 3717 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3718 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L])); 3719 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3720 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32; 3721 3722 return 0; 3723 } 3724 3725 static int 3726 bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 3727 { 3728 uint64_t ns; 3729 struct bnxt *bp = dev->data->dev_private; 3730 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3731 3732 if (!ptp) 3733 return -ENOTSUP; 3734 3735 ns = rte_timespec_to_ns(ts); 3736 /* Set the timecounters to a new value. 
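 * Subsequent reads and adjustments are applied relative to this nsec base.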
*/ 3737 ptp->tc.nsec = ns; 3738 ptp->tx_tstamp_tc.nsec = ns; 3739 ptp->rx_tstamp_tc.nsec = ns; 3740 3741 return 0; 3742 } 3743 3744 static int 3745 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 3746 { 3747 struct bnxt *bp = dev->data->dev_private; 3748 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3749 uint64_t ns, systime_cycles = 0; 3750 int rc = 0; 3751 3752 if (!ptp) 3753 return -ENOTSUP; 3754 3755 if (BNXT_CHIP_P5(bp)) 3756 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, 3757 &systime_cycles); 3758 else 3759 systime_cycles = bnxt_cc_read(bp); 3760 3761 ns = rte_timecounter_update(&ptp->tc, systime_cycles); 3762 *ts = rte_ns_to_timespec(ns); 3763 3764 return rc; 3765 } 3766 static int 3767 bnxt_timesync_enable(struct rte_eth_dev *dev) 3768 { 3769 struct bnxt *bp = dev->data->dev_private; 3770 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3771 uint32_t shift = 0; 3772 int rc; 3773 3774 if (!ptp) 3775 return -ENOTSUP; 3776 3777 ptp->rx_filter = 1; 3778 ptp->tx_tstamp_en = 1; 3779 ptp->filter_all = 1; 3780 ptp->rxctl = BNXT_PTP_MSG_EVENTS; 3781 3782 rc = bnxt_hwrm_ptp_cfg(bp); 3783 if (rc) 3784 return rc; 3785 3786 rte_spinlock_init(&ptp->ptp_lock); 3787 bp->ptp_all_rx_tstamp = 1; 3788 memset(&ptp->tc, 0, sizeof(struct rte_timecounter)); 3789 memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 3790 memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 3791 3792 ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3793 ptp->tc.cc_shift = shift; 3794 ptp->tc.nsec_mask = (1ULL << shift) - 1; 3795 3796 ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3797 ptp->rx_tstamp_tc.cc_shift = shift; 3798 ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 3799 3800 ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3801 ptp->tx_tstamp_tc.cc_shift = shift; 3802 ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 3803 3804 if (!BNXT_CHIP_P5(bp)) 3805 bnxt_map_ptp_regs(bp); 3806 else 3807 rc = bnxt_ptp_start(bp); 3808 3809 return rc; 3810 } 3811 3812 static int 3813 bnxt_timesync_disable(struct rte_eth_dev *dev) 3814 { 3815 struct bnxt *bp = dev->data->dev_private; 3816 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3817 3818 if (!ptp) 3819 return -ENOTSUP; 3820 3821 ptp->rx_filter = 0; 3822 ptp->tx_tstamp_en = 0; 3823 ptp->rxctl = 0; 3824 ptp->filter_all = 0; 3825 3826 bnxt_hwrm_ptp_cfg(bp); 3827 3828 bp->ptp_all_rx_tstamp = 0; 3829 if (!BNXT_CHIP_P5(bp)) 3830 bnxt_unmap_ptp_regs(bp); 3831 else 3832 bnxt_ptp_stop(bp); 3833 3834 return 0; 3835 } 3836 3837 static int 3838 bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 3839 struct timespec *timestamp, 3840 uint32_t flags __rte_unused) 3841 { 3842 struct bnxt *bp = dev->data->dev_private; 3843 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3844 uint64_t rx_tstamp_cycles = 0; 3845 uint64_t ns; 3846 3847 if (!ptp) 3848 return -ENOTSUP; 3849 3850 if (BNXT_CHIP_P5(bp)) 3851 rx_tstamp_cycles = ptp->rx_timestamp; 3852 else 3853 bnxt_get_rx_ts(bp, &rx_tstamp_cycles); 3854 3855 ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles); 3856 *timestamp = rte_ns_to_timespec(ns); 3857 return 0; 3858 } 3859 3860 static int 3861 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 3862 struct timespec *timestamp) 3863 { 3864 struct bnxt *bp = dev->data->dev_private; 3865 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3866 uint64_t tx_tstamp_cycles = 0; 3867 uint64_t ns; 3868 int rc = 0; 3869 3870 if (!ptp) 3871 return -ENOTSUP; 3872 3873 if (BNXT_CHIP_P5(bp)) 3874 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_PATH_TX, 3875 
&tx_tstamp_cycles); 3876 else 3877 rc = bnxt_get_tx_ts(bp, &tx_tstamp_cycles); 3878 3879 ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles); 3880 *timestamp = rte_ns_to_timespec(ns); 3881 3882 return rc; 3883 } 3884 3885 static int 3886 bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 3887 { 3888 struct bnxt *bp = dev->data->dev_private; 3889 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3890 3891 if (!ptp) 3892 return -ENOTSUP; 3893 3894 ptp->tc.nsec += delta; 3895 ptp->tx_tstamp_tc.nsec += delta; 3896 ptp->rx_tstamp_tc.nsec += delta; 3897 3898 return 0; 3899 } 3900 3901 static int 3902 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev) 3903 { 3904 struct bnxt *bp = dev->data->dev_private; 3905 int rc; 3906 uint32_t dir_entries; 3907 uint32_t entry_length; 3908 3909 rc = is_bnxt_in_error(bp); 3910 if (rc) 3911 return rc; 3912 3913 PMD_DRV_LOG(INFO, PCI_PRI_FMT "\n", 3914 bp->pdev->addr.domain, bp->pdev->addr.bus, 3915 bp->pdev->addr.devid, bp->pdev->addr.function); 3916 3917 rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length); 3918 if (rc != 0) 3919 return rc; 3920 3921 return dir_entries * entry_length; 3922 } 3923 3924 static int 3925 bnxt_get_eeprom_op(struct rte_eth_dev *dev, 3926 struct rte_dev_eeprom_info *in_eeprom) 3927 { 3928 struct bnxt *bp = dev->data->dev_private; 3929 uint32_t index; 3930 uint32_t offset; 3931 int rc; 3932 3933 rc = is_bnxt_in_error(bp); 3934 if (rc) 3935 return rc; 3936 3937 PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n", 3938 bp->pdev->addr.domain, bp->pdev->addr.bus, 3939 bp->pdev->addr.devid, bp->pdev->addr.function, 3940 in_eeprom->offset, in_eeprom->length); 3941 3942 if (in_eeprom->offset == 0) /* special offset value to get directory */ 3943 return bnxt_get_nvram_directory(bp, in_eeprom->length, 3944 in_eeprom->data); 3945 3946 index = in_eeprom->offset >> 24; 3947 offset = in_eeprom->offset & 0xffffff; 3948 3949 if (index != 0) 3950 return bnxt_hwrm_get_nvram_item(bp, index - 1, offset, 3951 in_eeprom->length, in_eeprom->data); 3952 3953 return 0; 3954 } 3955 3956 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type) 3957 { 3958 switch (dir_type) { 3959 case BNX_DIR_TYPE_CHIMP_PATCH: 3960 case BNX_DIR_TYPE_BOOTCODE: 3961 case BNX_DIR_TYPE_BOOTCODE_2: 3962 case BNX_DIR_TYPE_APE_FW: 3963 case BNX_DIR_TYPE_APE_PATCH: 3964 case BNX_DIR_TYPE_KONG_FW: 3965 case BNX_DIR_TYPE_KONG_PATCH: 3966 case BNX_DIR_TYPE_BONO_FW: 3967 case BNX_DIR_TYPE_BONO_PATCH: 3968 /* FALLTHROUGH */ 3969 return true; 3970 } 3971 3972 return false; 3973 } 3974 3975 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type) 3976 { 3977 switch (dir_type) { 3978 case BNX_DIR_TYPE_AVS: 3979 case BNX_DIR_TYPE_EXP_ROM_MBA: 3980 case BNX_DIR_TYPE_PCIE: 3981 case BNX_DIR_TYPE_TSCF_UCODE: 3982 case BNX_DIR_TYPE_EXT_PHY: 3983 case BNX_DIR_TYPE_CCM: 3984 case BNX_DIR_TYPE_ISCSI_BOOT: 3985 case BNX_DIR_TYPE_ISCSI_BOOT_IPV6: 3986 case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6: 3987 /* FALLTHROUGH */ 3988 return true; 3989 } 3990 3991 return false; 3992 } 3993 3994 static bool bnxt_dir_type_is_executable(uint16_t dir_type) 3995 { 3996 return bnxt_dir_type_is_ape_bin_format(dir_type) || 3997 bnxt_dir_type_is_other_exec_format(dir_type); 3998 } 3999 4000 static int 4001 bnxt_set_eeprom_op(struct rte_eth_dev *dev, 4002 struct rte_dev_eeprom_info *in_eeprom) 4003 { 4004 struct bnxt *bp = dev->data->dev_private; 4005 uint8_t index, dir_op; 4006 uint16_t type, ext, ordinal, attr; 4007 int rc; 4008 4009 rc = is_bnxt_in_error(bp); 4010 if 
(rc) 4011 return rc; 4012 4013 PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n", 4014 bp->pdev->addr.domain, bp->pdev->addr.bus, 4015 bp->pdev->addr.devid, bp->pdev->addr.function, 4016 in_eeprom->offset, in_eeprom->length); 4017 4018 if (!BNXT_PF(bp)) { 4019 PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n"); 4020 return -EINVAL; 4021 } 4022 4023 type = in_eeprom->magic >> 16; 4024 4025 if (type == 0xffff) { /* special value for directory operations */ 4026 index = in_eeprom->magic & 0xff; 4027 dir_op = in_eeprom->magic >> 8; 4028 if (index == 0) 4029 return -EINVAL; 4030 switch (dir_op) { 4031 case 0x0e: /* erase */ 4032 if (in_eeprom->offset != ~in_eeprom->magic) 4033 return -EINVAL; 4034 return bnxt_hwrm_erase_nvram_directory(bp, index - 1); 4035 default: 4036 return -EINVAL; 4037 } 4038 } 4039 4040 /* Create or re-write an NVM item: */ 4041 if (bnxt_dir_type_is_executable(type) == true) 4042 return -EOPNOTSUPP; 4043 ext = in_eeprom->magic & 0xffff; 4044 ordinal = in_eeprom->offset >> 16; 4045 attr = in_eeprom->offset & 0xffff; 4046 4047 return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr, 4048 in_eeprom->data, in_eeprom->length); 4049 } 4050 4051 static int bnxt_get_module_info(struct rte_eth_dev *dev, 4052 struct rte_eth_dev_module_info *modinfo) 4053 { 4054 uint8_t module_info[SFF_DIAG_SUPPORT_OFFSET + 1]; 4055 struct bnxt *bp = dev->data->dev_private; 4056 int rc; 4057 4058 /* No point in going further if phy status indicates 4059 * module is not inserted or if it is powered down or 4060 * if it is of type 10GBase-T 4061 */ 4062 if (bp->link_info->module_status > 4063 HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG) { 4064 PMD_DRV_LOG(NOTICE, "Port %u : Module is not inserted or is powered down\n", 4065 dev->data->port_id); 4066 return -ENOTSUP; 4067 } 4068 4069 /* This feature is not supported in older firmware versions */ 4070 if (bp->hwrm_spec_code < 0x10202) { 4071 PMD_DRV_LOG(NOTICE, "Port %u : Feature is not supported in older firmware\n", 4072 dev->data->port_id); 4073 return -ENOTSUP; 4074 } 4075 4076 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 4077 SFF_DIAG_SUPPORT_OFFSET + 1, 4078 module_info); 4079 4080 if (rc) 4081 return rc; 4082 4083 switch (module_info[0]) { 4084 case SFF_MODULE_ID_SFP: 4085 modinfo->type = RTE_ETH_MODULE_SFF_8472; 4086 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; 4087 if (module_info[SFF_DIAG_SUPPORT_OFFSET] == 0) 4088 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_LEN; 4089 break; 4090 case SFF_MODULE_ID_QSFP: 4091 case SFF_MODULE_ID_QSFP_PLUS: 4092 modinfo->type = RTE_ETH_MODULE_SFF_8436; 4093 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_LEN; 4094 break; 4095 case SFF_MODULE_ID_QSFP28: 4096 modinfo->type = RTE_ETH_MODULE_SFF_8636; 4097 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; 4098 if (module_info[SFF8636_FLATMEM_OFFSET] & SFF8636_FLATMEM_MASK) 4099 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_LEN; 4100 break; 4101 default: 4102 PMD_DRV_LOG(NOTICE, "Port %u : Unsupported module\n", dev->data->port_id); 4103 return -ENOTSUP; 4104 } 4105 4106 PMD_DRV_LOG(INFO, "Port %u : modinfo->type = %d modinfo->eeprom_len = %d\n", 4107 dev->data->port_id, modinfo->type, modinfo->eeprom_len); 4108 4109 return 0; 4110 } 4111 4112 static int bnxt_get_module_eeprom(struct rte_eth_dev *dev, 4113 struct rte_dev_eeprom_info *info) 4114 { 4115 uint8_t pg_addr[5] = { I2C_DEV_ADDR_A0, I2C_DEV_ADDR_A0 }; 4116 uint32_t offset = info->offset, length = info->length; 4117 uint8_t 
module_info[SFF_DIAG_SUPPORT_OFFSET + 1]; 4118 struct bnxt *bp = dev->data->dev_private; 4119 uint8_t *data = info->data; 4120 uint8_t page = offset >> 7; 4121 uint8_t max_pages = 2; 4122 uint8_t opt_pages; 4123 int rc; 4124 4125 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 4126 SFF_DIAG_SUPPORT_OFFSET + 1, 4127 module_info); 4128 if (rc) 4129 return rc; 4130 4131 switch (module_info[0]) { 4132 case SFF_MODULE_ID_SFP: 4133 module_info[SFF_DIAG_SUPPORT_OFFSET] = 0; 4134 if (module_info[SFF_DIAG_SUPPORT_OFFSET]) { 4135 pg_addr[2] = I2C_DEV_ADDR_A2; 4136 pg_addr[3] = I2C_DEV_ADDR_A2; 4137 max_pages = 4; 4138 } 4139 break; 4140 case SFF_MODULE_ID_QSFP28: 4141 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 4142 SFF8636_OPT_PAGES_OFFSET, 4143 1, &opt_pages); 4144 if (rc) 4145 return rc; 4146 4147 if (opt_pages & SFF8636_PAGE1_MASK) { 4148 pg_addr[2] = I2C_DEV_ADDR_A0; 4149 max_pages = 3; 4150 } 4151 if (opt_pages & SFF8636_PAGE2_MASK) { 4152 pg_addr[3] = I2C_DEV_ADDR_A0; 4153 max_pages = 4; 4154 } 4155 if (~module_info[SFF8636_FLATMEM_OFFSET] & SFF8636_FLATMEM_MASK) { 4156 pg_addr[4] = I2C_DEV_ADDR_A0; 4157 max_pages = 5; 4158 } 4159 break; 4160 default: 4161 break; 4162 } 4163 4164 memset(data, 0, length); 4165 4166 offset &= 0xff; 4167 while (length && page < max_pages) { 4168 uint8_t raw_page = page ? page - 1 : 0; 4169 uint16_t chunk; 4170 4171 if (pg_addr[page] == I2C_DEV_ADDR_A2) 4172 raw_page = 0; 4173 else if (page) 4174 offset |= 0x80; 4175 chunk = RTE_MIN(length, 256 - offset); 4176 4177 if (pg_addr[page]) { 4178 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, pg_addr[page], 4179 raw_page, offset, 4180 chunk, data); 4181 if (rc) 4182 return rc; 4183 } 4184 4185 data += chunk; 4186 length -= chunk; 4187 offset = 0; 4188 page += 1 + (chunk > 128); 4189 } 4190 4191 return length ? 
-EINVAL : 0; 4192 } 4193 4194 /* 4195 * Initialization 4196 */ 4197 4198 static const struct eth_dev_ops bnxt_dev_ops = { 4199 .dev_infos_get = bnxt_dev_info_get_op, 4200 .dev_close = bnxt_dev_close_op, 4201 .dev_configure = bnxt_dev_configure_op, 4202 .dev_start = bnxt_dev_start_op, 4203 .dev_stop = bnxt_dev_stop_op, 4204 .dev_set_link_up = bnxt_dev_set_link_up_op, 4205 .dev_set_link_down = bnxt_dev_set_link_down_op, 4206 .stats_get = bnxt_stats_get_op, 4207 .stats_reset = bnxt_stats_reset_op, 4208 .rx_queue_setup = bnxt_rx_queue_setup_op, 4209 .rx_queue_release = bnxt_rx_queue_release_op, 4210 .tx_queue_setup = bnxt_tx_queue_setup_op, 4211 .tx_queue_release = bnxt_tx_queue_release_op, 4212 .rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op, 4213 .rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op, 4214 .reta_update = bnxt_reta_update_op, 4215 .reta_query = bnxt_reta_query_op, 4216 .rss_hash_update = bnxt_rss_hash_update_op, 4217 .rss_hash_conf_get = bnxt_rss_hash_conf_get_op, 4218 .link_update = bnxt_link_update_op, 4219 .promiscuous_enable = bnxt_promiscuous_enable_op, 4220 .promiscuous_disable = bnxt_promiscuous_disable_op, 4221 .allmulticast_enable = bnxt_allmulticast_enable_op, 4222 .allmulticast_disable = bnxt_allmulticast_disable_op, 4223 .mac_addr_add = bnxt_mac_addr_add_op, 4224 .mac_addr_remove = bnxt_mac_addr_remove_op, 4225 .flow_ctrl_get = bnxt_flow_ctrl_get_op, 4226 .flow_ctrl_set = bnxt_flow_ctrl_set_op, 4227 .udp_tunnel_port_add = bnxt_udp_tunnel_port_add_op, 4228 .udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op, 4229 .vlan_filter_set = bnxt_vlan_filter_set_op, 4230 .vlan_offload_set = bnxt_vlan_offload_set_op, 4231 .vlan_tpid_set = bnxt_vlan_tpid_set_op, 4232 .vlan_pvid_set = bnxt_vlan_pvid_set_op, 4233 .mtu_set = bnxt_mtu_set_op, 4234 .mac_addr_set = bnxt_set_default_mac_addr_op, 4235 .xstats_get = bnxt_dev_xstats_get_op, 4236 .xstats_get_names = bnxt_dev_xstats_get_names_op, 4237 .xstats_reset = bnxt_dev_xstats_reset_op, 4238 .fw_version_get = bnxt_fw_version_get, 4239 .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op, 4240 .rxq_info_get = bnxt_rxq_info_get_op, 4241 .txq_info_get = bnxt_txq_info_get_op, 4242 .rx_burst_mode_get = bnxt_rx_burst_mode_get, 4243 .tx_burst_mode_get = bnxt_tx_burst_mode_get, 4244 .dev_led_on = bnxt_dev_led_on_op, 4245 .dev_led_off = bnxt_dev_led_off_op, 4246 .rx_queue_start = bnxt_rx_queue_start, 4247 .rx_queue_stop = bnxt_rx_queue_stop, 4248 .tx_queue_start = bnxt_tx_queue_start, 4249 .tx_queue_stop = bnxt_tx_queue_stop, 4250 .flow_ops_get = bnxt_flow_ops_get_op, 4251 .dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op, 4252 .get_eeprom_length = bnxt_get_eeprom_length_op, 4253 .get_eeprom = bnxt_get_eeprom_op, 4254 .set_eeprom = bnxt_set_eeprom_op, 4255 .get_module_info = bnxt_get_module_info, 4256 .get_module_eeprom = bnxt_get_module_eeprom, 4257 .timesync_enable = bnxt_timesync_enable, 4258 .timesync_disable = bnxt_timesync_disable, 4259 .timesync_read_time = bnxt_timesync_read_time, 4260 .timesync_write_time = bnxt_timesync_write_time, 4261 .timesync_adjust_time = bnxt_timesync_adjust_time, 4262 .timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp, 4263 .timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp, 4264 .mtr_ops_get = bnxt_flow_meter_ops_get, 4265 }; 4266 4267 static uint32_t bnxt_map_reset_regs(struct bnxt *bp, uint32_t reg) 4268 { 4269 uint32_t offset; 4270 4271 /* Only pre-map the reset GRC registers using window 3 */ 4272 rte_write32(reg & 0xfffff000, (uint8_t *)bp->bar0 + 4273 
BNXT_GRCPF_REG_WINDOW_BASE_OUT + 8); 4274 4275 offset = BNXT_GRCP_WINDOW_3_BASE + (reg & 0xffc); 4276 4277 return offset; 4278 } 4279 4280 int bnxt_map_fw_health_status_regs(struct bnxt *bp) 4281 { 4282 struct bnxt_error_recovery_info *info = bp->recovery_info; 4283 uint32_t reg_base = 0xffffffff; 4284 int i; 4285 4286 /* Only pre-map the monitoring GRC registers using window 2 */ 4287 for (i = 0; i < BNXT_FW_STATUS_REG_CNT; i++) { 4288 uint32_t reg = info->status_regs[i]; 4289 4290 if (BNXT_FW_STATUS_REG_TYPE(reg) != BNXT_FW_STATUS_REG_TYPE_GRC) 4291 continue; 4292 4293 if (reg_base == 0xffffffff) 4294 reg_base = reg & 0xfffff000; 4295 if ((reg & 0xfffff000) != reg_base) 4296 return -ERANGE; 4297 4298 /* Use mask 0xffc as the Lower 2 bits indicates 4299 * address space location 4300 */ 4301 info->mapped_status_regs[i] = BNXT_GRCP_WINDOW_2_BASE + 4302 (reg & 0xffc); 4303 } 4304 4305 if (reg_base == 0xffffffff) 4306 return 0; 4307 4308 rte_write32(reg_base, (uint8_t *)bp->bar0 + 4309 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 4310 4311 return 0; 4312 } 4313 4314 static void bnxt_write_fw_reset_reg(struct bnxt *bp, uint32_t index) 4315 { 4316 struct bnxt_error_recovery_info *info = bp->recovery_info; 4317 uint32_t delay = info->delay_after_reset[index]; 4318 uint32_t val = info->reset_reg_val[index]; 4319 uint32_t reg = info->reset_reg[index]; 4320 uint32_t type, offset; 4321 int ret; 4322 4323 type = BNXT_FW_STATUS_REG_TYPE(reg); 4324 offset = BNXT_FW_STATUS_REG_OFF(reg); 4325 4326 switch (type) { 4327 case BNXT_FW_STATUS_REG_TYPE_CFG: 4328 ret = rte_pci_write_config(bp->pdev, &val, sizeof(val), offset); 4329 if (ret < 0) { 4330 PMD_DRV_LOG(ERR, "Failed to write %#x at PCI offset %#x", 4331 val, offset); 4332 return; 4333 } 4334 break; 4335 case BNXT_FW_STATUS_REG_TYPE_GRC: 4336 offset = bnxt_map_reset_regs(bp, offset); 4337 rte_write32(val, (uint8_t *)bp->bar0 + offset); 4338 break; 4339 case BNXT_FW_STATUS_REG_TYPE_BAR0: 4340 rte_write32(val, (uint8_t *)bp->bar0 + offset); 4341 break; 4342 } 4343 /* wait on a specific interval of time until core reset is complete */ 4344 if (delay) 4345 rte_delay_ms(delay); 4346 } 4347 4348 static void bnxt_dev_cleanup(struct bnxt *bp) 4349 { 4350 bp->eth_dev->data->dev_link.link_status = 0; 4351 bp->link_info->link_up = 0; 4352 if (bp->eth_dev->data->dev_started) 4353 bnxt_dev_stop(bp->eth_dev); 4354 4355 bnxt_uninit_resources(bp, true); 4356 } 4357 4358 static int 4359 bnxt_check_fw_reset_done(struct bnxt *bp) 4360 { 4361 int timeout = bp->fw_reset_max_msecs; 4362 uint16_t val = 0; 4363 int rc; 4364 4365 do { 4366 rc = rte_pci_read_config(bp->pdev, &val, sizeof(val), PCI_SUBSYSTEM_ID_OFFSET); 4367 if (rc < 0) { 4368 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", PCI_SUBSYSTEM_ID_OFFSET); 4369 return rc; 4370 } 4371 if (val != 0xffff) 4372 break; 4373 rte_delay_ms(1); 4374 } while (timeout--); 4375 4376 if (val == 0xffff) { 4377 PMD_DRV_LOG(ERR, "Firmware reset aborted, PCI config space invalid\n"); 4378 return -1; 4379 } 4380 4381 return 0; 4382 } 4383 4384 static int bnxt_restore_vlan_filters(struct bnxt *bp) 4385 { 4386 struct rte_eth_dev *dev = bp->eth_dev; 4387 struct rte_vlan_filter_conf *vfc; 4388 int vidx, vbit, rc; 4389 uint16_t vlan_id; 4390 4391 for (vlan_id = 1; vlan_id <= RTE_ETHER_MAX_VLAN_ID; vlan_id++) { 4392 vfc = &dev->data->vlan_filter_conf; 4393 vidx = vlan_id / 64; 4394 vbit = vlan_id % 64; 4395 4396 /* Each bit corresponds to a VLAN id */ 4397 if (vfc->ids[vidx] & (UINT64_C(1) << vbit)) { 4398 rc = bnxt_add_vlan_filter(bp, vlan_id); 
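			/*
			 * Clarifying example for the bitmap math above: each
			 * 64-bit word of vfc->ids[] covers 64 VLAN IDs, so
			 * vlan_id 100 maps to vidx = 100 / 64 = 1 and
			 * vbit = 100 % 64 = 36, and the filter is re-added
			 * only when bit 36 of vfc->ids[1] is set.
			 */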
4399 if (rc) 4400 return rc; 4401 } 4402 } 4403 4404 return 0; 4405 } 4406 4407 static int bnxt_restore_mac_filters(struct bnxt *bp) 4408 { 4409 struct rte_eth_dev *dev = bp->eth_dev; 4410 struct rte_eth_dev_info dev_info; 4411 struct rte_ether_addr *addr; 4412 uint64_t pool_mask; 4413 uint32_t pool = 0; 4414 uint32_t i; 4415 int rc; 4416 4417 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 4418 return 0; 4419 4420 rc = bnxt_dev_info_get_op(dev, &dev_info); 4421 if (rc) 4422 return rc; 4423 4424 /* replay MAC address configuration */ 4425 for (i = 1; i < dev_info.max_mac_addrs; i++) { 4426 addr = &dev->data->mac_addrs[i]; 4427 4428 /* skip zero address */ 4429 if (rte_is_zero_ether_addr(addr)) 4430 continue; 4431 4432 pool = 0; 4433 pool_mask = dev->data->mac_pool_sel[i]; 4434 4435 do { 4436 if (pool_mask & 1ULL) { 4437 rc = bnxt_mac_addr_add_op(dev, addr, i, pool); 4438 if (rc) 4439 return rc; 4440 } 4441 pool_mask >>= 1; 4442 pool++; 4443 } while (pool_mask); 4444 } 4445 4446 return 0; 4447 } 4448 4449 static int bnxt_restore_mcast_mac_filters(struct bnxt *bp) 4450 { 4451 int ret = 0; 4452 4453 ret = bnxt_dev_set_mc_addr_list_op(bp->eth_dev, bp->mcast_addr_list, 4454 bp->nb_mc_addr); 4455 if (ret) 4456 PMD_DRV_LOG(ERR, "Failed to restore multicast MAC addreeses\n"); 4457 4458 return ret; 4459 } 4460 4461 static int bnxt_restore_filters(struct bnxt *bp) 4462 { 4463 struct rte_eth_dev *dev = bp->eth_dev; 4464 int ret = 0; 4465 4466 if (dev->data->all_multicast) { 4467 ret = bnxt_allmulticast_enable_op(dev); 4468 if (ret) 4469 return ret; 4470 } 4471 if (dev->data->promiscuous) { 4472 ret = bnxt_promiscuous_enable_op(dev); 4473 if (ret) 4474 return ret; 4475 } 4476 4477 ret = bnxt_restore_mac_filters(bp); 4478 if (ret) 4479 return ret; 4480 4481 /* if vlans are already programmed, this can fail with -EEXIST */ 4482 ret = bnxt_restore_vlan_filters(bp); 4483 if (ret && ret != -EEXIST) 4484 return ret; 4485 4486 ret = bnxt_restore_mcast_mac_filters(bp); 4487 if (ret) 4488 return ret; 4489 4490 return ret; 4491 } 4492 4493 static int bnxt_check_fw_ready(struct bnxt *bp) 4494 { 4495 int timeout = bp->fw_reset_max_msecs ? 
: BNXT_MAX_FW_RESET_TIMEOUT; 4496 int rc = 0; 4497 4498 do { 4499 rc = bnxt_hwrm_poll_ver_get(bp); 4500 if (rc == 0) 4501 break; 4502 rte_delay_ms(BNXT_FW_READY_WAIT_INTERVAL); 4503 timeout -= BNXT_FW_READY_WAIT_INTERVAL; 4504 } while (rc && timeout > 0); 4505 4506 if (rc) 4507 PMD_DRV_LOG(ERR, "FW is not Ready after reset\n"); 4508 4509 return rc; 4510 } 4511 4512 static void bnxt_dev_recover(void *arg) 4513 { 4514 struct bnxt *bp = arg; 4515 int rc = 0; 4516 4517 pthread_mutex_lock(&bp->err_recovery_lock); 4518 4519 if (!bp->fw_reset_min_msecs) { 4520 rc = bnxt_check_fw_reset_done(bp); 4521 if (rc) 4522 goto err; 4523 } 4524 4525 /* Clear Error flag so that device re-init should happen */ 4526 bp->flags &= ~BNXT_FLAG_FATAL_ERROR; 4527 PMD_DRV_LOG(INFO, "Port: %u Starting recovery...\n", 4528 bp->eth_dev->data->port_id); 4529 4530 rc = bnxt_check_fw_ready(bp); 4531 if (rc) 4532 goto err; 4533 4534 rc = bnxt_init_resources(bp, true); 4535 if (rc) { 4536 PMD_DRV_LOG(ERR, 4537 "Failed to initialize resources after reset\n"); 4538 goto err; 4539 } 4540 /* clear reset flag as the device is initialized now */ 4541 bp->flags &= ~BNXT_FLAG_FW_RESET; 4542 4543 rc = bnxt_dev_start_op(bp->eth_dev); 4544 if (rc) { 4545 PMD_DRV_LOG(ERR, "Failed to start port after reset\n"); 4546 goto err_start; 4547 } 4548 4549 rc = bnxt_restore_filters(bp); 4550 if (rc) 4551 goto err_start; 4552 4553 rte_eth_fp_ops[bp->eth_dev->data->port_id].rx_pkt_burst = 4554 bp->eth_dev->rx_pkt_burst; 4555 rte_eth_fp_ops[bp->eth_dev->data->port_id].tx_pkt_burst = 4556 bp->eth_dev->tx_pkt_burst; 4557 rte_mb(); 4558 4559 PMD_DRV_LOG(INFO, "Port: %u Recovered from FW reset\n", 4560 bp->eth_dev->data->port_id); 4561 pthread_mutex_unlock(&bp->err_recovery_lock); 4562 rte_eth_dev_callback_process(bp->eth_dev, 4563 RTE_ETH_EVENT_RECOVERY_SUCCESS, 4564 NULL); 4565 return; 4566 err_start: 4567 bnxt_dev_stop(bp->eth_dev); 4568 err: 4569 bp->flags |= BNXT_FLAG_FATAL_ERROR; 4570 bnxt_uninit_resources(bp, false); 4571 rte_eth_dev_callback_process(bp->eth_dev, 4572 RTE_ETH_EVENT_RECOVERY_FAILED, 4573 NULL); 4574 if (bp->eth_dev->data->dev_conf.intr_conf.rmv) 4575 rte_eth_dev_callback_process(bp->eth_dev, 4576 RTE_ETH_EVENT_INTR_RMV, 4577 NULL); 4578 pthread_mutex_unlock(&bp->err_recovery_lock); 4579 PMD_DRV_LOG(ERR, "Port %u: Failed to recover from FW reset\n", 4580 bp->eth_dev->data->port_id); 4581 } 4582 4583 void bnxt_dev_reset_and_resume(void *arg) 4584 { 4585 struct bnxt *bp = arg; 4586 uint32_t us = US_PER_MS * bp->fw_reset_min_msecs; 4587 uint16_t val = 0; 4588 int rc; 4589 4590 bnxt_dev_cleanup(bp); 4591 PMD_DRV_LOG(INFO, "Port: %u Finished bnxt_dev_cleanup\n", 4592 bp->eth_dev->data->port_id); 4593 4594 bnxt_wait_for_device_shutdown(bp); 4595 4596 /* During some fatal firmware error conditions, the PCI config space 4597 * register 0x2e which normally contains the subsystem ID will become 4598 * 0xffff. This register will revert back to the normal value after 4599 * the chip has completed core reset. If we detect this condition, 4600 * we can poll this config register immediately for the value to revert. 
4601 */ 4602 if (bp->flags & BNXT_FLAG_FATAL_ERROR) { 4603 rc = rte_pci_read_config(bp->pdev, &val, sizeof(val), PCI_SUBSYSTEM_ID_OFFSET); 4604 if (rc < 0) { 4605 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", PCI_SUBSYSTEM_ID_OFFSET); 4606 return; 4607 } 4608 if (val == 0xffff) { 4609 bp->fw_reset_min_msecs = 0; 4610 us = 1; 4611 } 4612 } 4613 4614 rc = rte_eal_alarm_set(us, bnxt_dev_recover, (void *)bp); 4615 if (rc) 4616 PMD_DRV_LOG(ERR, "Port %u: Error setting recovery alarm", 4617 bp->eth_dev->data->port_id); 4618 } 4619 4620 uint32_t bnxt_read_fw_status_reg(struct bnxt *bp, uint32_t index) 4621 { 4622 struct bnxt_error_recovery_info *info = bp->recovery_info; 4623 uint32_t reg = info->status_regs[index]; 4624 uint32_t type, offset, val = 0; 4625 int ret = 0; 4626 4627 type = BNXT_FW_STATUS_REG_TYPE(reg); 4628 offset = BNXT_FW_STATUS_REG_OFF(reg); 4629 4630 switch (type) { 4631 case BNXT_FW_STATUS_REG_TYPE_CFG: 4632 ret = rte_pci_read_config(bp->pdev, &val, sizeof(val), offset); 4633 if (ret < 0) 4634 PMD_DRV_LOG(ERR, "Failed to read PCI offset %#x", 4635 offset); 4636 break; 4637 case BNXT_FW_STATUS_REG_TYPE_GRC: 4638 offset = info->mapped_status_regs[index]; 4639 /* FALLTHROUGH */ 4640 case BNXT_FW_STATUS_REG_TYPE_BAR0: 4641 val = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 4642 offset)); 4643 break; 4644 } 4645 4646 return val; 4647 } 4648 4649 static int bnxt_fw_reset_all(struct bnxt *bp) 4650 { 4651 struct bnxt_error_recovery_info *info = bp->recovery_info; 4652 uint32_t i; 4653 int rc = 0; 4654 4655 if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) { 4656 /* Reset through primary function driver */ 4657 for (i = 0; i < info->reg_array_cnt; i++) 4658 bnxt_write_fw_reset_reg(bp, i); 4659 /* Wait for time specified by FW after triggering reset */ 4660 rte_delay_ms(info->primary_func_wait_period_after_reset); 4661 } else if (info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) { 4662 /* Reset with the help of Kong processor */ 4663 rc = bnxt_hwrm_fw_reset(bp); 4664 if (rc) 4665 PMD_DRV_LOG(ERR, "Failed to reset FW\n"); 4666 } 4667 4668 return rc; 4669 } 4670 4671 static void bnxt_fw_reset_cb(void *arg) 4672 { 4673 struct bnxt *bp = arg; 4674 struct bnxt_error_recovery_info *info = bp->recovery_info; 4675 int rc = 0; 4676 4677 /* Only Primary function can do FW reset */ 4678 if (bnxt_is_primary_func(bp) && 4679 bnxt_is_recovery_enabled(bp)) { 4680 rc = bnxt_fw_reset_all(bp); 4681 if (rc) { 4682 PMD_DRV_LOG(ERR, "Adapter recovery failed\n"); 4683 return; 4684 } 4685 } 4686 4687 /* if recovery method is ERROR_RECOVERY_CO_CPU, KONG will send 4688 * EXCEPTION_FATAL_ASYNC event to all the functions 4689 * (including MASTER FUNC). After receiving this Async, all the active 4690 * drivers should treat this case as FW initiated recovery 4691 */ 4692 if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) { 4693 bp->fw_reset_min_msecs = BNXT_MIN_FW_READY_TIMEOUT; 4694 bp->fw_reset_max_msecs = BNXT_MAX_FW_RESET_TIMEOUT; 4695 4696 /* To recover from error */ 4697 rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume, 4698 (void *)bp); 4699 } 4700 } 4701 4702 /* Driver should poll FW heartbeat, reset_counter with the frequency 4703 * advertised by FW in HWRM_ERROR_RECOVERY_QCFG. 4704 * When the driver detects heartbeat stop or change in reset_counter, 4705 * it has to trigger a reset to recover from the error condition. 4706 * A “primary function” is the function who will have the privilege to 4707 * initiate the chimp reset. 
The primary function will be elected by the 4708 * firmware and will be notified through async message. 4709 */ 4710 static void bnxt_check_fw_health(void *arg) 4711 { 4712 struct bnxt *bp = arg; 4713 struct bnxt_error_recovery_info *info = bp->recovery_info; 4714 uint32_t val = 0, wait_msec; 4715 4716 if (!info || !bnxt_is_recovery_enabled(bp) || 4717 is_bnxt_in_error(bp)) 4718 return; 4719 4720 val = bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG); 4721 if (val == info->last_heart_beat) 4722 goto reset; 4723 4724 info->last_heart_beat = val; 4725 4726 val = bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG); 4727 if (val != info->last_reset_counter) 4728 goto reset; 4729 4730 info->last_reset_counter = val; 4731 4732 rte_eal_alarm_set(US_PER_MS * info->driver_polling_freq, 4733 bnxt_check_fw_health, (void *)bp); 4734 4735 return; 4736 reset: 4737 /* Stop DMA to/from device */ 4738 bp->flags |= BNXT_FLAG_FATAL_ERROR; 4739 bp->flags |= BNXT_FLAG_FW_RESET; 4740 4741 bnxt_stop_rxtx(bp->eth_dev); 4742 4743 PMD_DRV_LOG(ERR, "Detected FW dead condition\n"); 4744 4745 rte_eth_dev_callback_process(bp->eth_dev, 4746 RTE_ETH_EVENT_ERR_RECOVERING, 4747 NULL); 4748 4749 if (bnxt_is_primary_func(bp)) 4750 wait_msec = info->primary_func_wait_period; 4751 else 4752 wait_msec = info->normal_func_wait_period; 4753 4754 rte_eal_alarm_set(US_PER_MS * wait_msec, 4755 bnxt_fw_reset_cb, (void *)bp); 4756 } 4757 4758 void bnxt_schedule_fw_health_check(struct bnxt *bp) 4759 { 4760 uint32_t polling_freq; 4761 4762 pthread_mutex_lock(&bp->health_check_lock); 4763 4764 if (!bnxt_is_recovery_enabled(bp)) 4765 goto done; 4766 4767 if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED) 4768 goto done; 4769 4770 polling_freq = bp->recovery_info->driver_polling_freq; 4771 4772 rte_eal_alarm_set(US_PER_MS * polling_freq, 4773 bnxt_check_fw_health, (void *)bp); 4774 bp->flags |= BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED; 4775 4776 done: 4777 pthread_mutex_unlock(&bp->health_check_lock); 4778 } 4779 4780 static void bnxt_cancel_fw_health_check(struct bnxt *bp) 4781 { 4782 rte_eal_alarm_cancel(bnxt_check_fw_health, (void *)bp); 4783 bp->flags &= ~BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED; 4784 } 4785 4786 static bool bnxt_vf_pciid(uint16_t device_id) 4787 { 4788 switch (device_id) { 4789 case BROADCOM_DEV_ID_57304_VF: 4790 case BROADCOM_DEV_ID_57406_VF: 4791 case BROADCOM_DEV_ID_5731X_VF: 4792 case BROADCOM_DEV_ID_5741X_VF: 4793 case BROADCOM_DEV_ID_57414_VF: 4794 case BROADCOM_DEV_ID_STRATUS_NIC_VF1: 4795 case BROADCOM_DEV_ID_STRATUS_NIC_VF2: 4796 case BROADCOM_DEV_ID_58802_VF: 4797 case BROADCOM_DEV_ID_57500_VF1: 4798 case BROADCOM_DEV_ID_57500_VF2: 4799 case BROADCOM_DEV_ID_58818_VF: 4800 case BROADCOM_DEV_ID_5760X_VF: 4801 /* FALLTHROUGH */ 4802 return true; 4803 default: 4804 return false; 4805 } 4806 } 4807 4808 /* Phase 5 device */ 4809 static bool bnxt_p5_device(uint16_t device_id) 4810 { 4811 switch (device_id) { 4812 case BROADCOM_DEV_ID_57508: 4813 case BROADCOM_DEV_ID_57504: 4814 case BROADCOM_DEV_ID_57502: 4815 case BROADCOM_DEV_ID_57508_MF1: 4816 case BROADCOM_DEV_ID_57504_MF1: 4817 case BROADCOM_DEV_ID_57502_MF1: 4818 case BROADCOM_DEV_ID_57508_MF2: 4819 case BROADCOM_DEV_ID_57504_MF2: 4820 case BROADCOM_DEV_ID_57502_MF2: 4821 case BROADCOM_DEV_ID_57500_VF1: 4822 case BROADCOM_DEV_ID_57500_VF2: 4823 case BROADCOM_DEV_ID_58812: 4824 case BROADCOM_DEV_ID_58814: 4825 case BROADCOM_DEV_ID_58818: 4826 /* FALLTHROUGH */ 4827 return true; 4828 default: 4829 return false; 4830 } 4831 } 4832 4833 /* Phase 7 device */ 4834 
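/*
 * Note: like bnxt_vf_pciid() and bnxt_p5_device() above, this helper
 * classifies a port purely by its PCI device ID. A minimal sketch of how
 * such a classifier is typically consumed at probe time (illustrative only;
 * the real flag names and probe flow are defined elsewhere in the driver):
 *
 *	if (bnxt_p7_device(bp->pdev->id.device_id))
 *		... take the BNXT_CHIP_P7(bp) paths (e.g. HWRM timeouts) ...
 *	else if (bnxt_p5_device(bp->pdev->id.device_id))
 *		... take the BNXT_CHIP_P5(bp) paths (e.g. PTP handling) ...
 */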
static bool bnxt_p7_device(uint16_t device_id) 4835 { 4836 switch (device_id) { 4837 case BROADCOM_DEV_ID_58818_VF: 4838 case BROADCOM_DEV_ID_57608: 4839 case BROADCOM_DEV_ID_57604: 4840 case BROADCOM_DEV_ID_57602: 4841 case BROADCOM_DEV_ID_57601: 4842 case BROADCOM_DEV_ID_5760X_VF: 4843 /* FALLTHROUGH */ 4844 return true; 4845 default: 4846 return false; 4847 } 4848 } 4849 4850 bool bnxt_stratus_device(struct bnxt *bp) 4851 { 4852 uint16_t device_id = bp->pdev->id.device_id; 4853 4854 switch (device_id) { 4855 case BROADCOM_DEV_ID_STRATUS_NIC: 4856 case BROADCOM_DEV_ID_STRATUS_NIC_VF1: 4857 case BROADCOM_DEV_ID_STRATUS_NIC_VF2: 4858 /* FALLTHROUGH */ 4859 return true; 4860 default: 4861 return false; 4862 } 4863 } 4864 4865 static int bnxt_map_pci_bars(struct rte_eth_dev *eth_dev) 4866 { 4867 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 4868 struct bnxt *bp = eth_dev->data->dev_private; 4869 4870 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 4871 bp->bar0 = (void *)pci_dev->mem_resource[0].addr; 4872 bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr; 4873 if (!bp->bar0 || !bp->doorbell_base) { 4874 PMD_DRV_LOG(ERR, "Unable to access Hardware\n"); 4875 return -ENODEV; 4876 } 4877 4878 bp->eth_dev = eth_dev; 4879 bp->pdev = pci_dev; 4880 4881 return 0; 4882 } 4883 4884 static void bnxt_init_ctxm_mem(struct bnxt_ctx_mem *ctxm, void *p, int len) 4885 { 4886 uint8_t init_val = ctxm->init_value; 4887 uint16_t offset = ctxm->init_offset; 4888 uint8_t *p2 = p; 4889 int i; 4890 4891 if (!init_val) 4892 return; 4893 if (offset == BNXT_CTX_INIT_INVALID_OFFSET) { 4894 memset(p, init_val, len); 4895 return; 4896 } 4897 for (i = 0; i < len; i += ctxm->entry_size) 4898 *(p2 + i + offset) = init_val; 4899 } 4900 4901 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, 4902 struct bnxt_ctx_pg_info *ctx_pg, 4903 struct bnxt_ctx_mem *ctxm, 4904 uint32_t mem_size, 4905 const char *suffix, 4906 uint16_t idx) 4907 { 4908 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 4909 const struct rte_memzone *mz = NULL; 4910 char name[RTE_MEMZONE_NAMESIZE]; 4911 rte_iova_t mz_phys_addr; 4912 uint64_t valid_bits = 0; 4913 uint32_t sz; 4914 int i; 4915 4916 if (!mem_size) 4917 return 0; 4918 4919 rmem->nr_pages = 4920 RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) / BNXT_PAGE_SIZE; 4921 rmem->page_size = BNXT_PAGE_SIZE; 4922 4923 snprintf(name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_pg_arr%s_%x_%d", 4924 suffix, idx, bp->eth_dev->data->port_id); 4925 ctx_pg->ctx_pg_arr = rte_zmalloc(name, sizeof(void *) * rmem->nr_pages, 0); 4926 if (ctx_pg->ctx_pg_arr == NULL) 4927 return -ENOMEM; 4928 4929 snprintf(name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_dma_arr%s_%x_%d", 4930 suffix, idx, bp->eth_dev->data->port_id); 4931 ctx_pg->ctx_dma_arr = rte_zmalloc(name, sizeof(rte_iova_t *) * rmem->nr_pages, 0); 4932 if (ctx_pg->ctx_dma_arr == NULL) 4933 return -ENOMEM; 4934 4935 rmem->pg_arr = ctx_pg->ctx_pg_arr; 4936 rmem->dma_arr = ctx_pg->ctx_dma_arr; 4937 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_USE_FULL_PAGE_FLAG; 4938 4939 valid_bits = PTU_PTE_VALID; 4940 4941 if (rmem->nr_pages > 1) { 4942 snprintf(name, RTE_MEMZONE_NAMESIZE, 4943 "bnxt_ctxpgtbl%s_%x_%d", 4944 suffix, idx, bp->eth_dev->data->port_id); 4945 name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4946 mz = rte_memzone_lookup(name); 4947 if (!mz) { 4948 mz = rte_memzone_reserve_aligned(name, 4949 rmem->nr_pages * 8, 4950 bp->eth_dev->device->numa_node, 4951 RTE_MEMZONE_2MB | 4952 RTE_MEMZONE_SIZE_HINT_ONLY | 4953 RTE_MEMZONE_IOVA_CONTIG, 4954 
BNXT_PAGE_SIZE); 4955 if (mz == NULL) 4956 return -ENOMEM; 4957 } 4958 4959 memset(mz->addr, 0xff, mz->len); 4960 mz_phys_addr = mz->iova; 4961 4962 if (ctxm != NULL) 4963 bnxt_init_ctxm_mem(ctxm, mz->addr, mz->len); 4964 rmem->pg_tbl = mz->addr; 4965 rmem->pg_tbl_map = mz_phys_addr; 4966 rmem->pg_tbl_mz = mz; 4967 } 4968 4969 snprintf(name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d", 4970 suffix, idx, bp->eth_dev->data->port_id); 4971 mz = rte_memzone_lookup(name); 4972 if (!mz) { 4973 mz = rte_memzone_reserve_aligned(name, 4974 mem_size, 4975 bp->eth_dev->device->numa_node, 4976 RTE_MEMZONE_1GB | 4977 RTE_MEMZONE_SIZE_HINT_ONLY | 4978 RTE_MEMZONE_IOVA_CONTIG, 4979 BNXT_PAGE_SIZE); 4980 if (mz == NULL) 4981 return -ENOMEM; 4982 } 4983 4984 memset(mz->addr, 0xff, mz->len); 4985 mz_phys_addr = mz->iova; 4986 4987 if (ctxm != NULL) 4988 bnxt_init_ctxm_mem(ctxm, mz->addr, mz->len); 4989 for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) { 4990 rmem->pg_arr[i] = ((char *)mz->addr) + sz; 4991 rmem->dma_arr[i] = mz_phys_addr + sz; 4992 4993 if (rmem->nr_pages > 1) { 4994 if (i == rmem->nr_pages - 2 && 4995 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 4996 valid_bits |= PTU_PTE_NEXT_TO_LAST; 4997 else if (i == rmem->nr_pages - 1 && 4998 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 4999 valid_bits |= PTU_PTE_LAST; 5000 5001 rmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] | 5002 valid_bits); 5003 } 5004 } 5005 5006 rmem->mz = mz; 5007 if (rmem->vmem_size) 5008 rmem->vmem = (void **)mz->addr; 5009 rmem->dma_arr[0] = mz_phys_addr; 5010 return 0; 5011 } 5012 5013 static void bnxt_free_ctx_mem_v2(struct bnxt *bp) 5014 { 5015 uint16_t type; 5016 5017 for (type = 0; type < bp->ctx->types; type++) { 5018 struct bnxt_ctx_mem *ctxm = &bp->ctx->ctx_arr[type]; 5019 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info; 5020 int i, n = 1; 5021 5022 if (!ctx_pg) 5023 continue; 5024 if (ctxm->instance_bmap) 5025 n = hweight32(ctxm->instance_bmap); 5026 5027 for (i = 0; i < n; i++) { 5028 rte_free(ctx_pg[i].ctx_pg_arr); 5029 rte_free(ctx_pg[i].ctx_dma_arr); 5030 rte_memzone_free(ctx_pg[i].ring_mem.mz); 5031 rte_memzone_free(ctx_pg[i].ring_mem.pg_tbl_mz); 5032 } 5033 5034 rte_free(ctx_pg); 5035 ctxm->pg_info = NULL; 5036 } 5037 rte_free(bp->ctx->ctx_arr); 5038 bp->ctx->ctx_arr = NULL; 5039 } 5040 5041 static void bnxt_free_ctx_mem(struct bnxt *bp) 5042 { 5043 int i; 5044 5045 if (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED)) 5046 return; 5047 5048 bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED; 5049 5050 if (BNXT_FW_BACKING_STORE_V2_EN(bp)) { 5051 bnxt_free_ctx_mem_v2(bp); 5052 goto free_ctx; 5053 } 5054 5055 rte_free(bp->ctx->qp_mem.ctx_pg_arr); 5056 rte_free(bp->ctx->srq_mem.ctx_pg_arr); 5057 rte_free(bp->ctx->cq_mem.ctx_pg_arr); 5058 rte_free(bp->ctx->vnic_mem.ctx_pg_arr); 5059 rte_free(bp->ctx->stat_mem.ctx_pg_arr); 5060 rte_free(bp->ctx->qp_mem.ctx_dma_arr); 5061 rte_free(bp->ctx->srq_mem.ctx_dma_arr); 5062 rte_free(bp->ctx->cq_mem.ctx_dma_arr); 5063 rte_free(bp->ctx->vnic_mem.ctx_dma_arr); 5064 rte_free(bp->ctx->stat_mem.ctx_dma_arr); 5065 5066 rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz); 5067 rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz); 5068 rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz); 5069 rte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz); 5070 rte_memzone_free(bp->ctx->stat_mem.ring_mem.mz); 5071 rte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz); 5072 rte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz); 5073 rte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz); 5074 
rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz); 5075 rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz); 5076 5077 for (i = 0; i < bp->ctx->tqm_fp_rings_count + 1; i++) { 5078 rte_free(bp->ctx->tqm_mem[i]->ctx_pg_arr); 5079 rte_free(bp->ctx->tqm_mem[i]->ctx_dma_arr); 5080 if (bp->ctx->tqm_mem[i]) 5081 rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz); 5082 } 5083 5084 free_ctx: 5085 rte_free(bp->ctx); 5086 bp->ctx = NULL; 5087 } 5088 5089 #define bnxt_roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) 5090 5091 #define clamp_t(type, _x, min, max) RTE_MIN_T(RTE_MAX_T(_x, min, type), max, type) 5092 5093 int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp) 5094 { 5095 struct bnxt_ctx_mem_info *ctx = bp->ctx; 5096 struct bnxt_ctx_mem *ctx2; 5097 uint16_t type; 5098 int rc = 0; 5099 5100 ctx2 = &ctx->ctx_arr[0]; 5101 for (type = 0; type < ctx->types && rc == 0; type++) { 5102 struct bnxt_ctx_mem *ctxm = &ctx->ctx_arr[type]; 5103 struct bnxt_ctx_pg_info *ctx_pg; 5104 uint32_t entries, mem_size; 5105 int w = 1; 5106 int i; 5107 5108 if (ctxm->entry_size == 0) 5109 continue; 5110 5111 ctx_pg = ctxm->pg_info; 5112 5113 if (ctxm->instance_bmap) 5114 w = hweight32(ctxm->instance_bmap); 5115 5116 for (i = 0; i < w && rc == 0; i++) { 5117 char name[RTE_MEMZONE_NAMESIZE] = {0}; 5118 5119 sprintf(name, "_%d_%d", i, type); 5120 5121 if (ctxm->entry_multiple) 5122 entries = bnxt_roundup(ctxm->max_entries, 5123 ctxm->entry_multiple); 5124 else 5125 entries = ctxm->max_entries; 5126 5127 if (ctxm->type == HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_CQ) 5128 entries = ctxm->cq_l2_entries; 5129 else if (ctxm->type == HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_QP) 5130 entries = ctxm->qp_l2_entries; 5131 else if (ctxm->type == HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_MRAV) 5132 entries = ctxm->mrav_av_entries; 5133 else if (ctxm->type == HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_TIM) 5134 entries = ctx2->qp_l2_entries; 5135 entries = clamp_t(uint32_t, entries, ctxm->min_entries, 5136 ctxm->max_entries); 5137 ctx_pg[i].entries = entries; 5138 mem_size = ctxm->entry_size * entries; 5139 PMD_DRV_LOG(DEBUG, 5140 "Type:0x%x instance:%d entries:%d size:%d\n", 5141 ctxm->type, i, ctx_pg[i].entries, mem_size); 5142 rc = bnxt_alloc_ctx_mem_blk(bp, &ctx_pg[i], 5143 ctxm->init_value ? 
ctxm : NULL, 5144 mem_size, name, i); 5145 } 5146 } 5147 5148 return rc; 5149 } 5150 5151 int bnxt_alloc_ctx_mem(struct bnxt *bp) 5152 { 5153 struct bnxt_ctx_pg_info *ctx_pg; 5154 struct bnxt_ctx_mem_info *ctx; 5155 uint32_t mem_size, ena, entries; 5156 int types = BNXT_CTX_MIN; 5157 uint32_t entries_sp, min; 5158 int i, rc = 0; 5159 5160 if (!BNXT_FW_BACKING_STORE_V1_EN(bp) && 5161 !BNXT_FW_BACKING_STORE_V2_EN(bp)) 5162 return rc; 5163 5164 if (BNXT_FW_BACKING_STORE_V2_EN(bp)) { 5165 types = bnxt_hwrm_func_backing_store_types_count(bp); 5166 if (types <= 0) 5167 return types; 5168 } 5169 5170 rc = bnxt_hwrm_func_backing_store_ctx_alloc(bp, types); 5171 if (rc != 0) 5172 return rc; 5173 5174 if (bp->ctx->flags & BNXT_CTX_FLAG_INITED) 5175 return 0; 5176 5177 ctx = bp->ctx; 5178 if (BNXT_FW_BACKING_STORE_V2_EN(bp)) { 5179 rc = bnxt_hwrm_func_backing_store_qcaps_v2(bp); 5180 5181 for (i = 0 ; i < bp->ctx->types && rc == 0; i++) { 5182 struct bnxt_ctx_mem *ctxm = &ctx->ctx_arr[i]; 5183 5184 rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm); 5185 } 5186 goto done; 5187 } 5188 5189 rc = bnxt_hwrm_func_backing_store_qcaps(bp); 5190 if (rc) { 5191 PMD_DRV_LOG(ERR, "Query context mem capability failed\n"); 5192 return rc; 5193 } 5194 5195 ctx_pg = &ctx->qp_mem; 5196 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries; 5197 if (ctx->qp_entry_size) { 5198 mem_size = ctx->qp_entry_size * ctx_pg->entries; 5199 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, NULL, mem_size, "qp_mem", 0); 5200 if (rc) 5201 return rc; 5202 } 5203 5204 ctx_pg = &ctx->srq_mem; 5205 ctx_pg->entries = ctx->srq_max_l2_entries; 5206 if (ctx->srq_entry_size) { 5207 mem_size = ctx->srq_entry_size * ctx_pg->entries; 5208 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, NULL, mem_size, "srq_mem", 0); 5209 if (rc) 5210 return rc; 5211 } 5212 5213 ctx_pg = &ctx->cq_mem; 5214 ctx_pg->entries = ctx->cq_max_l2_entries; 5215 if (ctx->cq_entry_size) { 5216 mem_size = ctx->cq_entry_size * ctx_pg->entries; 5217 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, NULL, mem_size, "cq_mem", 0); 5218 if (rc) 5219 return rc; 5220 } 5221 5222 ctx_pg = &ctx->vnic_mem; 5223 ctx_pg->entries = ctx->vnic_max_vnic_entries + 5224 ctx->vnic_max_ring_table_entries; 5225 if (ctx->vnic_entry_size) { 5226 mem_size = ctx->vnic_entry_size * ctx_pg->entries; 5227 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, NULL, mem_size, "vnic_mem", 0); 5228 if (rc) 5229 return rc; 5230 } 5231 5232 ctx_pg = &ctx->stat_mem; 5233 ctx_pg->entries = ctx->stat_max_entries; 5234 if (ctx->stat_entry_size) { 5235 mem_size = ctx->stat_entry_size * ctx_pg->entries; 5236 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, NULL, mem_size, "stat_mem", 0); 5237 if (rc) 5238 return rc; 5239 } 5240 5241 min = ctx->tqm_min_entries_per_ring; 5242 5243 entries_sp = ctx->qp_max_l2_entries + 5244 ctx->vnic_max_vnic_entries + 5245 2 * ctx->qp_min_qp1_entries + min; 5246 entries_sp = bnxt_roundup(entries_sp, ctx->tqm_entries_multiple); 5247 5248 entries = ctx->qp_max_l2_entries + ctx->qp_min_qp1_entries; 5249 entries = bnxt_roundup(entries, ctx->tqm_entries_multiple); 5250 entries = clamp_t(uint32_t, entries, min, 5251 ctx->tqm_max_entries_per_ring); 5252 for (i = 0, ena = 0; i < ctx->tqm_fp_rings_count + 1; i++) { 5253 /* i=0 is for TQM_SP. i=1 to i=8 applies to RING0 to RING7. 5254 * i > 8 is other ext rings. 5255 */ 5256 ctx_pg = ctx->tqm_mem[i]; 5257 ctx_pg->entries = i ? 
entries : entries_sp; 5258 if (ctx->tqm_entry_size) { 5259 mem_size = ctx->tqm_entry_size * ctx_pg->entries; 5260 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, NULL, 5261 mem_size, "tqm_mem", i); 5262 if (rc) 5263 return rc; 5264 } 5265 if (i < BNXT_MAX_TQM_LEGACY_RINGS) 5266 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i; 5267 else 5268 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8; 5269 } 5270 5271 ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES; 5272 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); 5273 done: 5274 if (rc) 5275 PMD_DRV_LOG(ERR, 5276 "Failed to configure context mem: rc = %d\n", rc); 5277 else 5278 ctx->flags |= BNXT_CTX_FLAG_INITED; 5279 5280 return rc; 5281 } 5282 5283 static int bnxt_alloc_stats_mem(struct bnxt *bp) 5284 { 5285 struct rte_pci_device *pci_dev = bp->pdev; 5286 char mz_name[RTE_MEMZONE_NAMESIZE]; 5287 const struct rte_memzone *mz = NULL; 5288 uint32_t total_alloc_len; 5289 rte_iova_t mz_phys_addr; 5290 5291 if (pci_dev->id.device_id == BROADCOM_DEV_ID_NS2) 5292 return 0; 5293 5294 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 5295 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 5296 pci_dev->addr.bus, pci_dev->addr.devid, 5297 pci_dev->addr.function, "rx_port_stats"); 5298 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 5299 mz = rte_memzone_lookup(mz_name); 5300 total_alloc_len = 5301 RTE_CACHE_LINE_ROUNDUP(sizeof(struct rx_port_stats) + 5302 sizeof(struct rx_port_stats_ext) + 512); 5303 if (!mz) { 5304 mz = rte_memzone_reserve(mz_name, total_alloc_len, 5305 SOCKET_ID_ANY, 5306 RTE_MEMZONE_2MB | 5307 RTE_MEMZONE_SIZE_HINT_ONLY | 5308 RTE_MEMZONE_IOVA_CONTIG); 5309 if (mz == NULL) 5310 return -ENOMEM; 5311 } 5312 memset(mz->addr, 0, mz->len); 5313 mz_phys_addr = mz->iova; 5314 5315 bp->rx_mem_zone = (const void *)mz; 5316 bp->hw_rx_port_stats = mz->addr; 5317 bp->hw_rx_port_stats_map = mz_phys_addr; 5318 5319 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 5320 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 5321 pci_dev->addr.bus, pci_dev->addr.devid, 5322 pci_dev->addr.function, "tx_port_stats"); 5323 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 5324 mz = rte_memzone_lookup(mz_name); 5325 total_alloc_len = 5326 RTE_CACHE_LINE_ROUNDUP(sizeof(struct tx_port_stats) + 5327 sizeof(struct tx_port_stats_ext) + 512); 5328 if (!mz) { 5329 mz = rte_memzone_reserve(mz_name, 5330 total_alloc_len, 5331 SOCKET_ID_ANY, 5332 RTE_MEMZONE_2MB | 5333 RTE_MEMZONE_SIZE_HINT_ONLY | 5334 RTE_MEMZONE_IOVA_CONTIG); 5335 if (mz == NULL) 5336 return -ENOMEM; 5337 } 5338 memset(mz->addr, 0, mz->len); 5339 mz_phys_addr = mz->iova; 5340 5341 bp->tx_mem_zone = (const void *)mz; 5342 bp->hw_tx_port_stats = mz->addr; 5343 bp->hw_tx_port_stats_map = mz_phys_addr; 5344 bp->flags |= BNXT_FLAG_PORT_STATS; 5345 5346 /* Display extended statistics if FW supports it */ 5347 if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 || 5348 bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0 || 5349 !(bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED)) 5350 return 0; 5351 5352 bp->hw_rx_port_stats_ext = (void *) 5353 ((uint8_t *)bp->hw_rx_port_stats + 5354 sizeof(struct rx_port_stats)); 5355 bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map + 5356 sizeof(struct rx_port_stats); 5357 bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS; 5358 5359 if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2 || 5360 bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED) { 5361 bp->hw_tx_port_stats_ext = (void *) 5362 ((uint8_t *)bp->hw_tx_port_stats + 5363 sizeof(struct tx_port_stats)); 5364 bp->hw_tx_port_stats_ext_map = 5365 bp->hw_tx_port_stats_map + 5366 
			sizeof(struct tx_port_stats);
		bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS;
	}

	return 0;
}

static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	size_t max_mac_addr = RTE_MIN(bp->max_l2_ctx, RTE_ETH_NUM_RECEIVE_MAC_ADDR);
	int rc = 0;

	if (bp->max_l2_ctx > RTE_ETH_NUM_RECEIVE_MAC_ADDR)
		PMD_DRV_LOG(INFO, "Max number of MAC addrs supported is %d, but will be limited to %d\n",
			    bp->max_l2_ctx, RTE_ETH_NUM_RECEIVE_MAC_ADDR);

	eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
					       RTE_ETHER_ADDR_LEN * max_mac_addr,
					       0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n");
		return -ENOMEM;
	}

	if (!BNXT_HAS_DFLT_MAC_SET(bp)) {
		if (BNXT_PF(bp))
			return -EINVAL;

		/* Generate a random MAC address, if none was assigned by PF */
		PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n");
		bnxt_eth_hw_addr_random(bp->mac_addr);
		PMD_DRV_LOG(INFO,
			    "Assign random MAC:" RTE_ETHER_ADDR_PRT_FMT "\n",
			    bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2],
			    bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]);

		rc = bnxt_hwrm_set_mac(bp);
		if (rc)
			return rc;
	}

	/* Copy the permanent MAC from the FUNC_QCAPS response */
	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN);

	/*
	 * Allocate memory to hold multicast mac addresses added.
	 * Used to restore them during reset recovery
	 */
	bp->mcast_addr_list = rte_zmalloc("bnxt_mcast_addr_tbl",
					  sizeof(struct rte_ether_addr) *
					  BNXT_MAX_MC_ADDRS, 0);
	if (bp->mcast_addr_list == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate multicast addr table\n");
		return -ENOMEM;
	}
	bp->mc_list_dma_addr = rte_malloc_virt2iova(bp->mcast_addr_list);
	if (bp->mc_list_dma_addr == RTE_BAD_IOVA) {
		PMD_DRV_LOG(ERR, "Fail to map mcast_addr_list to physical memory\n");
		return -ENOMEM;
	}

	return rc;
}

static int bnxt_restore_dflt_mac(struct bnxt *bp)
{
	int rc = 0;

	/* MAC is already configured in FW */
	if (BNXT_HAS_DFLT_MAC_SET(bp))
		return 0;

	/* Restore the old MAC configured */
	rc = bnxt_hwrm_set_mac(bp);
	if (rc)
		PMD_DRV_LOG(ERR, "Failed to restore MAC address\n");

	return rc;
}

static void bnxt_config_vf_req_fwd(struct bnxt *bp)
{
	if (!BNXT_PF(bp))
		return;

	memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd));

	if (!(bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN))
		BNXT_HWRM_CMD_TO_FORWARD(HWRM_PORT_PHY_QCFG);
	BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_CFG);
	BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_VF_CFG);
	BNXT_HWRM_CMD_TO_FORWARD(HWRM_CFA_L2_FILTER_ALLOC);
	BNXT_HWRM_CMD_TO_FORWARD(HWRM_OEM_CMD);
}

static void bnxt_alloc_error_recovery_info(struct bnxt *bp)
{
	struct bnxt_error_recovery_info *info = bp->recovery_info;

	if (info) {
		if (!(bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS))
			memset(info, 0, sizeof(*info));
		return;
	}

	if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
		return;

	info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg",
			   sizeof(*info), 0);
	if (!info)
		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;

	bp->recovery_info = info;
}

static void bnxt_check_fw_status(struct
bnxt *bp) 5484 { 5485 uint32_t fw_status; 5486 5487 if (!(bp->recovery_info && 5488 (bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS))) 5489 return; 5490 5491 fw_status = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG); 5492 if (fw_status != BNXT_FW_STATUS_HEALTHY) 5493 PMD_DRV_LOG(ERR, "Firmware not responding, status: %#x\n", 5494 fw_status); 5495 } 5496 5497 static int bnxt_map_hcomm_fw_status_reg(struct bnxt *bp) 5498 { 5499 struct bnxt_error_recovery_info *info = bp->recovery_info; 5500 uint32_t status_loc; 5501 uint32_t sig_ver; 5502 5503 rte_write32(HCOMM_STATUS_STRUCT_LOC, (uint8_t *)bp->bar0 + 5504 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 5505 sig_ver = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 5506 BNXT_GRCP_WINDOW_2_BASE + 5507 offsetof(struct hcomm_status, 5508 sig_ver))); 5509 /* If the signature is absent, then FW does not support this feature */ 5510 if ((sig_ver & HCOMM_STATUS_SIGNATURE_MASK) != 5511 HCOMM_STATUS_SIGNATURE_VAL) 5512 return 0; 5513 5514 if (!info) { 5515 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg", 5516 sizeof(*info), 0); 5517 if (!info) 5518 return -ENOMEM; 5519 bp->recovery_info = info; 5520 } else { 5521 memset(info, 0, sizeof(*info)); 5522 } 5523 5524 status_loc = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 5525 BNXT_GRCP_WINDOW_2_BASE + 5526 offsetof(struct hcomm_status, 5527 fw_status_loc))); 5528 5529 /* Only pre-map the FW health status GRC register */ 5530 if (BNXT_FW_STATUS_REG_TYPE(status_loc) != BNXT_FW_STATUS_REG_TYPE_GRC) 5531 return 0; 5532 5533 info->status_regs[BNXT_FW_STATUS_REG] = status_loc; 5534 info->mapped_status_regs[BNXT_FW_STATUS_REG] = 5535 BNXT_GRCP_WINDOW_2_BASE + (status_loc & BNXT_GRCP_OFFSET_MASK); 5536 5537 rte_write32((status_loc & BNXT_GRCP_BASE_MASK), (uint8_t *)bp->bar0 + 5538 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 5539 5540 bp->fw_cap |= BNXT_FW_CAP_HCOMM_FW_STATUS; 5541 5542 return 0; 5543 } 5544 5545 /* This function gets the FW version along with the 5546 * capabilities(MAX and current) of the function, vnic, 5547 * error recovery, phy and other chip related info 5548 */ 5549 static int bnxt_get_config(struct bnxt *bp) 5550 { 5551 uint16_t mtu; 5552 int timeout; 5553 int rc = 0; 5554 5555 bp->fw_cap = 0; 5556 5557 rc = bnxt_map_hcomm_fw_status_reg(bp); 5558 if (rc) 5559 return rc; 5560 5561 timeout = BNXT_CHIP_P7(bp) ? 5562 PCI_FUNC_RESET_WAIT_TIMEOUT : 5563 DFLT_HWRM_CMD_TIMEOUT; 5564 try_again: 5565 rc = bnxt_hwrm_ver_get(bp, timeout); 5566 if (rc) { 5567 if (rc == -ETIMEDOUT && timeout == PCI_FUNC_RESET_WAIT_TIMEOUT) { 5568 bp->flags &= ~BNXT_FLAG_FW_TIMEDOUT; 5569 timeout = DFLT_HWRM_CMD_TIMEOUT; 5570 goto try_again; 5571 } 5572 bnxt_check_fw_status(bp); 5573 return rc; 5574 } 5575 5576 rc = bnxt_hwrm_func_reset(bp); 5577 if (rc) 5578 return -EIO; 5579 5580 rc = bnxt_hwrm_vnic_qcaps(bp); 5581 if (rc) 5582 return rc; 5583 5584 rc = bnxt_hwrm_queue_qportcfg(bp); 5585 if (rc) 5586 return rc; 5587 5588 /* Get the MAX capabilities for this function. 5589 * This function also allocates context memory for TQM rings and 5590 * informs the firmware about this allocated backing store memory. 
5591 */ 5592 rc = bnxt_hwrm_func_qcaps(bp); 5593 if (rc) 5594 return rc; 5595 5596 rc = bnxt_hwrm_func_qcfg(bp, &mtu); 5597 if (rc) 5598 return rc; 5599 5600 bnxt_hwrm_port_mac_qcfg(bp); 5601 5602 bnxt_hwrm_parent_pf_qcfg(bp); 5603 5604 bnxt_hwrm_port_phy_qcaps(bp); 5605 5606 bnxt_alloc_error_recovery_info(bp); 5607 /* Get the adapter error recovery support info */ 5608 rc = bnxt_hwrm_error_recovery_qcfg(bp); 5609 if (rc) 5610 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 5611 5612 bnxt_hwrm_port_led_qcaps(bp); 5613 5614 return 0; 5615 } 5616 5617 static int 5618 bnxt_init_locks(struct bnxt *bp) 5619 { 5620 int err; 5621 5622 err = pthread_mutex_init(&bp->flow_lock, NULL); 5623 if (err) { 5624 PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n"); 5625 return err; 5626 } 5627 5628 err = pthread_mutex_init(&bp->def_cp_lock, NULL); 5629 if (err) { 5630 PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n"); 5631 return err; 5632 } 5633 5634 err = pthread_mutex_init(&bp->health_check_lock, NULL); 5635 if (err) { 5636 PMD_DRV_LOG(ERR, "Unable to initialize health_check_lock\n"); 5637 return err; 5638 } 5639 5640 err = pthread_mutex_init(&bp->err_recovery_lock, NULL); 5641 if (err) 5642 PMD_DRV_LOG(ERR, "Unable to initialize err_recovery_lock\n"); 5643 5644 return err; 5645 } 5646 5647 /* This should be called after we have queried trusted VF cap */ 5648 static int bnxt_alloc_switch_domain(struct bnxt *bp) 5649 { 5650 int rc = 0; 5651 5652 if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) { 5653 rc = rte_eth_switch_domain_alloc(&bp->switch_domain_id); 5654 if (rc) 5655 PMD_DRV_LOG(ERR, 5656 "Failed to alloc switch domain: %d\n", rc); 5657 else 5658 PMD_DRV_LOG(INFO, 5659 "Switch domain allocated %d\n", 5660 bp->switch_domain_id); 5661 } 5662 5663 return rc; 5664 } 5665 5666 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev) 5667 { 5668 int rc = 0; 5669 5670 if (reconfig_dev) { 5671 rc = bnxt_get_config(bp); 5672 if (rc) 5673 return rc; 5674 } 5675 5676 rc = bnxt_alloc_switch_domain(bp); 5677 if (rc) 5678 return rc; 5679 5680 if (!reconfig_dev) { 5681 rc = bnxt_setup_mac_addr(bp->eth_dev); 5682 if (rc) 5683 return rc; 5684 } else { 5685 rc = bnxt_restore_dflt_mac(bp); 5686 if (rc) 5687 return rc; 5688 } 5689 5690 bnxt_config_vf_req_fwd(bp); 5691 5692 rc = bnxt_hwrm_func_driver_register(bp); 5693 if (rc) { 5694 PMD_DRV_LOG(ERR, "Failed to register driver"); 5695 return -EBUSY; 5696 } 5697 5698 if (BNXT_PF(bp)) { 5699 if (bp->pdev->max_vfs) { 5700 rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs); 5701 if (rc) { 5702 PMD_DRV_LOG(ERR, "Failed to allocate VFs\n"); 5703 return rc; 5704 } 5705 } else { 5706 rc = bnxt_hwrm_allocate_pf_only(bp); 5707 if (rc) { 5708 PMD_DRV_LOG(ERR, 5709 "Failed to allocate PF resources"); 5710 return rc; 5711 } 5712 } 5713 } 5714 5715 if (!reconfig_dev) { 5716 bp->rss_conf.rss_key = rte_zmalloc("bnxt_rss_key", 5717 HW_HASH_KEY_SIZE, 0); 5718 if (bp->rss_conf.rss_key == NULL) { 5719 PMD_DRV_LOG(ERR, "port %u cannot allocate RSS hash key memory", 5720 bp->eth_dev->data->port_id); 5721 return -ENOMEM; 5722 } 5723 } 5724 5725 rc = bnxt_alloc_mem(bp, reconfig_dev); 5726 if (rc) 5727 return rc; 5728 5729 rc = bnxt_setup_int(bp); 5730 if (rc) 5731 return rc; 5732 5733 rc = bnxt_request_int(bp); 5734 if (rc) 5735 return rc; 5736 5737 rc = bnxt_init_ctx_mem(bp); 5738 if (rc) { 5739 PMD_DRV_LOG(ERR, "Failed to init adv_flow_counters\n"); 5740 return rc; 5741 } 5742 5743 return 0; 5744 } 5745 5746 static int 5747 bnxt_parse_devarg_flow_xstat(__rte_unused const char *key, 
5748 const char *value, void *opaque_arg) 5749 { 5750 struct bnxt *bp = opaque_arg; 5751 unsigned long flow_xstat; 5752 char *end = NULL; 5753 5754 if (!value || !opaque_arg) { 5755 PMD_DRV_LOG(ERR, 5756 "Invalid parameter passed to flow_xstat devarg.\n"); 5757 return -EINVAL; 5758 } 5759 5760 flow_xstat = strtoul(value, &end, 10); 5761 if (end == NULL || *end != '\0' || 5762 (flow_xstat == ULONG_MAX && errno == ERANGE)) { 5763 PMD_DRV_LOG(ERR, 5764 "Invalid parameter passed to flow_xstat devarg.\n"); 5765 return -EINVAL; 5766 } 5767 5768 if (BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)) { 5769 PMD_DRV_LOG(ERR, 5770 "Invalid value passed to flow_xstat devarg.\n"); 5771 return -EINVAL; 5772 } 5773 5774 bp->flags |= BNXT_FLAG_FLOW_XSTATS_EN; 5775 if (BNXT_FLOW_XSTATS_EN(bp)) 5776 PMD_DRV_LOG(INFO, "flow_xstat feature enabled.\n"); 5777 5778 return 0; 5779 } 5780 5781 static int 5782 bnxt_parse_devarg_max_num_kflows(__rte_unused const char *key, 5783 const char *value, void *opaque_arg) 5784 { 5785 struct bnxt *bp = opaque_arg; 5786 unsigned long max_num_kflows; 5787 char *end = NULL; 5788 5789 if (!value || !opaque_arg) { 5790 PMD_DRV_LOG(ERR, 5791 "Invalid parameter passed to max_num_kflows devarg.\n"); 5792 return -EINVAL; 5793 } 5794 5795 max_num_kflows = strtoul(value, &end, 10); 5796 if (end == NULL || *end != '\0' || 5797 (max_num_kflows == ULONG_MAX && errno == ERANGE)) { 5798 PMD_DRV_LOG(ERR, 5799 "Invalid parameter passed to max_num_kflows devarg.\n"); 5800 return -EINVAL; 5801 } 5802 5803 if (bnxt_devarg_max_num_kflow_invalid(max_num_kflows)) { 5804 PMD_DRV_LOG(ERR, 5805 "Invalid value passed to max_num_kflows devarg.\n"); 5806 return -EINVAL; 5807 } 5808 5809 bp->max_num_kflows = max_num_kflows; 5810 if (bp->max_num_kflows) 5811 PMD_DRV_LOG(INFO, "max_num_kflows set as %ldK.\n", 5812 max_num_kflows); 5813 5814 return 0; 5815 } 5816 5817 static int 5818 bnxt_parse_devarg_cqe_mode(__rte_unused const char *key, 5819 const char *value, void *opaque_arg) 5820 { 5821 struct bnxt *bp = opaque_arg; 5822 unsigned long cqe_mode; 5823 char *end = NULL; 5824 5825 if (!value || !opaque_arg) { 5826 PMD_DRV_LOG(ERR, 5827 "Invalid parameter passed to cqe-mode " 5828 "devargs.\n"); 5829 return -EINVAL; 5830 } 5831 5832 cqe_mode = strtoul(value, &end, 10); 5833 if (end == NULL || *end != '\0' || 5834 (cqe_mode == ULONG_MAX && errno == ERANGE)) { 5835 PMD_DRV_LOG(ERR, 5836 "Invalid parameter passed to cqe-mode " 5837 "devargs.\n"); 5838 return -EINVAL; 5839 } 5840 5841 if (BNXT_DEVARG_CQE_MODE_INVALID(cqe_mode)) { 5842 PMD_DRV_LOG(ERR, "Invalid cqe-mode(%d) devargs.\n", 5843 (uint16_t)cqe_mode); 5844 return -EINVAL; 5845 } 5846 5847 if (cqe_mode == 1) 5848 bp->flags2 |= BNXT_FLAGS2_COMPRESSED_RX_CQE; 5849 PMD_DRV_LOG(INFO, "cqe-mode=%d feature enabled.\n", (uint8_t)cqe_mode); 5850 5851 return 0; 5852 } 5853 5854 static int 5855 bnxt_parse_devarg_app_id(__rte_unused const char *key, 5856 const char *value, void *opaque_arg) 5857 { 5858 struct bnxt *bp = opaque_arg; 5859 unsigned long app_id; 5860 char *end = NULL; 5861 5862 if (!value || !opaque_arg) { 5863 PMD_DRV_LOG(ERR, 5864 "Invalid parameter passed to app-id " 5865 "devargs.\n"); 5866 return -EINVAL; 5867 } 5868 5869 app_id = strtoul(value, &end, 10); 5870 if (end == NULL || *end != '\0' || 5871 (app_id == ULONG_MAX && errno == ERANGE)) { 5872 PMD_DRV_LOG(ERR, 5873 "Invalid parameter passed to app_id " 5874 "devargs.\n"); 5875 return -EINVAL; 5876 } 5877 5878 if (BNXT_DEVARG_APP_ID_INVALID(app_id)) { 5879 PMD_DRV_LOG(ERR, "Invalid app-id(%d) 
devargs.\n", 5880 (uint16_t)app_id); 5881 return -EINVAL; 5882 } 5883 5884 bp->app_id = app_id; 5885 PMD_DRV_LOG(INFO, "app-id=%d feature enabled.\n", (uint16_t)app_id); 5886 5887 return 0; 5888 } 5889 5890 static int 5891 bnxt_parse_devarg_ieee_1588(__rte_unused const char *key, 5892 const char *value, void *opaque_arg) 5893 { 5894 struct bnxt *bp = opaque_arg; 5895 unsigned long ieee_1588; 5896 char *end = NULL; 5897 5898 if (!value || !opaque_arg) { 5899 PMD_DRV_LOG(ERR, 5900 "Invalid parameter passed to ieee-1588 " 5901 "devargs.\n"); 5902 return -EINVAL; 5903 } 5904 5905 ieee_1588 = strtoul(value, &end, 10); 5906 if (end == NULL || *end != '\0' || 5907 (ieee_1588 == ULONG_MAX && errno == ERANGE)) { 5908 PMD_DRV_LOG(ERR, 5909 "Invalid parameter passed to ieee_1588 " 5910 "devargs.\n"); 5911 return -EINVAL; 5912 } 5913 5914 if (BNXT_DEVARG_IEEE_1588_INVALID(ieee_1588)) { 5915 PMD_DRV_LOG(ERR, "Invalid ieee-1588(%d) devargs.\n", 5916 (uint16_t)ieee_1588); 5917 return -EINVAL; 5918 } 5919 5920 bp->ieee_1588 = ieee_1588; 5921 PMD_DRV_LOG(INFO, "ieee-1588=%d feature enabled.\n", (uint16_t)ieee_1588); 5922 5923 return 0; 5924 } 5925 5926 static int 5927 bnxt_parse_devarg_rep_is_pf(__rte_unused const char *key, 5928 const char *value, void *opaque_arg) 5929 { 5930 struct bnxt_representor *vfr_bp = opaque_arg; 5931 unsigned long rep_is_pf; 5932 char *end = NULL; 5933 5934 if (!value || !opaque_arg) { 5935 PMD_DRV_LOG(ERR, 5936 "Invalid parameter passed to rep_is_pf devargs.\n"); 5937 return -EINVAL; 5938 } 5939 5940 rep_is_pf = strtoul(value, &end, 10); 5941 if (end == NULL || *end != '\0' || 5942 (rep_is_pf == ULONG_MAX && errno == ERANGE)) { 5943 PMD_DRV_LOG(ERR, 5944 "Invalid parameter passed to rep_is_pf devargs.\n"); 5945 return -EINVAL; 5946 } 5947 5948 if (BNXT_DEVARG_REP_IS_PF_INVALID(rep_is_pf)) { 5949 PMD_DRV_LOG(ERR, 5950 "Invalid value passed to rep_is_pf devargs.\n"); 5951 return -EINVAL; 5952 } 5953 5954 vfr_bp->flags |= rep_is_pf; 5955 if (BNXT_REP_PF(vfr_bp)) 5956 PMD_DRV_LOG(INFO, "PF representor\n"); 5957 else 5958 PMD_DRV_LOG(INFO, "VF representor\n"); 5959 5960 return 0; 5961 } 5962 5963 static int 5964 bnxt_parse_devarg_rep_based_pf(__rte_unused const char *key, 5965 const char *value, void *opaque_arg) 5966 { 5967 struct bnxt_representor *vfr_bp = opaque_arg; 5968 unsigned long rep_based_pf; 5969 char *end = NULL; 5970 5971 if (!value || !opaque_arg) { 5972 PMD_DRV_LOG(ERR, 5973 "Invalid parameter passed to rep_based_pf " 5974 "devargs.\n"); 5975 return -EINVAL; 5976 } 5977 5978 rep_based_pf = strtoul(value, &end, 10); 5979 if (end == NULL || *end != '\0' || 5980 (rep_based_pf == ULONG_MAX && errno == ERANGE)) { 5981 PMD_DRV_LOG(ERR, 5982 "Invalid parameter passed to rep_based_pf " 5983 "devargs.\n"); 5984 return -EINVAL; 5985 } 5986 5987 if (BNXT_DEVARG_REP_BASED_PF_INVALID(rep_based_pf)) { 5988 PMD_DRV_LOG(ERR, 5989 "Invalid value passed to rep_based_pf devargs.\n"); 5990 return -EINVAL; 5991 } 5992 5993 vfr_bp->rep_based_pf = rep_based_pf; 5994 vfr_bp->flags |= BNXT_REP_BASED_PF_VALID; 5995 5996 PMD_DRV_LOG(INFO, "rep-based-pf = %d\n", vfr_bp->rep_based_pf); 5997 5998 return 0; 5999 } 6000 6001 static int 6002 bnxt_parse_devarg_rep_q_r2f(__rte_unused const char *key, 6003 const char *value, void *opaque_arg) 6004 { 6005 struct bnxt_representor *vfr_bp = opaque_arg; 6006 unsigned long rep_q_r2f; 6007 char *end = NULL; 6008 6009 if (!value || !opaque_arg) { 6010 PMD_DRV_LOG(ERR, 6011 "Invalid parameter passed to rep_q_r2f " 6012 "devargs.\n"); 6013 return -EINVAL; 6014 } 
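	/*
	 * Validation pattern shared by all of these devarg parsers: strtoul()
	 * with an end pointer rejects trailing garbage (e.g. "1x"), the
	 * ULONG_MAX/ERANGE test rejects overflow, and the matching
	 * BNXT_DEVARG_*_INVALID() macro then enforces the allowed range
	 * before the value is stored in the representor private data.
	 */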

	rep_q_r2f = strtoul(value, &end, 10);
	if (end == NULL || *end != '\0' ||
	    (rep_q_r2f == ULONG_MAX && errno == ERANGE)) {
		PMD_DRV_LOG(ERR,
			    "Invalid parameter passed to rep_q_r2f "
			    "devargs.\n");
		return -EINVAL;
	}

	if (BNXT_DEVARG_REP_Q_R2F_INVALID(rep_q_r2f)) {
		PMD_DRV_LOG(ERR,
			    "Invalid value passed to rep_q_r2f devargs.\n");
		return -EINVAL;
	}

	vfr_bp->rep_q_r2f = rep_q_r2f;
	vfr_bp->flags |= BNXT_REP_Q_R2F_VALID;
	PMD_DRV_LOG(INFO, "rep-q-r2f = %d\n", vfr_bp->rep_q_r2f);

	return 0;
}

static int
bnxt_parse_devarg_rep_q_f2r(__rte_unused const char *key,
			    const char *value, void *opaque_arg)
{
	struct bnxt_representor *vfr_bp = opaque_arg;
	unsigned long rep_q_f2r;
	char *end = NULL;

	if (!value || !opaque_arg) {
		PMD_DRV_LOG(ERR,
			    "Invalid parameter passed to rep_q_f2r "
			    "devargs.\n");
		return -EINVAL;
	}

	rep_q_f2r = strtoul(value, &end, 10);
	if (end == NULL || *end != '\0' ||
	    (rep_q_f2r == ULONG_MAX && errno == ERANGE)) {
		PMD_DRV_LOG(ERR,
			    "Invalid parameter passed to rep_q_f2r "
			    "devargs.\n");
		return -EINVAL;
	}

	if (BNXT_DEVARG_REP_Q_F2R_INVALID(rep_q_f2r)) {
		PMD_DRV_LOG(ERR,
			    "Invalid value passed to rep_q_f2r devargs.\n");
		return -EINVAL;
	}

	vfr_bp->rep_q_f2r = rep_q_f2r;
	vfr_bp->flags |= BNXT_REP_Q_F2R_VALID;
	PMD_DRV_LOG(INFO, "rep-q-f2r = %d\n", vfr_bp->rep_q_f2r);

	return 0;
}

static int
bnxt_parse_devarg_rep_fc_r2f(__rte_unused const char *key,
			     const char *value, void *opaque_arg)
{
	struct bnxt_representor *vfr_bp = opaque_arg;
	unsigned long rep_fc_r2f;
	char *end = NULL;

	if (!value || !opaque_arg) {
		PMD_DRV_LOG(ERR,
			    "Invalid parameter passed to rep_fc_r2f "
			    "devargs.\n");
		return -EINVAL;
	}

	rep_fc_r2f = strtoul(value, &end, 10);
	if (end == NULL || *end != '\0' ||
	    (rep_fc_r2f == ULONG_MAX && errno == ERANGE)) {
		PMD_DRV_LOG(ERR,
			    "Invalid parameter passed to rep_fc_r2f "
			    "devargs.\n");
		return -EINVAL;
	}

	if (BNXT_DEVARG_REP_FC_R2F_INVALID(rep_fc_r2f)) {
		PMD_DRV_LOG(ERR,
			    "Invalid value passed to rep_fc_r2f devargs.\n");
		return -EINVAL;
	}

	vfr_bp->flags |= BNXT_REP_FC_R2F_VALID;
	vfr_bp->rep_fc_r2f = rep_fc_r2f;
	PMD_DRV_LOG(INFO, "rep-fc-r2f = %lu\n", rep_fc_r2f);

	return 0;
}

static int
bnxt_parse_devarg_rep_fc_f2r(__rte_unused const char *key,
			     const char *value, void *opaque_arg)
{
	struct bnxt_representor *vfr_bp = opaque_arg;
	unsigned long rep_fc_f2r;
	char *end = NULL;

	if (!value || !opaque_arg) {
		PMD_DRV_LOG(ERR,
			    "Invalid parameter passed to rep_fc_f2r "
			    "devargs.\n");
		return -EINVAL;
	}

	rep_fc_f2r = strtoul(value, &end, 10);
	if (end == NULL || *end != '\0' ||
	    (rep_fc_f2r == ULONG_MAX && errno == ERANGE)) {
		PMD_DRV_LOG(ERR,
			    "Invalid parameter passed to rep_fc_f2r "
			    "devargs.\n");
		return -EINVAL;
	}

	if (BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r)) {
		PMD_DRV_LOG(ERR,
			    "Invalid value passed to rep_fc_f2r devargs.\n");
		return -EINVAL;
	}

	vfr_bp->flags |= BNXT_REP_FC_F2R_VALID;
	vfr_bp->rep_fc_f2r = rep_fc_f2r;
	PMD_DRV_LOG(INFO, "rep-fc-f2r = %lu\n", rep_fc_f2r);

	return 0;
}
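
/*
 * Illustrative note (not part of the upstream sources): the per-port devargs
 * handled below are supplied on the EAL command line together with the PCI
 * allowlist entry, for example:
 *
 *   dpdk-testpmd -a 0000:82:00.0,flow-xstat=1,max-num-kflows=32,app-id=1
 *
 * The PCI address and values above are placeholders.
 */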

static int
bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	int ret = 0;

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, bnxt_dev_args);
	if (kvlist == NULL)
		return -EINVAL;

	/*
	 * Handler for "flow-xstat" devarg.
	 * Invoked as, for example: "-a 0000:00:0d.0,flow-xstat=1"
	 */
	ret = rte_kvargs_process(kvlist, BNXT_DEVARG_FLOW_XSTAT,
				 bnxt_parse_devarg_flow_xstat, bp);
	if (ret)
		goto err;

	/*
	 * Handler for "max-num-kflows" devarg.
	 * Invoked as, for example: "-a 0000:00:0d.0,max-num-kflows=32"
	 */
	ret = rte_kvargs_process(kvlist, BNXT_DEVARG_MAX_NUM_KFLOWS,
				 bnxt_parse_devarg_max_num_kflows, bp);
	if (ret)
		goto err;

err:
	/*
	 * Handler for "app-id" devarg.
	 * Invoked as, for example: "-a 0000:00:0d.0,app-id=1"
	 */
	rte_kvargs_process(kvlist, BNXT_DEVARG_APP_ID,
			   bnxt_parse_devarg_app_id, bp);

	/*
	 * Handler for "ieee-1588" devarg.
	 * Invoked as, for example: "-a 0000:00:0d.0,ieee-1588=1"
	 */
	rte_kvargs_process(kvlist, BNXT_DEVARG_IEEE_1588,
			   bnxt_parse_devarg_ieee_1588, bp);

	/*
	 * Handler for "cqe-mode" devarg.
	 * Invoked as, for example: "-a 0000:00:0d.0,cqe-mode=1"
	 */
	rte_kvargs_process(kvlist, BNXT_DEVARG_CQE_MODE,
			   bnxt_parse_devarg_cqe_mode, bp);

	rte_kvargs_free(kvlist);
	return ret;
}

/* Allocate and initialize various fields in bnxt struct that
 * need to be allocated/destroyed only once in the lifetime of the driver
 */
static int bnxt_drv_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct bnxt *bp = eth_dev->data->dev_private;
	int rc = 0;

	bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;

	if (bnxt_vf_pciid(pci_dev->id.device_id))
		bp->flags |= BNXT_FLAG_VF;

	if (bnxt_p5_device(pci_dev->id.device_id))
		bp->flags |= BNXT_FLAG_CHIP_P5;

	if (bnxt_p7_device(pci_dev->id.device_id))
		bp->flags |= BNXT_FLAG_CHIP_P7;

	if (pci_dev->id.device_id == BROADCOM_DEV_ID_58802 ||
	    pci_dev->id.device_id == BROADCOM_DEV_ID_58804 ||
	    pci_dev->id.device_id == BROADCOM_DEV_ID_58808 ||
	    pci_dev->id.device_id == BROADCOM_DEV_ID_58802_VF)
		bp->flags |= BNXT_FLAG_STINGRAY;

	rc = bnxt_map_pci_bars(eth_dev);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "Failed to initialize board rc: %x\n", rc);
		return rc;
	}

	rc = bnxt_alloc_pf_info(bp);
	if (rc)
		return rc;

	rc = bnxt_alloc_link_info(bp);
	if (rc)
		return rc;

	rc = bnxt_alloc_parent_info(bp);
	if (rc)
		return rc;

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "Failed to allocate response buffer rc: %x\n", rc);
		return rc;
	}
	rc = bnxt_alloc_leds_info(bp);
	if (rc)
		return rc;

	rc = bnxt_alloc_cos_queues(bp);
	if (rc)
		return rc;

	rc = bnxt_init_locks(bp);
	if (rc)
		return rc;

	rc = bnxt_get_config(bp);
	if (rc)
		return rc;

	if (BNXT_TRUFLOW_EN(bp)) {
		/* extra mbuf field is required to store CFA code from mark */
		static const struct rte_mbuf_dynfield bnxt_cfa_code_dynfield_desc = {
			.name = RTE_PMD_BNXT_CFA_CODE_DYNFIELD_NAME,
			.size = sizeof(bnxt_cfa_code_dynfield_t),
			.align = alignof(bnxt_cfa_code_dynfield_t),
		};
		bnxt_cfa_code_dynfield_offset =
			rte_mbuf_dynfield_register(&bnxt_cfa_code_dynfield_desc);
		if (bnxt_cfa_code_dynfield_offset < 0) {
			PMD_DRV_LOG(ERR,
				    "Failed to register mbuf field for TruFlow mark\n");
			return -rte_errno;
		}
	}

	return rc;
}
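
/*
 * Illustrative sketch (not part of the upstream sources): an application that
 * wants the CFA code delivered through the dynamic mbuf field registered in
 * bnxt_drv_init() above could look the field up by name once and then read it
 * per received mbuf, e.g.:
 *
 *   int off = rte_mbuf_dynfield_lookup(RTE_PMD_BNXT_CFA_CODE_DYNFIELD_NAME, NULL);
 *   if (off >= 0)
 *           cfa_code = *RTE_MBUF_DYNFIELD(mbuf, off, bnxt_cfa_code_dynfield_t *);
 *
 * The field is only registered when TruFlow is enabled; "mbuf" and "cfa_code"
 * are placeholder variables.
 */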

static int
bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	static int version_printed;
	struct bnxt *bp;
	int rc;

	if (version_printed++ == 0)
		PMD_DRV_LOG(INFO, "%s\n", bnxt_version);

	eth_dev->dev_ops = &bnxt_dev_ops;
	eth_dev->rx_queue_count = bnxt_rx_queue_count_op;
	eth_dev->rx_descriptor_status = bnxt_rx_descriptor_status_op;
	eth_dev->tx_descriptor_status = bnxt_tx_descriptor_status_op;
	eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
	eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;

	/*
	 * For secondary processes, we don't initialise any further
	 * as primary has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
	eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;

	bp = eth_dev->data->dev_private;

	/* set the default app id */
	bp->app_id = bnxt_ulp_default_app_id_get();

	/* Parse dev arguments passed on when starting the DPDK application. */
	rc = bnxt_parse_dev_args(bp, pci_dev->device.devargs);
	if (rc)
		goto error_free;

	rc = bnxt_drv_init(eth_dev);
	if (rc)
		goto error_free;

	rc = bnxt_init_resources(bp, false);
	if (rc)
		goto error_free;

	rc = bnxt_alloc_stats_mem(bp);
	if (rc)
		goto error_free;

	PMD_DRV_LOG(INFO,
		    "Found %s device at mem %" PRIX64 ", node addr %pM\n",
		    DRV_MODULE_NAME,
		    pci_dev->mem_resource[0].phys_addr,
		    pci_dev->mem_resource[0].addr);

	return 0;

error_free:
	bnxt_dev_uninit(eth_dev);
	return rc;
}


static void bnxt_free_ctx_mem_buf(struct bnxt_ctx_mem_buf_info *ctx)
{
	if (!ctx)
		return;

	rte_free(ctx->va);

	ctx->va = NULL;
	ctx->dma = RTE_BAD_IOVA;
	ctx->ctx_id = BNXT_CTX_VAL_INVAL;
}

static void bnxt_unregister_fc_ctx_mem(struct bnxt *bp)
{
	bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX,
				  CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
				  bp->flow_stat->rx_fc_out_tbl.ctx_id,
				  bp->flow_stat->max_fc,
				  false);

	bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX,
				  CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
				  bp->flow_stat->tx_fc_out_tbl.ctx_id,
				  bp->flow_stat->max_fc,
				  false);

	if (bp->flow_stat->rx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
		bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_in_tbl.ctx_id);
	bp->flow_stat->rx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL;

	if (bp->flow_stat->rx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
		bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_out_tbl.ctx_id);
	bp->flow_stat->rx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL;

	if (bp->flow_stat->tx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
		bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_in_tbl.ctx_id);
	bp->flow_stat->tx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL;

	if (bp->flow_stat->tx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
		bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_out_tbl.ctx_id);
	bp->flow_stat->tx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
}

static void bnxt_uninit_fc_ctx_mem(struct bnxt *bp)
{
	bnxt_unregister_fc_ctx_mem(bp);

	bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_in_tbl);
	bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_out_tbl);
	bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_in_tbl);
	bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_out_tbl);
}

static void bnxt_uninit_ctx_mem(struct bnxt *bp)
{
	if (BNXT_FLOW_XSTATS_EN(bp))
		bnxt_uninit_fc_ctx_mem(bp);
}

static void
bnxt_free_error_recovery_info(struct bnxt *bp)
{
	rte_free(bp->recovery_info);
	bp->recovery_info = NULL;
	bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
}

static int
bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev)
{
	int rc;

	bnxt_free_int(bp);
	bnxt_free_mem(bp, reconfig_dev);

	bnxt_hwrm_func_buf_unrgtr(bp);
	if (bp->pf != NULL) {
		rte_free(bp->pf->vf_req_buf);
		bp->pf->vf_req_buf = NULL;
	}

	rc = bnxt_hwrm_func_driver_unregister(bp);
	bp->flags &= ~BNXT_FLAG_REGISTERED;
	bnxt_free_ctx_mem(bp);
	if (!reconfig_dev) {
		bnxt_free_hwrm_resources(bp);
		bnxt_free_error_recovery_info(bp);
		rte_free(bp->mcast_addr_list);
		bp->mcast_addr_list = NULL;
		rte_free(bp->rss_conf.rss_key);
		bp->rss_conf.rss_key = NULL;
	}

	bnxt_uninit_ctx_mem(bp);

	bnxt_free_flow_stats_info(bp);
	bnxt_free_switch_domain(bp);
	rte_free(bp->ptp_cfg);
	bp->ptp_cfg = NULL;
	return rc;
}

static int
bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
{
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	PMD_DRV_LOG(DEBUG, "Calling Device uninit\n");

	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
		bnxt_dev_close_op(eth_dev);

	return 0;
}

static int bnxt_pci_remove_dev_with_reps(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_eth_dev *vf_rep_eth_dev;
	int ret = 0, i;

	if (!bp)
		return -EINVAL;

	for (i = 0; i < bp->num_reps; i++) {
		vf_rep_eth_dev = bp->rep_info[i].vfr_eth_dev;
		if (!vf_rep_eth_dev)
			continue;
		PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci remove\n",
			    vf_rep_eth_dev->data->port_id);
		rte_eth_dev_destroy(vf_rep_eth_dev, bnxt_representor_uninit);
	}
	PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n",
		    eth_dev->data->port_id);
	ret = rte_eth_dev_destroy(eth_dev, bnxt_dev_uninit);

	return ret;
}

static void bnxt_free_rep_info(struct bnxt *bp)
{
	rte_free(bp->rep_info);
	bp->rep_info = NULL;
	rte_free(bp->cfa_code_map);
	bp->cfa_code_map = NULL;
}

static int bnxt_init_rep_info(struct bnxt *bp)
{
	int i = 0, rc;

	if (bp->rep_info)
		return 0;

	bp->rep_info = rte_zmalloc("bnxt_rep_info",
				   sizeof(bp->rep_info[0]) * BNXT_MAX_VF_REPS(bp),
				   0);
	if (!bp->rep_info) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for rep info\n");
		return -ENOMEM;
	}
	bp->cfa_code_map = rte_zmalloc("bnxt_cfa_code_map",
				       sizeof(*bp->cfa_code_map) *
				       BNXT_MAX_CFA_CODE, 0);
	if (!bp->cfa_code_map) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for cfa_code_map\n");
		bnxt_free_rep_info(bp);
		return -ENOMEM;
	}

	for (i = 0; i < BNXT_MAX_CFA_CODE; i++)
		bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID;

	rc = pthread_mutex_init(&bp->rep_info->vfr_start_lock, NULL);
	if (rc) {
		PMD_DRV_LOG(ERR, "Unable to initialize vfr_start_lock\n");
		bnxt_free_rep_info(bp);
		return rc;
	}

	return rc;
}
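
/*
 * Create ethdev ports for the VF representors requested through the
 * "representor" devarg. Illustrative invocation (not part of the upstream
 * sources; the PCI address and values are placeholders):
 *
 *   -a 0000:82:00.0,representor=[0-3],rep-based-pf=8,rep-is-pf=0,
 *      rep-q-r2f=1,rep-q-f2r=1,rep-fc-r2f=0,rep-fc-f2r=1
 */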

static int bnxt_rep_port_probe(struct rte_pci_device *pci_dev,
			       struct rte_eth_devargs *eth_da,
			       struct rte_eth_dev *backing_eth_dev,
			       const char *dev_args)
{
	struct rte_eth_dev *vf_rep_eth_dev;
	char name[RTE_ETH_NAME_MAX_LEN];
	struct bnxt *backing_bp = backing_eth_dev->data->dev_private;
	uint16_t max_vf_reps = BNXT_MAX_VF_REPS(backing_bp);

	uint16_t num_rep;
	int i, ret = 0;
	struct rte_kvargs *kvlist = NULL;

	if (eth_da->type == RTE_ETH_REPRESENTOR_NONE)
		return 0;
	if (eth_da->type != RTE_ETH_REPRESENTOR_VF) {
		PMD_DRV_LOG(ERR, "unsupported representor type %d\n",
			    eth_da->type);
		return -ENOTSUP;
	}
	num_rep = eth_da->nb_representor_ports;
	if (num_rep > max_vf_reps) {
		PMD_DRV_LOG(ERR, "nb_representor_ports = %d > %d MAX VF REPS\n",
			    num_rep, max_vf_reps);
		return -EINVAL;
	}

	if (num_rep >= RTE_MAX_ETHPORTS) {
		PMD_DRV_LOG(ERR,
			    "nb_representor_ports = %d > %d MAX ETHPORTS\n",
			    num_rep, RTE_MAX_ETHPORTS);
		return -EINVAL;
	}

	if (!(BNXT_PF(backing_bp) || BNXT_VF_IS_TRUSTED(backing_bp))) {
		PMD_DRV_LOG(ERR,
			    "Not a PF or trusted VF. No Representor support\n");
		/* Returning an error is not an option.
		 * Applications are not handling this correctly
		 */
		return 0;
	}

	if (bnxt_init_rep_info(backing_bp))
		return 0;

	for (i = 0; i < num_rep; i++) {
		struct bnxt_representor representor = {
			.vf_id = eth_da->representor_ports[i],
			.switch_domain_id = backing_bp->switch_domain_id,
			.parent_dev = backing_eth_dev
		};

		if (representor.vf_id >= max_vf_reps) {
			PMD_DRV_LOG(ERR, "VF-Rep id %d >= %d MAX VF ID\n",
				    representor.vf_id, max_vf_reps);
			continue;
		}

		/* representor port net_bdf_port */
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			 pci_dev->device.name, eth_da->representor_ports[i]);

		kvlist = rte_kvargs_parse(dev_args, bnxt_dev_args);
		if (kvlist) {
			/*
			 * Handler for "rep-is-pf" devarg.
			 * Invoked as, for example:
			 * "-a 0000:00:0d.0,rep-based-pf=<pf index>,rep-is-pf=<VF=0 or PF=1>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_IS_PF,
						 bnxt_parse_devarg_rep_is_pf,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for "rep-based-pf" devarg.
			 * Invoked as, for example:
			 * "-a 0000:00:0d.0,rep-based-pf=<pf index>,rep-is-pf=<VF=0 or PF=1>"
			 */
			ret = rte_kvargs_process(kvlist,
						 BNXT_DEVARG_REP_BASED_PF,
						 bnxt_parse_devarg_rep_based_pf,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for "rep-q-r2f" devarg.
			 * Invoked as, for example:
			 * "-a 0000:00:0d.0,rep-q-r2f=<CoS queue index>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_R2F,
						 bnxt_parse_devarg_rep_q_r2f,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for "rep-q-f2r" devarg.
			 * Invoked as, for example:
			 * "-a 0000:00:0d.0,rep-q-f2r=<CoS queue index>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_F2R,
						 bnxt_parse_devarg_rep_q_f2r,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for "rep-fc-r2f" devarg.
			 * Invoked as, for example:
			 * "-a 0000:00:0d.0,rep-fc-r2f=<0|1>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_R2F,
						 bnxt_parse_devarg_rep_fc_r2f,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for "rep-fc-f2r" devarg.
			 * Invoked as, for example:
			 * "-a 0000:00:0d.0,rep-fc-f2r=<0|1>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_F2R,
						 bnxt_parse_devarg_rep_fc_f2r,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
		}

		ret = rte_eth_dev_create(&pci_dev->device, name,
					 sizeof(struct bnxt_representor),
					 NULL, NULL,
					 bnxt_representor_init,
					 &representor);
		if (ret) {
			PMD_DRV_LOG(ERR, "failed to create bnxt vf "
				    "representor %s.", name);
			goto err;
		}

		vf_rep_eth_dev = rte_eth_dev_allocated(name);
		if (!vf_rep_eth_dev) {
			PMD_DRV_LOG(ERR, "Failed to find the eth_dev"
				    " for VF-Rep: %s.", name);
			ret = -ENODEV;
			goto err;
		}

		PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci probe\n",
			    backing_eth_dev->data->port_id);
		backing_bp->rep_info[representor.vf_id].vfr_eth_dev =
			vf_rep_eth_dev;
		backing_bp->num_reps++;

	}

	rte_kvargs_free(kvlist);
	return 0;

err:
	/* If num_rep > 1, then rollback already created
	 * ports, since we'll be failing the probe anyway
	 */
	if (num_rep > 1)
		bnxt_pci_remove_dev_with_reps(backing_eth_dev);
	rte_errno = -ret;
	rte_kvargs_free(kvlist);

	return ret;
}

static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			  struct rte_pci_device *pci_dev)
{
	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
	struct rte_eth_dev *backing_eth_dev;
	uint16_t num_rep;
	int ret = 0;

	if (pci_dev->device.devargs) {
		ret = rte_eth_devargs_parse(pci_dev->device.devargs->args,
					    &eth_da, 1);
		if (ret < 0)
			return ret;
	}

	num_rep = eth_da.nb_representor_ports;
	PMD_DRV_LOG(DEBUG, "nb_representor_ports = %d\n",
		    num_rep);

	/* We could come here after first level of probe is already invoked
	 * as part of an application bringup(OVS-DPDK vswitchd), so first check
	 * for already allocated eth_dev for the backing device (PF/Trusted VF)
	 */
	backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (backing_eth_dev == NULL) {
		ret = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
					 sizeof(struct bnxt),
					 eth_dev_pci_specific_init, pci_dev,
					 bnxt_dev_init, NULL);

		if (ret || !num_rep)
			return ret;

		backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	}
	PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci probe\n",
		    backing_eth_dev->data->port_id);

	if (!num_rep)
		return ret;

	/* probe representor ports now */
	ret = bnxt_rep_port_probe(pci_dev, &eth_da, backing_eth_dev,
				  pci_dev->device.devargs->args);

	return ret;
}

static int
bnxt_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;

	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!eth_dev)
		return 0; /* Invoked typically only by OVS-DPDK, by the
			   * time it comes here the eth_dev is already
			   * deleted by rte_eth_dev_close(), so returning
			   * success here will at least help in proper cleanup
			   */

	PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n", eth_dev->data->port_id);
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (rte_eth_dev_is_repr(eth_dev))
			return rte_eth_dev_destroy(eth_dev,
						   bnxt_representor_uninit);
		else
			return rte_eth_dev_destroy(eth_dev,
						   bnxt_dev_uninit);
	} else {
		return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
	}
}

static struct rte_pci_driver bnxt_rte_pmd = {
	.id_table = bnxt_pci_id_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
			RTE_PCI_DRV_INTR_RMV |
			RTE_PCI_DRV_PROBE_AGAIN, /* Needed in case of VF-REPs
						  * and OVS-DPDK
						  */
	.probe = bnxt_pci_probe,
	.remove = bnxt_pci_remove,
};

static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
	if (strcmp(dev->device->driver->name, drv->driver.name))
		return false;

	return true;
}

bool is_bnxt_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &bnxt_rte_pmd);
}

struct tf *bnxt_get_tfp_session(struct bnxt *bp, enum bnxt_session_type type)
{
	return (type >= BNXT_SESSION_TYPE_LAST) ?
		&bp->tfp[BNXT_SESSION_TYPE_REGULAR] : &bp->tfp[type];
}

/* Check whether ULP should be enabled */
static bool bnxt_enable_ulp(struct bnxt *bp)
{
	/* TruFlow and MPC should be enabled */
	/* ULP is not enabled for CLI and non-TruFlow applications */
	if (BNXT_TRUFLOW_EN(bp) && bp->app_id != 254 &&
	    bp->app_id != 255) {
		if (BNXT_CHIP_P7(bp))
			return false;
		return true;
	}
	return false;
}

RTE_LOG_REGISTER_SUFFIX(bnxt_logtype_driver, driver, NOTICE);
RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");
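
/*
 * Illustrative note (not part of the upstream sources): the kernel module
 * dependency declared above means the device is normally bound to one of the
 * listed modules before the PMD can claim it, for example:
 *
 *   dpdk-devbind.py --bind=vfio-pci 0000:82:00.0
 *
 * The PCI address is a placeholder.
 */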