/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2023 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <dev_driver.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_alarm.h>
#include <rte_kvargs.h>
#include <rte_vect.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_irq.h"
#include "bnxt_reps.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_stats.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#include "bnxt_nvm_defs.h"
#include "bnxt_tf_common.h"
#include "ulp_flow_db.h"
#include "rte_pmd_bnxt.h"

#define DRV_MODULE_NAME "bnxt"
static const char bnxt_version[] =
	"Broadcom NetXtreme driver " DRV_MODULE_NAME;

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id bnxt_pci_id_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
			 BROADCOM_DEV_ID_STRATUS_NIC_VF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
			 BROADCOM_DEV_ID_STRATUS_NIC_VF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
			 BROADCOM_DEV_ID_57502_MF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58812) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58814) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58818) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58818_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

#define BNXT_DEVARG_FLOW_XSTAT		"flow-xstat"
#define BNXT_DEVARG_MAX_NUM_KFLOWS	"max-num-kflows"
#define BNXT_DEVARG_REPRESENTOR		"representor"
#define BNXT_DEVARG_REP_BASED_PF	"rep-based-pf"
#define BNXT_DEVARG_REP_IS_PF		"rep-is-pf"
#define BNXT_DEVARG_REP_Q_R2F		"rep-q-r2f"
#define BNXT_DEVARG_REP_Q_F2R		"rep-q-f2r"
#define BNXT_DEVARG_REP_FC_R2F		"rep-fc-r2f"
#define BNXT_DEVARG_REP_FC_F2R		"rep-fc-f2r"
#define BNXT_DEVARG_APP_ID		"app-id"
#define BNXT_DEVARG_IEEE_1588		"ieee-1588"

static const char *const bnxt_dev_args[] = {
	BNXT_DEVARG_REPRESENTOR,
	BNXT_DEVARG_FLOW_XSTAT,
	BNXT_DEVARG_MAX_NUM_KFLOWS,
	BNXT_DEVARG_REP_BASED_PF,
	BNXT_DEVARG_REP_IS_PF,
	BNXT_DEVARG_REP_Q_R2F,
	BNXT_DEVARG_REP_Q_F2R,
	BNXT_DEVARG_REP_FC_R2F,
	BNXT_DEVARG_REP_FC_F2R,
	BNXT_DEVARG_APP_ID,
	BNXT_DEVARG_IEEE_1588,
	NULL
};

/*
 * app-id = a non-negative 8-bit number
 */
#define BNXT_DEVARG_APP_ID_INVALID(val)			((val) > 255)

/*
 * ieee-1588 = a non-negative 8-bit number
 */
#define BNXT_DEVARG_IEEE_1588_INVALID(val)		((val) > 255)

/*
 * flow_xstat == false to disable the feature
 * flow_xstat == true to enable the feature
 */
#define BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)	((flow_xstat) > 1)

/*
 * rep_is_pf == false to indicate VF representor
 * rep_is_pf == true to indicate PF representor
 */
#define BNXT_DEVARG_REP_IS_PF_INVALID(rep_is_pf)	((rep_is_pf) > 1)

/*
 * rep_based_pf == Physical index of the PF
 */
#define BNXT_DEVARG_REP_BASED_PF_INVALID(rep_based_pf)	((rep_based_pf) > 15)

/*
 * rep_q_r2f == Logical COS Queue index for the rep to endpoint direction
 */
#define BNXT_DEVARG_REP_Q_R2F_INVALID(rep_q_r2f)	((rep_q_r2f) > 3)

/*
 * rep_q_f2r == Logical COS Queue index for the endpoint to rep direction
 */
#define BNXT_DEVARG_REP_Q_F2R_INVALID(rep_q_f2r)	((rep_q_f2r) > 3)

/*
 * rep_fc_r2f == Flow control for the representor to endpoint direction
 */
#define BNXT_DEVARG_REP_FC_R2F_INVALID(rep_fc_r2f)	((rep_fc_r2f) > 1)

/*
 * rep_fc_f2r == Flow control for the endpoint to representor direction
 */
#define BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r)	((rep_fc_f2r) > 1)

int bnxt_cfa_code_dynfield_offset = -1;

/*
 * max_num_kflows must be >= 32
 * and must be a power-of-2 supported value
 * return: 1 -> invalid
 *	   0 -> valid
 */
static int bnxt_devarg_max_num_kflow_invalid(uint16_t max_num_kflows)
{
	if (max_num_kflows < 32 || !rte_is_power_of_2(max_num_kflows))
		return 1;
	return 0;
}

static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev);
static int bnxt_uninit_resources(struct bnxt *bp, bool
				  reconfig_dev);
static void bnxt_cancel_fw_health_check(struct bnxt *bp);
static int bnxt_restore_vlan_filters(struct bnxt *bp);
static void bnxt_dev_recover(void *arg);
static void bnxt_free_error_recovery_info(struct bnxt *bp);
static void bnxt_free_rep_info(struct bnxt *bp);
static int bnxt_check_fw_ready(struct bnxt *bp);

int is_bnxt_in_error(struct bnxt *bp)
{
	if (bp->flags & BNXT_FLAG_FATAL_ERROR)
		return -EIO;
	if (bp->flags & BNXT_FLAG_FW_RESET)
		return -EBUSY;

	return 0;
}

/***********************/

/*
 * High level utility functions
 */

uint16_t bnxt_rss_ctxts(const struct bnxt *bp)
{
	unsigned int num_rss_rings = RTE_MIN(bp->rx_nr_rings,
					     BNXT_RSS_TBL_SIZE_P5);

	if (!BNXT_CHIP_P5(bp))
		return 1;

	return RTE_ALIGN_MUL_CEIL(num_rss_rings,
				  BNXT_RSS_ENTRIES_PER_CTX_P5) /
				  BNXT_RSS_ENTRIES_PER_CTX_P5;
}

uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp)
{
	if (!BNXT_CHIP_P5(bp))
		return HW_HASH_INDEX_SIZE;

	return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_P5;
}

static void bnxt_free_parent_info(struct bnxt *bp)
{
	rte_free(bp->parent);
	bp->parent = NULL;
}

static void bnxt_free_pf_info(struct bnxt *bp)
{
	rte_free(bp->pf);
	bp->pf = NULL;
}

static void bnxt_free_link_info(struct bnxt *bp)
{
	rte_free(bp->link_info);
	bp->link_info = NULL;
}

static void bnxt_free_leds_info(struct bnxt *bp)
{
	if (BNXT_VF(bp))
		return;

	rte_free(bp->leds);
	bp->leds = NULL;
}

static void bnxt_free_flow_stats_info(struct bnxt *bp)
{
	rte_free(bp->flow_stat);
	bp->flow_stat = NULL;
}

static void bnxt_free_cos_queues(struct bnxt *bp)
{
	rte_free(bp->rx_cos_queue);
	bp->rx_cos_queue = NULL;
	rte_free(bp->tx_cos_queue);
	bp->tx_cos_queue = NULL;
}

static void bnxt_free_mem(struct bnxt *bp, bool reconfig)
{
	bnxt_free_filter_mem(bp);
	bnxt_free_vnic_attributes(bp);
	bnxt_free_vnic_mem(bp);

	/* tx/rx rings are configured as part of *_queue_setup callbacks.
	 * If the number of rings changes across a firmware update,
	 * we don't have much choice except to warn the user.
276 */ 277 if (!reconfig) { 278 bnxt_free_stats(bp); 279 bnxt_free_tx_rings(bp); 280 bnxt_free_rx_rings(bp); 281 } 282 bnxt_free_async_cp_ring(bp); 283 bnxt_free_rxtx_nq_ring(bp); 284 285 rte_free(bp->grp_info); 286 bp->grp_info = NULL; 287 } 288 289 static int bnxt_alloc_parent_info(struct bnxt *bp) 290 { 291 bp->parent = rte_zmalloc("bnxt_parent_info", 292 sizeof(struct bnxt_parent_info), 0); 293 if (bp->parent == NULL) 294 return -ENOMEM; 295 296 return 0; 297 } 298 299 static int bnxt_alloc_pf_info(struct bnxt *bp) 300 { 301 bp->pf = rte_zmalloc("bnxt_pf_info", sizeof(struct bnxt_pf_info), 0); 302 if (bp->pf == NULL) 303 return -ENOMEM; 304 305 return 0; 306 } 307 308 static int bnxt_alloc_link_info(struct bnxt *bp) 309 { 310 bp->link_info = 311 rte_zmalloc("bnxt_link_info", sizeof(struct bnxt_link_info), 0); 312 if (bp->link_info == NULL) 313 return -ENOMEM; 314 315 return 0; 316 } 317 318 static int bnxt_alloc_leds_info(struct bnxt *bp) 319 { 320 if (BNXT_VF(bp)) 321 return 0; 322 323 bp->leds = rte_zmalloc("bnxt_leds", 324 BNXT_MAX_LED * sizeof(struct bnxt_led_info), 325 0); 326 if (bp->leds == NULL) 327 return -ENOMEM; 328 329 return 0; 330 } 331 332 static int bnxt_alloc_cos_queues(struct bnxt *bp) 333 { 334 bp->rx_cos_queue = 335 rte_zmalloc("bnxt_rx_cosq", 336 BNXT_COS_QUEUE_COUNT * 337 sizeof(struct bnxt_cos_queue_info), 338 0); 339 if (bp->rx_cos_queue == NULL) 340 return -ENOMEM; 341 342 bp->tx_cos_queue = 343 rte_zmalloc("bnxt_tx_cosq", 344 BNXT_COS_QUEUE_COUNT * 345 sizeof(struct bnxt_cos_queue_info), 346 0); 347 if (bp->tx_cos_queue == NULL) 348 return -ENOMEM; 349 350 return 0; 351 } 352 353 static int bnxt_alloc_flow_stats_info(struct bnxt *bp) 354 { 355 bp->flow_stat = rte_zmalloc("bnxt_flow_xstat", 356 sizeof(struct bnxt_flow_stat_info), 0); 357 if (bp->flow_stat == NULL) 358 return -ENOMEM; 359 360 return 0; 361 } 362 363 static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig) 364 { 365 int rc; 366 367 rc = bnxt_alloc_ring_grps(bp); 368 if (rc) 369 goto alloc_mem_err; 370 371 rc = bnxt_alloc_async_ring_struct(bp); 372 if (rc) 373 goto alloc_mem_err; 374 375 rc = bnxt_alloc_vnic_mem(bp); 376 if (rc) 377 goto alloc_mem_err; 378 379 rc = bnxt_alloc_vnic_attributes(bp, reconfig); 380 if (rc) 381 goto alloc_mem_err; 382 383 rc = bnxt_alloc_filter_mem(bp); 384 if (rc) 385 goto alloc_mem_err; 386 387 rc = bnxt_alloc_async_cp_ring(bp); 388 if (rc) 389 goto alloc_mem_err; 390 391 rc = bnxt_alloc_rxtx_nq_ring(bp); 392 if (rc) 393 goto alloc_mem_err; 394 395 if (BNXT_FLOW_XSTATS_EN(bp)) { 396 rc = bnxt_alloc_flow_stats_info(bp); 397 if (rc) 398 goto alloc_mem_err; 399 } 400 401 return 0; 402 403 alloc_mem_err: 404 bnxt_free_mem(bp, reconfig); 405 return rc; 406 } 407 408 static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id) 409 { 410 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 411 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 412 uint64_t rx_offloads = dev_conf->rxmode.offloads; 413 struct bnxt_rx_queue *rxq; 414 unsigned int j; 415 int rc; 416 417 rc = bnxt_vnic_grp_alloc(bp, vnic); 418 if (rc) 419 goto err_out; 420 421 PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n", 422 vnic_id, vnic, vnic->fw_grp_ids); 423 424 /* populate the fw group table */ 425 bnxt_vnic_ring_grp_populate(bp, vnic); 426 bnxt_vnic_rules_init(vnic); 427 428 rc = bnxt_hwrm_vnic_alloc(bp, vnic); 429 if (rc) 430 goto err_out; 431 432 /* Alloc RSS context only if RSS mode is enabled */ 433 if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) { 434 int j, nr_ctxs 
= bnxt_rss_ctxts(bp); 435 436 /* RSS table size in P5 is 512. 437 * Cap max Rx rings to same value 438 */ 439 if (bp->rx_nr_rings > BNXT_RSS_TBL_SIZE_P5) { 440 PMD_DRV_LOG(ERR, "RxQ cnt %d > reta_size %d\n", 441 bp->rx_nr_rings, BNXT_RSS_TBL_SIZE_P5); 442 goto err_out; 443 } 444 445 rc = 0; 446 for (j = 0; j < nr_ctxs; j++) { 447 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j); 448 if (rc) 449 break; 450 } 451 if (rc) { 452 PMD_DRV_LOG(ERR, 453 "HWRM vnic %d ctx %d alloc failure rc: %x\n", 454 vnic_id, j, rc); 455 goto err_out; 456 } 457 vnic->num_lb_ctxts = nr_ctxs; 458 } 459 460 /* 461 * Firmware sets pf pair in default vnic cfg. If the VLAN strip 462 * setting is not available at this time, it will not be 463 * configured correctly in the CFA. 464 */ 465 if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 466 vnic->vlan_strip = true; 467 else 468 vnic->vlan_strip = false; 469 470 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 471 if (rc) 472 goto err_out; 473 474 rc = bnxt_set_hwrm_vnic_filters(bp, vnic); 475 if (rc) 476 goto err_out; 477 478 for (j = 0; j < bp->rx_num_qs_per_vnic; j++) { 479 rxq = bp->eth_dev->data->rx_queues[j]; 480 481 PMD_DRV_LOG(DEBUG, 482 "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n", 483 j, rxq->vnic, rxq->vnic->fw_grp_ids); 484 485 if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start) 486 vnic->fw_grp_ids[j] = INVALID_HW_RING_ID; 487 } 488 489 PMD_DRV_LOG(DEBUG, "vnic->rx_queue_cnt = %d\n", vnic->rx_queue_cnt); 490 491 rc = bnxt_vnic_rss_configure(bp, vnic); 492 if (rc) 493 goto err_out; 494 495 bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); 496 497 rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 498 (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) ? 499 true : false); 500 if (rc) 501 goto err_out; 502 503 return 0; 504 err_out: 505 PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n", 506 vnic_id, rc); 507 return rc; 508 } 509 510 static int bnxt_register_fc_ctx_mem(struct bnxt *bp) 511 { 512 int rc = 0; 513 514 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_in_tbl.dma, 515 &bp->flow_stat->rx_fc_in_tbl.ctx_id); 516 if (rc) 517 return rc; 518 519 PMD_DRV_LOG(DEBUG, 520 "rx_fc_in_tbl.va = %p rx_fc_in_tbl.dma = %p" 521 " rx_fc_in_tbl.ctx_id = %d\n", 522 bp->flow_stat->rx_fc_in_tbl.va, 523 (void *)((uintptr_t)bp->flow_stat->rx_fc_in_tbl.dma), 524 bp->flow_stat->rx_fc_in_tbl.ctx_id); 525 526 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_out_tbl.dma, 527 &bp->flow_stat->rx_fc_out_tbl.ctx_id); 528 if (rc) 529 return rc; 530 531 PMD_DRV_LOG(DEBUG, 532 "rx_fc_out_tbl.va = %p rx_fc_out_tbl.dma = %p" 533 " rx_fc_out_tbl.ctx_id = %d\n", 534 bp->flow_stat->rx_fc_out_tbl.va, 535 (void *)((uintptr_t)bp->flow_stat->rx_fc_out_tbl.dma), 536 bp->flow_stat->rx_fc_out_tbl.ctx_id); 537 538 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_in_tbl.dma, 539 &bp->flow_stat->tx_fc_in_tbl.ctx_id); 540 if (rc) 541 return rc; 542 543 PMD_DRV_LOG(DEBUG, 544 "tx_fc_in_tbl.va = %p tx_fc_in_tbl.dma = %p" 545 " tx_fc_in_tbl.ctx_id = %d\n", 546 bp->flow_stat->tx_fc_in_tbl.va, 547 (void *)((uintptr_t)bp->flow_stat->tx_fc_in_tbl.dma), 548 bp->flow_stat->tx_fc_in_tbl.ctx_id); 549 550 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_out_tbl.dma, 551 &bp->flow_stat->tx_fc_out_tbl.ctx_id); 552 if (rc) 553 return rc; 554 555 PMD_DRV_LOG(DEBUG, 556 "tx_fc_out_tbl.va = %p tx_fc_out_tbl.dma = %p" 557 " tx_fc_out_tbl.ctx_id = %d\n", 558 bp->flow_stat->tx_fc_out_tbl.va, 559 (void *)((uintptr_t)bp->flow_stat->tx_fc_out_tbl.dma), 560 bp->flow_stat->tx_fc_out_tbl.ctx_id); 561 562 memset(bp->flow_stat->rx_fc_out_tbl.va, 563 0, 564 
bp->flow_stat->rx_fc_out_tbl.size); 565 rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX, 566 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 567 bp->flow_stat->rx_fc_out_tbl.ctx_id, 568 bp->flow_stat->max_fc, 569 true); 570 if (rc) 571 return rc; 572 573 memset(bp->flow_stat->tx_fc_out_tbl.va, 574 0, 575 bp->flow_stat->tx_fc_out_tbl.size); 576 rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX, 577 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 578 bp->flow_stat->tx_fc_out_tbl.ctx_id, 579 bp->flow_stat->max_fc, 580 true); 581 582 return rc; 583 } 584 585 static int bnxt_alloc_ctx_mem_buf(struct bnxt *bp, char *type, size_t size, 586 struct bnxt_ctx_mem_buf_info *ctx) 587 { 588 if (!ctx) 589 return -EINVAL; 590 591 ctx->va = rte_zmalloc_socket(type, size, 0, 592 bp->eth_dev->device->numa_node); 593 if (ctx->va == NULL) 594 return -ENOMEM; 595 rte_mem_lock_page(ctx->va); 596 ctx->size = size; 597 ctx->dma = rte_mem_virt2iova(ctx->va); 598 if (ctx->dma == RTE_BAD_IOVA) 599 return -ENOMEM; 600 601 return 0; 602 } 603 604 static int bnxt_init_fc_ctx_mem(struct bnxt *bp) 605 { 606 struct rte_pci_device *pdev = bp->pdev; 607 char type[RTE_MEMZONE_NAMESIZE]; 608 uint16_t max_fc; 609 int rc = 0; 610 611 max_fc = bp->flow_stat->max_fc; 612 613 sprintf(type, "bnxt_rx_fc_in_" PCI_PRI_FMT, pdev->addr.domain, 614 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 615 /* 4 bytes for each counter-id */ 616 rc = bnxt_alloc_ctx_mem_buf(bp, type, 617 max_fc * 4, 618 &bp->flow_stat->rx_fc_in_tbl); 619 if (rc) 620 return rc; 621 622 sprintf(type, "bnxt_rx_fc_out_" PCI_PRI_FMT, pdev->addr.domain, 623 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 624 /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */ 625 rc = bnxt_alloc_ctx_mem_buf(bp, type, 626 max_fc * 16, 627 &bp->flow_stat->rx_fc_out_tbl); 628 if (rc) 629 return rc; 630 631 sprintf(type, "bnxt_tx_fc_in_" PCI_PRI_FMT, pdev->addr.domain, 632 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 633 /* 4 bytes for each counter-id */ 634 rc = bnxt_alloc_ctx_mem_buf(bp, type, 635 max_fc * 4, 636 &bp->flow_stat->tx_fc_in_tbl); 637 if (rc) 638 return rc; 639 640 sprintf(type, "bnxt_tx_fc_out_" PCI_PRI_FMT, pdev->addr.domain, 641 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 642 /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */ 643 rc = bnxt_alloc_ctx_mem_buf(bp, type, 644 max_fc * 16, 645 &bp->flow_stat->tx_fc_out_tbl); 646 if (rc) 647 return rc; 648 649 rc = bnxt_register_fc_ctx_mem(bp); 650 651 return rc; 652 } 653 654 static int bnxt_init_ctx_mem(struct bnxt *bp) 655 { 656 int rc = 0; 657 658 if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS) || 659 !(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) || 660 !BNXT_FLOW_XSTATS_EN(bp)) 661 return 0; 662 663 rc = bnxt_hwrm_cfa_counter_qcaps(bp, &bp->flow_stat->max_fc); 664 if (rc) 665 return rc; 666 667 rc = bnxt_init_fc_ctx_mem(bp); 668 669 return rc; 670 } 671 672 static inline bool bnxt_force_link_config(struct bnxt *bp) 673 { 674 uint16_t subsystem_device_id = bp->pdev->id.subsystem_device_id; 675 676 switch (subsystem_device_id) { 677 case BROADCOM_DEV_957508_N2100: 678 case BROADCOM_DEV_957414_N225: 679 return true; 680 default: 681 return false; 682 } 683 } 684 685 static int bnxt_update_phy_setting(struct bnxt *bp) 686 { 687 struct rte_eth_link new; 688 int rc; 689 690 rc = bnxt_get_hwrm_link_config(bp, &new); 691 if (rc) { 692 PMD_DRV_LOG(ERR, "Failed to get link settings\n"); 693 return rc; 694 } 695 696 /* 697 * Device is not obliged link down in certain scenarios, even 698 * 
when forced. When the FW does not allow any user other than the BMC
	 * to shut down the port, bnxt_get_hwrm_link_config() always
	 * reports the link as up. Always force a PHY update in that case.
	 */
	if (!new.link_status || bnxt_force_link_config(bp)) {
		rc = bnxt_set_hwrm_link_config(bp, true);
		if (rc) {
			PMD_DRV_LOG(ERR, "Failed to update PHY settings\n");
			return rc;
		}
	}

	return rc;
}

static void bnxt_free_prev_ring_stats(struct bnxt *bp)
{
	rte_free(bp->prev_rx_ring_stats);
	rte_free(bp->prev_tx_ring_stats);

	bp->prev_rx_ring_stats = NULL;
	bp->prev_tx_ring_stats = NULL;
}

static int bnxt_alloc_prev_ring_stats(struct bnxt *bp)
{
	bp->prev_rx_ring_stats = rte_zmalloc("bnxt_prev_rx_ring_stats",
					     sizeof(struct bnxt_ring_stats) *
					     bp->rx_cp_nr_rings,
					     0);
	if (bp->prev_rx_ring_stats == NULL)
		return -ENOMEM;

	bp->prev_tx_ring_stats = rte_zmalloc("bnxt_prev_tx_ring_stats",
					     sizeof(struct bnxt_ring_stats) *
					     bp->tx_cp_nr_rings,
					     0);
	if (bp->tx_cp_nr_rings > 0 && bp->prev_tx_ring_stats == NULL)
		goto error;

	return 0;

error:
	bnxt_free_prev_ring_stats(bp);
	return -ENOMEM;
}

static int bnxt_start_nic(struct bnxt *bp)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	uint32_t queue_id, base = BNXT_MISC_VEC_ID;
	uint32_t vec = BNXT_MISC_VEC_ID;
	unsigned int i, j;
	int rc;

	if (bp->eth_dev->data->mtu > RTE_ETHER_MTU)
		bp->flags |= BNXT_FLAG_JUMBO;
	else
		bp->flags &= ~BNXT_FLAG_JUMBO;

	/* P5 does not support ring groups.
	 * But we will use the array to save RSS context IDs.
762 */ 763 if (BNXT_CHIP_P5(bp)) 764 bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_P5; 765 766 rc = bnxt_vnic_queue_db_init(bp); 767 if (rc) { 768 PMD_DRV_LOG(ERR, "could not allocate vnic db\n"); 769 goto err_out; 770 } 771 772 rc = bnxt_alloc_hwrm_rings(bp); 773 if (rc) { 774 PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc); 775 goto err_out; 776 } 777 778 rc = bnxt_alloc_all_hwrm_ring_grps(bp); 779 if (rc) { 780 PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc); 781 goto err_out; 782 } 783 784 if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY)) 785 goto skip_cosq_cfg; 786 787 for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) { 788 if (bp->rx_cos_queue[i].id != 0xff) { 789 struct bnxt_vnic_info *vnic = &bp->vnic_info[j++]; 790 791 if (!vnic) { 792 PMD_DRV_LOG(ERR, 793 "Num pools more than FW profile\n"); 794 rc = -EINVAL; 795 goto err_out; 796 } 797 vnic->cos_queue_id = bp->rx_cos_queue[i].id; 798 bp->rx_cosq_cnt++; 799 } 800 } 801 802 skip_cosq_cfg: 803 rc = bnxt_mq_rx_configure(bp); 804 if (rc) { 805 PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc); 806 goto err_out; 807 } 808 809 for (j = 0; j < bp->rx_nr_rings; j++) { 810 struct bnxt_rx_queue *rxq = bp->rx_queues[j]; 811 812 if (!rxq->rx_deferred_start) { 813 bp->eth_dev->data->rx_queue_state[j] = 814 RTE_ETH_QUEUE_STATE_STARTED; 815 rxq->rx_started = true; 816 } 817 } 818 819 /* setup the default vnic details*/ 820 bnxt_vnic_queue_db_update_dlft_vnic(bp); 821 822 /* VNIC configuration */ 823 for (i = 0; i < bp->nr_vnics; i++) { 824 rc = bnxt_setup_one_vnic(bp, i); 825 if (rc) 826 goto err_out; 827 } 828 829 for (j = 0; j < bp->tx_nr_rings; j++) { 830 struct bnxt_tx_queue *txq = bp->tx_queues[j]; 831 832 if (!txq->tx_deferred_start) { 833 bp->eth_dev->data->tx_queue_state[j] = 834 RTE_ETH_QUEUE_STATE_STARTED; 835 txq->tx_started = true; 836 } 837 } 838 839 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL); 840 if (rc) { 841 PMD_DRV_LOG(ERR, 842 "HWRM cfa l2 rx mask failure rc: %x\n", rc); 843 goto err_out; 844 } 845 846 /* check and configure queue intr-vector mapping */ 847 if ((rte_intr_cap_multiple(intr_handle) || 848 !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) && 849 bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) { 850 intr_vector = bp->eth_dev->data->nb_rx_queues; 851 PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector); 852 if (intr_vector > bp->rx_cp_nr_rings) { 853 PMD_DRV_LOG(ERR, "At most %d intr queues supported", 854 bp->rx_cp_nr_rings); 855 return -ENOTSUP; 856 } 857 rc = rte_intr_efd_enable(intr_handle, intr_vector); 858 if (rc) 859 return rc; 860 } 861 862 if (rte_intr_dp_is_en(intr_handle)) { 863 if (rte_intr_vec_list_alloc(intr_handle, "intr_vec", 864 bp->eth_dev->data->nb_rx_queues)) { 865 PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues" 866 " intr_vec", bp->eth_dev->data->nb_rx_queues); 867 rc = -ENOMEM; 868 goto err_out; 869 } 870 PMD_DRV_LOG(DEBUG, "intr_handle->nb_efd = %d " 871 "intr_handle->max_intr = %d\n", 872 rte_intr_nb_efd_get(intr_handle), 873 rte_intr_max_intr_get(intr_handle)); 874 for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues; 875 queue_id++) { 876 rte_intr_vec_list_index_set(intr_handle, 877 queue_id, vec + BNXT_RX_VEC_START); 878 if (vec < base + rte_intr_nb_efd_get(intr_handle) 879 - 1) 880 vec++; 881 } 882 } 883 884 /* enable uio/vfio intr/eventfd mapping */ 885 rc = rte_intr_enable(intr_handle); 886 #ifndef RTE_EXEC_ENV_FREEBSD 887 /* In FreeBSD OS, nic_uio driver does not support interrupts */ 888 if (rc) 889 goto err_out; 890 #endif 891 892 
rc = bnxt_update_phy_setting(bp); 893 if (rc) 894 goto err_out; 895 896 bp->mark_table = rte_zmalloc("bnxt_mark_table", BNXT_MARK_TABLE_SZ, 0); 897 if (!bp->mark_table) 898 PMD_DRV_LOG(ERR, "Allocation of mark table failed\n"); 899 900 return 0; 901 902 err_out: 903 /* Some of the error status returned by FW may not be from errno.h */ 904 if (rc > 0) 905 rc = -EIO; 906 907 return rc; 908 } 909 910 static int bnxt_shutdown_nic(struct bnxt *bp) 911 { 912 bnxt_free_all_hwrm_resources(bp); 913 bnxt_free_all_filters(bp); 914 bnxt_free_all_vnics(bp); 915 bnxt_vnic_queue_db_deinit(bp); 916 return 0; 917 } 918 919 /* 920 * Device configuration and status function 921 */ 922 923 uint32_t bnxt_get_speed_capabilities(struct bnxt *bp) 924 { 925 uint32_t pam4_link_speed = 0; 926 uint32_t link_speed = 0; 927 uint32_t speed_capa = 0; 928 929 if (bp->link_info == NULL) 930 return 0; 931 932 link_speed = bp->link_info->support_speeds; 933 934 /* If PAM4 is configured, use PAM4 supported speed */ 935 if (bp->link_info->support_pam4_speeds > 0) 936 pam4_link_speed = bp->link_info->support_pam4_speeds; 937 938 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB) 939 speed_capa |= RTE_ETH_LINK_SPEED_100M; 940 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD) 941 speed_capa |= RTE_ETH_LINK_SPEED_100M_HD; 942 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB) 943 speed_capa |= RTE_ETH_LINK_SPEED_1G; 944 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB) 945 speed_capa |= RTE_ETH_LINK_SPEED_2_5G; 946 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB) 947 speed_capa |= RTE_ETH_LINK_SPEED_10G; 948 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB) 949 speed_capa |= RTE_ETH_LINK_SPEED_20G; 950 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB) 951 speed_capa |= RTE_ETH_LINK_SPEED_25G; 952 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB) 953 speed_capa |= RTE_ETH_LINK_SPEED_40G; 954 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB) 955 speed_capa |= RTE_ETH_LINK_SPEED_50G; 956 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB) 957 speed_capa |= RTE_ETH_LINK_SPEED_100G; 958 if (pam4_link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G) 959 speed_capa |= RTE_ETH_LINK_SPEED_50G; 960 if (pam4_link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G) 961 speed_capa |= RTE_ETH_LINK_SPEED_100G; 962 if (pam4_link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G) 963 speed_capa |= RTE_ETH_LINK_SPEED_200G; 964 965 if (bp->link_info->auto_mode == 966 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE) 967 speed_capa |= RTE_ETH_LINK_SPEED_FIXED; 968 969 return speed_capa; 970 } 971 972 static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, 973 struct rte_eth_dev_info *dev_info) 974 { 975 struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device); 976 struct bnxt *bp = eth_dev->data->dev_private; 977 uint16_t max_vnics, i, j, vpool, vrxq; 978 unsigned int max_rx_rings; 979 int rc; 980 981 rc = is_bnxt_in_error(bp); 982 if (rc) 983 return rc; 984 985 /* MAC Specifics */ 986 dev_info->max_mac_addrs = RTE_MIN(bp->max_l2_ctx, RTE_ETH_NUM_RECEIVE_MAC_ADDR); 987 dev_info->max_hash_mac_addrs = 0; 988 989 /* PF/VF specifics */ 990 if (BNXT_PF(bp)) 991 dev_info->max_vfs = pdev->max_vfs; 992 993 max_rx_rings = bnxt_max_rings(bp); 994 /* For the sake of symmetry, max_rx_queues = max_tx_queues */ 995 dev_info->max_rx_queues = max_rx_rings; 996 dev_info->max_tx_queues = max_rx_rings; 
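	/*
	 * Illustrative note (assumption, not upstream code): the limits and
	 * defaults this function fills in are what applications see through
	 * rte_eth_dev_info_get(port_id, &dev_info) and are expected to
	 * respect when calling rte_eth_dev_configure() and the per-queue
	 * setup functions.
	 */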
	dev_info->reta_size = bnxt_rss_hash_tbl_size(bp);
	dev_info->hash_key_size = HW_HASH_KEY_SIZE;
	max_vnics = bp->max_vnics;

	/* MTU specifics */
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	dev_info->max_mtu = BNXT_MAX_MTU;

	/* Fast path specifics */
	dev_info->min_rx_bufsize = 1;
	dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;

	dev_info->rx_offload_capa = bnxt_get_rx_port_offloads(bp);
	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
	dev_info->tx_offload_capa = bnxt_get_tx_port_offloads(bp) |
				    dev_info->tx_queue_offload_capa;
	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;

	dev_info->speed_capa = bnxt_get_speed_capabilities(bp);
	dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
			     RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = 8,
			.hthresh = 8,
			.wthresh = 0,
		},
		.rx_free_thresh = 32,
		.rx_drop_en = BNXT_DEFAULT_RX_DROP_EN,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = 32,
			.hthresh = 0,
			.wthresh = 0,
		},
		.tx_free_thresh = 32,
		.tx_rs_thresh = 32,
	};

	dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
	dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC;
	dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
	dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC;

	if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) {
		dev_info->switch_info.name = eth_dev->device->name;
		dev_info->switch_info.domain_id = bp->switch_domain_id;
		dev_info->switch_info.port_id =
			BNXT_PF(bp) ? BNXT_SWITCH_PORT_ID_PF :
				      BNXT_SWITCH_PORT_ID_TRUSTED_VF;
	}

	/*
	 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
	 * need further investigation.
	 */

	/* VMDq resources */
	vpool = 64; /* RTE_ETH_64_POOLS */
	vrxq = 128; /* RTE_ETH_VMDQ_DCB_NUM_QUEUES */
	for (i = 0; i < 4; vpool >>= 1, i++) {
		if (max_vnics > vpool) {
			for (j = 0; j < 5; vrxq >>= 1, j++) {
				if (dev_info->max_rx_queues > vrxq) {
					if (vpool > vrxq)
						vpool = vrxq;
					goto found;
				}
			}
			/* Not enough resources to support VMDq */
			break;
		}
	}
	/* Not enough resources to support VMDq */
	vpool = 0;
	vrxq = 0;
found:
	dev_info->max_vmdq_pools = vpool;
	dev_info->vmdq_queue_num = vrxq;

	dev_info->vmdq_pool_base = 0;
	dev_info->vmdq_queue_base = 0;

	dev_info->err_handle_mode = RTE_ETH_ERROR_HANDLE_MODE_PROACTIVE;

	return 0;
}

/* Configure the device based on the configuration provided */
static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
	struct rte_eth_rss_conf *rss_conf = &eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	int rc;

	bp->rx_queues = (void *)eth_dev->data->rx_queues;
	bp->tx_queues = (void *)eth_dev->data->tx_queues;
	bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
	bp->rx_nr_rings = eth_dev->data->nb_rx_queues;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) {
		rc = bnxt_hwrm_check_vf_rings(bp);
		if (rc) {
			PMD_DRV_LOG(ERR, "HWRM insufficient resources\n");
			return -ENOSPC;
		}

		/* If a resource has already been allocated - in this case
		 * it is the async completion ring, free it. Reallocate it after
		 * resource reservation. This will ensure the resource counts
		 * are calculated correctly.
		 */

		pthread_mutex_lock(&bp->def_cp_lock);

		if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
			bnxt_disable_int(bp);
			bnxt_free_cp_ring(bp, bp->async_cp_ring);
		}

		rc = bnxt_hwrm_func_reserve_vf_resc(bp, false);
		if (rc) {
			PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc);
			pthread_mutex_unlock(&bp->def_cp_lock);
			return -ENOSPC;
		}

		if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
			rc = bnxt_alloc_async_cp_ring(bp);
			if (rc) {
				pthread_mutex_unlock(&bp->def_cp_lock);
				return rc;
			}
			bnxt_enable_int(bp);
		}

		pthread_mutex_unlock(&bp->def_cp_lock);
	}

	/* Inherit new configurations */
	if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
	    eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues
	    + BNXT_NUM_ASYNC_CPR(bp) > bp->max_cp_rings ||
	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
	    bp->max_stat_ctx)
		goto resource_error;

	if (BNXT_HAS_RING_GRPS(bp) &&
	    (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps)
		goto resource_error;

	if (!(eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) &&
	    bp->max_vnics < eth_dev->data->nb_rx_queues)
		goto resource_error;

	bp->rx_cp_nr_rings = bp->rx_nr_rings;
	bp->tx_cp_nr_rings = bp->tx_nr_rings;

	if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
		rx_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
	eth_dev->data->dev_conf.rxmode.offloads = rx_offloads;

	/* application provides the hash key to program */
	if (rss_conf->rss_key != NULL) {
		if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE)
			PMD_DRV_LOG(WARNING, "port %u RSS key len must be %d bytes long",
				    eth_dev->data->port_id, HW_HASH_KEY_SIZE);
		else
			memcpy(bp->rss_conf.rss_key, rss_conf->rss_key, HW_HASH_KEY_SIZE);
	}
	bp->rss_conf.rss_key_len = HW_HASH_KEY_SIZE;
	bp->rss_conf.rss_hf = rss_conf->rss_hf;

	bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);

	return 0;

resource_error:
	PMD_DRV_LOG(ERR,
		    "Insufficient resources to support requested config\n");
	PMD_DRV_LOG(ERR,
		    "Num Queues Requested: Tx %d, Rx %d\n",
		    eth_dev->data->nb_tx_queues,
		    eth_dev->data->nb_rx_queues);
	PMD_DRV_LOG(ERR,
		    "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n",
		    bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
		    bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics);
	return -ENOSPC;
}

void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_link *link = &eth_dev->data->dev_link;

	if (link->link_status)
		PMD_DRV_LOG(DEBUG, "Port %d Link Up - speed %u Mbps - %s\n",
			    eth_dev->data->port_id,
			    (uint32_t)link->link_speed,
			    (link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
			    ("full-duplex") : ("half-duplex"));
	else
		PMD_DRV_LOG(INFO, "Port %d Link Down\n",
			    eth_dev->data->port_id);
}

/*
 * Determine whether the current configuration requires support for scattered
 * receive; return 1 if scattered receive is required and 0 if not.
1216 */ 1217 static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev) 1218 { 1219 uint32_t overhead = BNXT_MAX_PKT_LEN - BNXT_MAX_MTU; 1220 uint16_t buf_size; 1221 int i; 1222 1223 if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) 1224 return 1; 1225 1226 if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) 1227 return 1; 1228 1229 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { 1230 struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i]; 1231 1232 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) - 1233 RTE_PKTMBUF_HEADROOM); 1234 if (eth_dev->data->mtu + overhead > buf_size) 1235 return 1; 1236 } 1237 return 0; 1238 } 1239 1240 static eth_rx_burst_t 1241 bnxt_receive_function(struct rte_eth_dev *eth_dev) 1242 { 1243 struct bnxt *bp = eth_dev->data->dev_private; 1244 1245 /* Disable vector mode RX for Stingray2 for now */ 1246 if (BNXT_CHIP_SR2(bp)) { 1247 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 1248 return bnxt_recv_pkts; 1249 } 1250 1251 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) 1252 /* Vector mode receive cannot be enabled if scattered rx is in use. */ 1253 if (eth_dev->data->scattered_rx) 1254 goto use_scalar_rx; 1255 1256 /* 1257 * Vector mode receive cannot be enabled if Truflow is enabled or if 1258 * asynchronous completions and receive completions can be placed in 1259 * the same completion ring. 1260 */ 1261 if (BNXT_TRUFLOW_EN(bp) || !BNXT_NUM_ASYNC_CPR(bp)) 1262 goto use_scalar_rx; 1263 1264 /* 1265 * Vector mode receive cannot be enabled if any receive offloads outside 1266 * a limited subset have been enabled. 1267 */ 1268 if (eth_dev->data->dev_conf.rxmode.offloads & 1269 ~(RTE_ETH_RX_OFFLOAD_VLAN_STRIP | 1270 RTE_ETH_RX_OFFLOAD_KEEP_CRC | 1271 RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | 1272 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | 1273 RTE_ETH_RX_OFFLOAD_TCP_CKSUM | 1274 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | 1275 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | 1276 RTE_ETH_RX_OFFLOAD_RSS_HASH | 1277 RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) 1278 goto use_scalar_rx; 1279 1280 if (bp->ieee_1588) 1281 goto use_scalar_rx; 1282 1283 #if defined(RTE_ARCH_X86) 1284 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 && 1285 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1) { 1286 PMD_DRV_LOG(INFO, 1287 "Using AVX2 vector mode receive for port %d\n", 1288 eth_dev->data->port_id); 1289 bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE; 1290 return bnxt_recv_pkts_vec_avx2; 1291 } 1292 #endif 1293 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) { 1294 PMD_DRV_LOG(INFO, 1295 "Using SSE vector mode receive for port %d\n", 1296 eth_dev->data->port_id); 1297 bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE; 1298 return bnxt_recv_pkts_vec; 1299 } 1300 1301 use_scalar_rx: 1302 PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n", 1303 eth_dev->data->port_id); 1304 PMD_DRV_LOG(INFO, 1305 "Port %d scatter: %d rx offload: %" PRIX64 "\n", 1306 eth_dev->data->port_id, 1307 eth_dev->data->scattered_rx, 1308 eth_dev->data->dev_conf.rxmode.offloads); 1309 #endif 1310 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 1311 return bnxt_recv_pkts; 1312 } 1313 1314 static eth_tx_burst_t 1315 bnxt_transmit_function(struct rte_eth_dev *eth_dev) 1316 { 1317 struct bnxt *bp = eth_dev->data->dev_private; 1318 1319 /* Disable vector mode TX for Stingray2 for now */ 1320 if (BNXT_CHIP_SR2(bp)) 1321 return bnxt_xmit_pkts; 1322 1323 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) 1324 uint64_t offloads = eth_dev->data->dev_conf.txmode.offloads; 1325 1326 /* 1327 * 
Vector mode transmit can be enabled only if not using scatter rx 1328 * or tx offloads. 1329 */ 1330 if (eth_dev->data->scattered_rx || 1331 (offloads & ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) || 1332 BNXT_TRUFLOW_EN(bp) || bp->ieee_1588) 1333 goto use_scalar_tx; 1334 1335 #if defined(RTE_ARCH_X86) 1336 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 && 1337 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1) { 1338 PMD_DRV_LOG(INFO, 1339 "Using AVX2 vector mode transmit for port %d\n", 1340 eth_dev->data->port_id); 1341 return bnxt_xmit_pkts_vec_avx2; 1342 } 1343 #endif 1344 if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) { 1345 PMD_DRV_LOG(INFO, 1346 "Using SSE vector mode transmit for port %d\n", 1347 eth_dev->data->port_id); 1348 return bnxt_xmit_pkts_vec; 1349 } 1350 1351 use_scalar_tx: 1352 PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n", 1353 eth_dev->data->port_id); 1354 PMD_DRV_LOG(INFO, 1355 "Port %d scatter: %d tx offload: %" PRIX64 "\n", 1356 eth_dev->data->port_id, 1357 eth_dev->data->scattered_rx, 1358 offloads); 1359 #endif 1360 return bnxt_xmit_pkts; 1361 } 1362 1363 static int bnxt_handle_if_change_status(struct bnxt *bp) 1364 { 1365 int rc; 1366 1367 /* Since fw has undergone a reset and lost all contexts, 1368 * set fatal flag to not issue hwrm during cleanup 1369 */ 1370 bp->flags |= BNXT_FLAG_FATAL_ERROR; 1371 bnxt_uninit_resources(bp, true); 1372 1373 /* clear fatal flag so that re-init happens */ 1374 bp->flags &= ~BNXT_FLAG_FATAL_ERROR; 1375 1376 rc = bnxt_check_fw_ready(bp); 1377 if (rc) 1378 return rc; 1379 1380 rc = bnxt_init_resources(bp, true); 1381 1382 bp->flags &= ~BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE; 1383 1384 return rc; 1385 } 1386 1387 static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev) 1388 { 1389 struct bnxt *bp = eth_dev->data->dev_private; 1390 int rc = 0; 1391 1392 if (!BNXT_SINGLE_PF(bp)) 1393 return -ENOTSUP; 1394 1395 if (!bp->link_info->link_up) 1396 rc = bnxt_set_hwrm_link_config(bp, true); 1397 if (!rc) 1398 eth_dev->data->dev_link.link_status = 1; 1399 1400 bnxt_print_link_info(eth_dev); 1401 return rc; 1402 } 1403 1404 static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev) 1405 { 1406 struct bnxt *bp = eth_dev->data->dev_private; 1407 1408 if (!BNXT_SINGLE_PF(bp)) 1409 return -ENOTSUP; 1410 1411 eth_dev->data->dev_link.link_status = 0; 1412 bnxt_set_hwrm_link_config(bp, false); 1413 bp->link_info->link_up = 0; 1414 1415 return 0; 1416 } 1417 1418 static void bnxt_free_switch_domain(struct bnxt *bp) 1419 { 1420 int rc = 0; 1421 1422 if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) 1423 return; 1424 1425 rc = rte_eth_switch_domain_free(bp->switch_domain_id); 1426 if (rc) 1427 PMD_DRV_LOG(ERR, "free switch domain:%d fail: %d\n", 1428 bp->switch_domain_id, rc); 1429 } 1430 1431 static void bnxt_ptp_get_current_time(void *arg) 1432 { 1433 struct bnxt *bp = arg; 1434 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 1435 int rc; 1436 1437 rc = is_bnxt_in_error(bp); 1438 if (rc) 1439 return; 1440 1441 if (!ptp) 1442 return; 1443 1444 rte_spinlock_lock(&ptp->ptp_lock); 1445 ptp->old_time = ptp->current_time; 1446 bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, 1447 &ptp->current_time); 1448 rte_spinlock_unlock(&ptp->ptp_lock); 1449 rc = rte_eal_alarm_set(US_PER_S, bnxt_ptp_get_current_time, (void *)bp); 1450 if (rc != 0) { 1451 PMD_DRV_LOG(ERR, "Failed to re-schedule PTP alarm\n"); 1452 bp->flags2 &= ~BNXT_FLAGS2_PTP_ALARM_SCHEDULED; 1453 } 1454 } 1455 1456 static int bnxt_schedule_ptp_alarm(struct 
bnxt *bp) 1457 { 1458 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 1459 int rc; 1460 1461 if (bp->flags2 & BNXT_FLAGS2_PTP_ALARM_SCHEDULED) 1462 return 0; 1463 1464 rte_spinlock_lock(&ptp->ptp_lock); 1465 bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, 1466 &ptp->current_time); 1467 ptp->old_time = ptp->current_time; 1468 rte_spinlock_unlock(&ptp->ptp_lock); 1469 1470 1471 rc = rte_eal_alarm_set(US_PER_S, bnxt_ptp_get_current_time, (void *)bp); 1472 return rc; 1473 } 1474 1475 static void bnxt_cancel_ptp_alarm(struct bnxt *bp) 1476 { 1477 if (bp->flags2 & BNXT_FLAGS2_PTP_ALARM_SCHEDULED) { 1478 rte_eal_alarm_cancel(bnxt_ptp_get_current_time, (void *)bp); 1479 bp->flags2 &= ~BNXT_FLAGS2_PTP_ALARM_SCHEDULED; 1480 } 1481 } 1482 1483 static void bnxt_ptp_stop(struct bnxt *bp) 1484 { 1485 bnxt_cancel_ptp_alarm(bp); 1486 bp->flags2 &= ~BNXT_FLAGS2_PTP_TIMESYNC_ENABLED; 1487 } 1488 1489 static int bnxt_ptp_start(struct bnxt *bp) 1490 { 1491 int rc; 1492 1493 rc = bnxt_schedule_ptp_alarm(bp); 1494 if (rc != 0) { 1495 PMD_DRV_LOG(ERR, "Failed to schedule PTP alarm\n"); 1496 } else { 1497 bp->flags2 |= BNXT_FLAGS2_PTP_TIMESYNC_ENABLED; 1498 bp->flags2 |= BNXT_FLAGS2_PTP_ALARM_SCHEDULED; 1499 } 1500 1501 return rc; 1502 } 1503 1504 static int bnxt_dev_stop(struct rte_eth_dev *eth_dev) 1505 { 1506 struct bnxt *bp = eth_dev->data->dev_private; 1507 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1508 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 1509 struct rte_eth_link link; 1510 int ret; 1511 1512 eth_dev->data->dev_started = 0; 1513 1514 /* Prevent crashes when queues are still in use */ 1515 bnxt_stop_rxtx(eth_dev); 1516 1517 bnxt_disable_int(bp); 1518 1519 /* disable uio/vfio intr/eventfd mapping */ 1520 rte_intr_disable(intr_handle); 1521 1522 /* Stop the child representors for this device */ 1523 ret = bnxt_rep_stop_all(bp); 1524 if (ret != 0) 1525 return ret; 1526 1527 /* delete the bnxt ULP port details */ 1528 bnxt_ulp_port_deinit(bp); 1529 1530 bnxt_cancel_fw_health_check(bp); 1531 1532 if (BNXT_P5_PTP_TIMESYNC_ENABLED(bp)) 1533 bnxt_cancel_ptp_alarm(bp); 1534 1535 /* Do not bring link down during reset recovery */ 1536 if (!is_bnxt_in_error(bp)) { 1537 bnxt_dev_set_link_down_op(eth_dev); 1538 /* Wait for link to be reset */ 1539 if (BNXT_SINGLE_PF(bp)) 1540 rte_delay_ms(500); 1541 /* clear the recorded link status */ 1542 memset(&link, 0, sizeof(link)); 1543 rte_eth_linkstatus_set(eth_dev, &link); 1544 } 1545 1546 /* Clean queue intr-vector mapping */ 1547 rte_intr_efd_disable(intr_handle); 1548 rte_intr_vec_list_free(intr_handle); 1549 1550 bnxt_hwrm_port_clr_stats(bp); 1551 bnxt_free_tx_mbufs(bp); 1552 bnxt_free_rx_mbufs(bp); 1553 /* Process any remaining notifications in default completion queue */ 1554 bnxt_int_handler(eth_dev); 1555 bnxt_shutdown_nic(bp); 1556 bnxt_hwrm_if_change(bp, false); 1557 1558 bnxt_free_prev_ring_stats(bp); 1559 rte_free(bp->mark_table); 1560 bp->mark_table = NULL; 1561 1562 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 1563 bp->rx_cosq_cnt = 0; 1564 /* All filters are deleted on a port stop. 
*/ 1565 if (BNXT_FLOW_XSTATS_EN(bp)) 1566 bp->flow_stat->flow_count = 0; 1567 1568 eth_dev->data->scattered_rx = 0; 1569 1570 return 0; 1571 } 1572 1573 /* Unload the driver, release resources */ 1574 int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev) 1575 { 1576 struct bnxt *bp = eth_dev->data->dev_private; 1577 1578 pthread_mutex_lock(&bp->err_recovery_lock); 1579 if (bp->flags & BNXT_FLAG_FW_RESET) { 1580 PMD_DRV_LOG(ERR, 1581 "Adapter recovering from error..Please retry\n"); 1582 pthread_mutex_unlock(&bp->err_recovery_lock); 1583 return -EAGAIN; 1584 } 1585 pthread_mutex_unlock(&bp->err_recovery_lock); 1586 1587 return bnxt_dev_stop(eth_dev); 1588 } 1589 1590 int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) 1591 { 1592 struct bnxt *bp = eth_dev->data->dev_private; 1593 uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; 1594 int vlan_mask = 0; 1595 int rc, retry_cnt = BNXT_IF_CHANGE_RETRY_COUNT; 1596 1597 if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) 1598 PMD_DRV_LOG(ERR, 1599 "RxQ cnt %d > RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n", 1600 bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS); 1601 1602 do { 1603 rc = bnxt_hwrm_if_change(bp, true); 1604 if (rc == 0 || rc != -EAGAIN) 1605 break; 1606 1607 rte_delay_ms(BNXT_IF_CHANGE_RETRY_INTERVAL); 1608 } while (retry_cnt--); 1609 1610 if (rc) 1611 return rc; 1612 1613 if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) { 1614 rc = bnxt_handle_if_change_status(bp); 1615 if (rc) 1616 return rc; 1617 } 1618 1619 bnxt_enable_int(bp); 1620 1621 eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev); 1622 1623 rc = bnxt_start_nic(bp); 1624 if (rc) 1625 goto error; 1626 1627 rc = bnxt_alloc_prev_ring_stats(bp); 1628 if (rc) 1629 goto error; 1630 1631 eth_dev->data->dev_started = 1; 1632 1633 bnxt_link_update_op(eth_dev, 0); 1634 1635 if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) 1636 vlan_mask |= RTE_ETH_VLAN_FILTER_MASK; 1637 if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 1638 vlan_mask |= RTE_ETH_VLAN_STRIP_MASK; 1639 rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask); 1640 if (rc) 1641 goto error; 1642 1643 /* Initialize bnxt ULP port details */ 1644 rc = bnxt_ulp_port_init(bp); 1645 if (rc) 1646 goto error; 1647 1648 eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev); 1649 eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev); 1650 1651 bnxt_schedule_fw_health_check(bp); 1652 1653 if (BNXT_P5_PTP_TIMESYNC_ENABLED(bp)) 1654 bnxt_schedule_ptp_alarm(bp); 1655 1656 return 0; 1657 1658 error: 1659 bnxt_dev_stop(eth_dev); 1660 return rc; 1661 } 1662 1663 static void 1664 bnxt_uninit_locks(struct bnxt *bp) 1665 { 1666 pthread_mutex_destroy(&bp->flow_lock); 1667 pthread_mutex_destroy(&bp->def_cp_lock); 1668 pthread_mutex_destroy(&bp->health_check_lock); 1669 pthread_mutex_destroy(&bp->err_recovery_lock); 1670 if (bp->rep_info) { 1671 pthread_mutex_destroy(&bp->rep_info->vfr_lock); 1672 pthread_mutex_destroy(&bp->rep_info->vfr_start_lock); 1673 } 1674 } 1675 1676 static void bnxt_drv_uninit(struct bnxt *bp) 1677 { 1678 bnxt_free_leds_info(bp); 1679 bnxt_free_cos_queues(bp); 1680 bnxt_free_link_info(bp); 1681 bnxt_free_parent_info(bp); 1682 bnxt_uninit_locks(bp); 1683 bnxt_free_rep_info(bp); 1684 1685 rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone); 1686 bp->tx_mem_zone = NULL; 1687 rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone); 1688 bp->rx_mem_zone = NULL; 1689 1690 bnxt_free_vf_info(bp); 1691 bnxt_free_pf_info(bp); 1692 1693 rte_free(bp->grp_info); 1694 bp->grp_info = NULL; 1695 } 1696 1697 
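/*
 * Illustrative sketch (not part of the driver): the start/stop/close ops
 * above and below are normally driven through the generic ethdev API,
 * roughly in this order:
 *
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *	rte_eth_rx_queue_setup(...) / rte_eth_tx_queue_setup(...);
 *	rte_eth_dev_start(port_id);	// -> bnxt_dev_start_op()
 *	...
 *	rte_eth_dev_stop(port_id);	// -> bnxt_dev_stop_op()
 *	rte_eth_dev_close(port_id);	// -> bnxt_dev_close_op()
 */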
static int bnxt_dev_close_op(struct rte_eth_dev *eth_dev) 1698 { 1699 struct bnxt *bp = eth_dev->data->dev_private; 1700 int ret = 0; 1701 1702 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1703 return 0; 1704 1705 pthread_mutex_lock(&bp->err_recovery_lock); 1706 if (bp->flags & BNXT_FLAG_FW_RESET) { 1707 PMD_DRV_LOG(ERR, 1708 "Adapter recovering from error...Please retry\n"); 1709 pthread_mutex_unlock(&bp->err_recovery_lock); 1710 return -EAGAIN; 1711 } 1712 pthread_mutex_unlock(&bp->err_recovery_lock); 1713 1714 /* cancel the recovery handler before remove dev */ 1715 rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp); 1716 rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp); 1717 bnxt_cancel_fc_thread(bp); 1718 rte_eal_alarm_cancel(bnxt_handle_vf_cfg_change, (void *)bp); 1719 1720 if (eth_dev->data->dev_started) 1721 ret = bnxt_dev_stop(eth_dev); 1722 1723 bnxt_uninit_resources(bp, false); 1724 1725 bnxt_drv_uninit(bp); 1726 1727 return ret; 1728 } 1729 1730 static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev, 1731 uint32_t index) 1732 { 1733 struct bnxt *bp = eth_dev->data->dev_private; 1734 uint64_t pool_mask = eth_dev->data->mac_pool_sel[index]; 1735 struct bnxt_vnic_info *vnic; 1736 struct bnxt_filter_info *filter, *temp_filter; 1737 uint32_t i; 1738 1739 if (is_bnxt_in_error(bp)) 1740 return; 1741 1742 /* 1743 * Loop through all VNICs from the specified filter flow pools to 1744 * remove the corresponding MAC addr filter 1745 */ 1746 for (i = 0; i < bp->nr_vnics; i++) { 1747 if (!(pool_mask & (1ULL << i))) 1748 continue; 1749 1750 vnic = &bp->vnic_info[i]; 1751 filter = STAILQ_FIRST(&vnic->filter); 1752 while (filter) { 1753 temp_filter = STAILQ_NEXT(filter, next); 1754 if (filter->mac_index == index) { 1755 STAILQ_REMOVE(&vnic->filter, filter, 1756 bnxt_filter_info, next); 1757 bnxt_hwrm_clear_l2_filter(bp, filter); 1758 bnxt_free_filter(bp, filter); 1759 } 1760 filter = temp_filter; 1761 } 1762 } 1763 } 1764 1765 static int bnxt_add_mac_filter(struct bnxt *bp, struct bnxt_vnic_info *vnic, 1766 struct rte_ether_addr *mac_addr, uint32_t index, 1767 uint32_t pool) 1768 { 1769 struct bnxt_filter_info *filter; 1770 int rc = 0; 1771 1772 /* Attach requested MAC address to the new l2_filter */ 1773 STAILQ_FOREACH(filter, &vnic->filter, next) { 1774 if (filter->mac_index == index) { 1775 PMD_DRV_LOG(DEBUG, 1776 "MAC addr already existed for pool %d\n", 1777 pool); 1778 return 0; 1779 } 1780 } 1781 1782 filter = bnxt_alloc_filter(bp); 1783 if (!filter) { 1784 PMD_DRV_LOG(ERR, "L2 filter alloc failed\n"); 1785 return -ENODEV; 1786 } 1787 1788 /* bnxt_alloc_filter copies default MAC to filter->l2_addr. 
So, 1789 * if the MAC that's been programmed now is a different one, then, 1790 * copy that addr to filter->l2_addr 1791 */ 1792 if (mac_addr) 1793 memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN); 1794 filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; 1795 1796 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); 1797 if (!rc) { 1798 filter->mac_index = index; 1799 if (filter->mac_index == 0) 1800 STAILQ_INSERT_HEAD(&vnic->filter, filter, next); 1801 else 1802 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 1803 } else { 1804 bnxt_free_filter(bp, filter); 1805 } 1806 1807 return rc; 1808 } 1809 1810 static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev, 1811 struct rte_ether_addr *mac_addr, 1812 uint32_t index, uint32_t pool) 1813 { 1814 struct bnxt *bp = eth_dev->data->dev_private; 1815 struct bnxt_vnic_info *vnic = &bp->vnic_info[pool]; 1816 int rc = 0; 1817 1818 rc = is_bnxt_in_error(bp); 1819 if (rc) 1820 return rc; 1821 1822 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) { 1823 PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n"); 1824 return -ENOTSUP; 1825 } 1826 1827 if (!vnic) { 1828 PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool); 1829 return -EINVAL; 1830 } 1831 1832 /* Filter settings will get applied when port is started */ 1833 if (!eth_dev->data->dev_started) 1834 return 0; 1835 1836 rc = bnxt_add_mac_filter(bp, vnic, mac_addr, index, pool); 1837 1838 return rc; 1839 } 1840 1841 int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete) 1842 { 1843 int rc = 0; 1844 struct bnxt *bp = eth_dev->data->dev_private; 1845 struct rte_eth_link new; 1846 int cnt = wait_to_complete ? BNXT_MAX_LINK_WAIT_CNT : 1847 BNXT_MIN_LINK_WAIT_CNT; 1848 1849 rc = is_bnxt_in_error(bp); 1850 if (rc) 1851 return rc; 1852 1853 memset(&new, 0, sizeof(new)); 1854 1855 if (bp->link_info == NULL) 1856 goto out; 1857 1858 /* Only single function PF can bring the phy down. 1859 * In certain scenarios, device is not obliged link down even when forced. 1860 * When port is stopped, report link down in those cases. 
1861 */ 1862 if (!eth_dev->data->dev_started && 1863 (!BNXT_SINGLE_PF(bp) || bnxt_force_link_config(bp))) 1864 goto out; 1865 1866 do { 1867 /* Retrieve link info from hardware */ 1868 rc = bnxt_get_hwrm_link_config(bp, &new); 1869 if (rc) { 1870 new.link_speed = RTE_ETH_LINK_SPEED_100M; 1871 new.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 1872 PMD_DRV_LOG(ERR, 1873 "Failed to retrieve link rc = 0x%x!\n", rc); 1874 goto out; 1875 } 1876 1877 if (!wait_to_complete || new.link_status) 1878 break; 1879 1880 rte_delay_ms(BNXT_LINK_WAIT_INTERVAL); 1881 } while (cnt--); 1882 1883 out: 1884 /* Timed out or success */ 1885 if (new.link_status != eth_dev->data->dev_link.link_status || 1886 new.link_speed != eth_dev->data->dev_link.link_speed) { 1887 rte_eth_linkstatus_set(eth_dev, &new); 1888 bnxt_print_link_info(eth_dev); 1889 } 1890 1891 return rc; 1892 } 1893 1894 static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev) 1895 { 1896 struct bnxt *bp = eth_dev->data->dev_private; 1897 struct bnxt_vnic_info *vnic; 1898 uint32_t old_flags; 1899 int rc; 1900 1901 rc = is_bnxt_in_error(bp); 1902 if (rc) 1903 return rc; 1904 1905 /* Filter settings will get applied when port is started */ 1906 if (!eth_dev->data->dev_started) 1907 return 0; 1908 1909 if (bp->vnic_info == NULL) 1910 return 0; 1911 1912 vnic = bnxt_get_default_vnic(bp); 1913 1914 old_flags = vnic->flags; 1915 vnic->flags |= BNXT_VNIC_INFO_PROMISC; 1916 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1917 if (rc != 0) 1918 vnic->flags = old_flags; 1919 1920 return rc; 1921 } 1922 1923 static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev) 1924 { 1925 struct bnxt *bp = eth_dev->data->dev_private; 1926 struct bnxt_vnic_info *vnic; 1927 uint32_t old_flags; 1928 int rc; 1929 1930 rc = is_bnxt_in_error(bp); 1931 if (rc) 1932 return rc; 1933 1934 /* Filter settings will get applied when port is started */ 1935 if (!eth_dev->data->dev_started) 1936 return 0; 1937 1938 if (bp->vnic_info == NULL) 1939 return 0; 1940 1941 vnic = bnxt_get_default_vnic(bp); 1942 1943 old_flags = vnic->flags; 1944 vnic->flags &= ~BNXT_VNIC_INFO_PROMISC; 1945 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1946 if (rc != 0) 1947 vnic->flags = old_flags; 1948 1949 return rc; 1950 } 1951 1952 static int bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev) 1953 { 1954 struct bnxt *bp = eth_dev->data->dev_private; 1955 struct bnxt_vnic_info *vnic; 1956 uint32_t old_flags; 1957 int rc; 1958 1959 rc = is_bnxt_in_error(bp); 1960 if (rc) 1961 return rc; 1962 1963 /* Filter settings will get applied when port is started */ 1964 if (!eth_dev->data->dev_started) 1965 return 0; 1966 1967 if (bp->vnic_info == NULL) 1968 return 0; 1969 1970 vnic = bnxt_get_default_vnic(bp); 1971 1972 old_flags = vnic->flags; 1973 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; 1974 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1975 if (rc != 0) 1976 vnic->flags = old_flags; 1977 1978 return rc; 1979 } 1980 1981 static int bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev) 1982 { 1983 struct bnxt *bp = eth_dev->data->dev_private; 1984 struct bnxt_vnic_info *vnic; 1985 uint32_t old_flags; 1986 int rc; 1987 1988 rc = is_bnxt_in_error(bp); 1989 if (rc) 1990 return rc; 1991 1992 /* Filter settings will get applied when port is started */ 1993 if (!eth_dev->data->dev_started) 1994 return 0; 1995 1996 if (bp->vnic_info == NULL) 1997 return 0; 1998 1999 vnic = bnxt_get_default_vnic(bp); 2000 2001 old_flags = vnic->flags; 2002 vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; 
2003 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 2004 if (rc != 0) 2005 vnic->flags = old_flags; 2006 2007 return rc; 2008 } 2009 2010 /* Return bnxt_rx_queue pointer corresponding to a given rxq. */ 2011 static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid) 2012 { 2013 if (qid >= bp->rx_nr_rings) 2014 return NULL; 2015 2016 return bp->eth_dev->data->rx_queues[qid]; 2017 } 2018 2019 /* Return rxq corresponding to a given rss table ring/group ID. */ 2020 static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr) 2021 { 2022 struct bnxt_rx_queue *rxq; 2023 unsigned int i; 2024 2025 if (!BNXT_HAS_RING_GRPS(bp)) { 2026 for (i = 0; i < bp->rx_nr_rings; i++) { 2027 rxq = bp->eth_dev->data->rx_queues[i]; 2028 if (rxq->rx_ring->rx_ring_struct->fw_ring_id == fwr) 2029 return rxq->index; 2030 } 2031 } else { 2032 for (i = 0; i < bp->rx_nr_rings; i++) { 2033 if (bp->grp_info[i].fw_grp_id == fwr) 2034 return i; 2035 } 2036 } 2037 2038 return INVALID_HW_RING_ID; 2039 } 2040 2041 static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev, 2042 struct rte_eth_rss_reta_entry64 *reta_conf, 2043 uint16_t reta_size) 2044 { 2045 struct bnxt *bp = eth_dev->data->dev_private; 2046 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 2047 struct bnxt_vnic_info *vnic = bnxt_get_default_vnic(bp); 2048 uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp); 2049 uint16_t idx, sft; 2050 int i, rc; 2051 2052 rc = is_bnxt_in_error(bp); 2053 if (rc) 2054 return rc; 2055 2056 if (!vnic->rss_table) 2057 return -EINVAL; 2058 2059 if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) 2060 return -EINVAL; 2061 2062 if (reta_size != tbl_size) { 2063 PMD_DRV_LOG(ERR, "The configured hash table lookup size " 2064 "(%d) must equal the size supported by the hardware " 2065 "(%d)\n", reta_size, tbl_size); 2066 return -EINVAL; 2067 } 2068 2069 if (bnxt_vnic_reta_config_update(bp, vnic, reta_conf, reta_size)) { 2070 PMD_DRV_LOG(ERR, "Error in setting the reta config\n"); 2071 return -EINVAL; 2072 } 2073 for (i = 0; i < reta_size; i++) { 2074 struct bnxt_rx_queue *rxq; 2075 2076 idx = i / RTE_ETH_RETA_GROUP_SIZE; 2077 sft = i % RTE_ETH_RETA_GROUP_SIZE; 2078 2079 if (!(reta_conf[idx].mask & (1ULL << sft))) 2080 continue; 2081 2082 rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]); 2083 if (BNXT_CHIP_P5(bp)) { 2084 vnic->rss_table[i * 2] = 2085 rxq->rx_ring->rx_ring_struct->fw_ring_id; 2086 vnic->rss_table[i * 2 + 1] = 2087 rxq->cp_ring->cp_ring_struct->fw_ring_id; 2088 } else { 2089 vnic->rss_table[i] = 2090 vnic->fw_grp_ids[reta_conf[idx].reta[sft]]; 2091 } 2092 } 2093 rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic); 2094 return rc; 2095 } 2096 2097 static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev, 2098 struct rte_eth_rss_reta_entry64 *reta_conf, 2099 uint16_t reta_size) 2100 { 2101 struct bnxt *bp = eth_dev->data->dev_private; 2102 struct bnxt_vnic_info *vnic = bnxt_get_default_vnic(bp); 2103 uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp); 2104 uint16_t idx, sft, i; 2105 int rc; 2106 2107 rc = is_bnxt_in_error(bp); 2108 if (rc) 2109 return rc; 2110 2111 if (!vnic) 2112 return -EINVAL; 2113 if (!vnic->rss_table) 2114 return -EINVAL; 2115 2116 if (reta_size != tbl_size) { 2117 PMD_DRV_LOG(ERR, "The configured hash table lookup size " 2118 "(%d) must equal the size supported by the hardware " 2119 "(%d)\n", reta_size, tbl_size); 2120 return -EINVAL; 2121 } 2122 2123 for (idx = 0, i = 0; i < reta_size; i++) { 2124 idx = i / RTE_ETH_RETA_GROUP_SIZE; 2125 sft = i % RTE_ETH_RETA_GROUP_SIZE; 2126 
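/*
 * bnxt_reta_update_op() above walks the indirection table in 64-entry
 * RTE_ETH_RETA_GROUP_SIZE groups (idx selects the group, sft the bit
 * within it) and, on P5 chips, stores an Rx ring/completion ring pair
 * per slot. A rough application-side sketch spreading reta_size slots
 * across nb_rxq queues (port_id and nb_rxq are assumed to be set up
 * elsewhere):
 */
#if 0	/* illustrative sketch only */
	struct rte_eth_dev_info info;
	struct rte_eth_rss_reta_entry64 reta[8] = {{ 0 }};	/* reta_size <= 512 */
	uint16_t i;

	rte_eth_dev_info_get(port_id, &info);
	for (i = 0; i < info.reta_size; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t sft = i % RTE_ETH_RETA_GROUP_SIZE;

		reta[idx].mask |= 1ULL << sft;
		reta[idx].reta[sft] = i % nb_rxq;
	}
	rte_eth_dev_rss_reta_update(port_id, reta, info.reta_size);
#endif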
2127 if (reta_conf[idx].mask & (1ULL << sft)) { 2128 uint16_t qid; 2129 2130 if (BNXT_CHIP_P5(bp)) 2131 qid = bnxt_rss_to_qid(bp, 2132 vnic->rss_table[i * 2]); 2133 else 2134 qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]); 2135 2136 if (qid == INVALID_HW_RING_ID) { 2137 PMD_DRV_LOG(ERR, "Inv. entry in rss table.\n"); 2138 return -EINVAL; 2139 } 2140 reta_conf[idx].reta[sft] = qid; 2141 } 2142 } 2143 2144 return 0; 2145 } 2146 2147 static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, 2148 struct rte_eth_rss_conf *rss_conf) 2149 { 2150 struct bnxt *bp = eth_dev->data->dev_private; 2151 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 2152 struct bnxt_vnic_info *vnic; 2153 int rc; 2154 2155 rc = is_bnxt_in_error(bp); 2156 if (rc) 2157 return rc; 2158 2159 /* 2160 * If RSS enablement were different than dev_configure, 2161 * then return -EINVAL 2162 */ 2163 if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) { 2164 if (!rss_conf->rss_hf) 2165 PMD_DRV_LOG(ERR, "Hash type NONE\n"); 2166 } else { 2167 if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT) 2168 return -EINVAL; 2169 } 2170 2171 /* Update the default RSS VNIC(s) */ 2172 vnic = bnxt_get_default_vnic(bp); 2173 vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf); 2174 vnic->hash_mode = 2175 bnxt_rte_to_hwrm_hash_level(bp, rss_conf->rss_hf, 2176 RTE_ETH_RSS_LEVEL(rss_conf->rss_hf)); 2177 2178 /* Cache the hash function */ 2179 bp->rss_conf.rss_hf = rss_conf->rss_hf; 2180 2181 /* 2182 * If hashkey is not specified, use the previously configured 2183 * hashkey 2184 */ 2185 if (!rss_conf->rss_key) 2186 goto rss_config; 2187 2188 if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE) { 2189 PMD_DRV_LOG(ERR, 2190 "Invalid hashkey length, should be %d bytes\n", 2191 HW_HASH_KEY_SIZE); 2192 return -EINVAL; 2193 } 2194 memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len); 2195 2196 /* Cache the hash key */ 2197 memcpy(bp->rss_conf.rss_key, rss_conf->rss_key, HW_HASH_KEY_SIZE); 2198 2199 rss_config: 2200 rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic); 2201 return rc; 2202 } 2203 2204 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev, 2205 struct rte_eth_rss_conf *rss_conf) 2206 { 2207 struct bnxt *bp = eth_dev->data->dev_private; 2208 struct bnxt_vnic_info *vnic = bnxt_get_default_vnic(bp); 2209 int len, rc; 2210 uint32_t hash_types; 2211 2212 rc = is_bnxt_in_error(bp); 2213 if (rc) 2214 return rc; 2215 2216 /* RSS configuration is the same for all VNICs */ 2217 if (vnic && vnic->rss_hash_key) { 2218 if (rss_conf->rss_key) { 2219 len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ? 
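/*
 * bnxt_rss_hash_update_op() above rejects any key whose length is not
 * exactly HW_HASH_KEY_SIZE and caches both the key and rss_hf so they
 * can be restored later. A minimal sketch of an application update,
 * assuming HW_HASH_KEY_SIZE is 40 bytes as defined by this driver and
 * that a real deployment would fill the key with random data:
 */
#if 0	/* illustrative sketch only */
	uint8_t key[40] = { 0 };	/* fill with a 40-byte Toeplitz key */
	struct rte_eth_rss_conf conf = {
		.rss_key = key,
		.rss_key_len = sizeof(key),
		.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP,
	};

	rte_eth_dev_rss_hash_update(port_id, &conf);
#endif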
2220 rss_conf->rss_key_len : HW_HASH_KEY_SIZE; 2221 memcpy(rss_conf->rss_key, vnic->rss_hash_key, len); 2222 } 2223 2224 hash_types = vnic->hash_type; 2225 rss_conf->rss_hf = 0; 2226 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) { 2227 rss_conf->rss_hf |= RTE_ETH_RSS_IPV4; 2228 hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4; 2229 } 2230 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) { 2231 rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP; 2232 hash_types &= 2233 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4; 2234 } 2235 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) { 2236 rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP; 2237 hash_types &= 2238 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4; 2239 } 2240 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) { 2241 rss_conf->rss_hf |= RTE_ETH_RSS_IPV6; 2242 hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6; 2243 } 2244 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) { 2245 rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP; 2246 hash_types &= 2247 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6; 2248 } 2249 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) { 2250 rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP; 2251 hash_types &= 2252 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6; 2253 } 2254 2255 rss_conf->rss_hf |= 2256 bnxt_hwrm_to_rte_rss_level(bp, vnic->hash_mode); 2257 2258 if (hash_types) { 2259 PMD_DRV_LOG(ERR, 2260 "Unknown RSS config from firmware (%08x), RSS disabled", 2261 vnic->hash_type); 2262 return -ENOTSUP; 2263 } 2264 } else { 2265 rss_conf->rss_hf = 0; 2266 } 2267 return 0; 2268 } 2269 2270 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev, 2271 struct rte_eth_fc_conf *fc_conf) 2272 { 2273 struct bnxt *bp = dev->data->dev_private; 2274 struct rte_eth_link link_info; 2275 int rc; 2276 2277 rc = is_bnxt_in_error(bp); 2278 if (rc) 2279 return rc; 2280 2281 rc = bnxt_get_hwrm_link_config(bp, &link_info); 2282 if (rc) 2283 return rc; 2284 2285 memset(fc_conf, 0, sizeof(*fc_conf)); 2286 if (bp->link_info->auto_pause) 2287 fc_conf->autoneg = 1; 2288 switch (bp->link_info->pause) { 2289 case 0: 2290 fc_conf->mode = RTE_ETH_FC_NONE; 2291 break; 2292 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX: 2293 fc_conf->mode = RTE_ETH_FC_TX_PAUSE; 2294 break; 2295 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX: 2296 fc_conf->mode = RTE_ETH_FC_RX_PAUSE; 2297 break; 2298 case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX | 2299 HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX): 2300 fc_conf->mode = RTE_ETH_FC_FULL; 2301 break; 2302 } 2303 return 0; 2304 } 2305 2306 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev, 2307 struct rte_eth_fc_conf *fc_conf) 2308 { 2309 struct bnxt *bp = dev->data->dev_private; 2310 int rc; 2311 2312 rc = is_bnxt_in_error(bp); 2313 if (rc) 2314 return rc; 2315 2316 if (!BNXT_SINGLE_PF(bp)) { 2317 PMD_DRV_LOG(ERR, 2318 "Flow Control Settings cannot be modified on VF or on shared PF\n"); 2319 return -ENOTSUP; 2320 } 2321 2322 switch (fc_conf->mode) { 2323 case RTE_ETH_FC_NONE: 2324 bp->link_info->auto_pause = 0; 2325 bp->link_info->force_pause = 0; 2326 break; 2327 case RTE_ETH_FC_RX_PAUSE: 2328 if (fc_conf->autoneg) { 2329 bp->link_info->auto_pause = 2330 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 2331 bp->link_info->force_pause = 0; 2332 } else { 2333 bp->link_info->auto_pause = 0; 2334 bp->link_info->force_pause = 2335 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 2336 } 2337 break; 2338 case RTE_ETH_FC_TX_PAUSE: 2339 if (fc_conf->autoneg) { 2340 bp->link_info->auto_pause = 2341 
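/*
 * bnxt_flow_ctrl_get_op() above maps the HWRM pause bits onto the
 * generic RTE_ETH_FC_* modes; bnxt_flow_ctrl_set_op() here is only
 * honoured on a single-function PF. A hedged application sketch
 * requesting full flow control with autonegotiated pause:
 */
#if 0	/* illustrative sketch only */
	struct rte_eth_fc_conf fc;

	if (rte_eth_dev_flow_ctrl_get(port_id, &fc) == 0) {
		fc.mode = RTE_ETH_FC_FULL;
		fc.autoneg = 1;
		rte_eth_dev_flow_ctrl_set(port_id, &fc);
	}
#endif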
HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX; 2342 bp->link_info->force_pause = 0; 2343 } else { 2344 bp->link_info->auto_pause = 0; 2345 bp->link_info->force_pause = 2346 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX; 2347 } 2348 break; 2349 case RTE_ETH_FC_FULL: 2350 if (fc_conf->autoneg) { 2351 bp->link_info->auto_pause = 2352 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX | 2353 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 2354 bp->link_info->force_pause = 0; 2355 } else { 2356 bp->link_info->auto_pause = 0; 2357 bp->link_info->force_pause = 2358 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX | 2359 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 2360 } 2361 break; 2362 } 2363 return bnxt_set_hwrm_link_config(bp, true); 2364 } 2365 2366 /* Add UDP tunneling port */ 2367 int 2368 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev, 2369 struct rte_eth_udp_tunnel *udp_tunnel) 2370 { 2371 struct bnxt *bp = eth_dev->data->dev_private; 2372 uint16_t tunnel_type = 0; 2373 int rc = 0; 2374 2375 rc = is_bnxt_in_error(bp); 2376 if (rc) 2377 return rc; 2378 2379 switch (udp_tunnel->prot_type) { 2380 case RTE_ETH_TUNNEL_TYPE_VXLAN: 2381 if (bp->vxlan_port_cnt) { 2382 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 2383 udp_tunnel->udp_port); 2384 if (bp->vxlan_port != udp_tunnel->udp_port) { 2385 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 2386 return -ENOSPC; 2387 } 2388 bp->vxlan_port_cnt++; 2389 return 0; 2390 } 2391 tunnel_type = 2392 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN; 2393 break; 2394 case RTE_ETH_TUNNEL_TYPE_GENEVE: 2395 if (bp->geneve_port_cnt) { 2396 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 2397 udp_tunnel->udp_port); 2398 if (bp->geneve_port != udp_tunnel->udp_port) { 2399 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 2400 return -ENOSPC; 2401 } 2402 bp->geneve_port_cnt++; 2403 return 0; 2404 } 2405 tunnel_type = 2406 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE; 2407 break; 2408 case RTE_ETH_TUNNEL_TYPE_ECPRI: 2409 if (bp->ecpri_port_cnt) { 2410 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 2411 udp_tunnel->udp_port); 2412 if (bp->ecpri_port != udp_tunnel->udp_port) { 2413 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 2414 return -ENOSPC; 2415 } 2416 bp->ecpri_port_cnt++; 2417 return 0; 2418 } 2419 tunnel_type = 2420 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ECPRI; 2421 break; 2422 default: 2423 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); 2424 return -ENOTSUP; 2425 } 2426 rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port, 2427 tunnel_type); 2428 2429 if (rc != 0) 2430 return rc; 2431 2432 if (tunnel_type == 2433 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN) 2434 bp->vxlan_port_cnt++; 2435 2436 if (tunnel_type == 2437 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE) 2438 bp->geneve_port_cnt++; 2439 2440 if (tunnel_type == 2441 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ECPRI) 2442 bp->ecpri_port_cnt++; 2443 2444 return rc; 2445 } 2446 2447 int 2448 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev, 2449 struct rte_eth_udp_tunnel *udp_tunnel) 2450 { 2451 struct bnxt *bp = eth_dev->data->dev_private; 2452 uint16_t tunnel_type = 0; 2453 uint16_t port = 0; 2454 int rc = 0; 2455 2456 rc = is_bnxt_in_error(bp); 2457 if (rc) 2458 return rc; 2459 2460 switch (udp_tunnel->prot_type) { 2461 case RTE_ETH_TUNNEL_TYPE_VXLAN: 2462 if (!bp->vxlan_port_cnt) { 2463 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 2464 return -EINVAL; 2465 } 2466 if (bp->vxlan_port != udp_tunnel->udp_port) { 2467 PMD_DRV_LOG(ERR, "Req Port: %d. 
Configured port: %d\n", 2468 udp_tunnel->udp_port, bp->vxlan_port); 2469 return -EINVAL; 2470 } 2471 if (--bp->vxlan_port_cnt) 2472 return 0; 2473 2474 tunnel_type = 2475 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN; 2476 port = bp->vxlan_fw_dst_port_id; 2477 break; 2478 case RTE_ETH_TUNNEL_TYPE_GENEVE: 2479 if (!bp->geneve_port_cnt) { 2480 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 2481 return -EINVAL; 2482 } 2483 if (bp->geneve_port != udp_tunnel->udp_port) { 2484 PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n", 2485 udp_tunnel->udp_port, bp->geneve_port); 2486 return -EINVAL; 2487 } 2488 if (--bp->geneve_port_cnt) 2489 return 0; 2490 2491 tunnel_type = 2492 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE; 2493 port = bp->geneve_fw_dst_port_id; 2494 break; 2495 case RTE_ETH_TUNNEL_TYPE_ECPRI: 2496 if (!bp->ecpri_port_cnt) { 2497 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 2498 return -EINVAL; 2499 } 2500 if (bp->ecpri_port != udp_tunnel->udp_port) { 2501 PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n", 2502 udp_tunnel->udp_port, bp->ecpri_port); 2503 return -EINVAL; 2504 } 2505 if (--bp->ecpri_port_cnt) 2506 return 0; 2507 2508 tunnel_type = 2509 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_ECPRI; 2510 port = bp->ecpri_fw_dst_port_id; 2511 break; 2512 default: 2513 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); 2514 return -ENOTSUP; 2515 } 2516 2517 rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type); 2518 return rc; 2519 } 2520 2521 static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 2522 { 2523 struct bnxt_filter_info *filter; 2524 struct bnxt_vnic_info *vnic; 2525 int rc = 0; 2526 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; 2527 2528 vnic = bnxt_get_default_vnic(bp); 2529 filter = STAILQ_FIRST(&vnic->filter); 2530 while (filter) { 2531 /* Search for this matching MAC+VLAN filter */ 2532 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) { 2533 /* Delete the filter */ 2534 rc = bnxt_hwrm_clear_l2_filter(bp, filter); 2535 if (rc) 2536 return rc; 2537 STAILQ_REMOVE(&vnic->filter, filter, 2538 bnxt_filter_info, next); 2539 bnxt_free_filter(bp, filter); 2540 PMD_DRV_LOG(INFO, 2541 "Deleted vlan filter for %d\n", 2542 vlan_id); 2543 return 0; 2544 } 2545 filter = STAILQ_NEXT(filter, next); 2546 } 2547 return -ENOENT; 2548 } 2549 2550 static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 2551 { 2552 struct bnxt_filter_info *filter; 2553 struct bnxt_vnic_info *vnic; 2554 int rc = 0; 2555 uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN | 2556 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK; 2557 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; 2558 2559 /* Implementation notes on the use of VNIC in this command: 2560 * 2561 * By default, these filters belong to default vnic for the function. 2562 * Once these filters are set up, only destination VNIC can be modified. 2563 * If the destination VNIC is not specified in this command, 2564 * then the HWRM shall only create an l2 context id. 2565 */ 2566 2567 vnic = bnxt_get_default_vnic(bp); 2568 filter = STAILQ_FIRST(&vnic->filter); 2569 /* Check if the VLAN has already been added */ 2570 while (filter) { 2571 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) 2572 return -EEXIST; 2573 2574 filter = STAILQ_NEXT(filter, next); 2575 } 2576 2577 /* No match found. Alloc a fresh filter and issue the L2_FILTER_ALLOC 2578 * command to create MAC+VLAN filter with the right flags, enables set. 
2579 */ 2580 filter = bnxt_alloc_filter(bp); 2581 if (!filter) { 2582 PMD_DRV_LOG(ERR, 2583 "MAC/VLAN filter alloc failed\n"); 2584 return -ENOMEM; 2585 } 2586 /* MAC + VLAN ID filter */ 2587 /* If l2_ivlan == 0 and l2_ivlan_mask != 0, only 2588 * untagged packets are received 2589 * 2590 * If l2_ivlan != 0 and l2_ivlan_mask != 0, untagged 2591 * packets and only the programmed vlan's packets are received 2592 */ 2593 filter->l2_ivlan = vlan_id; 2594 filter->l2_ivlan_mask = 0x0FFF; 2595 filter->enables |= en; 2596 filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; 2597 2598 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); 2599 if (rc) { 2600 /* Free the newly allocated filter as we were 2601 * not able to create the filter in hardware. 2602 */ 2603 bnxt_free_filter(bp, filter); 2604 return rc; 2605 } 2606 2607 filter->mac_index = 0; 2608 /* Add this new filter to the list */ 2609 if (vlan_id == 0) 2610 STAILQ_INSERT_HEAD(&vnic->filter, filter, next); 2611 else 2612 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 2613 2614 PMD_DRV_LOG(INFO, 2615 "Added Vlan filter for %d\n", vlan_id); 2616 return rc; 2617 } 2618 2619 static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev, 2620 uint16_t vlan_id, int on) 2621 { 2622 struct bnxt *bp = eth_dev->data->dev_private; 2623 int rc; 2624 2625 rc = is_bnxt_in_error(bp); 2626 if (rc) 2627 return rc; 2628 2629 if (!eth_dev->data->dev_started) { 2630 PMD_DRV_LOG(ERR, "port must be started before setting vlan\n"); 2631 return -EINVAL; 2632 } 2633 2634 /* These operations apply to ALL existing MAC/VLAN filters */ 2635 if (on) 2636 return bnxt_add_vlan_filter(bp, vlan_id); 2637 else 2638 return bnxt_del_vlan_filter(bp, vlan_id); 2639 } 2640 2641 static int bnxt_del_dflt_mac_filter(struct bnxt *bp, 2642 struct bnxt_vnic_info *vnic) 2643 { 2644 struct bnxt_filter_info *filter; 2645 int rc; 2646 2647 filter = STAILQ_FIRST(&vnic->filter); 2648 while (filter) { 2649 if (filter->mac_index == 0 && 2650 !memcmp(filter->l2_addr, bp->mac_addr, 2651 RTE_ETHER_ADDR_LEN)) { 2652 rc = bnxt_hwrm_clear_l2_filter(bp, filter); 2653 if (!rc) { 2654 STAILQ_REMOVE(&vnic->filter, filter, 2655 bnxt_filter_info, next); 2656 bnxt_free_filter(bp, filter); 2657 } 2658 return rc; 2659 } 2660 filter = STAILQ_NEXT(filter, next); 2661 } 2662 return 0; 2663 } 2664 2665 static int 2666 bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads) 2667 { 2668 struct bnxt_vnic_info *vnic; 2669 unsigned int i; 2670 int rc; 2671 2672 vnic = bnxt_get_default_vnic(bp); 2673 if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) { 2674 /* Remove any VLAN filters programmed */ 2675 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++) 2676 bnxt_del_vlan_filter(bp, i); 2677 2678 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0); 2679 if (rc) 2680 return rc; 2681 } else { 2682 /* Default filter will allow packets that match the 2683 * dest mac. 
So, it has to be deleted, otherwise, we 2684 * will endup receiving vlan packets for which the 2685 * filter is not programmed, when hw-vlan-filter 2686 * configuration is ON 2687 */ 2688 bnxt_del_dflt_mac_filter(bp, vnic); 2689 /* This filter will allow only untagged packets */ 2690 bnxt_add_vlan_filter(bp, 0); 2691 } 2692 PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n", 2693 !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)); 2694 2695 return 0; 2696 } 2697 2698 static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id) 2699 { 2700 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 2701 unsigned int i; 2702 int rc; 2703 2704 /* Destroy vnic filters and vnic */ 2705 if (bp->eth_dev->data->dev_conf.rxmode.offloads & 2706 RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { 2707 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++) 2708 bnxt_del_vlan_filter(bp, i); 2709 } 2710 bnxt_del_dflt_mac_filter(bp, vnic); 2711 2712 rc = bnxt_hwrm_vnic_ctx_free(bp, vnic); 2713 if (rc) 2714 return rc; 2715 2716 rc = bnxt_hwrm_vnic_free(bp, vnic); 2717 if (rc) 2718 return rc; 2719 2720 rte_free(vnic->fw_grp_ids); 2721 vnic->fw_grp_ids = NULL; 2722 2723 vnic->rx_queue_cnt = 0; 2724 2725 return 0; 2726 } 2727 2728 static int 2729 bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads) 2730 { 2731 struct bnxt_vnic_info *vnic = bnxt_get_default_vnic(bp); 2732 int rc; 2733 2734 /* Destroy, recreate and reconfigure the default vnic */ 2735 rc = bnxt_free_one_vnic(bp, bp->vnic_queue_db.dflt_vnic_id); 2736 if (rc) 2737 return rc; 2738 2739 /* setup the default vnic details*/ 2740 bnxt_vnic_queue_db_update_dlft_vnic(bp); 2741 2742 rc = bnxt_setup_one_vnic(bp, bp->vnic_queue_db.dflt_vnic_id); 2743 if (rc) 2744 return rc; 2745 2746 if (bp->eth_dev->data->dev_conf.rxmode.offloads & 2747 RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { 2748 rc = bnxt_add_vlan_filter(bp, 0); 2749 if (rc) 2750 return rc; 2751 rc = bnxt_restore_vlan_filters(bp); 2752 if (rc) 2753 return rc; 2754 } else { 2755 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0); 2756 if (rc) 2757 return rc; 2758 } 2759 2760 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 2761 if (rc) 2762 return rc; 2763 2764 PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n", 2765 !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)); 2766 2767 return rc; 2768 } 2769 2770 static int 2771 bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask) 2772 { 2773 uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; 2774 struct bnxt *bp = dev->data->dev_private; 2775 int rc; 2776 2777 rc = is_bnxt_in_error(bp); 2778 if (rc) 2779 return rc; 2780 2781 /* Filter settings will get applied when port is started */ 2782 if (!dev->data->dev_started) 2783 return 0; 2784 2785 if (mask & RTE_ETH_VLAN_FILTER_MASK) { 2786 /* Enable or disable VLAN filtering */ 2787 rc = bnxt_config_vlan_hw_filter(bp, rx_offloads); 2788 if (rc) 2789 return rc; 2790 } 2791 2792 if (mask & RTE_ETH_VLAN_STRIP_MASK) { 2793 /* Enable or disable VLAN stripping */ 2794 rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads); 2795 if (rc) 2796 return rc; 2797 } 2798 2799 if (mask & RTE_ETH_VLAN_EXTEND_MASK) { 2800 if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) 2801 PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n"); 2802 else 2803 PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n"); 2804 } 2805 2806 return 0; 2807 } 2808 2809 static int 2810 bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type, 2811 uint16_t tpid) 2812 { 2813 struct bnxt *bp = dev->data->dev_private; 2814 int qinq = dev->data->dev_conf.rxmode.offloads & 2815 
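/*
 * bnxt_vlan_offload_set_op() above reprograms the MAC/VLAN filters when
 * RTE_ETH_VLAN_FILTER_MASK changes and rebuilds the default VNIC when
 * RTE_ETH_VLAN_STRIP_MASK changes; the corresponding offload bits must
 * already be present in dev_conf.rxmode.offloads. Sketch of an
 * application enabling both and then admitting VLAN 100 (port assumed
 * to be started):
 */
#if 0	/* illustrative sketch only */
	int mask = RTE_ETH_VLAN_FILTER_MASK | RTE_ETH_VLAN_STRIP_MASK;

	rte_eth_dev_set_vlan_offload(port_id, mask);
	rte_eth_dev_vlan_filter(port_id, 100, 1);
#endif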
RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 2816 2817 if (vlan_type != RTE_ETH_VLAN_TYPE_INNER && 2818 vlan_type != RTE_ETH_VLAN_TYPE_OUTER) { 2819 PMD_DRV_LOG(ERR, 2820 "Unsupported vlan type."); 2821 return -EINVAL; 2822 } 2823 if (!qinq) { 2824 PMD_DRV_LOG(ERR, 2825 "QinQ not enabled. Needs to be ON as we can " 2826 "accelerate only outer vlan\n"); 2827 return -EINVAL; 2828 } 2829 2830 if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) { 2831 switch (tpid) { 2832 case RTE_ETHER_TYPE_QINQ: 2833 bp->outer_tpid_bd = 2834 TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8; 2835 break; 2836 case RTE_ETHER_TYPE_VLAN: 2837 bp->outer_tpid_bd = 2838 TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100; 2839 break; 2840 case RTE_ETHER_TYPE_QINQ1: 2841 bp->outer_tpid_bd = 2842 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100; 2843 break; 2844 case RTE_ETHER_TYPE_QINQ2: 2845 bp->outer_tpid_bd = 2846 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200; 2847 break; 2848 case RTE_ETHER_TYPE_QINQ3: 2849 bp->outer_tpid_bd = 2850 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300; 2851 break; 2852 default: 2853 PMD_DRV_LOG(ERR, "Invalid TPID: %x\n", tpid); 2854 return -EINVAL; 2855 } 2856 bp->outer_tpid_bd |= tpid; 2857 PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd); 2858 } else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) { 2859 PMD_DRV_LOG(ERR, 2860 "Can accelerate only outer vlan in QinQ\n"); 2861 return -EINVAL; 2862 } 2863 2864 return 0; 2865 } 2866 2867 static int 2868 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, 2869 struct rte_ether_addr *addr) 2870 { 2871 struct bnxt *bp = dev->data->dev_private; 2872 /* Default Filter is tied to VNIC 0 */ 2873 struct bnxt_vnic_info *vnic = bnxt_get_default_vnic(bp); 2874 int rc; 2875 2876 rc = is_bnxt_in_error(bp); 2877 if (rc) 2878 return rc; 2879 2880 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 2881 return -EPERM; 2882 2883 if (rte_is_zero_ether_addr(addr)) 2884 return -EINVAL; 2885 2886 /* Filter settings will get applied when port is started */ 2887 if (!dev->data->dev_started) 2888 return 0; 2889 2890 /* Check if the requested MAC is already added */ 2891 if (memcmp(addr, bp->mac_addr, RTE_ETHER_ADDR_LEN) == 0) 2892 return 0; 2893 2894 /* Destroy filter and re-create it */ 2895 bnxt_del_dflt_mac_filter(bp, vnic); 2896 2897 memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN); 2898 if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { 2899 /* This filter will allow only untagged packets */ 2900 rc = bnxt_add_vlan_filter(bp, 0); 2901 } else { 2902 rc = bnxt_add_mac_filter(bp, vnic, addr, 0, 0); 2903 } 2904 2905 PMD_DRV_LOG(DEBUG, "Set MAC addr\n"); 2906 return rc; 2907 } 2908 2909 static int 2910 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, 2911 struct rte_ether_addr *mc_addr_set, 2912 uint32_t nb_mc_addr) 2913 { 2914 struct bnxt *bp = eth_dev->data->dev_private; 2915 struct bnxt_vnic_info *vnic; 2916 uint32_t i = 0; 2917 int rc; 2918 2919 rc = is_bnxt_in_error(bp); 2920 if (rc) 2921 return rc; 2922 2923 vnic = bnxt_get_default_vnic(bp); 2924 2925 bp->nb_mc_addr = nb_mc_addr; 2926 2927 if (nb_mc_addr > BNXT_MAX_MC_ADDRS) { 2928 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; 2929 goto allmulti; 2930 } 2931 2932 /* TODO Check for Duplicate mcast addresses */ 2933 vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; 2934 for (i = 0; i < nb_mc_addr; i++) 2935 rte_ether_addr_copy(&mc_addr_set[i], &bp->mcast_addr_list[i]); 2936 2937 if (bp->nb_mc_addr) 2938 vnic->flags |= BNXT_VNIC_INFO_MCAST; 2939 else 2940 vnic->flags &= ~BNXT_VNIC_INFO_MCAST; 2941 2942 allmulti: 2943 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 
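/*
 * The handler above copies up to BNXT_MAX_MC_ADDRS entries into
 * bp->mcast_addr_list and falls back to all-multicast when the list is
 * larger than the hardware can hold. Application-side sketch (the
 * addresses are placeholders):
 */
#if 0	/* illustrative sketch only */
	struct rte_ether_addr mc[2] = {
		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb } },
	};

	rte_eth_dev_set_mc_addr_list(port_id, mc, RTE_DIM(mc));
#endif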
0, NULL); 2944 } 2945 2946 static int 2947 bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) 2948 { 2949 struct bnxt *bp = dev->data->dev_private; 2950 uint8_t fw_major = (bp->fw_ver >> 24) & 0xff; 2951 uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff; 2952 uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff; 2953 uint8_t fw_rsvd = bp->fw_ver & 0xff; 2954 int ret; 2955 2956 ret = snprintf(fw_version, fw_size, "%d.%d.%d.%d", 2957 fw_major, fw_minor, fw_updt, fw_rsvd); 2958 if (ret < 0) 2959 return -EINVAL; 2960 2961 ret += 1; /* add the size of '\0' */ 2962 if (fw_size < (size_t)ret) 2963 return ret; 2964 else 2965 return 0; 2966 } 2967 2968 static void 2969 bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 2970 struct rte_eth_rxq_info *qinfo) 2971 { 2972 struct bnxt *bp = dev->data->dev_private; 2973 struct bnxt_rx_queue *rxq; 2974 2975 if (is_bnxt_in_error(bp)) 2976 return; 2977 2978 rxq = dev->data->rx_queues[queue_id]; 2979 2980 qinfo->mp = rxq->mb_pool; 2981 qinfo->scattered_rx = dev->data->scattered_rx; 2982 qinfo->nb_desc = rxq->nb_rx_desc; 2983 2984 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; 2985 qinfo->conf.rx_drop_en = rxq->drop_en; 2986 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start; 2987 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads; 2988 } 2989 2990 static void 2991 bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 2992 struct rte_eth_txq_info *qinfo) 2993 { 2994 struct bnxt *bp = dev->data->dev_private; 2995 struct bnxt_tx_queue *txq; 2996 2997 if (is_bnxt_in_error(bp)) 2998 return; 2999 3000 txq = dev->data->tx_queues[queue_id]; 3001 3002 qinfo->nb_desc = txq->nb_tx_desc; 3003 3004 qinfo->conf.tx_thresh.pthresh = txq->pthresh; 3005 qinfo->conf.tx_thresh.hthresh = txq->hthresh; 3006 qinfo->conf.tx_thresh.wthresh = txq->wthresh; 3007 3008 qinfo->conf.tx_free_thresh = txq->tx_free_thresh; 3009 qinfo->conf.tx_rs_thresh = 0; 3010 qinfo->conf.tx_deferred_start = txq->tx_deferred_start; 3011 qinfo->conf.offloads = txq->offloads; 3012 } 3013 3014 static const struct { 3015 eth_rx_burst_t pkt_burst; 3016 const char *info; 3017 } bnxt_rx_burst_info[] = { 3018 {bnxt_recv_pkts, "Scalar"}, 3019 #if defined(RTE_ARCH_X86) 3020 {bnxt_recv_pkts_vec, "Vector SSE"}, 3021 {bnxt_recv_pkts_vec_avx2, "Vector AVX2"}, 3022 #endif 3023 #if defined(RTE_ARCH_ARM64) 3024 {bnxt_recv_pkts_vec, "Vector Neon"}, 3025 #endif 3026 }; 3027 3028 static int 3029 bnxt_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 3030 struct rte_eth_burst_mode *mode) 3031 { 3032 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; 3033 size_t i; 3034 3035 for (i = 0; i < RTE_DIM(bnxt_rx_burst_info); i++) { 3036 if (pkt_burst == bnxt_rx_burst_info[i].pkt_burst) { 3037 snprintf(mode->info, sizeof(mode->info), "%s", 3038 bnxt_rx_burst_info[i].info); 3039 return 0; 3040 } 3041 } 3042 3043 return -EINVAL; 3044 } 3045 3046 static const struct { 3047 eth_tx_burst_t pkt_burst; 3048 const char *info; 3049 } bnxt_tx_burst_info[] = { 3050 {bnxt_xmit_pkts, "Scalar"}, 3051 #if defined(RTE_ARCH_X86) 3052 {bnxt_xmit_pkts_vec, "Vector SSE"}, 3053 {bnxt_xmit_pkts_vec_avx2, "Vector AVX2"}, 3054 #endif 3055 #if defined(RTE_ARCH_ARM64) 3056 {bnxt_xmit_pkts_vec, "Vector Neon"}, 3057 #endif 3058 }; 3059 3060 static int 3061 bnxt_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 3062 struct rte_eth_burst_mode *mode) 3063 { 3064 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst; 3065 size_t i; 3066 3067 for (i = 0; i < 
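/*
 * The lookup tables above let bnxt_rx/tx_burst_mode_get() report which
 * burst handler (scalar, SSE, AVX2 or Neon) is currently wired up, and
 * bnxt_fw_version_get() follows the usual ethdev convention of
 * returning the required buffer size (including the NUL byte) when the
 * caller's buffer is too small. Sketch of querying both from an
 * application:
 */
#if 0	/* illustrative sketch only */
	struct rte_eth_burst_mode bm;
	char fw_ver[64];

	if (rte_eth_rx_burst_mode_get(port_id, 0, &bm) == 0)
		printf("Rx burst mode: %s\n", bm.info);
	if (rte_eth_dev_fw_version_get(port_id, fw_ver, sizeof(fw_ver)) == 0)
		printf("FW version: %s\n", fw_ver);
#endif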
RTE_DIM(bnxt_tx_burst_info); i++) { 3068 if (pkt_burst == bnxt_tx_burst_info[i].pkt_burst) { 3069 snprintf(mode->info, sizeof(mode->info), "%s", 3070 bnxt_tx_burst_info[i].info); 3071 return 0; 3072 } 3073 } 3074 3075 return -EINVAL; 3076 } 3077 3078 int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) 3079 { 3080 struct bnxt *bp = eth_dev->data->dev_private; 3081 uint32_t rc = 0; 3082 3083 rc = is_bnxt_in_error(bp); 3084 if (rc) 3085 return rc; 3086 3087 /* Return if port is active */ 3088 if (eth_dev->data->dev_started) { 3089 PMD_DRV_LOG(ERR, "Stop port before changing MTU\n"); 3090 return -EBUSY; 3091 } 3092 3093 /* Exit if receive queues are not configured yet */ 3094 if (!eth_dev->data->nb_rx_queues) 3095 return -ENOTSUP; 3096 3097 /* Is there a change in mtu setting? */ 3098 if (eth_dev->data->mtu == new_mtu) 3099 return rc; 3100 3101 if (new_mtu > RTE_ETHER_MTU) 3102 bp->flags |= BNXT_FLAG_JUMBO; 3103 else 3104 bp->flags &= ~BNXT_FLAG_JUMBO; 3105 3106 rc = bnxt_vnic_mru_config(bp, new_mtu); 3107 if (rc) { 3108 PMD_DRV_LOG(ERR, "failed to update mtu in vnic context\n"); 3109 return rc; 3110 } 3111 3112 if (bnxt_hwrm_config_host_mtu(bp)) 3113 PMD_DRV_LOG(WARNING, "Failed to configure host MTU\n"); 3114 3115 PMD_DRV_LOG(INFO, "New MTU is %d\n", new_mtu); 3116 3117 return rc; 3118 } 3119 3120 static int 3121 bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on) 3122 { 3123 struct bnxt *bp = dev->data->dev_private; 3124 uint16_t vlan = bp->vlan; 3125 int rc; 3126 3127 rc = is_bnxt_in_error(bp); 3128 if (rc) 3129 return rc; 3130 3131 if (!BNXT_SINGLE_PF(bp)) { 3132 PMD_DRV_LOG(ERR, "PVID cannot be modified on VF or on shared PF\n"); 3133 return -ENOTSUP; 3134 } 3135 bp->vlan = on ? pvid : 0; 3136 3137 rc = bnxt_hwrm_set_default_vlan(bp, 0, 0); 3138 if (rc) 3139 bp->vlan = vlan; 3140 return rc; 3141 } 3142 3143 static int 3144 bnxt_dev_led_on_op(struct rte_eth_dev *dev) 3145 { 3146 struct bnxt *bp = dev->data->dev_private; 3147 int rc; 3148 3149 rc = is_bnxt_in_error(bp); 3150 if (rc) 3151 return rc; 3152 3153 return bnxt_hwrm_port_led_cfg(bp, true); 3154 } 3155 3156 static int 3157 bnxt_dev_led_off_op(struct rte_eth_dev *dev) 3158 { 3159 struct bnxt *bp = dev->data->dev_private; 3160 int rc; 3161 3162 rc = is_bnxt_in_error(bp); 3163 if (rc) 3164 return rc; 3165 3166 return bnxt_hwrm_port_led_cfg(bp, false); 3167 } 3168 3169 static uint32_t 3170 bnxt_rx_queue_count_op(void *rx_queue) 3171 { 3172 struct bnxt *bp; 3173 struct bnxt_cp_ring_info *cpr; 3174 uint32_t desc = 0, raw_cons, cp_ring_size; 3175 struct bnxt_rx_queue *rxq; 3176 struct rx_pkt_cmpl *rxcmp; 3177 int rc; 3178 3179 rxq = rx_queue; 3180 bp = rxq->bp; 3181 3182 rc = is_bnxt_in_error(bp); 3183 if (rc) 3184 return rc; 3185 3186 cpr = rxq->cp_ring; 3187 raw_cons = cpr->cp_raw_cons; 3188 cp_ring_size = cpr->cp_ring_struct->ring_size; 3189 3190 while (1) { 3191 uint32_t agg_cnt, cons, cmpl_type; 3192 3193 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 3194 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 3195 3196 if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size)) 3197 break; 3198 3199 cmpl_type = CMP_TYPE(rxcmp); 3200 3201 switch (cmpl_type) { 3202 case CMPL_BASE_TYPE_RX_L2: 3203 case CMPL_BASE_TYPE_RX_L2_V2: 3204 agg_cnt = BNXT_RX_L2_AGG_BUFS(rxcmp); 3205 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3206 desc++; 3207 break; 3208 3209 case CMPL_BASE_TYPE_RX_TPA_END: 3210 if (BNXT_CHIP_P5(rxq->bp)) { 3211 struct rx_tpa_v2_end_cmpl_hi *p5_tpa_end; 3212 3213 p5_tpa_end = (void 
*)rxcmp; 3214 agg_cnt = BNXT_TPA_END_AGG_BUFS_TH(p5_tpa_end); 3215 } else { 3216 struct rx_tpa_end_cmpl *tpa_end; 3217 3218 tpa_end = (void *)rxcmp; 3219 agg_cnt = BNXT_TPA_END_AGG_BUFS(tpa_end); 3220 } 3221 3222 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3223 desc++; 3224 break; 3225 3226 default: 3227 raw_cons += CMP_LEN(cmpl_type); 3228 } 3229 } 3230 3231 return desc; 3232 } 3233 3234 static int 3235 bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset) 3236 { 3237 struct bnxt_rx_queue *rxq = rx_queue; 3238 struct bnxt_cp_ring_info *cpr; 3239 struct bnxt_rx_ring_info *rxr; 3240 uint32_t desc, raw_cons, cp_ring_size; 3241 struct bnxt *bp = rxq->bp; 3242 struct rx_pkt_cmpl *rxcmp; 3243 int rc; 3244 3245 rc = is_bnxt_in_error(bp); 3246 if (rc) 3247 return rc; 3248 3249 if (offset >= rxq->nb_rx_desc) 3250 return -EINVAL; 3251 3252 rxr = rxq->rx_ring; 3253 cpr = rxq->cp_ring; 3254 cp_ring_size = cpr->cp_ring_struct->ring_size; 3255 3256 /* 3257 * For the vector receive case, the completion at the requested 3258 * offset can be indexed directly. 3259 */ 3260 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) 3261 if (bp->flags & BNXT_FLAG_RX_VECTOR_PKT_MODE) { 3262 struct rx_pkt_cmpl *rxcmp; 3263 uint32_t cons; 3264 3265 /* Check status of completion descriptor. */ 3266 raw_cons = cpr->cp_raw_cons + 3267 offset * CMP_LEN(CMPL_BASE_TYPE_RX_L2); 3268 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 3269 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 3270 3271 if (bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size)) 3272 return RTE_ETH_RX_DESC_DONE; 3273 3274 /* Check whether rx desc has an mbuf attached. */ 3275 cons = RING_CMP(rxr->rx_ring_struct, raw_cons / 2); 3276 if (cons >= rxq->rxrearm_start && 3277 cons < rxq->rxrearm_start + rxq->rxrearm_nb) { 3278 return RTE_ETH_RX_DESC_UNAVAIL; 3279 } 3280 3281 return RTE_ETH_RX_DESC_AVAIL; 3282 } 3283 #endif 3284 3285 /* 3286 * For the non-vector receive case, scan the completion ring to 3287 * locate the completion descriptor for the requested offset. 
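 * TPA (aggregated) completions occupy a variable number of entries,
 * which is why the scan below advances by CMP_LEN(cmpl_type) plus the
 * aggregation buffer count for each completion. Applications reach this
 * op through rte_eth_rx_descriptor_status(port_id, queue_id, offset),
 * which returns RTE_ETH_RX_DESC_DONE, _AVAIL or _UNAVAIL, while
 * rte_eth_rx_queue_count() is served by the similar completion-ring
 * walk in bnxt_rx_queue_count_op() above.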
3288 */ 3289 raw_cons = cpr->cp_raw_cons; 3290 desc = 0; 3291 while (1) { 3292 uint32_t agg_cnt, cons, cmpl_type; 3293 3294 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 3295 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 3296 3297 if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size)) 3298 break; 3299 3300 cmpl_type = CMP_TYPE(rxcmp); 3301 3302 switch (cmpl_type) { 3303 case CMPL_BASE_TYPE_RX_L2: 3304 case CMPL_BASE_TYPE_RX_L2_V2: 3305 if (desc == offset) { 3306 cons = rxcmp->opaque; 3307 if (rxr->rx_buf_ring[cons]) 3308 return RTE_ETH_RX_DESC_DONE; 3309 else 3310 return RTE_ETH_RX_DESC_UNAVAIL; 3311 } 3312 agg_cnt = BNXT_RX_L2_AGG_BUFS(rxcmp); 3313 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3314 desc++; 3315 break; 3316 3317 case CMPL_BASE_TYPE_RX_TPA_END: 3318 if (desc == offset) 3319 return RTE_ETH_RX_DESC_DONE; 3320 3321 if (BNXT_CHIP_P5(rxq->bp)) { 3322 struct rx_tpa_v2_end_cmpl_hi *p5_tpa_end; 3323 3324 p5_tpa_end = (void *)rxcmp; 3325 agg_cnt = BNXT_TPA_END_AGG_BUFS_TH(p5_tpa_end); 3326 } else { 3327 struct rx_tpa_end_cmpl *tpa_end; 3328 3329 tpa_end = (void *)rxcmp; 3330 agg_cnt = BNXT_TPA_END_AGG_BUFS(tpa_end); 3331 } 3332 3333 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3334 desc++; 3335 break; 3336 3337 default: 3338 raw_cons += CMP_LEN(cmpl_type); 3339 } 3340 } 3341 3342 return RTE_ETH_RX_DESC_AVAIL; 3343 } 3344 3345 static int 3346 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset) 3347 { 3348 struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue; 3349 struct bnxt_cp_ring_info *cpr = txq->cp_ring; 3350 uint32_t ring_mask, raw_cons, nb_tx_pkts = 0; 3351 struct cmpl_base *cp_desc_ring; 3352 int rc; 3353 3354 rc = is_bnxt_in_error(txq->bp); 3355 if (rc) 3356 return rc; 3357 3358 if (offset >= txq->nb_tx_desc) 3359 return -EINVAL; 3360 3361 /* Return "desc done" if descriptor is available for use. */ 3362 if (bnxt_tx_bds_in_hw(txq) <= offset) 3363 return RTE_ETH_TX_DESC_DONE; 3364 3365 raw_cons = cpr->cp_raw_cons; 3366 cp_desc_ring = cpr->cp_desc_ring; 3367 ring_mask = cpr->cp_ring_struct->ring_mask; 3368 3369 /* Check to see if hw has posted a completion for the descriptor. */ 3370 while (1) { 3371 struct tx_cmpl *txcmp; 3372 uint32_t cons; 3373 3374 cons = RING_CMPL(ring_mask, raw_cons); 3375 txcmp = (struct tx_cmpl *)&cp_desc_ring[cons]; 3376 3377 if (!bnxt_cpr_cmp_valid(txcmp, raw_cons, ring_mask + 1)) 3378 break; 3379 3380 if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2) 3381 nb_tx_pkts += rte_le_to_cpu_32(txcmp->opaque); 3382 3383 if (nb_tx_pkts > offset) 3384 return RTE_ETH_TX_DESC_DONE; 3385 3386 raw_cons = NEXT_RAW_CMP(raw_cons); 3387 } 3388 3389 /* Descriptor is pending transmit, not yet completed by hardware. */ 3390 return RTE_ETH_TX_DESC_FULL; 3391 } 3392 3393 int 3394 bnxt_flow_ops_get_op(struct rte_eth_dev *dev, 3395 const struct rte_flow_ops **ops) 3396 { 3397 struct bnxt *bp = dev->data->dev_private; 3398 int ret = 0; 3399 3400 if (!bp) 3401 return -EIO; 3402 3403 if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) { 3404 struct bnxt_representor *vfr = dev->data->dev_private; 3405 bp = vfr->parent_dev->data->dev_private; 3406 /* parent is deleted while children are still valid */ 3407 if (!bp) { 3408 PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR Error\n", 3409 dev->data->port_id); 3410 return -EIO; 3411 } 3412 } 3413 3414 ret = is_bnxt_in_error(bp); 3415 if (ret) 3416 return ret; 3417 3418 /* PMD supports thread-safe flow operations. rte_flow API 3419 * functions can avoid mutex for multi-thread safety. 
3420 */ 3421 dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE; 3422 3423 if (BNXT_TRUFLOW_EN(bp)) 3424 *ops = &bnxt_ulp_rte_flow_ops; 3425 else 3426 *ops = &bnxt_flow_ops; 3427 3428 return ret; 3429 } 3430 3431 static const uint32_t * 3432 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev) 3433 { 3434 static const uint32_t ptypes[] = { 3435 RTE_PTYPE_L2_ETHER_VLAN, 3436 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 3437 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 3438 RTE_PTYPE_L4_ICMP, 3439 RTE_PTYPE_L4_TCP, 3440 RTE_PTYPE_L4_UDP, 3441 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, 3442 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, 3443 RTE_PTYPE_INNER_L4_ICMP, 3444 RTE_PTYPE_INNER_L4_TCP, 3445 RTE_PTYPE_INNER_L4_UDP, 3446 RTE_PTYPE_UNKNOWN 3447 }; 3448 3449 if (!dev->rx_pkt_burst) 3450 return NULL; 3451 3452 return ptypes; 3453 } 3454 3455 static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count, 3456 int reg_win) 3457 { 3458 uint32_t reg_base = *reg_arr & 0xfffff000; 3459 uint32_t win_off; 3460 int i; 3461 3462 for (i = 0; i < count; i++) { 3463 if ((reg_arr[i] & 0xfffff000) != reg_base) 3464 return -ERANGE; 3465 } 3466 win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4; 3467 rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off); 3468 return 0; 3469 } 3470 3471 static int bnxt_map_ptp_regs(struct bnxt *bp) 3472 { 3473 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3474 uint32_t *reg_arr; 3475 int rc, i; 3476 3477 reg_arr = ptp->rx_regs; 3478 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5); 3479 if (rc) 3480 return rc; 3481 3482 reg_arr = ptp->tx_regs; 3483 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6); 3484 if (rc) 3485 return rc; 3486 3487 for (i = 0; i < BNXT_PTP_RX_REGS; i++) 3488 ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff); 3489 3490 for (i = 0; i < BNXT_PTP_TX_REGS; i++) 3491 ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff); 3492 3493 return 0; 3494 } 3495 3496 static void bnxt_unmap_ptp_regs(struct bnxt *bp) 3497 { 3498 rte_write32(0, (uint8_t *)bp->bar0 + 3499 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16); 3500 rte_write32(0, (uint8_t *)bp->bar0 + 3501 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20); 3502 } 3503 3504 static uint64_t bnxt_cc_read(struct bnxt *bp) 3505 { 3506 uint64_t ns; 3507 3508 ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3509 BNXT_GRCPF_REG_SYNC_TIME)); 3510 ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3511 BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32; 3512 return ns; 3513 } 3514 3515 static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts) 3516 { 3517 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3518 uint32_t fifo; 3519 3520 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3521 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 3522 if (fifo & BNXT_PTP_TX_FIFO_EMPTY) 3523 return -EAGAIN; 3524 3525 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3526 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 3527 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3528 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L])); 3529 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3530 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32; 3531 rte_read32((uint8_t *)bp->bar0 + ptp->tx_mapped_regs[BNXT_PTP_TX_SEQ]); 3532 3533 return 0; 3534 } 3535 3536 static int bnxt_clr_rx_ts(struct bnxt *bp, uint64_t *last_ts) 3537 { 3538 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3539 struct bnxt_pf_info *pf = bp->pf; 3540 uint16_t port_id; 3541 int i = 0; 3542 uint32_t fifo; 3543 3544 if (!ptp || (bp->flags & BNXT_FLAG_CHIP_P5)) 3545 return -EINVAL; 3546 3547 port_id = 
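/*
 * bnxt_map_regs() above programs a 4 KiB GRC window so that a register
 * whose GRC address is 'reg' becomes visible in BAR0 at
 * (window number * 0x1000) + (reg & 0xfff); all registers passed in one
 * call must therefore share the same 4 KiB page (reg & 0xfffff000).
 * For example, with the PTP Rx registers mapped through window 5 as
 * done in bnxt_map_ptp_regs():
 */
#if 0	/* illustrative sketch only */
	uint32_t reg = ptp->rx_regs[BNXT_PTP_RX_TS_L];
	uint32_t val = rte_read32((uint8_t *)bp->bar0 +
				  0x5000 + (reg & 0xfff));
#endif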
pf->port_id; 3548 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3549 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3550 while ((fifo & BNXT_PTP_RX_FIFO_PENDING) && (i < BNXT_PTP_RX_PND_CNT)) { 3551 rte_write32(1 << port_id, (uint8_t *)bp->bar0 + 3552 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]); 3553 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3554 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3555 *last_ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3556 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L])); 3557 *last_ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3558 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32; 3559 i++; 3560 } 3561 3562 if (i >= BNXT_PTP_RX_PND_CNT) 3563 return -EBUSY; 3564 3565 return 0; 3566 } 3567 3568 static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts) 3569 { 3570 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3571 struct bnxt_pf_info *pf = bp->pf; 3572 uint16_t port_id; 3573 uint32_t fifo; 3574 3575 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3576 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3577 if (!(fifo & BNXT_PTP_RX_FIFO_PENDING)) 3578 return -EAGAIN; 3579 3580 port_id = pf->port_id; 3581 rte_write32(1 << port_id, (uint8_t *)bp->bar0 + 3582 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]); 3583 3584 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3585 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3586 if (fifo & BNXT_PTP_RX_FIFO_PENDING) 3587 return bnxt_clr_rx_ts(bp, ts); 3588 3589 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3590 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L])); 3591 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3592 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32; 3593 3594 return 0; 3595 } 3596 3597 static int 3598 bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 3599 { 3600 uint64_t ns; 3601 struct bnxt *bp = dev->data->dev_private; 3602 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3603 3604 if (!ptp) 3605 return -ENOTSUP; 3606 3607 ns = rte_timespec_to_ns(ts); 3608 /* Set the timecounters to a new value. 
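 * Only the software timecounter bases (tc, rx_tstamp_tc and
 * tx_tstamp_tc) are moved here; the free-running hardware counter
 * itself is left untouched. Applications reach this op through
 * rte_eth_timesync_write_time(port_id, &ts).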
*/ 3609 ptp->tc.nsec = ns; 3610 ptp->tx_tstamp_tc.nsec = ns; 3611 ptp->rx_tstamp_tc.nsec = ns; 3612 3613 return 0; 3614 } 3615 3616 static int 3617 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 3618 { 3619 struct bnxt *bp = dev->data->dev_private; 3620 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3621 uint64_t ns, systime_cycles = 0; 3622 int rc = 0; 3623 3624 if (!ptp) 3625 return -ENOTSUP; 3626 3627 if (BNXT_CHIP_P5(bp)) 3628 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, 3629 &systime_cycles); 3630 else 3631 systime_cycles = bnxt_cc_read(bp); 3632 3633 ns = rte_timecounter_update(&ptp->tc, systime_cycles); 3634 *ts = rte_ns_to_timespec(ns); 3635 3636 return rc; 3637 } 3638 static int 3639 bnxt_timesync_enable(struct rte_eth_dev *dev) 3640 { 3641 struct bnxt *bp = dev->data->dev_private; 3642 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3643 uint32_t shift = 0; 3644 int rc; 3645 3646 if (!ptp) 3647 return -ENOTSUP; 3648 3649 ptp->rx_filter = 1; 3650 ptp->tx_tstamp_en = 1; 3651 ptp->filter_all = 1; 3652 ptp->rxctl = BNXT_PTP_MSG_EVENTS; 3653 3654 rc = bnxt_hwrm_ptp_cfg(bp); 3655 if (rc) 3656 return rc; 3657 3658 rte_spinlock_init(&ptp->ptp_lock); 3659 bp->ptp_all_rx_tstamp = 1; 3660 memset(&ptp->tc, 0, sizeof(struct rte_timecounter)); 3661 memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 3662 memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 3663 3664 ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3665 ptp->tc.cc_shift = shift; 3666 ptp->tc.nsec_mask = (1ULL << shift) - 1; 3667 3668 ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3669 ptp->rx_tstamp_tc.cc_shift = shift; 3670 ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 3671 3672 ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3673 ptp->tx_tstamp_tc.cc_shift = shift; 3674 ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 3675 3676 if (!BNXT_CHIP_P5(bp)) 3677 bnxt_map_ptp_regs(bp); 3678 else 3679 rc = bnxt_ptp_start(bp); 3680 3681 return rc; 3682 } 3683 3684 static int 3685 bnxt_timesync_disable(struct rte_eth_dev *dev) 3686 { 3687 struct bnxt *bp = dev->data->dev_private; 3688 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3689 3690 if (!ptp) 3691 return -ENOTSUP; 3692 3693 ptp->rx_filter = 0; 3694 ptp->tx_tstamp_en = 0; 3695 ptp->rxctl = 0; 3696 ptp->filter_all = 0; 3697 3698 bnxt_hwrm_ptp_cfg(bp); 3699 3700 bp->ptp_all_rx_tstamp = 0; 3701 if (!BNXT_CHIP_P5(bp)) 3702 bnxt_unmap_ptp_regs(bp); 3703 else 3704 bnxt_ptp_stop(bp); 3705 3706 return 0; 3707 } 3708 3709 static int 3710 bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 3711 struct timespec *timestamp, 3712 uint32_t flags __rte_unused) 3713 { 3714 struct bnxt *bp = dev->data->dev_private; 3715 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3716 uint64_t rx_tstamp_cycles = 0; 3717 uint64_t ns; 3718 3719 if (!ptp) 3720 return -ENOTSUP; 3721 3722 if (BNXT_CHIP_P5(bp)) 3723 rx_tstamp_cycles = ptp->rx_timestamp; 3724 else 3725 bnxt_get_rx_ts(bp, &rx_tstamp_cycles); 3726 3727 ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles); 3728 *timestamp = rte_ns_to_timespec(ns); 3729 return 0; 3730 } 3731 3732 static int 3733 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 3734 struct timespec *timestamp) 3735 { 3736 struct bnxt *bp = dev->data->dev_private; 3737 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3738 uint64_t tx_tstamp_cycles = 0; 3739 uint64_t ns; 3740 int rc = 0; 3741 3742 if (!ptp) 3743 return -ENOTSUP; 3744 3745 if (BNXT_CHIP_P5(bp)) 3746 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_PATH_TX, 3747 
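/*
 * bnxt_timesync_enable() above programs the PTP Rx filter and resets
 * the timecounters before arming either the mapped PTP registers
 * (pre-P5) or the firmware-based path (P5). A rough application
 * sequence, assuming the Rx path marks PTP event frames with
 * RTE_MBUF_F_RX_IEEE1588_TMST as is the usual ethdev convention
 * (m is a received mbuf):
 */
#if 0	/* illustrative sketch only */
	struct timespec ts;

	rte_eth_timesync_enable(port_id);
	/* ... after rte_eth_rx_burst() returns a PTP event frame ... */
	if (m->ol_flags & RTE_MBUF_F_RX_IEEE1588_TMST)
		rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0);
#endif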
&tx_tstamp_cycles); 3748 else 3749 rc = bnxt_get_tx_ts(bp, &tx_tstamp_cycles); 3750 3751 ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles); 3752 *timestamp = rte_ns_to_timespec(ns); 3753 3754 return rc; 3755 } 3756 3757 static int 3758 bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 3759 { 3760 struct bnxt *bp = dev->data->dev_private; 3761 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3762 3763 if (!ptp) 3764 return -ENOTSUP; 3765 3766 ptp->tc.nsec += delta; 3767 ptp->tx_tstamp_tc.nsec += delta; 3768 ptp->rx_tstamp_tc.nsec += delta; 3769 3770 return 0; 3771 } 3772 3773 static int 3774 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev) 3775 { 3776 struct bnxt *bp = dev->data->dev_private; 3777 int rc; 3778 uint32_t dir_entries; 3779 uint32_t entry_length; 3780 3781 rc = is_bnxt_in_error(bp); 3782 if (rc) 3783 return rc; 3784 3785 PMD_DRV_LOG(INFO, PCI_PRI_FMT "\n", 3786 bp->pdev->addr.domain, bp->pdev->addr.bus, 3787 bp->pdev->addr.devid, bp->pdev->addr.function); 3788 3789 rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length); 3790 if (rc != 0) 3791 return rc; 3792 3793 return dir_entries * entry_length; 3794 } 3795 3796 static int 3797 bnxt_get_eeprom_op(struct rte_eth_dev *dev, 3798 struct rte_dev_eeprom_info *in_eeprom) 3799 { 3800 struct bnxt *bp = dev->data->dev_private; 3801 uint32_t index; 3802 uint32_t offset; 3803 int rc; 3804 3805 rc = is_bnxt_in_error(bp); 3806 if (rc) 3807 return rc; 3808 3809 PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n", 3810 bp->pdev->addr.domain, bp->pdev->addr.bus, 3811 bp->pdev->addr.devid, bp->pdev->addr.function, 3812 in_eeprom->offset, in_eeprom->length); 3813 3814 if (in_eeprom->offset == 0) /* special offset value to get directory */ 3815 return bnxt_get_nvram_directory(bp, in_eeprom->length, 3816 in_eeprom->data); 3817 3818 index = in_eeprom->offset >> 24; 3819 offset = in_eeprom->offset & 0xffffff; 3820 3821 if (index != 0) 3822 return bnxt_hwrm_get_nvram_item(bp, index - 1, offset, 3823 in_eeprom->length, in_eeprom->data); 3824 3825 return 0; 3826 } 3827 3828 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type) 3829 { 3830 switch (dir_type) { 3831 case BNX_DIR_TYPE_CHIMP_PATCH: 3832 case BNX_DIR_TYPE_BOOTCODE: 3833 case BNX_DIR_TYPE_BOOTCODE_2: 3834 case BNX_DIR_TYPE_APE_FW: 3835 case BNX_DIR_TYPE_APE_PATCH: 3836 case BNX_DIR_TYPE_KONG_FW: 3837 case BNX_DIR_TYPE_KONG_PATCH: 3838 case BNX_DIR_TYPE_BONO_FW: 3839 case BNX_DIR_TYPE_BONO_PATCH: 3840 /* FALLTHROUGH */ 3841 return true; 3842 } 3843 3844 return false; 3845 } 3846 3847 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type) 3848 { 3849 switch (dir_type) { 3850 case BNX_DIR_TYPE_AVS: 3851 case BNX_DIR_TYPE_EXP_ROM_MBA: 3852 case BNX_DIR_TYPE_PCIE: 3853 case BNX_DIR_TYPE_TSCF_UCODE: 3854 case BNX_DIR_TYPE_EXT_PHY: 3855 case BNX_DIR_TYPE_CCM: 3856 case BNX_DIR_TYPE_ISCSI_BOOT: 3857 case BNX_DIR_TYPE_ISCSI_BOOT_IPV6: 3858 case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6: 3859 /* FALLTHROUGH */ 3860 return true; 3861 } 3862 3863 return false; 3864 } 3865 3866 static bool bnxt_dir_type_is_executable(uint16_t dir_type) 3867 { 3868 return bnxt_dir_type_is_ape_bin_format(dir_type) || 3869 bnxt_dir_type_is_other_exec_format(dir_type); 3870 } 3871 3872 static int 3873 bnxt_set_eeprom_op(struct rte_eth_dev *dev, 3874 struct rte_dev_eeprom_info *in_eeprom) 3875 { 3876 struct bnxt *bp = dev->data->dev_private; 3877 uint8_t index, dir_op; 3878 uint16_t type, ext, ordinal, attr; 3879 int rc; 3880 3881 rc = is_bnxt_in_error(bp); 3882 if 
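/*
 * bnxt_get_eeprom_op() above encodes the NVM target in the generic
 * offset field: offset 0 returns the directory listing, otherwise the
 * top 8 bits carry a 1-based directory index and the low 24 bits the
 * byte offset within that item. A hedged application sketch reading the
 * directory and then 256 bytes of the first item:
 */
#if 0	/* illustrative sketch only */
	uint8_t buf[4096];
	struct rte_dev_eeprom_info ee = {
		.data = buf,
		.offset = 0,		/* 0 selects the NVM directory */
		.length = sizeof(buf),
	};

	rte_eth_dev_get_eeprom(port_id, &ee);

	ee.offset = 1u << 24;		/* item index 1, byte offset 0 */
	ee.length = 256;
	rte_eth_dev_get_eeprom(port_id, &ee);
#endif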
(rc) 3883 return rc; 3884 3885 PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n", 3886 bp->pdev->addr.domain, bp->pdev->addr.bus, 3887 bp->pdev->addr.devid, bp->pdev->addr.function, 3888 in_eeprom->offset, in_eeprom->length); 3889 3890 if (!BNXT_PF(bp)) { 3891 PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n"); 3892 return -EINVAL; 3893 } 3894 3895 type = in_eeprom->magic >> 16; 3896 3897 if (type == 0xffff) { /* special value for directory operations */ 3898 index = in_eeprom->magic & 0xff; 3899 dir_op = in_eeprom->magic >> 8; 3900 if (index == 0) 3901 return -EINVAL; 3902 switch (dir_op) { 3903 case 0x0e: /* erase */ 3904 if (in_eeprom->offset != ~in_eeprom->magic) 3905 return -EINVAL; 3906 return bnxt_hwrm_erase_nvram_directory(bp, index - 1); 3907 default: 3908 return -EINVAL; 3909 } 3910 } 3911 3912 /* Create or re-write an NVM item: */ 3913 if (bnxt_dir_type_is_executable(type) == true) 3914 return -EOPNOTSUPP; 3915 ext = in_eeprom->magic & 0xffff; 3916 ordinal = in_eeprom->offset >> 16; 3917 attr = in_eeprom->offset & 0xffff; 3918 3919 return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr, 3920 in_eeprom->data, in_eeprom->length); 3921 } 3922 3923 static int bnxt_get_module_info(struct rte_eth_dev *dev, 3924 struct rte_eth_dev_module_info *modinfo) 3925 { 3926 uint8_t module_info[SFF_DIAG_SUPPORT_OFFSET + 1]; 3927 struct bnxt *bp = dev->data->dev_private; 3928 int rc; 3929 3930 /* No point in going further if phy status indicates 3931 * module is not inserted or if it is powered down or 3932 * if it is of type 10GBase-T 3933 */ 3934 if (bp->link_info->module_status > 3935 HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG) { 3936 PMD_DRV_LOG(NOTICE, "Port %u : Module is not inserted or is powered down\n", 3937 dev->data->port_id); 3938 return -ENOTSUP; 3939 } 3940 3941 /* This feature is not supported in older firmware versions */ 3942 if (bp->hwrm_spec_code < 0x10202) { 3943 PMD_DRV_LOG(NOTICE, "Port %u : Feature is not supported in older firmware\n", 3944 dev->data->port_id); 3945 return -ENOTSUP; 3946 } 3947 3948 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 3949 SFF_DIAG_SUPPORT_OFFSET + 1, 3950 module_info); 3951 3952 if (rc) 3953 return rc; 3954 3955 switch (module_info[0]) { 3956 case SFF_MODULE_ID_SFP: 3957 modinfo->type = RTE_ETH_MODULE_SFF_8472; 3958 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; 3959 if (module_info[SFF_DIAG_SUPPORT_OFFSET] == 0) 3960 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_LEN; 3961 break; 3962 case SFF_MODULE_ID_QSFP: 3963 case SFF_MODULE_ID_QSFP_PLUS: 3964 modinfo->type = RTE_ETH_MODULE_SFF_8436; 3965 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_LEN; 3966 break; 3967 case SFF_MODULE_ID_QSFP28: 3968 modinfo->type = RTE_ETH_MODULE_SFF_8636; 3969 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; 3970 if (module_info[SFF8636_FLATMEM_OFFSET] & SFF8636_FLATMEM_MASK) 3971 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_LEN; 3972 break; 3973 default: 3974 PMD_DRV_LOG(NOTICE, "Port %u : Unsupported module\n", dev->data->port_id); 3975 return -ENOTSUP; 3976 } 3977 3978 PMD_DRV_LOG(INFO, "Port %u : modinfo->type = %d modinfo->eeprom_len = %d\n", 3979 dev->data->port_id, modinfo->type, modinfo->eeprom_len); 3980 3981 return 0; 3982 } 3983 3984 static int bnxt_get_module_eeprom(struct rte_eth_dev *dev, 3985 struct rte_dev_eeprom_info *info) 3986 { 3987 uint8_t pg_addr[5] = { I2C_DEV_ADDR_A0, I2C_DEV_ADDR_A0 }; 3988 uint32_t offset = info->offset, length = info->length; 3989 uint8_t 
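/*
 * bnxt_get_module_info() above derives the EEPROM layout (SFF-8472,
 * -8436 or -8636) from the module identifier byte, and
 * bnxt_get_module_eeprom() here then pages through the A0/A2 I2C
 * addresses. Typical application usage:
 */
#if 0	/* illustrative sketch only */
	struct rte_eth_dev_module_info minfo;
	uint8_t buf[RTE_ETH_MODULE_SFF_8636_MAX_LEN];
	struct rte_dev_eeprom_info ee = { .data = buf };

	if (rte_eth_dev_get_module_info(port_id, &minfo) == 0) {
		ee.offset = 0;
		ee.length = RTE_MIN(minfo.eeprom_len,
				    (uint32_t)sizeof(buf));
		rte_eth_dev_get_module_eeprom(port_id, &ee);
	}
#endif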
module_info[SFF_DIAG_SUPPORT_OFFSET + 1]; 3990 struct bnxt *bp = dev->data->dev_private; 3991 uint8_t *data = info->data; 3992 uint8_t page = offset >> 7; 3993 uint8_t max_pages = 2; 3994 uint8_t opt_pages; 3995 int rc; 3996 3997 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 3998 SFF_DIAG_SUPPORT_OFFSET + 1, 3999 module_info); 4000 if (rc) 4001 return rc; 4002 4003 switch (module_info[0]) { 4004 case SFF_MODULE_ID_SFP: 4005 module_info[SFF_DIAG_SUPPORT_OFFSET] = 0; 4006 if (module_info[SFF_DIAG_SUPPORT_OFFSET]) { 4007 pg_addr[2] = I2C_DEV_ADDR_A2; 4008 pg_addr[3] = I2C_DEV_ADDR_A2; 4009 max_pages = 4; 4010 } 4011 break; 4012 case SFF_MODULE_ID_QSFP28: 4013 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 4014 SFF8636_OPT_PAGES_OFFSET, 4015 1, &opt_pages); 4016 if (rc) 4017 return rc; 4018 4019 if (opt_pages & SFF8636_PAGE1_MASK) { 4020 pg_addr[2] = I2C_DEV_ADDR_A0; 4021 max_pages = 3; 4022 } 4023 if (opt_pages & SFF8636_PAGE2_MASK) { 4024 pg_addr[3] = I2C_DEV_ADDR_A0; 4025 max_pages = 4; 4026 } 4027 if (~module_info[SFF8636_FLATMEM_OFFSET] & SFF8636_FLATMEM_MASK) { 4028 pg_addr[4] = I2C_DEV_ADDR_A0; 4029 max_pages = 5; 4030 } 4031 break; 4032 default: 4033 break; 4034 } 4035 4036 memset(data, 0, length); 4037 4038 offset &= 0xff; 4039 while (length && page < max_pages) { 4040 uint8_t raw_page = page ? page - 1 : 0; 4041 uint16_t chunk; 4042 4043 if (pg_addr[page] == I2C_DEV_ADDR_A2) 4044 raw_page = 0; 4045 else if (page) 4046 offset |= 0x80; 4047 chunk = RTE_MIN(length, 256 - offset); 4048 4049 if (pg_addr[page]) { 4050 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, pg_addr[page], 4051 raw_page, offset, 4052 chunk, data); 4053 if (rc) 4054 return rc; 4055 } 4056 4057 data += chunk; 4058 length -= chunk; 4059 offset = 0; 4060 page += 1 + (chunk > 128); 4061 } 4062 4063 return length ? 
-EINVAL : 0; 4064 } 4065 4066 /* 4067 * Initialization 4068 */ 4069 4070 static const struct eth_dev_ops bnxt_dev_ops = { 4071 .dev_infos_get = bnxt_dev_info_get_op, 4072 .dev_close = bnxt_dev_close_op, 4073 .dev_configure = bnxt_dev_configure_op, 4074 .dev_start = bnxt_dev_start_op, 4075 .dev_stop = bnxt_dev_stop_op, 4076 .dev_set_link_up = bnxt_dev_set_link_up_op, 4077 .dev_set_link_down = bnxt_dev_set_link_down_op, 4078 .stats_get = bnxt_stats_get_op, 4079 .stats_reset = bnxt_stats_reset_op, 4080 .rx_queue_setup = bnxt_rx_queue_setup_op, 4081 .rx_queue_release = bnxt_rx_queue_release_op, 4082 .tx_queue_setup = bnxt_tx_queue_setup_op, 4083 .tx_queue_release = bnxt_tx_queue_release_op, 4084 .rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op, 4085 .rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op, 4086 .reta_update = bnxt_reta_update_op, 4087 .reta_query = bnxt_reta_query_op, 4088 .rss_hash_update = bnxt_rss_hash_update_op, 4089 .rss_hash_conf_get = bnxt_rss_hash_conf_get_op, 4090 .link_update = bnxt_link_update_op, 4091 .promiscuous_enable = bnxt_promiscuous_enable_op, 4092 .promiscuous_disable = bnxt_promiscuous_disable_op, 4093 .allmulticast_enable = bnxt_allmulticast_enable_op, 4094 .allmulticast_disable = bnxt_allmulticast_disable_op, 4095 .mac_addr_add = bnxt_mac_addr_add_op, 4096 .mac_addr_remove = bnxt_mac_addr_remove_op, 4097 .flow_ctrl_get = bnxt_flow_ctrl_get_op, 4098 .flow_ctrl_set = bnxt_flow_ctrl_set_op, 4099 .udp_tunnel_port_add = bnxt_udp_tunnel_port_add_op, 4100 .udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op, 4101 .vlan_filter_set = bnxt_vlan_filter_set_op, 4102 .vlan_offload_set = bnxt_vlan_offload_set_op, 4103 .vlan_tpid_set = bnxt_vlan_tpid_set_op, 4104 .vlan_pvid_set = bnxt_vlan_pvid_set_op, 4105 .mtu_set = bnxt_mtu_set_op, 4106 .mac_addr_set = bnxt_set_default_mac_addr_op, 4107 .xstats_get = bnxt_dev_xstats_get_op, 4108 .xstats_get_names = bnxt_dev_xstats_get_names_op, 4109 .xstats_reset = bnxt_dev_xstats_reset_op, 4110 .fw_version_get = bnxt_fw_version_get, 4111 .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op, 4112 .rxq_info_get = bnxt_rxq_info_get_op, 4113 .txq_info_get = bnxt_txq_info_get_op, 4114 .rx_burst_mode_get = bnxt_rx_burst_mode_get, 4115 .tx_burst_mode_get = bnxt_tx_burst_mode_get, 4116 .dev_led_on = bnxt_dev_led_on_op, 4117 .dev_led_off = bnxt_dev_led_off_op, 4118 .rx_queue_start = bnxt_rx_queue_start, 4119 .rx_queue_stop = bnxt_rx_queue_stop, 4120 .tx_queue_start = bnxt_tx_queue_start, 4121 .tx_queue_stop = bnxt_tx_queue_stop, 4122 .flow_ops_get = bnxt_flow_ops_get_op, 4123 .dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op, 4124 .get_eeprom_length = bnxt_get_eeprom_length_op, 4125 .get_eeprom = bnxt_get_eeprom_op, 4126 .set_eeprom = bnxt_set_eeprom_op, 4127 .get_module_info = bnxt_get_module_info, 4128 .get_module_eeprom = bnxt_get_module_eeprom, 4129 .timesync_enable = bnxt_timesync_enable, 4130 .timesync_disable = bnxt_timesync_disable, 4131 .timesync_read_time = bnxt_timesync_read_time, 4132 .timesync_write_time = bnxt_timesync_write_time, 4133 .timesync_adjust_time = bnxt_timesync_adjust_time, 4134 .timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp, 4135 .timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp, 4136 .mtr_ops_get = bnxt_flow_meter_ops_get, 4137 }; 4138 4139 static uint32_t bnxt_map_reset_regs(struct bnxt *bp, uint32_t reg) 4140 { 4141 uint32_t offset; 4142 4143 /* Only pre-map the reset GRC registers using window 3 */ 4144 rte_write32(reg & 0xfffff000, (uint8_t *)bp->bar0 + 4145 
BNXT_GRCPF_REG_WINDOW_BASE_OUT + 8); 4146 4147 offset = BNXT_GRCP_WINDOW_3_BASE + (reg & 0xffc); 4148 4149 return offset; 4150 } 4151 4152 int bnxt_map_fw_health_status_regs(struct bnxt *bp) 4153 { 4154 struct bnxt_error_recovery_info *info = bp->recovery_info; 4155 uint32_t reg_base = 0xffffffff; 4156 int i; 4157 4158 /* Only pre-map the monitoring GRC registers using window 2 */ 4159 for (i = 0; i < BNXT_FW_STATUS_REG_CNT; i++) { 4160 uint32_t reg = info->status_regs[i]; 4161 4162 if (BNXT_FW_STATUS_REG_TYPE(reg) != BNXT_FW_STATUS_REG_TYPE_GRC) 4163 continue; 4164 4165 if (reg_base == 0xffffffff) 4166 reg_base = reg & 0xfffff000; 4167 if ((reg & 0xfffff000) != reg_base) 4168 return -ERANGE; 4169 4170 /* Use mask 0xffc as the Lower 2 bits indicates 4171 * address space location 4172 */ 4173 info->mapped_status_regs[i] = BNXT_GRCP_WINDOW_2_BASE + 4174 (reg & 0xffc); 4175 } 4176 4177 if (reg_base == 0xffffffff) 4178 return 0; 4179 4180 rte_write32(reg_base, (uint8_t *)bp->bar0 + 4181 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 4182 4183 return 0; 4184 } 4185 4186 static void bnxt_write_fw_reset_reg(struct bnxt *bp, uint32_t index) 4187 { 4188 struct bnxt_error_recovery_info *info = bp->recovery_info; 4189 uint32_t delay = info->delay_after_reset[index]; 4190 uint32_t val = info->reset_reg_val[index]; 4191 uint32_t reg = info->reset_reg[index]; 4192 uint32_t type, offset; 4193 int ret; 4194 4195 type = BNXT_FW_STATUS_REG_TYPE(reg); 4196 offset = BNXT_FW_STATUS_REG_OFF(reg); 4197 4198 switch (type) { 4199 case BNXT_FW_STATUS_REG_TYPE_CFG: 4200 ret = rte_pci_write_config(bp->pdev, &val, sizeof(val), offset); 4201 if (ret < 0) { 4202 PMD_DRV_LOG(ERR, "Failed to write %#x at PCI offset %#x", 4203 val, offset); 4204 return; 4205 } 4206 break; 4207 case BNXT_FW_STATUS_REG_TYPE_GRC: 4208 offset = bnxt_map_reset_regs(bp, offset); 4209 rte_write32(val, (uint8_t *)bp->bar0 + offset); 4210 break; 4211 case BNXT_FW_STATUS_REG_TYPE_BAR0: 4212 rte_write32(val, (uint8_t *)bp->bar0 + offset); 4213 break; 4214 } 4215 /* wait on a specific interval of time until core reset is complete */ 4216 if (delay) 4217 rte_delay_ms(delay); 4218 } 4219 4220 static void bnxt_dev_cleanup(struct bnxt *bp) 4221 { 4222 bp->eth_dev->data->dev_link.link_status = 0; 4223 bp->link_info->link_up = 0; 4224 if (bp->eth_dev->data->dev_started) 4225 bnxt_dev_stop(bp->eth_dev); 4226 4227 bnxt_uninit_resources(bp, true); 4228 } 4229 4230 static int 4231 bnxt_check_fw_reset_done(struct bnxt *bp) 4232 { 4233 int timeout = bp->fw_reset_max_msecs; 4234 uint16_t val = 0; 4235 int rc; 4236 4237 do { 4238 rc = rte_pci_read_config(bp->pdev, &val, sizeof(val), PCI_SUBSYSTEM_ID_OFFSET); 4239 if (rc < 0) { 4240 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", PCI_SUBSYSTEM_ID_OFFSET); 4241 return rc; 4242 } 4243 if (val != 0xffff) 4244 break; 4245 rte_delay_ms(1); 4246 } while (timeout--); 4247 4248 if (val == 0xffff) { 4249 PMD_DRV_LOG(ERR, "Firmware reset aborted, PCI config space invalid\n"); 4250 return -1; 4251 } 4252 4253 return 0; 4254 } 4255 4256 static int bnxt_restore_vlan_filters(struct bnxt *bp) 4257 { 4258 struct rte_eth_dev *dev = bp->eth_dev; 4259 struct rte_vlan_filter_conf *vfc; 4260 int vidx, vbit, rc; 4261 uint16_t vlan_id; 4262 4263 for (vlan_id = 1; vlan_id <= RTE_ETHER_MAX_VLAN_ID; vlan_id++) { 4264 vfc = &dev->data->vlan_filter_conf; 4265 vidx = vlan_id / 64; 4266 vbit = vlan_id % 64; 4267 4268 /* Each bit corresponds to a VLAN id */ 4269 if (vfc->ids[vidx] & (UINT64_C(1) << vbit)) { 4270 rc = bnxt_add_vlan_filter(bp, vlan_id); 
4271 if (rc) 4272 return rc; 4273 } 4274 } 4275 4276 return 0; 4277 } 4278 4279 static int bnxt_restore_mac_filters(struct bnxt *bp) 4280 { 4281 struct rte_eth_dev *dev = bp->eth_dev; 4282 struct rte_eth_dev_info dev_info; 4283 struct rte_ether_addr *addr; 4284 uint64_t pool_mask; 4285 uint32_t pool = 0; 4286 uint32_t i; 4287 int rc; 4288 4289 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 4290 return 0; 4291 4292 rc = bnxt_dev_info_get_op(dev, &dev_info); 4293 if (rc) 4294 return rc; 4295 4296 /* replay MAC address configuration */ 4297 for (i = 1; i < dev_info.max_mac_addrs; i++) { 4298 addr = &dev->data->mac_addrs[i]; 4299 4300 /* skip zero address */ 4301 if (rte_is_zero_ether_addr(addr)) 4302 continue; 4303 4304 pool = 0; 4305 pool_mask = dev->data->mac_pool_sel[i]; 4306 4307 do { 4308 if (pool_mask & 1ULL) { 4309 rc = bnxt_mac_addr_add_op(dev, addr, i, pool); 4310 if (rc) 4311 return rc; 4312 } 4313 pool_mask >>= 1; 4314 pool++; 4315 } while (pool_mask); 4316 } 4317 4318 return 0; 4319 } 4320 4321 static int bnxt_restore_mcast_mac_filters(struct bnxt *bp) 4322 { 4323 int ret = 0; 4324 4325 ret = bnxt_dev_set_mc_addr_list_op(bp->eth_dev, bp->mcast_addr_list, 4326 bp->nb_mc_addr); 4327 if (ret) 4328 PMD_DRV_LOG(ERR, "Failed to restore multicast MAC addresses\n"); 4329 4330 return ret; 4331 } 4332 4333 static int bnxt_restore_filters(struct bnxt *bp) 4334 { 4335 struct rte_eth_dev *dev = bp->eth_dev; 4336 int ret = 0; 4337 4338 if (dev->data->all_multicast) { 4339 ret = bnxt_allmulticast_enable_op(dev); 4340 if (ret) 4341 return ret; 4342 } 4343 if (dev->data->promiscuous) { 4344 ret = bnxt_promiscuous_enable_op(dev); 4345 if (ret) 4346 return ret; 4347 } 4348 4349 ret = bnxt_restore_mac_filters(bp); 4350 if (ret) 4351 return ret; 4352 4353 /* if VLANs are already programmed, this can fail with -EEXIST */ 4354 ret = bnxt_restore_vlan_filters(bp); 4355 if (ret && ret != -EEXIST) 4356 return ret; 4357 4358 ret = bnxt_restore_mcast_mac_filters(bp); 4359 if (ret) 4360 return ret; 4361 4362 return ret; 4363 } 4364 4365 static int bnxt_check_fw_ready(struct bnxt *bp) 4366 { 4367 int timeout = bp->fw_reset_max_msecs ?
: BNXT_MAX_FW_RESET_TIMEOUT; 4368 int rc = 0; 4369 4370 do { 4371 rc = bnxt_hwrm_poll_ver_get(bp); 4372 if (rc == 0) 4373 break; 4374 rte_delay_ms(BNXT_FW_READY_WAIT_INTERVAL); 4375 timeout -= BNXT_FW_READY_WAIT_INTERVAL; 4376 } while (rc && timeout > 0); 4377 4378 if (rc) 4379 PMD_DRV_LOG(ERR, "FW is not Ready after reset\n"); 4380 4381 return rc; 4382 } 4383 4384 static void bnxt_dev_recover(void *arg) 4385 { 4386 struct bnxt *bp = arg; 4387 int rc = 0; 4388 4389 pthread_mutex_lock(&bp->err_recovery_lock); 4390 4391 if (!bp->fw_reset_min_msecs) { 4392 rc = bnxt_check_fw_reset_done(bp); 4393 if (rc) 4394 goto err; 4395 } 4396 4397 /* Clear Error flag so that device re-init should happen */ 4398 bp->flags &= ~BNXT_FLAG_FATAL_ERROR; 4399 PMD_DRV_LOG(INFO, "Port: %u Starting recovery...\n", 4400 bp->eth_dev->data->port_id); 4401 4402 rc = bnxt_check_fw_ready(bp); 4403 if (rc) 4404 goto err; 4405 4406 rc = bnxt_init_resources(bp, true); 4407 if (rc) { 4408 PMD_DRV_LOG(ERR, 4409 "Failed to initialize resources after reset\n"); 4410 goto err; 4411 } 4412 /* clear reset flag as the device is initialized now */ 4413 bp->flags &= ~BNXT_FLAG_FW_RESET; 4414 4415 rc = bnxt_dev_start_op(bp->eth_dev); 4416 if (rc) { 4417 PMD_DRV_LOG(ERR, "Failed to start port after reset\n"); 4418 goto err_start; 4419 } 4420 4421 rc = bnxt_restore_filters(bp); 4422 if (rc) 4423 goto err_start; 4424 4425 rte_eth_fp_ops[bp->eth_dev->data->port_id].rx_pkt_burst = 4426 bp->eth_dev->rx_pkt_burst; 4427 rte_eth_fp_ops[bp->eth_dev->data->port_id].tx_pkt_burst = 4428 bp->eth_dev->tx_pkt_burst; 4429 rte_mb(); 4430 4431 PMD_DRV_LOG(INFO, "Port: %u Recovered from FW reset\n", 4432 bp->eth_dev->data->port_id); 4433 pthread_mutex_unlock(&bp->err_recovery_lock); 4434 rte_eth_dev_callback_process(bp->eth_dev, 4435 RTE_ETH_EVENT_RECOVERY_SUCCESS, 4436 NULL); 4437 return; 4438 err_start: 4439 bnxt_dev_stop(bp->eth_dev); 4440 err: 4441 bp->flags |= BNXT_FLAG_FATAL_ERROR; 4442 bnxt_uninit_resources(bp, false); 4443 rte_eth_dev_callback_process(bp->eth_dev, 4444 RTE_ETH_EVENT_RECOVERY_FAILED, 4445 NULL); 4446 if (bp->eth_dev->data->dev_conf.intr_conf.rmv) 4447 rte_eth_dev_callback_process(bp->eth_dev, 4448 RTE_ETH_EVENT_INTR_RMV, 4449 NULL); 4450 pthread_mutex_unlock(&bp->err_recovery_lock); 4451 PMD_DRV_LOG(ERR, "Port %u: Failed to recover from FW reset\n", 4452 bp->eth_dev->data->port_id); 4453 } 4454 4455 void bnxt_dev_reset_and_resume(void *arg) 4456 { 4457 struct bnxt *bp = arg; 4458 uint32_t us = US_PER_MS * bp->fw_reset_min_msecs; 4459 uint16_t val = 0; 4460 int rc; 4461 4462 bnxt_dev_cleanup(bp); 4463 PMD_DRV_LOG(INFO, "Port: %u Finished bnxt_dev_cleanup\n", 4464 bp->eth_dev->data->port_id); 4465 4466 bnxt_wait_for_device_shutdown(bp); 4467 4468 /* During some fatal firmware error conditions, the PCI config space 4469 * register 0x2e which normally contains the subsystem ID will become 4470 * 0xffff. This register will revert back to the normal value after 4471 * the chip has completed core reset. If we detect this condition, 4472 * we can poll this config register immediately for the value to revert. 
4473 */ 4474 if (bp->flags & BNXT_FLAG_FATAL_ERROR) { 4475 rc = rte_pci_read_config(bp->pdev, &val, sizeof(val), PCI_SUBSYSTEM_ID_OFFSET); 4476 if (rc < 0) { 4477 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", PCI_SUBSYSTEM_ID_OFFSET); 4478 return; 4479 } 4480 if (val == 0xffff) { 4481 bp->fw_reset_min_msecs = 0; 4482 us = 1; 4483 } 4484 } 4485 4486 rc = rte_eal_alarm_set(us, bnxt_dev_recover, (void *)bp); 4487 if (rc) 4488 PMD_DRV_LOG(ERR, "Port %u: Error setting recovery alarm", 4489 bp->eth_dev->data->port_id); 4490 } 4491 4492 uint32_t bnxt_read_fw_status_reg(struct bnxt *bp, uint32_t index) 4493 { 4494 struct bnxt_error_recovery_info *info = bp->recovery_info; 4495 uint32_t reg = info->status_regs[index]; 4496 uint32_t type, offset, val = 0; 4497 int ret = 0; 4498 4499 type = BNXT_FW_STATUS_REG_TYPE(reg); 4500 offset = BNXT_FW_STATUS_REG_OFF(reg); 4501 4502 switch (type) { 4503 case BNXT_FW_STATUS_REG_TYPE_CFG: 4504 ret = rte_pci_read_config(bp->pdev, &val, sizeof(val), offset); 4505 if (ret < 0) 4506 PMD_DRV_LOG(ERR, "Failed to read PCI offset %#x", 4507 offset); 4508 break; 4509 case BNXT_FW_STATUS_REG_TYPE_GRC: 4510 offset = info->mapped_status_regs[index]; 4511 /* FALLTHROUGH */ 4512 case BNXT_FW_STATUS_REG_TYPE_BAR0: 4513 val = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 4514 offset)); 4515 break; 4516 } 4517 4518 return val; 4519 } 4520 4521 static int bnxt_fw_reset_all(struct bnxt *bp) 4522 { 4523 struct bnxt_error_recovery_info *info = bp->recovery_info; 4524 uint32_t i; 4525 int rc = 0; 4526 4527 if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) { 4528 /* Reset through primary function driver */ 4529 for (i = 0; i < info->reg_array_cnt; i++) 4530 bnxt_write_fw_reset_reg(bp, i); 4531 /* Wait for time specified by FW after triggering reset */ 4532 rte_delay_ms(info->primary_func_wait_period_after_reset); 4533 } else if (info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) { 4534 /* Reset with the help of Kong processor */ 4535 rc = bnxt_hwrm_fw_reset(bp); 4536 if (rc) 4537 PMD_DRV_LOG(ERR, "Failed to reset FW\n"); 4538 } 4539 4540 return rc; 4541 } 4542 4543 static void bnxt_fw_reset_cb(void *arg) 4544 { 4545 struct bnxt *bp = arg; 4546 struct bnxt_error_recovery_info *info = bp->recovery_info; 4547 int rc = 0; 4548 4549 /* Only Primary function can do FW reset */ 4550 if (bnxt_is_primary_func(bp) && 4551 bnxt_is_recovery_enabled(bp)) { 4552 rc = bnxt_fw_reset_all(bp); 4553 if (rc) { 4554 PMD_DRV_LOG(ERR, "Adapter recovery failed\n"); 4555 return; 4556 } 4557 } 4558 4559 /* if recovery method is ERROR_RECOVERY_CO_CPU, KONG will send 4560 * EXCEPTION_FATAL_ASYNC event to all the functions 4561 * (including MASTER FUNC). After receiving this Async, all the active 4562 * drivers should treat this case as FW initiated recovery 4563 */ 4564 if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) { 4565 bp->fw_reset_min_msecs = BNXT_MIN_FW_READY_TIMEOUT; 4566 bp->fw_reset_max_msecs = BNXT_MAX_FW_RESET_TIMEOUT; 4567 4568 /* To recover from error */ 4569 rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume, 4570 (void *)bp); 4571 } 4572 } 4573 4574 /* Driver should poll FW heartbeat, reset_counter with the frequency 4575 * advertised by FW in HWRM_ERROR_RECOVERY_QCFG. 4576 * When the driver detects heartbeat stop or change in reset_counter, 4577 * it has to trigger a reset to recover from the error condition. 4578 * A “primary function” is the function who will have the privilege to 4579 * initiate the chimp reset. 
The primary function will be elected by the 4580 * firmware and will be notified through async message. 4581 */ 4582 static void bnxt_check_fw_health(void *arg) 4583 { 4584 struct bnxt *bp = arg; 4585 struct bnxt_error_recovery_info *info = bp->recovery_info; 4586 uint32_t val = 0, wait_msec; 4587 4588 if (!info || !bnxt_is_recovery_enabled(bp) || 4589 is_bnxt_in_error(bp)) 4590 return; 4591 4592 val = bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG); 4593 if (val == info->last_heart_beat) 4594 goto reset; 4595 4596 info->last_heart_beat = val; 4597 4598 val = bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG); 4599 if (val != info->last_reset_counter) 4600 goto reset; 4601 4602 info->last_reset_counter = val; 4603 4604 rte_eal_alarm_set(US_PER_MS * info->driver_polling_freq, 4605 bnxt_check_fw_health, (void *)bp); 4606 4607 return; 4608 reset: 4609 /* Stop DMA to/from device */ 4610 bp->flags |= BNXT_FLAG_FATAL_ERROR; 4611 bp->flags |= BNXT_FLAG_FW_RESET; 4612 4613 bnxt_stop_rxtx(bp->eth_dev); 4614 4615 PMD_DRV_LOG(ERR, "Detected FW dead condition\n"); 4616 4617 rte_eth_dev_callback_process(bp->eth_dev, 4618 RTE_ETH_EVENT_ERR_RECOVERING, 4619 NULL); 4620 4621 if (bnxt_is_primary_func(bp)) 4622 wait_msec = info->primary_func_wait_period; 4623 else 4624 wait_msec = info->normal_func_wait_period; 4625 4626 rte_eal_alarm_set(US_PER_MS * wait_msec, 4627 bnxt_fw_reset_cb, (void *)bp); 4628 } 4629 4630 void bnxt_schedule_fw_health_check(struct bnxt *bp) 4631 { 4632 uint32_t polling_freq; 4633 4634 pthread_mutex_lock(&bp->health_check_lock); 4635 4636 if (!bnxt_is_recovery_enabled(bp)) 4637 goto done; 4638 4639 if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED) 4640 goto done; 4641 4642 polling_freq = bp->recovery_info->driver_polling_freq; 4643 4644 rte_eal_alarm_set(US_PER_MS * polling_freq, 4645 bnxt_check_fw_health, (void *)bp); 4646 bp->flags |= BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED; 4647 4648 done: 4649 pthread_mutex_unlock(&bp->health_check_lock); 4650 } 4651 4652 static void bnxt_cancel_fw_health_check(struct bnxt *bp) 4653 { 4654 rte_eal_alarm_cancel(bnxt_check_fw_health, (void *)bp); 4655 bp->flags &= ~BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED; 4656 } 4657 4658 static bool bnxt_vf_pciid(uint16_t device_id) 4659 { 4660 switch (device_id) { 4661 case BROADCOM_DEV_ID_57304_VF: 4662 case BROADCOM_DEV_ID_57406_VF: 4663 case BROADCOM_DEV_ID_5731X_VF: 4664 case BROADCOM_DEV_ID_5741X_VF: 4665 case BROADCOM_DEV_ID_57414_VF: 4666 case BROADCOM_DEV_ID_STRATUS_NIC_VF1: 4667 case BROADCOM_DEV_ID_STRATUS_NIC_VF2: 4668 case BROADCOM_DEV_ID_58802_VF: 4669 case BROADCOM_DEV_ID_57500_VF1: 4670 case BROADCOM_DEV_ID_57500_VF2: 4671 case BROADCOM_DEV_ID_58818_VF: 4672 /* FALLTHROUGH */ 4673 return true; 4674 default: 4675 return false; 4676 } 4677 } 4678 4679 /* Phase 5 device */ 4680 static bool bnxt_p5_device(uint16_t device_id) 4681 { 4682 switch (device_id) { 4683 case BROADCOM_DEV_ID_57508: 4684 case BROADCOM_DEV_ID_57504: 4685 case BROADCOM_DEV_ID_57502: 4686 case BROADCOM_DEV_ID_57508_MF1: 4687 case BROADCOM_DEV_ID_57504_MF1: 4688 case BROADCOM_DEV_ID_57502_MF1: 4689 case BROADCOM_DEV_ID_57508_MF2: 4690 case BROADCOM_DEV_ID_57504_MF2: 4691 case BROADCOM_DEV_ID_57502_MF2: 4692 case BROADCOM_DEV_ID_57500_VF1: 4693 case BROADCOM_DEV_ID_57500_VF2: 4694 case BROADCOM_DEV_ID_58812: 4695 case BROADCOM_DEV_ID_58814: 4696 case BROADCOM_DEV_ID_58818: 4697 case BROADCOM_DEV_ID_58818_VF: 4698 /* FALLTHROUGH */ 4699 return true; 4700 default: 4701 return false; 4702 } 4703 } 4704 4705 bool 
bnxt_stratus_device(struct bnxt *bp) 4706 { 4707 uint16_t device_id = bp->pdev->id.device_id; 4708 4709 switch (device_id) { 4710 case BROADCOM_DEV_ID_STRATUS_NIC: 4711 case BROADCOM_DEV_ID_STRATUS_NIC_VF1: 4712 case BROADCOM_DEV_ID_STRATUS_NIC_VF2: 4713 /* FALLTHROUGH */ 4714 return true; 4715 default: 4716 return false; 4717 } 4718 } 4719 4720 static int bnxt_map_pci_bars(struct rte_eth_dev *eth_dev) 4721 { 4722 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 4723 struct bnxt *bp = eth_dev->data->dev_private; 4724 4725 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 4726 bp->bar0 = (void *)pci_dev->mem_resource[0].addr; 4727 bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr; 4728 if (!bp->bar0 || !bp->doorbell_base) { 4729 PMD_DRV_LOG(ERR, "Unable to access Hardware\n"); 4730 return -ENODEV; 4731 } 4732 4733 bp->eth_dev = eth_dev; 4734 bp->pdev = pci_dev; 4735 4736 return 0; 4737 } 4738 4739 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, 4740 struct bnxt_ctx_pg_info *ctx_pg, 4741 uint32_t mem_size, 4742 const char *suffix, 4743 uint16_t idx) 4744 { 4745 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 4746 const struct rte_memzone *mz = NULL; 4747 char mz_name[RTE_MEMZONE_NAMESIZE]; 4748 rte_iova_t mz_phys_addr; 4749 uint64_t valid_bits = 0; 4750 uint32_t sz; 4751 int i; 4752 4753 if (!mem_size) 4754 return 0; 4755 4756 rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) / 4757 BNXT_PAGE_SIZE; 4758 rmem->page_size = BNXT_PAGE_SIZE; 4759 rmem->pg_arr = ctx_pg->ctx_pg_arr; 4760 rmem->dma_arr = ctx_pg->ctx_dma_arr; 4761 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; 4762 4763 valid_bits = PTU_PTE_VALID; 4764 4765 if (rmem->nr_pages > 1) { 4766 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 4767 "bnxt_ctx_pg_tbl%s_%x_%d", 4768 suffix, idx, bp->eth_dev->data->port_id); 4769 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4770 mz = rte_memzone_lookup(mz_name); 4771 if (!mz) { 4772 mz = rte_memzone_reserve_aligned(mz_name, 4773 rmem->nr_pages * 8, 4774 bp->eth_dev->device->numa_node, 4775 RTE_MEMZONE_2MB | 4776 RTE_MEMZONE_SIZE_HINT_ONLY | 4777 RTE_MEMZONE_IOVA_CONTIG, 4778 BNXT_PAGE_SIZE); 4779 if (mz == NULL) 4780 return -ENOMEM; 4781 } 4782 4783 memset(mz->addr, 0, mz->len); 4784 mz_phys_addr = mz->iova; 4785 4786 rmem->pg_tbl = mz->addr; 4787 rmem->pg_tbl_map = mz_phys_addr; 4788 rmem->pg_tbl_mz = mz; 4789 } 4790 4791 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d", 4792 suffix, idx, bp->eth_dev->data->port_id); 4793 mz = rte_memzone_lookup(mz_name); 4794 if (!mz) { 4795 mz = rte_memzone_reserve_aligned(mz_name, 4796 mem_size, 4797 bp->eth_dev->device->numa_node, 4798 RTE_MEMZONE_1GB | 4799 RTE_MEMZONE_SIZE_HINT_ONLY | 4800 RTE_MEMZONE_IOVA_CONTIG, 4801 BNXT_PAGE_SIZE); 4802 if (mz == NULL) 4803 return -ENOMEM; 4804 } 4805 4806 memset(mz->addr, 0, mz->len); 4807 mz_phys_addr = mz->iova; 4808 4809 for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) { 4810 rmem->pg_arr[i] = ((char *)mz->addr) + sz; 4811 rmem->dma_arr[i] = mz_phys_addr + sz; 4812 4813 if (rmem->nr_pages > 1) { 4814 if (i == rmem->nr_pages - 2 && 4815 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 4816 valid_bits |= PTU_PTE_NEXT_TO_LAST; 4817 else if (i == rmem->nr_pages - 1 && 4818 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 4819 valid_bits |= PTU_PTE_LAST; 4820 4821 rmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] | 4822 valid_bits); 4823 } 4824 } 4825 4826 rmem->mz = mz; 4827 if (rmem->vmem_size) 4828 rmem->vmem = (void **)mz->addr; 4829 rmem->dma_arr[0] = mz_phys_addr; 4830 
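/*
 * Illustration (hypothetical sizes, not taken from an actual HWRM response):
 * for a mem_size of three pages, pg_arr[0..2] now point at consecutive
 * BNXT_PAGE_SIZE chunks of the memzone, dma_arr[0..2] hold the matching
 * IOVAs, and, when more than one page is needed, pg_tbl[] carries the
 * little-endian page-table entries tagged with PTU_PTE_VALID so the
 * firmware can walk the backing store.
 */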
return 0; 4831 } 4832 4833 static void bnxt_free_ctx_mem(struct bnxt *bp) 4834 { 4835 int i; 4836 4837 if (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED)) 4838 return; 4839 4840 bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED; 4841 rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz); 4842 rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz); 4843 rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz); 4844 rte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz); 4845 rte_memzone_free(bp->ctx->stat_mem.ring_mem.mz); 4846 rte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz); 4847 rte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz); 4848 rte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz); 4849 rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz); 4850 rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz); 4851 4852 for (i = 0; i < bp->ctx->tqm_fp_rings_count + 1; i++) { 4853 if (bp->ctx->tqm_mem[i]) 4854 rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz); 4855 } 4856 4857 rte_free(bp->ctx); 4858 bp->ctx = NULL; 4859 } 4860 4861 #define bnxt_roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) 4862 4863 #define min_t(type, x, y) ({ \ 4864 type __min1 = (x); \ 4865 type __min2 = (y); \ 4866 __min1 < __min2 ? __min1 : __min2; }) 4867 4868 #define max_t(type, x, y) ({ \ 4869 type __max1 = (x); \ 4870 type __max2 = (y); \ 4871 __max1 > __max2 ? __max1 : __max2; }) 4872 4873 #define clamp_t(type, _x, min, max) min_t(type, max_t(type, _x, min), max) 4874 4875 int bnxt_alloc_ctx_mem(struct bnxt *bp) 4876 { 4877 struct bnxt_ctx_pg_info *ctx_pg; 4878 struct bnxt_ctx_mem_info *ctx; 4879 uint32_t mem_size, ena, entries; 4880 uint32_t entries_sp, min; 4881 int i, rc; 4882 4883 rc = bnxt_hwrm_func_backing_store_qcaps(bp); 4884 if (rc) { 4885 PMD_DRV_LOG(ERR, "Query context mem capability failed\n"); 4886 return rc; 4887 } 4888 ctx = bp->ctx; 4889 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) 4890 return 0; 4891 4892 ctx_pg = &ctx->qp_mem; 4893 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries; 4894 if (ctx->qp_entry_size) { 4895 mem_size = ctx->qp_entry_size * ctx_pg->entries; 4896 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0); 4897 if (rc) 4898 return rc; 4899 } 4900 4901 ctx_pg = &ctx->srq_mem; 4902 ctx_pg->entries = ctx->srq_max_l2_entries; 4903 if (ctx->srq_entry_size) { 4904 mem_size = ctx->srq_entry_size * ctx_pg->entries; 4905 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0); 4906 if (rc) 4907 return rc; 4908 } 4909 4910 ctx_pg = &ctx->cq_mem; 4911 ctx_pg->entries = ctx->cq_max_l2_entries; 4912 if (ctx->cq_entry_size) { 4913 mem_size = ctx->cq_entry_size * ctx_pg->entries; 4914 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0); 4915 if (rc) 4916 return rc; 4917 } 4918 4919 ctx_pg = &ctx->vnic_mem; 4920 ctx_pg->entries = ctx->vnic_max_vnic_entries + 4921 ctx->vnic_max_ring_table_entries; 4922 if (ctx->vnic_entry_size) { 4923 mem_size = ctx->vnic_entry_size * ctx_pg->entries; 4924 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0); 4925 if (rc) 4926 return rc; 4927 } 4928 4929 ctx_pg = &ctx->stat_mem; 4930 ctx_pg->entries = ctx->stat_max_entries; 4931 if (ctx->stat_entry_size) { 4932 mem_size = ctx->stat_entry_size * ctx_pg->entries; 4933 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0); 4934 if (rc) 4935 return rc; 4936 } 4937 4938 min = ctx->tqm_min_entries_per_ring; 4939 4940 entries_sp = ctx->qp_max_l2_entries + 4941 ctx->vnic_max_vnic_entries + 4942 2 * ctx->qp_min_qp1_entries + min; 4943 entries_sp = bnxt_roundup(entries_sp, 
ctx->tqm_entries_multiple); 4944 4945 entries = ctx->qp_max_l2_entries + ctx->qp_min_qp1_entries; 4946 entries = bnxt_roundup(entries, ctx->tqm_entries_multiple); 4947 entries = clamp_t(uint32_t, entries, min, 4948 ctx->tqm_max_entries_per_ring); 4949 for (i = 0, ena = 0; i < ctx->tqm_fp_rings_count + 1; i++) { 4950 /* i=0 is for TQM_SP. i=1 to i=8 applies to RING0 to RING7. 4951 * i > 8 is other ext rings. 4952 */ 4953 ctx_pg = ctx->tqm_mem[i]; 4954 ctx_pg->entries = i ? entries : entries_sp; 4955 if (ctx->tqm_entry_size) { 4956 mem_size = ctx->tqm_entry_size * ctx_pg->entries; 4957 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, 4958 "tqm_mem", i); 4959 if (rc) 4960 return rc; 4961 } 4962 if (i < BNXT_MAX_TQM_LEGACY_RINGS) 4963 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i; 4964 else 4965 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8; 4966 } 4967 4968 ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES; 4969 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); 4970 if (rc) 4971 PMD_DRV_LOG(ERR, 4972 "Failed to configure context mem: rc = %d\n", rc); 4973 else 4974 ctx->flags |= BNXT_CTX_FLAG_INITED; 4975 4976 return rc; 4977 } 4978 4979 static int bnxt_alloc_stats_mem(struct bnxt *bp) 4980 { 4981 struct rte_pci_device *pci_dev = bp->pdev; 4982 char mz_name[RTE_MEMZONE_NAMESIZE]; 4983 const struct rte_memzone *mz = NULL; 4984 uint32_t total_alloc_len; 4985 rte_iova_t mz_phys_addr; 4986 4987 if (pci_dev->id.device_id == BROADCOM_DEV_ID_NS2) 4988 return 0; 4989 4990 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 4991 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 4992 pci_dev->addr.bus, pci_dev->addr.devid, 4993 pci_dev->addr.function, "rx_port_stats"); 4994 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4995 mz = rte_memzone_lookup(mz_name); 4996 total_alloc_len = 4997 RTE_CACHE_LINE_ROUNDUP(sizeof(struct rx_port_stats) + 4998 sizeof(struct rx_port_stats_ext) + 512); 4999 if (!mz) { 5000 mz = rte_memzone_reserve(mz_name, total_alloc_len, 5001 SOCKET_ID_ANY, 5002 RTE_MEMZONE_2MB | 5003 RTE_MEMZONE_SIZE_HINT_ONLY | 5004 RTE_MEMZONE_IOVA_CONTIG); 5005 if (mz == NULL) 5006 return -ENOMEM; 5007 } 5008 memset(mz->addr, 0, mz->len); 5009 mz_phys_addr = mz->iova; 5010 5011 bp->rx_mem_zone = (const void *)mz; 5012 bp->hw_rx_port_stats = mz->addr; 5013 bp->hw_rx_port_stats_map = mz_phys_addr; 5014 5015 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 5016 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 5017 pci_dev->addr.bus, pci_dev->addr.devid, 5018 pci_dev->addr.function, "tx_port_stats"); 5019 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 5020 mz = rte_memzone_lookup(mz_name); 5021 total_alloc_len = 5022 RTE_CACHE_LINE_ROUNDUP(sizeof(struct tx_port_stats) + 5023 sizeof(struct tx_port_stats_ext) + 512); 5024 if (!mz) { 5025 mz = rte_memzone_reserve(mz_name, 5026 total_alloc_len, 5027 SOCKET_ID_ANY, 5028 RTE_MEMZONE_2MB | 5029 RTE_MEMZONE_SIZE_HINT_ONLY | 5030 RTE_MEMZONE_IOVA_CONTIG); 5031 if (mz == NULL) 5032 return -ENOMEM; 5033 } 5034 memset(mz->addr, 0, mz->len); 5035 mz_phys_addr = mz->iova; 5036 5037 bp->tx_mem_zone = (const void *)mz; 5038 bp->hw_tx_port_stats = mz->addr; 5039 bp->hw_tx_port_stats_map = mz_phys_addr; 5040 bp->flags |= BNXT_FLAG_PORT_STATS; 5041 5042 /* Display extended statistics if FW supports it */ 5043 if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 || 5044 bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0 || 5045 !(bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED)) 5046 return 0; 5047 5048 bp->hw_rx_port_stats_ext = (void *) 5049 ((uint8_t *)bp->hw_rx_port_stats + 5050 sizeof(struct 
rx_port_stats)); 5051 bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map + 5052 sizeof(struct rx_port_stats); 5053 bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS; 5054 5055 if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2 || 5056 bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED) { 5057 bp->hw_tx_port_stats_ext = (void *) 5058 ((uint8_t *)bp->hw_tx_port_stats + 5059 sizeof(struct tx_port_stats)); 5060 bp->hw_tx_port_stats_ext_map = 5061 bp->hw_tx_port_stats_map + 5062 sizeof(struct tx_port_stats); 5063 bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS; 5064 } 5065 5066 return 0; 5067 } 5068 5069 static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev) 5070 { 5071 struct bnxt *bp = eth_dev->data->dev_private; 5072 size_t max_mac_addr = RTE_MIN(bp->max_l2_ctx, RTE_ETH_NUM_RECEIVE_MAC_ADDR); 5073 int rc = 0; 5074 5075 if (bp->max_l2_ctx > RTE_ETH_NUM_RECEIVE_MAC_ADDR) 5076 PMD_DRV_LOG(INFO, "Max number of MAC addrs supported is %d, but will be limited to %d\n", 5077 bp->max_l2_ctx, RTE_ETH_NUM_RECEIVE_MAC_ADDR); 5078 5079 eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl", 5080 RTE_ETHER_ADDR_LEN * max_mac_addr, 5081 0); 5082 if (eth_dev->data->mac_addrs == NULL) { 5083 PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n"); 5084 return -ENOMEM; 5085 } 5086 5087 if (!BNXT_HAS_DFLT_MAC_SET(bp)) { 5088 if (BNXT_PF(bp)) 5089 return -EINVAL; 5090 5091 /* Generate a random MAC address, if none was assigned by PF */ 5092 PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n"); 5093 bnxt_eth_hw_addr_random(bp->mac_addr); 5094 PMD_DRV_LOG(INFO, 5095 "Assign random MAC:" RTE_ETHER_ADDR_PRT_FMT "\n", 5096 bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2], 5097 bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]); 5098 5099 rc = bnxt_hwrm_set_mac(bp); 5100 if (rc) 5101 return rc; 5102 } 5103 5104 /* Copy the permanent MAC from the FUNC_QCAPS response */ 5105 memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN); 5106 5107 /* 5108 * Allocate memory to hold multicast mac addresses added.
5109 * Used to restore them during reset recovery 5110 */ 5111 bp->mcast_addr_list = rte_zmalloc("bnxt_mcast_addr_tbl", 5112 sizeof(struct rte_ether_addr) * 5113 BNXT_MAX_MC_ADDRS, 0); 5114 if (bp->mcast_addr_list == NULL) { 5115 PMD_DRV_LOG(ERR, "Failed to allocate multicast addr table\n"); 5116 return -ENOMEM; 5117 } 5118 bp->mc_list_dma_addr = rte_malloc_virt2iova(bp->mcast_addr_list); 5119 if (bp->mc_list_dma_addr == RTE_BAD_IOVA) { 5120 PMD_DRV_LOG(ERR, "Fail to map mcast_addr_list to physical memory\n"); 5121 return -ENOMEM; 5122 } 5123 5124 return rc; 5125 } 5126 5127 static int bnxt_restore_dflt_mac(struct bnxt *bp) 5128 { 5129 int rc = 0; 5130 5131 /* MAC is already configured in FW */ 5132 if (BNXT_HAS_DFLT_MAC_SET(bp)) 5133 return 0; 5134 5135 /* Restore the old MAC configured */ 5136 rc = bnxt_hwrm_set_mac(bp); 5137 if (rc) 5138 PMD_DRV_LOG(ERR, "Failed to restore MAC address\n"); 5139 5140 return rc; 5141 } 5142 5143 static void bnxt_config_vf_req_fwd(struct bnxt *bp) 5144 { 5145 if (!BNXT_PF(bp)) 5146 return; 5147 5148 memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd)); 5149 5150 if (!(bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN)) 5151 BNXT_HWRM_CMD_TO_FORWARD(HWRM_PORT_PHY_QCFG); 5152 BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_CFG); 5153 BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_VF_CFG); 5154 BNXT_HWRM_CMD_TO_FORWARD(HWRM_CFA_L2_FILTER_ALLOC); 5155 BNXT_HWRM_CMD_TO_FORWARD(HWRM_OEM_CMD); 5156 } 5157 5158 static void bnxt_alloc_error_recovery_info(struct bnxt *bp) 5159 { 5160 struct bnxt_error_recovery_info *info = bp->recovery_info; 5161 5162 if (info) { 5163 if (!(bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS)) 5164 memset(info, 0, sizeof(*info)); 5165 return; 5166 } 5167 5168 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 5169 return; 5170 5171 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg", 5172 sizeof(*info), 0); 5173 if (!info) 5174 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 5175 5176 bp->recovery_info = info; 5177 } 5178 5179 static void bnxt_check_fw_status(struct bnxt *bp) 5180 { 5181 uint32_t fw_status; 5182 5183 if (!(bp->recovery_info && 5184 (bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS))) 5185 return; 5186 5187 fw_status = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG); 5188 if (fw_status != BNXT_FW_STATUS_HEALTHY) 5189 PMD_DRV_LOG(ERR, "Firmware not responding, status: %#x\n", 5190 fw_status); 5191 } 5192 5193 static int bnxt_map_hcomm_fw_status_reg(struct bnxt *bp) 5194 { 5195 struct bnxt_error_recovery_info *info = bp->recovery_info; 5196 uint32_t status_loc; 5197 uint32_t sig_ver; 5198 5199 rte_write32(HCOMM_STATUS_STRUCT_LOC, (uint8_t *)bp->bar0 + 5200 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 5201 sig_ver = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 5202 BNXT_GRCP_WINDOW_2_BASE + 5203 offsetof(struct hcomm_status, 5204 sig_ver))); 5205 /* If the signature is absent, then FW does not support this feature */ 5206 if ((sig_ver & HCOMM_STATUS_SIGNATURE_MASK) != 5207 HCOMM_STATUS_SIGNATURE_VAL) 5208 return 0; 5209 5210 if (!info) { 5211 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg", 5212 sizeof(*info), 0); 5213 if (!info) 5214 return -ENOMEM; 5215 bp->recovery_info = info; 5216 } else { 5217 memset(info, 0, sizeof(*info)); 5218 } 5219 5220 status_loc = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 5221 BNXT_GRCP_WINDOW_2_BASE + 5222 offsetof(struct hcomm_status, 5223 fw_status_loc))); 5224 5225 /* Only pre-map the FW health status GRC register */ 5226 if (BNXT_FW_STATUS_REG_TYPE(status_loc) != BNXT_FW_STATUS_REG_TYPE_GRC) 5227 return 0; 5228 5229 
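/*
 * The GRC location is split below: the low bits (BNXT_GRCP_OFFSET_MASK)
 * become the offset inside mapping window 2, while the page-aligned base
 * (BNXT_GRCP_BASE_MASK) is programmed into the window register. As a
 * hypothetical example, a status_loc of 0x31234 would later be read back
 * from BNXT_GRCP_WINDOW_2_BASE + 0x234 once 0x31000 is written to the
 * window base register.
 */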
info->status_regs[BNXT_FW_STATUS_REG] = status_loc; 5230 info->mapped_status_regs[BNXT_FW_STATUS_REG] = 5231 BNXT_GRCP_WINDOW_2_BASE + (status_loc & BNXT_GRCP_OFFSET_MASK); 5232 5233 rte_write32((status_loc & BNXT_GRCP_BASE_MASK), (uint8_t *)bp->bar0 + 5234 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 5235 5236 bp->fw_cap |= BNXT_FW_CAP_HCOMM_FW_STATUS; 5237 5238 return 0; 5239 } 5240 5241 /* This function gets the FW version along with the 5242 * capabilities(MAX and current) of the function, vnic, 5243 * error recovery, phy and other chip related info 5244 */ 5245 static int bnxt_get_config(struct bnxt *bp) 5246 { 5247 uint16_t mtu; 5248 int rc = 0; 5249 5250 bp->fw_cap = 0; 5251 5252 rc = bnxt_map_hcomm_fw_status_reg(bp); 5253 if (rc) 5254 return rc; 5255 5256 rc = bnxt_hwrm_ver_get(bp, DFLT_HWRM_CMD_TIMEOUT); 5257 if (rc) { 5258 bnxt_check_fw_status(bp); 5259 return rc; 5260 } 5261 5262 rc = bnxt_hwrm_func_reset(bp); 5263 if (rc) 5264 return -EIO; 5265 5266 rc = bnxt_hwrm_vnic_qcaps(bp); 5267 if (rc) 5268 return rc; 5269 5270 rc = bnxt_hwrm_queue_qportcfg(bp); 5271 if (rc) 5272 return rc; 5273 5274 /* Get the MAX capabilities for this function. 5275 * This function also allocates context memory for TQM rings and 5276 * informs the firmware about this allocated backing store memory. 5277 */ 5278 rc = bnxt_hwrm_func_qcaps(bp); 5279 if (rc) 5280 return rc; 5281 5282 rc = bnxt_hwrm_func_qcfg(bp, &mtu); 5283 if (rc) 5284 return rc; 5285 5286 bnxt_hwrm_port_mac_qcfg(bp); 5287 5288 bnxt_hwrm_parent_pf_qcfg(bp); 5289 5290 bnxt_hwrm_port_phy_qcaps(bp); 5291 5292 bnxt_alloc_error_recovery_info(bp); 5293 /* Get the adapter error recovery support info */ 5294 rc = bnxt_hwrm_error_recovery_qcfg(bp); 5295 if (rc) 5296 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 5297 5298 bnxt_hwrm_port_led_qcaps(bp); 5299 5300 return 0; 5301 } 5302 5303 static int 5304 bnxt_init_locks(struct bnxt *bp) 5305 { 5306 int err; 5307 5308 err = pthread_mutex_init(&bp->flow_lock, NULL); 5309 if (err) { 5310 PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n"); 5311 return err; 5312 } 5313 5314 err = pthread_mutex_init(&bp->def_cp_lock, NULL); 5315 if (err) { 5316 PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n"); 5317 return err; 5318 } 5319 5320 err = pthread_mutex_init(&bp->health_check_lock, NULL); 5321 if (err) { 5322 PMD_DRV_LOG(ERR, "Unable to initialize health_check_lock\n"); 5323 return err; 5324 } 5325 5326 err = pthread_mutex_init(&bp->err_recovery_lock, NULL); 5327 if (err) 5328 PMD_DRV_LOG(ERR, "Unable to initialize err_recovery_lock\n"); 5329 5330 return err; 5331 } 5332 5333 /* This should be called after we have queried trusted VF cap */ 5334 static int bnxt_alloc_switch_domain(struct bnxt *bp) 5335 { 5336 int rc = 0; 5337 5338 if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) { 5339 rc = rte_eth_switch_domain_alloc(&bp->switch_domain_id); 5340 if (rc) 5341 PMD_DRV_LOG(ERR, 5342 "Failed to alloc switch domain: %d\n", rc); 5343 else 5344 PMD_DRV_LOG(INFO, 5345 "Switch domain allocated %d\n", 5346 bp->switch_domain_id); 5347 } 5348 5349 return rc; 5350 } 5351 5352 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev) 5353 { 5354 int rc = 0; 5355 5356 if (reconfig_dev) { 5357 rc = bnxt_get_config(bp); 5358 if (rc) 5359 return rc; 5360 } 5361 5362 rc = bnxt_alloc_switch_domain(bp); 5363 if (rc) 5364 return rc; 5365 5366 if (!reconfig_dev) { 5367 rc = bnxt_setup_mac_addr(bp->eth_dev); 5368 if (rc) 5369 return rc; 5370 } else { 5371 rc = bnxt_restore_dflt_mac(bp); 5372 if (rc) 5373 return rc; 5374 } 5375 5376 
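/*
 * Populate the VF->PF HWRM command forwarding bitmap before registering
 * with the firmware; the driver registration request issued just below
 * carries this list of commands that VFs are allowed to have forwarded
 * to the PF.
 */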
bnxt_config_vf_req_fwd(bp); 5377 5378 rc = bnxt_hwrm_func_driver_register(bp); 5379 if (rc) { 5380 PMD_DRV_LOG(ERR, "Failed to register driver"); 5381 return -EBUSY; 5382 } 5383 5384 if (BNXT_PF(bp)) { 5385 if (bp->pdev->max_vfs) { 5386 rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs); 5387 if (rc) { 5388 PMD_DRV_LOG(ERR, "Failed to allocate VFs\n"); 5389 return rc; 5390 } 5391 } else { 5392 rc = bnxt_hwrm_allocate_pf_only(bp); 5393 if (rc) { 5394 PMD_DRV_LOG(ERR, 5395 "Failed to allocate PF resources"); 5396 return rc; 5397 } 5398 } 5399 } 5400 5401 if (!reconfig_dev) { 5402 bp->rss_conf.rss_key = rte_zmalloc("bnxt_rss_key", 5403 HW_HASH_KEY_SIZE, 0); 5404 if (bp->rss_conf.rss_key == NULL) { 5405 PMD_DRV_LOG(ERR, "port %u cannot allocate RSS hash key memory", 5406 bp->eth_dev->data->port_id); 5407 return -ENOMEM; 5408 } 5409 } 5410 5411 rc = bnxt_alloc_mem(bp, reconfig_dev); 5412 if (rc) 5413 return rc; 5414 5415 rc = bnxt_setup_int(bp); 5416 if (rc) 5417 return rc; 5418 5419 rc = bnxt_request_int(bp); 5420 if (rc) 5421 return rc; 5422 5423 rc = bnxt_init_ctx_mem(bp); 5424 if (rc) { 5425 PMD_DRV_LOG(ERR, "Failed to init adv_flow_counters\n"); 5426 return rc; 5427 } 5428 5429 return 0; 5430 } 5431 5432 static int 5433 bnxt_parse_devarg_flow_xstat(__rte_unused const char *key, 5434 const char *value, void *opaque_arg) 5435 { 5436 struct bnxt *bp = opaque_arg; 5437 unsigned long flow_xstat; 5438 char *end = NULL; 5439 5440 if (!value || !opaque_arg) { 5441 PMD_DRV_LOG(ERR, 5442 "Invalid parameter passed to flow_xstat devarg.\n"); 5443 return -EINVAL; 5444 } 5445 5446 flow_xstat = strtoul(value, &end, 10); 5447 if (end == NULL || *end != '\0' || 5448 (flow_xstat == ULONG_MAX && errno == ERANGE)) { 5449 PMD_DRV_LOG(ERR, 5450 "Invalid parameter passed to flow_xstat devarg.\n"); 5451 return -EINVAL; 5452 } 5453 5454 if (BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)) { 5455 PMD_DRV_LOG(ERR, 5456 "Invalid value passed to flow_xstat devarg.\n"); 5457 return -EINVAL; 5458 } 5459 5460 bp->flags |= BNXT_FLAG_FLOW_XSTATS_EN; 5461 if (BNXT_FLOW_XSTATS_EN(bp)) 5462 PMD_DRV_LOG(INFO, "flow_xstat feature enabled.\n"); 5463 5464 return 0; 5465 } 5466 5467 static int 5468 bnxt_parse_devarg_max_num_kflows(__rte_unused const char *key, 5469 const char *value, void *opaque_arg) 5470 { 5471 struct bnxt *bp = opaque_arg; 5472 unsigned long max_num_kflows; 5473 char *end = NULL; 5474 5475 if (!value || !opaque_arg) { 5476 PMD_DRV_LOG(ERR, 5477 "Invalid parameter passed to max_num_kflows devarg.\n"); 5478 return -EINVAL; 5479 } 5480 5481 max_num_kflows = strtoul(value, &end, 10); 5482 if (end == NULL || *end != '\0' || 5483 (max_num_kflows == ULONG_MAX && errno == ERANGE)) { 5484 PMD_DRV_LOG(ERR, 5485 "Invalid parameter passed to max_num_kflows devarg.\n"); 5486 return -EINVAL; 5487 } 5488 5489 if (bnxt_devarg_max_num_kflow_invalid(max_num_kflows)) { 5490 PMD_DRV_LOG(ERR, 5491 "Invalid value passed to max_num_kflows devarg.\n"); 5492 return -EINVAL; 5493 } 5494 5495 bp->max_num_kflows = max_num_kflows; 5496 if (bp->max_num_kflows) 5497 PMD_DRV_LOG(INFO, "max_num_kflows set as %ldK.\n", 5498 max_num_kflows); 5499 5500 return 0; 5501 } 5502 5503 static int 5504 bnxt_parse_devarg_app_id(__rte_unused const char *key, 5505 const char *value, void *opaque_arg) 5506 { 5507 struct bnxt *bp = opaque_arg; 5508 unsigned long app_id; 5509 char *end = NULL; 5510 5511 if (!value || !opaque_arg) { 5512 PMD_DRV_LOG(ERR, 5513 "Invalid parameter passed to app-id " 5514 "devargs.\n"); 5515 return -EINVAL; 5516 } 5517 5518 app_id = 
strtoul(value, &end, 10); 5519 if (end == NULL || *end != '\0' || 5520 (app_id == ULONG_MAX && errno == ERANGE)) { 5521 PMD_DRV_LOG(ERR, 5522 "Invalid parameter passed to app_id " 5523 "devargs.\n"); 5524 return -EINVAL; 5525 } 5526 5527 if (BNXT_DEVARG_APP_ID_INVALID(app_id)) { 5528 PMD_DRV_LOG(ERR, "Invalid app-id(%d) devargs.\n", 5529 (uint16_t)app_id); 5530 return -EINVAL; 5531 } 5532 5533 bp->app_id = app_id; 5534 PMD_DRV_LOG(INFO, "app-id=%d feature enabled.\n", (uint16_t)app_id); 5535 5536 return 0; 5537 } 5538 5539 static int 5540 bnxt_parse_devarg_ieee_1588(__rte_unused const char *key, 5541 const char *value, void *opaque_arg) 5542 { 5543 struct bnxt *bp = opaque_arg; 5544 unsigned long ieee_1588; 5545 char *end = NULL; 5546 5547 if (!value || !opaque_arg) { 5548 PMD_DRV_LOG(ERR, 5549 "Invalid parameter passed to ieee-1588 " 5550 "devargs.\n"); 5551 return -EINVAL; 5552 } 5553 5554 ieee_1588 = strtoul(value, &end, 10); 5555 if (end == NULL || *end != '\0' || 5556 (ieee_1588 == ULONG_MAX && errno == ERANGE)) { 5557 PMD_DRV_LOG(ERR, 5558 "Invalid parameter passed to ieee_1588 " 5559 "devargs.\n"); 5560 return -EINVAL; 5561 } 5562 5563 if (BNXT_DEVARG_IEEE_1588_INVALID(ieee_1588)) { 5564 PMD_DRV_LOG(ERR, "Invalid ieee-1588(%d) devargs.\n", 5565 (uint16_t)ieee_1588); 5566 return -EINVAL; 5567 } 5568 5569 bp->ieee_1588 = ieee_1588; 5570 PMD_DRV_LOG(INFO, "ieee-1588=%d feature enabled.\n", (uint16_t)ieee_1588); 5571 5572 return 0; 5573 } 5574 5575 static int 5576 bnxt_parse_devarg_rep_is_pf(__rte_unused const char *key, 5577 const char *value, void *opaque_arg) 5578 { 5579 struct bnxt_representor *vfr_bp = opaque_arg; 5580 unsigned long rep_is_pf; 5581 char *end = NULL; 5582 5583 if (!value || !opaque_arg) { 5584 PMD_DRV_LOG(ERR, 5585 "Invalid parameter passed to rep_is_pf devargs.\n"); 5586 return -EINVAL; 5587 } 5588 5589 rep_is_pf = strtoul(value, &end, 10); 5590 if (end == NULL || *end != '\0' || 5591 (rep_is_pf == ULONG_MAX && errno == ERANGE)) { 5592 PMD_DRV_LOG(ERR, 5593 "Invalid parameter passed to rep_is_pf devargs.\n"); 5594 return -EINVAL; 5595 } 5596 5597 if (BNXT_DEVARG_REP_IS_PF_INVALID(rep_is_pf)) { 5598 PMD_DRV_LOG(ERR, 5599 "Invalid value passed to rep_is_pf devargs.\n"); 5600 return -EINVAL; 5601 } 5602 5603 vfr_bp->flags |= rep_is_pf; 5604 if (BNXT_REP_PF(vfr_bp)) 5605 PMD_DRV_LOG(INFO, "PF representor\n"); 5606 else 5607 PMD_DRV_LOG(INFO, "VF representor\n"); 5608 5609 return 0; 5610 } 5611 5612 static int 5613 bnxt_parse_devarg_rep_based_pf(__rte_unused const char *key, 5614 const char *value, void *opaque_arg) 5615 { 5616 struct bnxt_representor *vfr_bp = opaque_arg; 5617 unsigned long rep_based_pf; 5618 char *end = NULL; 5619 5620 if (!value || !opaque_arg) { 5621 PMD_DRV_LOG(ERR, 5622 "Invalid parameter passed to rep_based_pf " 5623 "devargs.\n"); 5624 return -EINVAL; 5625 } 5626 5627 rep_based_pf = strtoul(value, &end, 10); 5628 if (end == NULL || *end != '\0' || 5629 (rep_based_pf == ULONG_MAX && errno == ERANGE)) { 5630 PMD_DRV_LOG(ERR, 5631 "Invalid parameter passed to rep_based_pf " 5632 "devargs.\n"); 5633 return -EINVAL; 5634 } 5635 5636 if (BNXT_DEVARG_REP_BASED_PF_INVALID(rep_based_pf)) { 5637 PMD_DRV_LOG(ERR, 5638 "Invalid value passed to rep_based_pf devargs.\n"); 5639 return -EINVAL; 5640 } 5641 5642 vfr_bp->rep_based_pf = rep_based_pf; 5643 vfr_bp->flags |= BNXT_REP_BASED_PF_VALID; 5644 5645 PMD_DRV_LOG(INFO, "rep-based-pf = %d\n", vfr_bp->rep_based_pf); 5646 5647 return 0; 5648 } 5649 5650 static int 5651 bnxt_parse_devarg_rep_q_r2f(__rte_unused 
const char *key, 5652 const char *value, void *opaque_arg) 5653 { 5654 struct bnxt_representor *vfr_bp = opaque_arg; 5655 unsigned long rep_q_r2f; 5656 char *end = NULL; 5657 5658 if (!value || !opaque_arg) { 5659 PMD_DRV_LOG(ERR, 5660 "Invalid parameter passed to rep_q_r2f " 5661 "devargs.\n"); 5662 return -EINVAL; 5663 } 5664 5665 rep_q_r2f = strtoul(value, &end, 10); 5666 if (end == NULL || *end != '\0' || 5667 (rep_q_r2f == ULONG_MAX && errno == ERANGE)) { 5668 PMD_DRV_LOG(ERR, 5669 "Invalid parameter passed to rep_q_r2f " 5670 "devargs.\n"); 5671 return -EINVAL; 5672 } 5673 5674 if (BNXT_DEVARG_REP_Q_R2F_INVALID(rep_q_r2f)) { 5675 PMD_DRV_LOG(ERR, 5676 "Invalid value passed to rep_q_r2f devargs.\n"); 5677 return -EINVAL; 5678 } 5679 5680 vfr_bp->rep_q_r2f = rep_q_r2f; 5681 vfr_bp->flags |= BNXT_REP_Q_R2F_VALID; 5682 PMD_DRV_LOG(INFO, "rep-q-r2f = %d\n", vfr_bp->rep_q_r2f); 5683 5684 return 0; 5685 } 5686 5687 static int 5688 bnxt_parse_devarg_rep_q_f2r(__rte_unused const char *key, 5689 const char *value, void *opaque_arg) 5690 { 5691 struct bnxt_representor *vfr_bp = opaque_arg; 5692 unsigned long rep_q_f2r; 5693 char *end = NULL; 5694 5695 if (!value || !opaque_arg) { 5696 PMD_DRV_LOG(ERR, 5697 "Invalid parameter passed to rep_q_f2r " 5698 "devargs.\n"); 5699 return -EINVAL; 5700 } 5701 5702 rep_q_f2r = strtoul(value, &end, 10); 5703 if (end == NULL || *end != '\0' || 5704 (rep_q_f2r == ULONG_MAX && errno == ERANGE)) { 5705 PMD_DRV_LOG(ERR, 5706 "Invalid parameter passed to rep_q_f2r " 5707 "devargs.\n"); 5708 return -EINVAL; 5709 } 5710 5711 if (BNXT_DEVARG_REP_Q_F2R_INVALID(rep_q_f2r)) { 5712 PMD_DRV_LOG(ERR, 5713 "Invalid value passed to rep_q_f2r devargs.\n"); 5714 return -EINVAL; 5715 } 5716 5717 vfr_bp->rep_q_f2r = rep_q_f2r; 5718 vfr_bp->flags |= BNXT_REP_Q_F2R_VALID; 5719 PMD_DRV_LOG(INFO, "rep-q-f2r = %d\n", vfr_bp->rep_q_f2r); 5720 5721 return 0; 5722 } 5723 5724 static int 5725 bnxt_parse_devarg_rep_fc_r2f(__rte_unused const char *key, 5726 const char *value, void *opaque_arg) 5727 { 5728 struct bnxt_representor *vfr_bp = opaque_arg; 5729 unsigned long rep_fc_r2f; 5730 char *end = NULL; 5731 5732 if (!value || !opaque_arg) { 5733 PMD_DRV_LOG(ERR, 5734 "Invalid parameter passed to rep_fc_r2f " 5735 "devargs.\n"); 5736 return -EINVAL; 5737 } 5738 5739 rep_fc_r2f = strtoul(value, &end, 10); 5740 if (end == NULL || *end != '\0' || 5741 (rep_fc_r2f == ULONG_MAX && errno == ERANGE)) { 5742 PMD_DRV_LOG(ERR, 5743 "Invalid parameter passed to rep_fc_r2f " 5744 "devargs.\n"); 5745 return -EINVAL; 5746 } 5747 5748 if (BNXT_DEVARG_REP_FC_R2F_INVALID(rep_fc_r2f)) { 5749 PMD_DRV_LOG(ERR, 5750 "Invalid value passed to rep_fc_r2f devargs.\n"); 5751 return -EINVAL; 5752 } 5753 5754 vfr_bp->flags |= BNXT_REP_FC_R2F_VALID; 5755 vfr_bp->rep_fc_r2f = rep_fc_r2f; 5756 PMD_DRV_LOG(INFO, "rep-fc-r2f = %lu\n", rep_fc_r2f); 5757 5758 return 0; 5759 } 5760 5761 static int 5762 bnxt_parse_devarg_rep_fc_f2r(__rte_unused const char *key, 5763 const char *value, void *opaque_arg) 5764 { 5765 struct bnxt_representor *vfr_bp = opaque_arg; 5766 unsigned long rep_fc_f2r; 5767 char *end = NULL; 5768 5769 if (!value || !opaque_arg) { 5770 PMD_DRV_LOG(ERR, 5771 "Invalid parameter passed to rep_fc_f2r " 5772 "devargs.\n"); 5773 return -EINVAL; 5774 } 5775 5776 rep_fc_f2r = strtoul(value, &end, 10); 5777 if (end == NULL || *end != '\0' || 5778 (rep_fc_f2r == ULONG_MAX && errno == ERANGE)) { 5779 PMD_DRV_LOG(ERR, 5780 "Invalid parameter passed to rep_fc_f2r " 5781 "devargs.\n"); 5782 return -EINVAL; 5783 } 5784 
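/* rep-fc-f2r is effectively a boolean devarg: values other than 0 or 1 are rejected below. */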
5785 if (BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r)) { 5786 PMD_DRV_LOG(ERR, 5787 "Invalid value passed to rep_fc_f2r devargs.\n"); 5788 return -EINVAL; 5789 } 5790 5791 vfr_bp->flags |= BNXT_REP_FC_F2R_VALID; 5792 vfr_bp->rep_fc_f2r = rep_fc_f2r; 5793 PMD_DRV_LOG(INFO, "rep-fc-f2r = %lu\n", rep_fc_f2r); 5794 5795 return 0; 5796 } 5797 5798 static int 5799 bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs) 5800 { 5801 struct rte_kvargs *kvlist; 5802 int ret = 0; 5803 5804 if (devargs == NULL) 5805 return 0; 5806 5807 kvlist = rte_kvargs_parse(devargs->args, bnxt_dev_args); 5808 if (kvlist == NULL) 5809 return -EINVAL; 5810 5811 /* 5812 * Handler for "flow_xstat" devarg. 5813 * Invoked as for ex: "-a 0000:00:0d.0,flow_xstat=1" 5814 */ 5815 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_FLOW_XSTAT, 5816 bnxt_parse_devarg_flow_xstat, bp); 5817 if (ret) 5818 goto err; 5819 5820 /* 5821 * Handler for "max_num_kflows" devarg. 5822 * Invoked as for ex: "-a 000:00:0d.0,max_num_kflows=32" 5823 */ 5824 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_MAX_NUM_KFLOWS, 5825 bnxt_parse_devarg_max_num_kflows, bp); 5826 if (ret) 5827 goto err; 5828 5829 5830 /* 5831 * Handler for "app-id" devarg. 5832 * Invoked as for ex: "-a 000:00:0d.0,app-id=1" 5833 */ 5834 rte_kvargs_process(kvlist, BNXT_DEVARG_APP_ID, 5835 bnxt_parse_devarg_app_id, bp); 5836 5837 /* 5838 * Handler for "ieee-1588" devarg. 5839 * Invoked as for ex: "-a 000:00:0d.0,ieee-1588=1" 5840 */ 5841 rte_kvargs_process(kvlist, BNXT_DEVARG_IEEE_1588, 5842 bnxt_parse_devarg_ieee_1588, bp); 5843 err: 5844 rte_kvargs_free(kvlist); 5845 return ret; 5846 } 5847 5848 /* Allocate and initialize various fields in bnxt struct that 5849 * need to be allocated/destroyed only once in the lifetime of the driver 5850 */ 5851 static int bnxt_drv_init(struct rte_eth_dev *eth_dev) 5852 { 5853 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 5854 struct bnxt *bp = eth_dev->data->dev_private; 5855 int rc = 0; 5856 5857 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 5858 5859 if (bnxt_vf_pciid(pci_dev->id.device_id)) 5860 bp->flags |= BNXT_FLAG_VF; 5861 5862 if (bnxt_p5_device(pci_dev->id.device_id)) 5863 bp->flags |= BNXT_FLAG_CHIP_P5; 5864 5865 if (pci_dev->id.device_id == BROADCOM_DEV_ID_58802 || 5866 pci_dev->id.device_id == BROADCOM_DEV_ID_58804 || 5867 pci_dev->id.device_id == BROADCOM_DEV_ID_58808 || 5868 pci_dev->id.device_id == BROADCOM_DEV_ID_58802_VF) 5869 bp->flags |= BNXT_FLAG_STINGRAY; 5870 5871 rc = bnxt_map_pci_bars(eth_dev); 5872 if (rc) { 5873 PMD_DRV_LOG(ERR, 5874 "Failed to initialize board rc: %x\n", rc); 5875 return rc; 5876 } 5877 5878 rc = bnxt_alloc_pf_info(bp); 5879 if (rc) 5880 return rc; 5881 5882 rc = bnxt_alloc_link_info(bp); 5883 if (rc) 5884 return rc; 5885 5886 rc = bnxt_alloc_parent_info(bp); 5887 if (rc) 5888 return rc; 5889 5890 rc = bnxt_alloc_hwrm_resources(bp); 5891 if (rc) { 5892 PMD_DRV_LOG(ERR, 5893 "Failed to allocate response buffer rc: %x\n", rc); 5894 return rc; 5895 } 5896 rc = bnxt_alloc_leds_info(bp); 5897 if (rc) 5898 return rc; 5899 5900 rc = bnxt_alloc_cos_queues(bp); 5901 if (rc) 5902 return rc; 5903 5904 rc = bnxt_init_locks(bp); 5905 if (rc) 5906 return rc; 5907 5908 rc = bnxt_get_config(bp); 5909 if (rc) 5910 return rc; 5911 5912 if (BNXT_TRUFLOW_EN(bp)) { 5913 /* extra mbuf field is required to store CFA code from mark */ 5914 static const struct rte_mbuf_dynfield bnxt_cfa_code_dynfield_desc = { 5915 .name = RTE_PMD_BNXT_CFA_CODE_DYNFIELD_NAME, 5916 .size = sizeof(bnxt_cfa_code_dynfield_t),
5917 .align = __alignof__(bnxt_cfa_code_dynfield_t), 5918 }; 5919 bnxt_cfa_code_dynfield_offset = 5920 rte_mbuf_dynfield_register(&bnxt_cfa_code_dynfield_desc); 5921 if (bnxt_cfa_code_dynfield_offset < 0) { 5922 PMD_DRV_LOG(ERR, 5923 "Failed to register mbuf field for TruFlow mark\n"); 5924 return -rte_errno; 5925 } 5926 } 5927 5928 return rc; 5929 } 5930 5931 static int 5932 bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused) 5933 { 5934 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 5935 static int version_printed; 5936 struct bnxt *bp; 5937 int rc; 5938 5939 if (version_printed++ == 0) 5940 PMD_DRV_LOG(INFO, "%s\n", bnxt_version); 5941 5942 eth_dev->dev_ops = &bnxt_dev_ops; 5943 eth_dev->rx_queue_count = bnxt_rx_queue_count_op; 5944 eth_dev->rx_descriptor_status = bnxt_rx_descriptor_status_op; 5945 eth_dev->tx_descriptor_status = bnxt_tx_descriptor_status_op; 5946 eth_dev->rx_pkt_burst = &bnxt_recv_pkts; 5947 eth_dev->tx_pkt_burst = &bnxt_xmit_pkts; 5948 5949 /* 5950 * For secondary processes, we don't initialise any further 5951 * as primary has already done this work. 5952 */ 5953 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 5954 return 0; 5955 5956 rte_eth_copy_pci_info(eth_dev, pci_dev); 5957 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 5958 eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC; 5959 5960 bp = eth_dev->data->dev_private; 5961 5962 /* set the default app id */ 5963 bp->app_id = bnxt_ulp_default_app_id_get(); 5964 5965 /* Parse dev arguments passed on when starting the DPDK application. */ 5966 rc = bnxt_parse_dev_args(bp, pci_dev->device.devargs); 5967 if (rc) 5968 goto error_free; 5969 5970 rc = bnxt_drv_init(eth_dev); 5971 if (rc) 5972 goto error_free; 5973 5974 rc = bnxt_init_resources(bp, false); 5975 if (rc) 5976 goto error_free; 5977 5978 rc = bnxt_alloc_stats_mem(bp); 5979 if (rc) 5980 goto error_free; 5981 5982 PMD_DRV_LOG(INFO, 5983 "Found %s device at mem %" PRIX64 ", node addr %pM\n", 5984 DRV_MODULE_NAME, 5985 pci_dev->mem_resource[0].phys_addr, 5986 pci_dev->mem_resource[0].addr); 5987 5988 return 0; 5989 5990 error_free: 5991 bnxt_dev_uninit(eth_dev); 5992 return rc; 5993 } 5994 5995 5996 static void bnxt_free_ctx_mem_buf(struct bnxt_ctx_mem_buf_info *ctx) 5997 { 5998 if (!ctx) 5999 return; 6000 6001 if (ctx->va) 6002 rte_free(ctx->va); 6003 6004 ctx->va = NULL; 6005 ctx->dma = RTE_BAD_IOVA; 6006 ctx->ctx_id = BNXT_CTX_VAL_INVAL; 6007 } 6008 6009 static void bnxt_unregister_fc_ctx_mem(struct bnxt *bp) 6010 { 6011 bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX, 6012 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 6013 bp->flow_stat->rx_fc_out_tbl.ctx_id, 6014 bp->flow_stat->max_fc, 6015 false); 6016 6017 bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX, 6018 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 6019 bp->flow_stat->tx_fc_out_tbl.ctx_id, 6020 bp->flow_stat->max_fc, 6021 false); 6022 6023 if (bp->flow_stat->rx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 6024 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_in_tbl.ctx_id); 6025 bp->flow_stat->rx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 6026 6027 if (bp->flow_stat->rx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 6028 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_out_tbl.ctx_id); 6029 bp->flow_stat->rx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 6030 6031 if (bp->flow_stat->tx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 6032 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_in_tbl.ctx_id); 6033 bp->flow_stat->tx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 6034 6035 if (bp->flow_stat->tx_fc_out_tbl.ctx_id != 
BNXT_CTX_VAL_INVAL) 6036 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_out_tbl.ctx_id); 6037 bp->flow_stat->tx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 6038 } 6039 6040 static void bnxt_uninit_fc_ctx_mem(struct bnxt *bp) 6041 { 6042 bnxt_unregister_fc_ctx_mem(bp); 6043 6044 bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_in_tbl); 6045 bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_out_tbl); 6046 bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_in_tbl); 6047 bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_out_tbl); 6048 } 6049 6050 static void bnxt_uninit_ctx_mem(struct bnxt *bp) 6051 { 6052 if (BNXT_FLOW_XSTATS_EN(bp)) 6053 bnxt_uninit_fc_ctx_mem(bp); 6054 } 6055 6056 static void 6057 bnxt_free_error_recovery_info(struct bnxt *bp) 6058 { 6059 rte_free(bp->recovery_info); 6060 bp->recovery_info = NULL; 6061 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 6062 } 6063 6064 static int 6065 bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev) 6066 { 6067 int rc; 6068 6069 bnxt_free_int(bp); 6070 bnxt_free_mem(bp, reconfig_dev); 6071 6072 bnxt_hwrm_func_buf_unrgtr(bp); 6073 if (bp->pf != NULL) { 6074 rte_free(bp->pf->vf_req_buf); 6075 bp->pf->vf_req_buf = NULL; 6076 } 6077 6078 rc = bnxt_hwrm_func_driver_unregister(bp); 6079 bp->flags &= ~BNXT_FLAG_REGISTERED; 6080 bnxt_free_ctx_mem(bp); 6081 if (!reconfig_dev) { 6082 bnxt_free_hwrm_resources(bp); 6083 bnxt_free_error_recovery_info(bp); 6084 rte_free(bp->mcast_addr_list); 6085 bp->mcast_addr_list = NULL; 6086 rte_free(bp->rss_conf.rss_key); 6087 bp->rss_conf.rss_key = NULL; 6088 } 6089 6090 bnxt_uninit_ctx_mem(bp); 6091 6092 bnxt_free_flow_stats_info(bp); 6093 bnxt_free_switch_domain(bp); 6094 rte_free(bp->ptp_cfg); 6095 bp->ptp_cfg = NULL; 6096 return rc; 6097 } 6098 6099 static int 6100 bnxt_dev_uninit(struct rte_eth_dev *eth_dev) 6101 { 6102 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 6103 return -EPERM; 6104 6105 PMD_DRV_LOG(DEBUG, "Calling Device uninit\n"); 6106 6107 if (eth_dev->state != RTE_ETH_DEV_UNUSED) 6108 bnxt_dev_close_op(eth_dev); 6109 6110 return 0; 6111 } 6112 6113 static int bnxt_pci_remove_dev_with_reps(struct rte_eth_dev *eth_dev) 6114 { 6115 struct bnxt *bp = eth_dev->data->dev_private; 6116 struct rte_eth_dev *vf_rep_eth_dev; 6117 int ret = 0, i; 6118 6119 if (!bp) 6120 return -EINVAL; 6121 6122 for (i = 0; i < bp->num_reps; i++) { 6123 vf_rep_eth_dev = bp->rep_info[i].vfr_eth_dev; 6124 if (!vf_rep_eth_dev) 6125 continue; 6126 PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci remove\n", 6127 vf_rep_eth_dev->data->port_id); 6128 rte_eth_dev_destroy(vf_rep_eth_dev, bnxt_representor_uninit); 6129 } 6130 PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n", 6131 eth_dev->data->port_id); 6132 ret = rte_eth_dev_destroy(eth_dev, bnxt_dev_uninit); 6133 6134 return ret; 6135 } 6136 6137 static void bnxt_free_rep_info(struct bnxt *bp) 6138 { 6139 rte_free(bp->rep_info); 6140 bp->rep_info = NULL; 6141 rte_free(bp->cfa_code_map); 6142 bp->cfa_code_map = NULL; 6143 } 6144 6145 static int bnxt_init_rep_info(struct bnxt *bp) 6146 { 6147 int i = 0, rc; 6148 6149 if (bp->rep_info) 6150 return 0; 6151 6152 bp->rep_info = rte_zmalloc("bnxt_rep_info", 6153 sizeof(bp->rep_info[0]) * BNXT_MAX_VF_REPS(bp), 6154 0); 6155 if (!bp->rep_info) { 6156 PMD_DRV_LOG(ERR, "Failed to alloc memory for rep info\n"); 6157 return -ENOMEM; 6158 } 6159 bp->cfa_code_map = rte_zmalloc("bnxt_cfa_code_map", 6160 sizeof(*bp->cfa_code_map) * 6161 BNXT_MAX_CFA_CODE, 0); 6162 if (!bp->cfa_code_map) { 6163 PMD_DRV_LOG(ERR, "Failed to alloc memory for cfa_code_map\n"); 6164 bnxt_free_rep_info(bp); 
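/* cfa_code_map allocation failed, so release the rep_info table allocated above before bailing out */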
static int bnxt_init_rep_info(struct bnxt *bp)
{
	int i = 0, rc;

	if (bp->rep_info)
		return 0;

	bp->rep_info = rte_zmalloc("bnxt_rep_info",
				   sizeof(bp->rep_info[0]) * BNXT_MAX_VF_REPS(bp),
				   0);
	if (!bp->rep_info) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for rep info\n");
		return -ENOMEM;
	}
	bp->cfa_code_map = rte_zmalloc("bnxt_cfa_code_map",
				       sizeof(*bp->cfa_code_map) *
				       BNXT_MAX_CFA_CODE, 0);
	if (!bp->cfa_code_map) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for cfa_code_map\n");
		bnxt_free_rep_info(bp);
		return -ENOMEM;
	}

	for (i = 0; i < BNXT_MAX_CFA_CODE; i++)
		bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID;

	rc = pthread_mutex_init(&bp->rep_info->vfr_lock, NULL);
	if (rc) {
		PMD_DRV_LOG(ERR, "Unable to initialize vfr_lock\n");
		bnxt_free_rep_info(bp);
		return rc;
	}

	rc = pthread_mutex_init(&bp->rep_info->vfr_start_lock, NULL);
	if (rc) {
		PMD_DRV_LOG(ERR, "Unable to initialize vfr_start_lock\n");
		bnxt_free_rep_info(bp);
		return rc;
	}

	return rc;
}

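/*
 * Create the VF representor ports requested through the "representor"
 * devarg on a PF (or trusted VF) backing device. The per-representor
 * devargs (rep-is-pf, rep-based-pf, rep-q-*, rep-fc-*) are parsed below
 * and one ethdev is created per requested VF representor.
 */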
static int bnxt_rep_port_probe(struct rte_pci_device *pci_dev,
			       struct rte_eth_devargs *eth_da,
			       struct rte_eth_dev *backing_eth_dev,
			       const char *dev_args)
{
	struct rte_eth_dev *vf_rep_eth_dev;
	char name[RTE_ETH_NAME_MAX_LEN];
	struct bnxt *backing_bp = backing_eth_dev->data->dev_private;
	uint16_t max_vf_reps = BNXT_MAX_VF_REPS(backing_bp);

	uint16_t num_rep;
	int i, ret = 0;
	struct rte_kvargs *kvlist = NULL;

	if (eth_da->type == RTE_ETH_REPRESENTOR_NONE)
		return 0;
	if (eth_da->type != RTE_ETH_REPRESENTOR_VF) {
		PMD_DRV_LOG(ERR, "unsupported representor type %d\n",
			    eth_da->type);
		return -ENOTSUP;
	}
	num_rep = eth_da->nb_representor_ports;
	if (num_rep > max_vf_reps) {
		PMD_DRV_LOG(ERR, "nb_representor_ports = %d > %d MAX VF REPS\n",
			    num_rep, max_vf_reps);
		return -EINVAL;
	}

	if (num_rep >= RTE_MAX_ETHPORTS) {
		PMD_DRV_LOG(ERR,
			    "nb_representor_ports = %d > %d MAX ETHPORTS\n",
			    num_rep, RTE_MAX_ETHPORTS);
		return -EINVAL;
	}

	if (!(BNXT_PF(backing_bp) || BNXT_VF_IS_TRUSTED(backing_bp))) {
		PMD_DRV_LOG(ERR,
			    "Not a PF or trusted VF. No Representor support\n");
		/* Returning an error is not an option.
		 * Applications are not handling this correctly
		 */
		return 0;
	}

	if (bnxt_init_rep_info(backing_bp))
		return 0;

	for (i = 0; i < num_rep; i++) {
		struct bnxt_representor representor = {
			.vf_id = eth_da->representor_ports[i],
			.switch_domain_id = backing_bp->switch_domain_id,
			.parent_dev = backing_eth_dev
		};

		if (representor.vf_id >= max_vf_reps) {
			PMD_DRV_LOG(ERR, "VF-Rep id %d >= %d MAX VF ID\n",
				    representor.vf_id, max_vf_reps);
			continue;
		}

		/* representor port net_bdf_port */
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			 pci_dev->device.name, eth_da->representor_ports[i]);

		kvlist = rte_kvargs_parse(dev_args, bnxt_dev_args);
		if (kvlist) {
			/*
			 * Handler for the "rep-is-pf" devarg.
			 * Invoked, for example, as: "-a 000:00:0d.0,
			 * rep-based-pf=<pf index>,rep-is-pf=<VF=0 or PF=1>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_IS_PF,
						 bnxt_parse_devarg_rep_is_pf,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for the "rep-based-pf" devarg.
			 * Invoked, for example, as: "-a 000:00:0d.0,
			 * rep-based-pf=<pf index>,rep-is-pf=<VF=0 or PF=1>"
			 */
			ret = rte_kvargs_process(kvlist,
						 BNXT_DEVARG_REP_BASED_PF,
						 bnxt_parse_devarg_rep_based_pf,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for the "rep-q-r2f" devarg.
			 * Invoked, for example, as: "-a 000:00:0d.0,
			 * rep-q-r2f=<COS queue index>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_R2F,
						 bnxt_parse_devarg_rep_q_r2f,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for the "rep-q-f2r" devarg.
			 * Invoked, for example, as: "-a 000:00:0d.0,
			 * rep-q-f2r=<COS queue index>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_F2R,
						 bnxt_parse_devarg_rep_q_f2r,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for the "rep-fc-r2f" devarg.
			 * Invoked, for example, as: "-a 000:00:0d.0,
			 * rep-fc-r2f=<0 or 1>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_R2F,
						 bnxt_parse_devarg_rep_fc_r2f,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for the "rep-fc-f2r" devarg.
			 * Invoked, for example, as: "-a 000:00:0d.0,
			 * rep-fc-f2r=<0 or 1>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_F2R,
						 bnxt_parse_devarg_rep_fc_f2r,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
		}

		ret = rte_eth_dev_create(&pci_dev->device, name,
					 sizeof(struct bnxt_representor),
					 NULL, NULL,
					 bnxt_representor_init,
					 &representor);
		if (ret) {
			PMD_DRV_LOG(ERR, "failed to create bnxt vf "
				    "representor %s.", name);
			goto err;
		}

		vf_rep_eth_dev = rte_eth_dev_allocated(name);
		if (!vf_rep_eth_dev) {
			PMD_DRV_LOG(ERR, "Failed to find the eth_dev"
				    " for VF-Rep: %s.", name);
			ret = -ENODEV;
			goto err;
		}

		PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci probe\n",
			    backing_eth_dev->data->port_id);
		backing_bp->rep_info[representor.vf_id].vfr_eth_dev =
			vf_rep_eth_dev;
		backing_bp->num_reps++;
	}

	rte_kvargs_free(kvlist);
	return 0;

err:
	/* If num_rep > 1, then rollback already created
	 * ports, since we'll be failing the probe anyway
	 */
	if (num_rep > 1)
		bnxt_pci_remove_dev_with_reps(backing_eth_dev);
	rte_errno = -ret;
	rte_kvargs_free(kvlist);

	return ret;
}

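/*
 * PCI probe entry point for the bnxt PMD. On the first probe the backing
 * PF/trusted-VF ethdev is created; on a re-probe (RTE_PCI_DRV_PROBE_AGAIN,
 * e.g. when an application such as OVS-DPDK adds representors later) the
 * existing backing device is reused and only the requested representor
 * ports are probed.
 */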
PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci probe\n", 6407 backing_eth_dev->data->port_id); 6408 6409 if (!num_rep) 6410 return ret; 6411 6412 /* probe representor ports now */ 6413 ret = bnxt_rep_port_probe(pci_dev, ð_da, backing_eth_dev, 6414 pci_dev->device.devargs->args); 6415 6416 return ret; 6417 } 6418 6419 static int bnxt_pci_remove(struct rte_pci_device *pci_dev) 6420 { 6421 struct rte_eth_dev *eth_dev; 6422 6423 eth_dev = rte_eth_dev_allocated(pci_dev->device.name); 6424 if (!eth_dev) 6425 return 0; /* Invoked typically only by OVS-DPDK, by the 6426 * time it comes here the eth_dev is already 6427 * deleted by rte_eth_dev_close(), so returning 6428 * +ve value will at least help in proper cleanup 6429 */ 6430 6431 PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n", eth_dev->data->port_id); 6432 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 6433 if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) 6434 return rte_eth_dev_destroy(eth_dev, 6435 bnxt_representor_uninit); 6436 else 6437 return rte_eth_dev_destroy(eth_dev, 6438 bnxt_dev_uninit); 6439 } else { 6440 return rte_eth_dev_pci_generic_remove(pci_dev, NULL); 6441 } 6442 } 6443 6444 static struct rte_pci_driver bnxt_rte_pmd = { 6445 .id_table = bnxt_pci_id_map, 6446 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | 6447 RTE_PCI_DRV_INTR_RMV | 6448 RTE_PCI_DRV_PROBE_AGAIN, /* Needed in case of VF-REPs 6449 * and OVS-DPDK 6450 */ 6451 .probe = bnxt_pci_probe, 6452 .remove = bnxt_pci_remove, 6453 }; 6454 6455 static bool 6456 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv) 6457 { 6458 if (strcmp(dev->device->driver->name, drv->driver.name)) 6459 return false; 6460 6461 return true; 6462 } 6463 6464 bool is_bnxt_supported(struct rte_eth_dev *dev) 6465 { 6466 return is_device_supported(dev, &bnxt_rte_pmd); 6467 } 6468 6469 struct tf *bnxt_get_tfp_session(struct bnxt *bp, enum bnxt_session_type type) 6470 { 6471 return (type >= BNXT_SESSION_TYPE_LAST) ? 6472 &bp->tfp[BNXT_SESSION_TYPE_REGULAR] : &bp->tfp[type]; 6473 } 6474 6475 RTE_LOG_REGISTER_SUFFIX(bnxt_logtype_driver, driver, NOTICE); 6476 RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd); 6477 RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map); 6478 RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci"); 6479